From 90eafcd31a3e856e1306aeef1d3c143cb2e13cc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Csord=C3=A1s?= Date: Fri, 17 Sep 2021 10:18:20 +0200 Subject: [PATCH] [cli][server] Refactoring code for sarif support The basic idea behind these changes is that in this patch we created a general `Report` class which holds the information about bugs that is needed by the CodeChecker CLI and server. We introduced helper functions in this patch to convert analyzer result files (for now only plist) to Report objects. All the converters (html, gerrit, codeclimate, json, etc.) and report hash generation modules will work on these Report objects. This patch also tries to remove duplication between the different command line subcommands (parse, diff, store etc.). --- .github/workflows/test.yml | 22 +- .gitmessage | 1 - CONTRIBUTING.md | 2 +- Makefile | 26 +- analyzer/Makefile | 25 +- .../codechecker_analyzer/analysis_manager.py | 24 +- .../codechecker_analyzer/analyzer_context.py | 6 +- .../analyzers/clangsa/analyzer.py | 4 +- .../analyzers/clangsa/result_handler.py | 43 +- .../analyzers/clangtidy/analyzer.py | 5 +- .../analyzers/clangtidy/output_converter.py | 399 --------- .../analyzers/clangtidy/result_handler.py | 54 +- .../analyzers/result_handler_base.py | 9 +- .../buildlog/log_parser.py | 3 +- analyzer/codechecker_analyzer/cmd/analyze.py | 5 +- .../cmd/analyzer_version.py | 4 +- .../codechecker_analyzer/cmd/analyzers.py | 4 +- analyzer/codechecker_analyzer/cmd/check.py | 3 +- analyzer/codechecker_analyzer/cmd/checkers.py | 4 +- analyzer/codechecker_analyzer/cmd/fixit.py | 10 +- analyzer/codechecker_analyzer/cmd/parse.py | 822 +++--------------- .../suppress_file_handler.py | 2 +- .../codechecker_analyzer/suppress_handler.py | 15 +- analyzer/tests/functional/__init__.py | 1 + .../tests/functional/analyze/test_analyze.py | 18 +- .../test_analyze_and_parse.py | 69 +- .../test_files/compiler_error.output | 40 +- .../test_files/compiler_error_disabled.output | 8 +- 
.../compiler_warning_no_warn.output | 8 +- .../test_files/compiler_warning_simple.output | 40 +- .../compiler_warning_wno_group.output | 40 +- .../compiler_warning_wno_simple1.output | 40 +- .../compiler_warning_wno_simple2.output | 8 +- .../compiler_warning_wunused.output | 8 +- .../context_free_hash_clang_tidy.output | 40 +- .../context_free_hash_clangsa.output | 40 +- .../context_free_hash_v2_clang_tidy.output | 40 +- .../context_free_hash_v2_clangsa.output | 40 +- .../context_sensitive_hash_clang.output | 40 +- .../context_sensitive_hash_clang_tidy.output | 40 +- .../diagnostic_message_hash_clang_tidy.output | 40 +- .../diagnostic_message_hash_clangsa.output | 40 +- .../test_files/macros.output | 40 +- .../test_files/multi_error.en1.output | 40 +- .../test_files/multi_error.en2.output | 40 +- .../test_files/multi_error.en3.output | 40 +- .../test_files/multi_error.output | 43 +- .../test_files/multi_error.steps.output | 43 +- .../test_files/multi_error_skipped.output | 40 +- .../multi_error_skipped_in_cmd.output | 50 +- .../test_files/multi_error_suppress.output | 40 +- .../multi_error_suppress_cstyle.output | 34 +- .../multi_error_suppress_typo.output | 43 +- .../test_files/multiple_input.output | 45 +- .../test_files/nofail.output | 8 +- .../test_files/nofail.steps.output | 8 +- .../analyze_and_parse/test_files/notes.output | 40 +- .../saargs_forward.noforward.output | 8 +- .../test_files/saargs_forward.output | 40 +- .../test_files/simple1.deduplication.output | 40 +- .../test_files/simple1.output | 40 +- .../test_files/simple1.steps.output | 40 +- .../test_files/simple2.output | 40 +- .../test_files/simple2.steps.output | 40 +- .../test_files/source_code_comments.output | 40 +- .../source_code_comments_all.output | 40 +- ...urce_code_comments_all_empty_filter.output | 40 +- .../source_code_comments_confirmed.output | 40 +- ...source_code_comments_false_positive.output | 40 +- .../test_files/tidy_alias.output | 41 +- .../test_files/tidy_check.output | 40 +- 
analyzer/tests/unit/__init__.py | 6 +- analyzer/tests/unit/test_log_parser.py | 3 +- .../unit/test_remove_report_from_plist.py | 54 +- .../tests/unit/test_tidy_output_converter.py | 425 --------- .../unit/tidy_output_test_files/empty.plist | 10 - .../unit/tidy_output_test_files/tidy1.plist | 112 --- .../unit/tidy_output_test_files/tidy2.plist | 274 ------ .../unit/tidy_output_test_files/tidy3.plist | 422 --------- .../merge_clang_extdef_mappings/README.md | 2 +- analyzer/tools/statistics_collector/README.md | 2 +- codechecker_common/checker_labels.py | 8 +- codechecker_common/cmd_config.py | 3 +- codechecker_common/logger.py | 2 +- codechecker_common/output/json.py | 24 - codechecker_common/plist_parser.py | 375 -------- codechecker_common/report.py | 189 ---- .../requirements_py/dev/requirements.txt | 2 + codechecker_common/singleton.py | 13 +- codechecker_common/tests/Makefile | 16 + codechecker_common/tests/unit/Makefile | 27 - .../tests/unit/test_gerrit_converter.py | 314 ------- codechecker_common/util.py | 136 --- config/logger.conf | 4 + docs/tools/codechecker_report_hash.md | 59 -- docs/tools/plist_to_html.md | 45 - docs/tools/report-converter.md | 187 +++- .../requirements_py/dev/requirements.txt | 2 + tools/codechecker_report_hash/.gitignore | 3 - tools/codechecker_report_hash/.noserc | 13 - tools/codechecker_report_hash/.pylintrc | 377 -------- tools/codechecker_report_hash/.pypirc | 10 - tools/codechecker_report_hash/LICENSE.txt | 218 ----- tools/codechecker_report_hash/MANIFEST.in | 2 - tools/codechecker_report_hash/Makefile | 65 -- .../codechecker_report_hash/hash.py | 399 --------- .../requirements_py/dev/requirements.txt | 5 - tools/codechecker_report_hash/setup.py | 34 - tools/codechecker_report_hash/tests/Makefile | 47 - tools/plist_to_html/.gitignore | 4 - tools/plist_to_html/.noserc | 13 - tools/plist_to_html/.pypirc | 10 - tools/plist_to_html/LICENSE.txt | 218 ----- tools/plist_to_html/MANIFEST.in | 3 - tools/plist_to_html/Makefile | 115 --- 
.../plist_to_html/PlistToHtml.py | 697 --------------- .../requirements_py/dev/requirements.txt | 5 - tools/plist_to_html/setup.py | 39 - tools/plist_to_html/tests/Makefile | 50 -- tools/report-converter/.gitignore | 1 + tools/report-converter/MANIFEST.in | 3 + tools/report-converter/Makefile | 54 +- .../analyzer_result.py | 162 ---- .../analyzers}/__init__.py | 0 .../analyzers/analyzer_result.py | 162 ++++ .../analyzers/clang_tidy}/__init__.py | 0 .../analyzers/clang_tidy/analyzer_result.py | 26 + .../analyzers/clang_tidy/parser.py | 175 ++++ .../analyzers/coccinelle/__init__.py | 7 + .../analyzers/coccinelle/analyzer_result.py | 26 + .../coccinelle/parser.py} | 37 +- .../analyzers/cppcheck}/__init__.py | 0 .../analyzers/cppcheck/analyzer_result.py | 53 ++ .../analyzers/cpplint}/__init__.py | 0 .../analyzers/cpplint/analyzer_result.py | 26 + .../cpplint/parser.py} | 32 +- .../analyzers/eslint}/__init__.py | 0 .../analyzers/eslint/analyzer_result.py | 66 ++ .../golint}/__init__.py | 0 .../analyzers/golint/analyzer_result.py | 26 + .../golint/parser.py} | 37 +- .../infer}/__init__.py | 0 .../analyzers/infer/analyzer_result.py | 132 +++ .../kerneldoc}/__init__.py | 0 .../analyzers/kerneldoc/analyzer_result.py | 26 + .../kerneldoc/parser.py} | 39 +- .../markdownlint}/__init__.py | 0 .../analyzers/markdownlint/analyzer_result.py | 26 + .../markdownlint/parser.py} | 34 +- .../analyzers/parser.py | 83 ++ .../pyflakes}/__init__.py | 0 .../analyzers/pyflakes/analyzer_result.py | 26 + .../pyflakes/parser.py} | 40 +- .../{golint => analyzers/pylint}/__init__.py | 0 .../analyzers/pylint/analyzer_result.py | 64 ++ .../sanitizers}/__init__.py | 0 .../sanitizers/address}/__init__.py | 0 .../sanitizers/address/analyzer_result.py | 26 + .../analyzers/sanitizers/address/parser.py | 27 + .../sanitizers/leak}/__init__.py | 0 .../sanitizers/leak/analyzer_result.py | 27 + .../analyzers/sanitizers/leak/parser.py | 27 + .../sanitizers/memory}/__init__.py | 0 
.../sanitizers/memory/analyzer_result.py | 27 + .../analyzers/sanitizers/memory/parser.py | 29 + .../analyzers/sanitizers/parser.py | 146 ++++ .../sanitizers/thread}/__init__.py | 0 .../sanitizers/thread/analyzer_result.py | 26 + .../analyzers/sanitizers/thread/parser.py | 29 + .../sanitizers/ub}/__init__.py | 0 .../sanitizers/ub/analyzer_result.py | 26 + .../sanitizers/ub/parser.py} | 65 +- .../address => analyzers/smatch}/__init__.py | 0 .../analyzers/smatch/analyzer_result.py | 26 + .../smatch/parser.py} | 38 +- .../leak => analyzers/sparse}/__init__.py | 0 .../analyzers/sparse/analyzer_result.py | 26 + .../sparse/parser.py} | 63 +- .../memory => analyzers/sphinx}/__init__.py | 0 .../analyzers/sphinx/analyzer_result.py | 26 + .../sphinx/parser.py} | 38 +- .../thread => analyzers/spotbugs}/__init__.py | 0 .../spotbugs/analyzer_result.py} | 102 ++- .../ub => analyzers/tslint}/__init__.py | 0 .../analyzers/tslint/analyzer_result.py | 67 ++ .../clang_tidy/analyzer_result.py | 37 - .../clang_tidy/output_parser.py | 124 --- .../codechecker_report_converter/cli.py | 137 ++- .../coccinelle/analyzer_result.py | 36 - .../cppcheck/analyzer_result.py | 109 --- .../cpplint/analyzer_result.py | 36 - .../eslint/analyzer_result.py | 39 - .../eslint/output_parser.py | 59 -- .../golint/analyzer_result.py | 36 - .../infer/analyzer_result.py | 39 - .../infer/output_parser.py | 128 --- .../infer/plist_converter.py | 22 - .../kerneldoc/analyzer_result.py | 36 - .../markdownlint/analyzer_result.py | 34 - .../output_parser.py | 97 --- .../plist_converter.py | 157 ---- .../pyflakes/analyzer_result.py | 36 - .../pylint/analyzer_result.py | 39 - .../pylint/output_parser.py | 57 -- .../codechecker_report_converter/report.py | 72 -- .../report/__init__.py | 549 ++++++++++++ .../report/checker_labels.py | 10 +- .../report/hash.py | 206 +++++ .../{smatch => report/output}/__init__.py | 0 .../report}/output/baseline.py | 14 +- .../report}/output/codeclimate.py | 26 +- 
.../report}/output/gerrit.py | 46 +- .../output/html}/__init__.py | 0 .../report/output/html/cli.py | 88 ++ .../report/output/html/html.py | 512 +++++++++++ .../output/html}/static/css/buglist.css | 0 .../output/html}/static/css/bugview.css | 0 .../report/output/html}/static/css/icon.css | 0 .../output/html}/static/css/statistics.css | 0 .../report/output/html}/static/css/style.css | 0 .../report/output/html}/static/css/table.css | 0 .../report/output/html}/static/index.html | 0 .../output/html}/static/js/browsersupport.js | 0 .../report/output/html}/static/js/buglist.js | 0 .../output/html}/static/js/bugviewer.js | 30 +- .../report/output/html}/static/layout.html | 0 .../output/html}/static/statistics.html | 4 +- .../report/output/json.py | 23 + .../report/output/plaintext.py | 201 +++++ .../{sphinx => report/parser}/__init__.py | 0 .../report/parser/base.py | 78 ++ .../report/parser/plist.py | 685 +++++++++++++++ .../report/report_file.py | 110 +++ .../report/reports.py | 128 +++ .../report/statistics.py | 73 ++ .../sanitizers/address/analyzer_result.py | 37 - .../sanitizers/address/output_parser.py | 53 -- .../sanitizers/leak/analyzer_result.py | 37 - .../sanitizers/leak/output_parser.py | 55 -- .../sanitizers/memory/analyzer_result.py | 37 - .../sanitizers/memory/output_parser.py | 55 -- .../sanitizers/output_parser.py | 91 -- .../sanitizers/thread/analyzer_result.py | 37 - .../sanitizers/thread/output_parser.py | 53 -- .../sanitizers/ub/analyzer_result.py | 37 - .../smatch/analyzer_result.py | 36 - .../source_code_comment_handler.py | 107 ++- .../sparse/analyzer_result.py | 36 - .../sphinx/analyzer_result.py | 36 - .../spotbugs/analyzer_result.py | 33 - .../spotbugs/plist_converter.py | 22 - .../tslint/analyzer_result.py | 39 - .../tslint/output_parser.py | 60 -- .../codechecker_report_converter}/twodim.py | 73 +- .../codechecker_report_converter/util.py | 151 ++++ .../requirements_py/dev/requirements.txt | 3 + tools/report-converter/setup.py | 6 +- 
tools/report-converter/tests/Makefile | 23 +- .../spotbugs => tests/libtest}/__init__.py | 0 .../tests/libtest/env.py | 0 .../tests/projects/cpp/multi_error.cpp | 0 .../tests/projects/cpp/multi_error.plist | 0 .../tests/projects/macros/macros.cpp | 0 .../tests/projects/macros/macros.plist | 0 .../tests/projects/notes/notes.cpp | 0 .../tests/projects/notes/notes.plist | 0 .../tests/projects/simple/simple.cpp | 0 .../tests/projects/simple/simple.plist | 0 .../unit/analyzers}/__init__.py | 0 .../asan_output_test_files/asan.out | 0 .../asan_output_test_files/asan.plist | 4 - .../asan_output_test_files/files/asan.cpp | 0 .../files/sample.c | 0 .../sample.expected.plist | 0 .../coccinelle_output_test_files/sample.out | 0 .../divide_zero.expected.plist | 88 ++ .../files/divide_zero.cpp | 0 .../out}/divide_zero.plist | 2 +- .../cpplint_output_test_files/Makefile | 0 .../files/sample.cpp | 0 .../sample.expected.plist | 0 .../cpplint_output_test_files/sample.out | 0 .../eslint_output_test_files/Makefile | 0 .../eslint_output_test_files/files/.eslintrc | 0 .../eslint_output_test_files/files/index.js | 0 .../reports.expected.plist | 0 .../eslint_output_test_files/reports.json | 0 .../golint_output_test_files/Makefile | 0 .../golint_output_test_files/files/simple.go | 0 .../simple.expected.plist | 0 .../golint_output_test_files/simple.out | 0 .../NullDereference.java.plist | 172 ++-- .../dead_store.cpp.plist | 2 - .../files/NullDereference.java | 0 .../files/dead_store.cpp | 0 .../infer-out-dead_store/report.json | 0 .../infer-out-null_dereference/report.json | 0 .../files/sample.c | 0 .../sample.expected.plist | 0 .../kerneldoc_output_test_files/sample.out | 0 .../lsan_output_test_files/files/lsan.c | 0 .../lsan_output_test_files/lsan.out | 0 .../lsan_output_test_files/lsan.plist | 4 - .../mdl_output_test_files/Makefile | 0 .../mdl_output_test_files/files/readme.md | 0 .../readme.expected.plist | 0 .../mdl_output_test_files/readme.out | 0 
.../msan_output_test_files/files/msan.cpp | 0 .../msan_output_test_files/msan.out | 0 .../msan_output_test_files/msan.plist | 4 - .../pyflakes_output_test_files/Makefile | 0 .../files/simple.py | 0 .../simple.expected.plist | 0 .../pyflakes_output_test_files/simple.out | 0 .../pylint_output_test_files/Makefile | 0 .../pylint_output_test_files/files/simple.py | 0 .../simple.expected.plist | 0 .../pylint_output_test_files/simple.json | 0 .../smatch_output_test_files/files/sample.c | 0 .../sample.expected.plist | 0 .../smatch_output_test_files/sample.out | 0 .../sparse_output_test_files/Makefile | 0 .../sparse_output_test_files/files/sample.c | 0 .../sparse_output_test_files/files/sample.h | 0 .../sample.c.expected.plist | 0 .../sample.h.expected.plist | 47 - .../sparse_output_test_files/sample.out | 0 .../sphinx_output_test_files/files/sample.rst | 0 .../sample.expected.plist | 0 .../sphinx_output_test_files/sample.out | 0 .../spotbugs_output_test_files/Makefile | 0 .../spotbugs_output_test_files/assign.plist | 304 ++++--- .../spotbugs_output_test_files/assign.xml | 0 .../files/Assign.java | 0 .../tests/unit/analyzers/test_asan_parser.py | 70 ++ .../unit/analyzers/test_clang_tidy_parser.py | 104 +++ .../{ => analyzers}/test_coccinelle_parser.py | 17 +- .../{ => analyzers}/test_cppcheck_parser.py | 52 +- .../{ => analyzers}/test_cpplint_parser.py | 17 +- .../{ => analyzers}/test_eslint_parser.py | 17 +- .../{ => analyzers}/test_golint_parser.py | 17 +- .../unit/{ => analyzers}/test_infer_parser.py | 34 +- .../{ => analyzers}/test_kerneldoc_parser.py | 17 +- .../tests/unit/analyzers/test_lsan_parser.py | 73 ++ .../unit/{ => analyzers}/test_mdl_parser.py | 17 +- .../unit/{ => analyzers}/test_msan_parser.py | 50 +- .../{ => analyzers}/test_pyflakes_parser.py | 17 +- .../{ => analyzers}/test_pylint_parser.py | 17 +- .../{ => analyzers}/test_smatch_parser.py | 19 +- .../unit/analyzers/test_sparse_parser.py | 80 ++ .../{ => analyzers}/test_sphinx_parser.py | 18 +- .../{ => 
analyzers}/test_spotbugs_parser.py | 17 +- .../tests/unit/analyzers/test_tsan_parser.py | 72 ++ .../{ => analyzers}/test_tslint_parser.py | 17 +- .../tests/unit/analyzers/test_ubsan_parser.py | 95 ++ .../analyzers}/tidy_output_test_files/abs.out | 0 .../tidy_output_test_files/empty.plist | 0 .../tidy_output_test_files/empty1.out | 0 .../tidy_output_test_files/empty2.out | 0 .../tidy_output_test_files/files/Makefile | 0 .../tidy_output_test_files/files/test.cpp | 0 .../tidy_output_test_files/files/test2.cpp | 0 .../tidy_output_test_files/files/test3.cpp | 0 .../tidy_output_test_files/files/test3.hh | 0 .../tidy_output_test_files/files/test4.cpp | 0 .../tidy_output_test_files/tidy1.out | 0 .../tidy_output_test_files/tidy1.plist | 0 .../tidy_output_test_files/tidy1_v6.out | 0 .../tidy_output_test_files/tidy2.out | 0 .../tidy_output_test_files/tidy2.plist | 102 +-- .../tidy_output_test_files/tidy2_v6.out | 0 .../tidy_output_test_files/tidy3.out | 0 .../tidy_output_test_files/tidy3_cpp.plist | 0 .../tidy_output_test_files/tidy3_hh.plist | 248 +++--- .../tidy_output_test_files/tidy4.out | 0 .../tidy_output_test_files/tidy5.out | 0 .../tidy_output_test_files/tidy5_v6.out | 0 .../tidy_output_test_files/tidy6.out | 0 .../tsan_output_test_files/files/tsan.cpp | 0 .../tsan_output_test_files/tsan.out | 0 .../tsan_output_test_files/tsan.plist | 112 ++- .../tslint_output_test_files/Makefile | 0 .../tslint_output_test_files/files/index.ts | 0 .../files/tslint.json | 0 .../reports.expected.plist | 0 .../tslint_output_test_files/reports.json | 0 .../ubsan_output_test_files/Makefile | 0 .../ubsan_output_test_files/abs.out | 0 .../ubsan_output_test_files/empty.plist | 0 .../ubsan_output_test_files}/empty1.out | 0 .../ubsan_output_test_files}/empty2.out | 0 .../ubsan_output_test_files/ubsan1.cpp | 0 .../ubsan_output_test_files/ubsan1.out | 0 .../ubsan_output_test_files/ubsan1.plist | 0 .../ubsan_output_test_files/ubsan2.cpp | 0 .../ubsan_output_test_files/ubsan2.out | 0 
.../ubsan_output_test_files/ubsan2.plist | 0 .../divide_zero.expected.plist | 79 -- .../unit/output/__init__.py} | 12 - .../tests/unit/output/gerrit}/__init__.py | 0 .../unit/output/gerrit}/test_files/lib.cpp | 0 .../unit/output/gerrit}/test_files/main.cpp | 0 .../output/gerrit/test_gerrit_converter.py | 186 ++++ .../tests/unit/output/html/__init__.py | 39 + .../unit/output/html}/plist_to_html_test.py | 67 +- .../tests/unit/parser/__init__.py | 7 + .../tests/unit/parser/plist/__init__.py | 7 + .../plist_test_files/clang-3.7-noerror.plist | 0 .../plist}/plist_test_files/clang-3.7.plist | 0 .../plist_test_files/clang-3.8-trunk.plist | 0 .../plist}/plist_test_files/clang-4.0.plist | 0 .../plist_test_files/clang-5.0-trunk.plist | 0 .../parser/plist}/plist_test_files/empty_file | 0 .../gen_plist/gen_noerror_plist | 0 .../plist_test_files/gen_plist/gen_plist | 0 .../plist_test_files/gen_plist/gen_plist.md | 0 .../plist_test_files/gen_plist/noerror.cpp | 0 .../plist_test_files/gen_plist/test.cpp | 0 .../plist}/plist_test_files/gen_plist/test.h | 0 .../unit/parser/plist/test_plist_parser.py | 277 ++++++ .../parser/plist}/test_report_path_hash.py | 19 +- .../tests/unit/report_hash}/__init__.py | 2 +- .../codechecker_report_hash_test.py | 72 +- .../report_hash}/remove_whitespace_test.py | 2 +- .../unit/source_code_comment/__init__.py | 7 + .../test_file_1 | 0 .../test_file_2 | 0 .../test_file_3 | 0 .../test_source_code_comment.py | 772 ++++++++++++++++ .../tests/unit/test_asan_parser.py | 113 --- .../tests/unit/test_clang_tidy_parser.py | 400 --------- .../tests/unit/test_lsan_parser.py | 115 --- .../tests/unit/test_sparse_parser.py | 100 --- .../tests/unit/test_tsan_parser.py | 121 --- .../tests/unit/test_ubsan_parser.py | 160 ---- .../tests/unit/tidy_output_test_files/abs.out | 9 - .../tidy_output_test_files/files/Makefile | 12 - .../tidy_output_test_files/files/test.cpp | 9 - .../tidy_output_test_files/files/test2.cpp | 14 - .../tidy_output_test_files/files/test3.cpp | 11 
- .../tidy_output_test_files/files/test3.hh | 7 - .../tidy_output_test_files/files/test4.cpp | 11 - .../unit/tidy_output_test_files/tidy1.out | 9 - .../unit/tidy_output_test_files/tidy1_v6.out | 7 - .../unit/tidy_output_test_files/tidy2.out | 18 - .../unit/tidy_output_test_files/tidy2_v6.out | 18 - .../unit/tidy_output_test_files/tidy3.out | 25 - .../unit/tidy_output_test_files/tidy4.out | 7 - .../unit/tidy_output_test_files/tidy5.out | 13 - .../unit/tidy_output_test_files/tidy5_v6.out | 7 - .../unit/tidy_output_test_files/tidy6.out | 9 - .../unit/ubsan_output_test_files/empty1.out | 0 .../unit/ubsan_output_test_files/empty2.out | 13 - .../tests/unit/util/__init__.py | 7 + .../report-converter/tests/unit/util}/newline | 0 .../tests/unit/util/test_fileread.py | 2 +- .../tests/unit/util}/test_trim_path_prefix.py | 2 +- web/Makefile | 33 +- web/client/codechecker_client/cmd/store.py | 427 +++------ .../codechecker_client/cmd_line_client.py | 655 +++++--------- .../codechecker_client/credential_manager.py | 3 +- web/client/codechecker_client/metadata.py | 3 +- .../codechecker_client/product_client.py | 3 +- .../report_type_converter.py | 74 +- .../source_component_client.py | 3 +- .../suppress_file_handler.py | 2 +- web/client/codechecker_client/token_client.py | 3 +- web/client/tests/unit/__init__.py | 2 +- .../tests/unit/test_report_converter.py | 102 +-- web/codechecker_web/cmd/web_version.py | 4 +- .../shared/webserver_context.py | 6 +- .../codechecker_server/api/mass_store_run.py | 332 ++----- web/server/codechecker_server/cmd/server.py | 3 +- .../codechecker_server/instance_manager.py | 3 +- web/server/codechecker_server/metadata.py | 20 +- .../codechecker_server/session_manager.py | 3 +- web/server/tests/unit/__init__.py | 2 +- .../tests/unit/test_collect_path_events.py | 141 --- web/server/tests/unit/test_plist_parser.py | 357 -------- .../tests/unit/test_source_code_comment.py | 819 ----------------- .../functional/diff_local/test_diff_local.py | 25 +- 
.../test_diff_local_remote.py | 56 +- .../source_change/test_source_change.py | 2 +- web/tests/functional/store/__init__.py | 8 +- .../test_proj/{ => divide_zero}/Makefile | 5 +- .../{ => divide_zero}/divide_zero.cpp | 0 .../{ => divide_zero}/divide_zero.plist | 2 +- .../store/test_proj/{ => divide_zero}/lib.h | 0 .../{ => divide_zero}/project_info.json | 0 .../store/test_proj/double_suppress/Makefile | 15 + .../{ => double_suppress}/double_suppress.cpp | 0 .../double_suppress.plist | 0 .../double_suppress/project_info.json | 5 + web/tests/functional/store/test_store.py | 57 +- web/tests/libtest/codechecker.py | 2 +- web/tests/libtest/env.py | 8 +- 496 files changed, 9778 insertions(+), 13752 deletions(-) delete mode 100644 analyzer/codechecker_analyzer/analyzers/clangtidy/output_converter.py delete mode 100644 analyzer/tests/unit/test_tidy_output_converter.py delete mode 100644 analyzer/tests/unit/tidy_output_test_files/empty.plist delete mode 100644 analyzer/tests/unit/tidy_output_test_files/tidy1.plist delete mode 100644 analyzer/tests/unit/tidy_output_test_files/tidy2.plist delete mode 100644 analyzer/tests/unit/tidy_output_test_files/tidy3.plist delete mode 100644 codechecker_common/output/json.py delete mode 100644 codechecker_common/plist_parser.py delete mode 100644 codechecker_common/report.py create mode 100644 codechecker_common/tests/Makefile delete mode 100644 codechecker_common/tests/unit/Makefile delete mode 100644 codechecker_common/tests/unit/test_gerrit_converter.py delete mode 100644 docs/tools/codechecker_report_hash.md delete mode 100644 docs/tools/plist_to_html.md delete mode 100644 tools/codechecker_report_hash/.gitignore delete mode 100644 tools/codechecker_report_hash/.noserc delete mode 100644 tools/codechecker_report_hash/.pylintrc delete mode 100644 tools/codechecker_report_hash/.pypirc delete mode 100644 tools/codechecker_report_hash/LICENSE.txt delete mode 100644 tools/codechecker_report_hash/MANIFEST.in delete mode 100644 
tools/codechecker_report_hash/Makefile delete mode 100644 tools/codechecker_report_hash/codechecker_report_hash/hash.py delete mode 100644 tools/codechecker_report_hash/requirements_py/dev/requirements.txt delete mode 100644 tools/codechecker_report_hash/setup.py delete mode 100644 tools/codechecker_report_hash/tests/Makefile delete mode 100644 tools/plist_to_html/.gitignore delete mode 100644 tools/plist_to_html/.noserc delete mode 100644 tools/plist_to_html/.pypirc delete mode 100644 tools/plist_to_html/LICENSE.txt delete mode 100644 tools/plist_to_html/MANIFEST.in delete mode 100644 tools/plist_to_html/Makefile delete mode 100755 tools/plist_to_html/plist_to_html/PlistToHtml.py delete mode 100644 tools/plist_to_html/requirements_py/dev/requirements.txt delete mode 100644 tools/plist_to_html/setup.py delete mode 100644 tools/plist_to_html/tests/Makefile create mode 100644 tools/report-converter/MANIFEST.in delete mode 100644 tools/report-converter/codechecker_report_converter/analyzer_result.py rename tools/{codechecker_report_hash/codechecker_report_hash => report-converter/codechecker_report_converter/analyzers}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/analyzer_result.py rename tools/{codechecker_report_hash/tests/unit => report-converter/codechecker_report_converter/analyzers/clang_tidy}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/analyzer_result.py create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/parser.py create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/coccinelle/__init__.py create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/coccinelle/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{coccinelle/output_parser.py => analyzers/coccinelle/parser.py} (69%) rename tools/{plist_to_html/plist_to_html => 
report-converter/codechecker_report_converter/analyzers/cppcheck}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/cppcheck/analyzer_result.py rename tools/{plist_to_html/tests/libtest => report-converter/codechecker_report_converter/analyzers/cpplint}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/cpplint/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{cpplint/output_parser.py => analyzers/cpplint/parser.py} (68%) rename tools/{plist_to_html/tests/unit => report-converter/codechecker_report_converter/analyzers/eslint}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/eslint/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{clang_tidy => analyzers/golint}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/golint/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{golint/output_parser.py => analyzers/golint/parser.py} (67%) rename tools/report-converter/codechecker_report_converter/{coccinelle => analyzers/infer}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/infer/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{cppcheck => analyzers/kerneldoc}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{kerneldoc/output_parser.py => analyzers/kerneldoc/parser.py} (63%) rename tools/report-converter/codechecker_report_converter/{cpplint => analyzers/markdownlint}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/markdownlint/analyzer_result.py rename 
tools/report-converter/codechecker_report_converter/{markdownlint/output_parser.py => analyzers/markdownlint/parser.py} (68%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/parser.py rename tools/report-converter/codechecker_report_converter/{eslint => analyzers/pyflakes}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/pyflakes/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{pyflakes/output_parser.py => analyzers/pyflakes/parser.py} (64%) rename tools/report-converter/codechecker_report_converter/{golint => analyzers/pylint}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/pylint/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{infer => analyzers/sanitizers}/__init__.py (100%) rename tools/report-converter/codechecker_report_converter/{kerneldoc => analyzers/sanitizers/address}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sanitizers/address/analyzer_result.py create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sanitizers/address/parser.py rename tools/report-converter/codechecker_report_converter/{markdownlint => analyzers/sanitizers/leak}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/analyzer_result.py create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/parser.py rename tools/report-converter/codechecker_report_converter/{pyflakes => analyzers/sanitizers/memory}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/analyzer_result.py create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/parser.py create mode 100644 
tools/report-converter/codechecker_report_converter/analyzers/sanitizers/parser.py rename tools/report-converter/codechecker_report_converter/{pylint => analyzers/sanitizers/thread}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/analyzer_result.py create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/parser.py rename tools/report-converter/codechecker_report_converter/{sanitizers => analyzers/sanitizers/ub}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sanitizers/ub/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{sanitizers/ub/output_parser.py => analyzers/sanitizers/ub/parser.py} (50%) rename tools/report-converter/codechecker_report_converter/{sanitizers/address => analyzers/smatch}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/smatch/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{smatch/output_parser.py => analyzers/smatch/parser.py} (69%) rename tools/report-converter/codechecker_report_converter/{sanitizers/leak => analyzers/sparse}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sparse/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{sparse/output_parser.py => analyzers/sparse/parser.py} (63%) rename tools/report-converter/codechecker_report_converter/{sanitizers/memory => analyzers/sphinx}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/sphinx/analyzer_result.py rename tools/report-converter/codechecker_report_converter/{sphinx/output_parser.py => analyzers/sphinx/parser.py} (65%) rename tools/report-converter/codechecker_report_converter/{sanitizers/thread => analyzers/spotbugs}/__init__.py (100%) rename 
tools/report-converter/codechecker_report_converter/{spotbugs/output_parser.py => analyzers/spotbugs/analyzer_result.py} (64%) rename tools/report-converter/codechecker_report_converter/{sanitizers/ub => analyzers/tslint}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/clang_tidy/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/clang_tidy/output_parser.py delete mode 100644 tools/report-converter/codechecker_report_converter/coccinelle/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/cppcheck/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/cpplint/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/eslint/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/eslint/output_parser.py delete mode 100644 tools/report-converter/codechecker_report_converter/golint/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/infer/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/infer/output_parser.py delete mode 100644 tools/report-converter/codechecker_report_converter/infer/plist_converter.py delete mode 100644 tools/report-converter/codechecker_report_converter/kerneldoc/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/markdownlint/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/output_parser.py delete mode 100644 tools/report-converter/codechecker_report_converter/plist_converter.py delete mode 100644 tools/report-converter/codechecker_report_converter/pyflakes/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/pylint/analyzer_result.py 
delete mode 100644 tools/report-converter/codechecker_report_converter/pylint/output_parser.py delete mode 100644 tools/report-converter/codechecker_report_converter/report.py create mode 100644 tools/report-converter/codechecker_report_converter/report/__init__.py rename codechecker_common/tests/unit/__init__.py => tools/report-converter/codechecker_report_converter/report/checker_labels.py (69%) create mode 100644 tools/report-converter/codechecker_report_converter/report/hash.py rename tools/report-converter/codechecker_report_converter/{smatch => report/output}/__init__.py (100%) rename {codechecker_common => tools/report-converter/codechecker_report_converter/report}/output/baseline.py (88%) rename {codechecker_common => tools/report-converter/codechecker_report_converter/report}/output/codeclimate.py (63%) rename {codechecker_common => tools/report-converter/codechecker_report_converter/report}/output/gerrit.py (82%) rename tools/report-converter/codechecker_report_converter/{sparse => report/output/html}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/report/output/html/cli.py create mode 100644 tools/report-converter/codechecker_report_converter/report/output/html/html.py rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/css/buglist.css (100%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/css/bugview.css (100%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/css/icon.css (100%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/css/statistics.css (100%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/css/style.css (100%) rename tools/{plist_to_html/plist_to_html => 
report-converter/codechecker_report_converter/report/output/html}/static/css/table.css (100%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/index.html (100%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/js/browsersupport.js (100%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/js/buglist.js (100%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/js/bugviewer.js (92%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/layout.html (100%) rename tools/{plist_to_html/plist_to_html => report-converter/codechecker_report_converter/report/output/html}/static/statistics.html (91%) create mode 100644 tools/report-converter/codechecker_report_converter/report/output/json.py create mode 100644 tools/report-converter/codechecker_report_converter/report/output/plaintext.py rename tools/report-converter/codechecker_report_converter/{sphinx => report/parser}/__init__.py (100%) create mode 100644 tools/report-converter/codechecker_report_converter/report/parser/base.py create mode 100644 tools/report-converter/codechecker_report_converter/report/parser/plist.py create mode 100644 tools/report-converter/codechecker_report_converter/report/report_file.py create mode 100644 tools/report-converter/codechecker_report_converter/report/reports.py create mode 100644 tools/report-converter/codechecker_report_converter/report/statistics.py delete mode 100644 tools/report-converter/codechecker_report_converter/sanitizers/address/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/sanitizers/address/output_parser.py delete mode 100644 
tools/report-converter/codechecker_report_converter/sanitizers/leak/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/sanitizers/leak/output_parser.py delete mode 100644 tools/report-converter/codechecker_report_converter/sanitizers/memory/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/sanitizers/memory/output_parser.py delete mode 100644 tools/report-converter/codechecker_report_converter/sanitizers/output_parser.py delete mode 100644 tools/report-converter/codechecker_report_converter/sanitizers/thread/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/sanitizers/thread/output_parser.py delete mode 100644 tools/report-converter/codechecker_report_converter/sanitizers/ub/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/smatch/analyzer_result.py rename {codechecker_common => tools/report-converter/codechecker_report_converter}/source_code_comment_handler.py (79%) delete mode 100644 tools/report-converter/codechecker_report_converter/sparse/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/sphinx/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/spotbugs/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/spotbugs/plist_converter.py delete mode 100644 tools/report-converter/codechecker_report_converter/tslint/analyzer_result.py delete mode 100644 tools/report-converter/codechecker_report_converter/tslint/output_parser.py rename {codechecker_common/output => tools/report-converter/codechecker_report_converter}/twodim.py (79%) create mode 100644 tools/report-converter/codechecker_report_converter/util.py rename tools/report-converter/{codechecker_report_converter/spotbugs => tests/libtest}/__init__.py (100%) rename tools/{plist_to_html => report-converter}/tests/libtest/env.py (100%) rename 
tools/{codechecker_report_hash => report-converter}/tests/projects/cpp/multi_error.cpp (100%) rename tools/{codechecker_report_hash => report-converter}/tests/projects/cpp/multi_error.plist (100%) rename tools/{plist_to_html => report-converter}/tests/projects/macros/macros.cpp (100%) rename tools/{plist_to_html => report-converter}/tests/projects/macros/macros.plist (100%) rename tools/{plist_to_html => report-converter}/tests/projects/notes/notes.cpp (100%) rename tools/{plist_to_html => report-converter}/tests/projects/notes/notes.plist (100%) rename tools/{plist_to_html => report-converter}/tests/projects/simple/simple.cpp (100%) rename tools/{plist_to_html => report-converter}/tests/projects/simple/simple.plist (100%) rename tools/report-converter/{codechecker_report_converter/tslint => tests/unit/analyzers}/__init__.py (100%) rename tools/report-converter/tests/unit/{ => analyzers}/asan_output_test_files/asan.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/asan_output_test_files/asan.plist (96%) rename tools/report-converter/tests/unit/{ => analyzers}/asan_output_test_files/files/asan.cpp (100%) rename tools/report-converter/tests/unit/{ => analyzers}/coccinelle_output_test_files/files/sample.c (100%) rename tools/report-converter/tests/unit/{ => analyzers}/coccinelle_output_test_files/sample.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/coccinelle_output_test_files/sample.out (100%) create mode 100644 tools/report-converter/tests/unit/analyzers/cppcheck_output_test_files/divide_zero.expected.plist rename tools/report-converter/tests/unit/{ => analyzers}/cppcheck_output_test_files/files/divide_zero.cpp (100%) rename tools/report-converter/tests/unit/{cppcheck_output_test_files => analyzers/cppcheck_output_test_files/out}/divide_zero.plist (97%) rename tools/report-converter/tests/unit/{ => analyzers}/cpplint_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => 
analyzers}/cpplint_output_test_files/files/sample.cpp (100%) rename tools/report-converter/tests/unit/{ => analyzers}/cpplint_output_test_files/sample.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/cpplint_output_test_files/sample.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/eslint_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => analyzers}/eslint_output_test_files/files/.eslintrc (100%) rename tools/report-converter/tests/unit/{ => analyzers}/eslint_output_test_files/files/index.js (100%) rename tools/report-converter/tests/unit/{ => analyzers}/eslint_output_test_files/reports.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/eslint_output_test_files/reports.json (100%) rename tools/report-converter/tests/unit/{ => analyzers}/golint_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => analyzers}/golint_output_test_files/files/simple.go (100%) rename tools/report-converter/tests/unit/{ => analyzers}/golint_output_test_files/simple.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/golint_output_test_files/simple.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/infer_output_test_files/NullDereference.java.plist (98%) rename tools/report-converter/tests/unit/{ => analyzers}/infer_output_test_files/dead_store.cpp.plist (94%) rename tools/report-converter/tests/unit/{ => analyzers}/infer_output_test_files/files/NullDereference.java (100%) rename tools/report-converter/tests/unit/{ => analyzers}/infer_output_test_files/files/dead_store.cpp (100%) rename tools/report-converter/tests/unit/{ => analyzers}/infer_output_test_files/infer-out-dead_store/report.json (100%) rename tools/report-converter/tests/unit/{ => analyzers}/infer_output_test_files/infer-out-null_dereference/report.json (100%) rename tools/report-converter/tests/unit/{ => 
analyzers}/kerneldoc_output_test_files/files/sample.c (100%) rename tools/report-converter/tests/unit/{ => analyzers}/kerneldoc_output_test_files/sample.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/kerneldoc_output_test_files/sample.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/lsan_output_test_files/files/lsan.c (100%) rename tools/report-converter/tests/unit/{ => analyzers}/lsan_output_test_files/lsan.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/lsan_output_test_files/lsan.plist (96%) rename tools/report-converter/tests/unit/{ => analyzers}/mdl_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => analyzers}/mdl_output_test_files/files/readme.md (100%) rename tools/report-converter/tests/unit/{ => analyzers}/mdl_output_test_files/readme.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/mdl_output_test_files/readme.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/msan_output_test_files/files/msan.cpp (100%) rename tools/report-converter/tests/unit/{ => analyzers}/msan_output_test_files/msan.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/msan_output_test_files/msan.plist (96%) rename tools/report-converter/tests/unit/{ => analyzers}/pyflakes_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => analyzers}/pyflakes_output_test_files/files/simple.py (100%) rename tools/report-converter/tests/unit/{ => analyzers}/pyflakes_output_test_files/simple.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/pyflakes_output_test_files/simple.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/pylint_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => analyzers}/pylint_output_test_files/files/simple.py (100%) rename tools/report-converter/tests/unit/{ => analyzers}/pylint_output_test_files/simple.expected.plist 
(100%) rename tools/report-converter/tests/unit/{ => analyzers}/pylint_output_test_files/simple.json (100%) rename tools/report-converter/tests/unit/{ => analyzers}/smatch_output_test_files/files/sample.c (100%) rename tools/report-converter/tests/unit/{ => analyzers}/smatch_output_test_files/sample.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/smatch_output_test_files/sample.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/sparse_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => analyzers}/sparse_output_test_files/files/sample.c (100%) rename tools/report-converter/tests/unit/{ => analyzers}/sparse_output_test_files/files/sample.h (100%) rename tools/report-converter/tests/unit/{ => analyzers}/sparse_output_test_files/sample.c.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/sparse_output_test_files/sample.h.expected.plist (69%) rename tools/report-converter/tests/unit/{ => analyzers}/sparse_output_test_files/sample.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/sphinx_output_test_files/files/sample.rst (100%) rename tools/report-converter/tests/unit/{ => analyzers}/sphinx_output_test_files/sample.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/sphinx_output_test_files/sample.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/spotbugs_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => analyzers}/spotbugs_output_test_files/assign.plist (94%) rename tools/report-converter/tests/unit/{ => analyzers}/spotbugs_output_test_files/assign.xml (100%) rename tools/report-converter/tests/unit/{ => analyzers}/spotbugs_output_test_files/files/Assign.java (100%) create mode 100644 tools/report-converter/tests/unit/analyzers/test_asan_parser.py create mode 100644 tools/report-converter/tests/unit/analyzers/test_clang_tidy_parser.py rename tools/report-converter/tests/unit/{ 
=> analyzers}/test_coccinelle_parser.py (81%) rename tools/report-converter/tests/unit/{ => analyzers}/test_cppcheck_parser.py (64%) rename tools/report-converter/tests/unit/{ => analyzers}/test_cpplint_parser.py (81%) rename tools/report-converter/tests/unit/{ => analyzers}/test_eslint_parser.py (81%) rename tools/report-converter/tests/unit/{ => analyzers}/test_golint_parser.py (81%) rename tools/report-converter/tests/unit/{ => analyzers}/test_infer_parser.py (81%) rename tools/report-converter/tests/unit/{ => analyzers}/test_kerneldoc_parser.py (81%) create mode 100644 tools/report-converter/tests/unit/analyzers/test_lsan_parser.py rename tools/report-converter/tests/unit/{ => analyzers}/test_mdl_parser.py (81%) rename tools/report-converter/tests/unit/{ => analyzers}/test_msan_parser.py (52%) rename tools/report-converter/tests/unit/{ => analyzers}/test_pyflakes_parser.py (81%) rename tools/report-converter/tests/unit/{ => analyzers}/test_pylint_parser.py (81%) rename tools/report-converter/tests/unit/{ => analyzers}/test_smatch_parser.py (81%) create mode 100644 tools/report-converter/tests/unit/analyzers/test_sparse_parser.py rename tools/report-converter/tests/unit/{ => analyzers}/test_sphinx_parser.py (81%) rename tools/report-converter/tests/unit/{ => analyzers}/test_spotbugs_parser.py (82%) create mode 100644 tools/report-converter/tests/unit/analyzers/test_tsan_parser.py rename tools/report-converter/tests/unit/{ => analyzers}/test_tslint_parser.py (81%) create mode 100644 tools/report-converter/tests/unit/analyzers/test_ubsan_parser.py rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/abs.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tidy_output_test_files/empty.plist (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/empty1.out (100%) rename {analyzer/tests/unit => 
tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/empty2.out (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/files/Makefile (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/files/test.cpp (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/files/test2.cpp (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/files/test3.cpp (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/files/test3.hh (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/files/test4.cpp (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/tidy1.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tidy_output_test_files/tidy1.plist (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/tidy1_v6.out (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/tidy2.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tidy_output_test_files/tidy2.plist (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/tidy2_v6.out (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/tidy3.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tidy_output_test_files/tidy3_cpp.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tidy_output_test_files/tidy3_hh.plist (93%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/tidy4.out (100%) rename {analyzer/tests/unit => 
tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/tidy5.out (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/tidy5_v6.out (100%) rename {analyzer/tests/unit => tools/report-converter/tests/unit/analyzers}/tidy_output_test_files/tidy6.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tsan_output_test_files/files/tsan.cpp (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tsan_output_test_files/tsan.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tsan_output_test_files/tsan.plist (82%) rename tools/report-converter/tests/unit/{ => analyzers}/tslint_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tslint_output_test_files/files/index.ts (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tslint_output_test_files/files/tslint.json (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tslint_output_test_files/reports.expected.plist (100%) rename tools/report-converter/tests/unit/{ => analyzers}/tslint_output_test_files/reports.json (100%) rename tools/report-converter/tests/unit/{ => analyzers}/ubsan_output_test_files/Makefile (100%) rename tools/report-converter/tests/unit/{ => analyzers}/ubsan_output_test_files/abs.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/ubsan_output_test_files/empty.plist (100%) rename tools/report-converter/tests/unit/{tidy_output_test_files => analyzers/ubsan_output_test_files}/empty1.out (100%) rename tools/report-converter/tests/unit/{tidy_output_test_files => analyzers/ubsan_output_test_files}/empty2.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/ubsan_output_test_files/ubsan1.cpp (100%) rename tools/report-converter/tests/unit/{ => analyzers}/ubsan_output_test_files/ubsan1.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/ubsan_output_test_files/ubsan1.plist (100%) rename 
tools/report-converter/tests/unit/{ => analyzers}/ubsan_output_test_files/ubsan2.cpp (100%) rename tools/report-converter/tests/unit/{ => analyzers}/ubsan_output_test_files/ubsan2.out (100%) rename tools/report-converter/tests/unit/{ => analyzers}/ubsan_output_test_files/ubsan2.plist (100%) delete mode 100644 tools/report-converter/tests/unit/cppcheck_output_test_files/divide_zero.expected.plist rename tools/report-converter/{codechecker_report_converter/clang_tidy/plist_converter.py => tests/unit/output/__init__.py} (51%) rename tools/{plist_to_html/tests/unit/plist_to_html => report-converter/tests/unit/output/gerrit}/__init__.py (100%) rename {codechecker_common/tests/unit => tools/report-converter/tests/unit/output/gerrit}/test_files/lib.cpp (100%) rename {codechecker_common/tests/unit => tools/report-converter/tests/unit/output/gerrit}/test_files/main.cpp (100%) create mode 100644 tools/report-converter/tests/unit/output/gerrit/test_gerrit_converter.py create mode 100644 tools/report-converter/tests/unit/output/html/__init__.py rename tools/{plist_to_html/tests/unit/plist_to_html => report-converter/tests/unit/output/html}/plist_to_html_test.py (73%) create mode 100644 tools/report-converter/tests/unit/parser/__init__.py create mode 100644 tools/report-converter/tests/unit/parser/plist/__init__.py rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/clang-3.7-noerror.plist (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/clang-3.7.plist (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/clang-3.8-trunk.plist (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/clang-4.0.plist (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/clang-5.0-trunk.plist (100%) rename {web/server/tests/unit => 
tools/report-converter/tests/unit/parser/plist}/plist_test_files/empty_file (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/gen_plist/gen_noerror_plist (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/gen_plist/gen_plist (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/gen_plist/gen_plist.md (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/gen_plist/noerror.cpp (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/gen_plist/test.cpp (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/plist_test_files/gen_plist/test.h (100%) create mode 100644 tools/report-converter/tests/unit/parser/plist/test_plist_parser.py rename {web/server/tests/unit => tools/report-converter/tests/unit/parser/plist}/test_report_path_hash.py (68%) rename tools/{codechecker_report_hash/tests/unit/codechecker_report_hash => report-converter/tests/unit/report_hash}/__init__.py (96%) rename tools/{codechecker_report_hash/tests/unit/codechecker_report_hash => report-converter/tests/unit/report_hash}/codechecker_report_hash_test.py (63%) rename tools/{codechecker_report_hash/tests/unit/codechecker_report_hash => report-converter/tests/unit/report_hash}/remove_whitespace_test.py (97%) create mode 100644 tools/report-converter/tests/unit/source_code_comment/__init__.py rename {web/server/tests/unit => tools/report-converter/tests/unit/source_code_comment}/source_code_comment_test_files/test_file_1 (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/source_code_comment}/source_code_comment_test_files/test_file_2 (100%) rename {web/server/tests/unit => tools/report-converter/tests/unit/source_code_comment}/source_code_comment_test_files/test_file_3 (100%) create mode 100644 
tools/report-converter/tests/unit/source_code_comment/test_source_code_comment.py delete mode 100644 tools/report-converter/tests/unit/test_asan_parser.py delete mode 100644 tools/report-converter/tests/unit/test_clang_tidy_parser.py delete mode 100644 tools/report-converter/tests/unit/test_lsan_parser.py delete mode 100644 tools/report-converter/tests/unit/test_sparse_parser.py delete mode 100644 tools/report-converter/tests/unit/test_tsan_parser.py delete mode 100644 tools/report-converter/tests/unit/test_ubsan_parser.py delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/abs.out delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/files/Makefile delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/files/test.cpp delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/files/test2.cpp delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/files/test3.cpp delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/files/test3.hh delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/files/test4.cpp delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/tidy1.out delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/tidy1_v6.out delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/tidy2.out delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/tidy2_v6.out delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/tidy3.out delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/tidy4.out delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/tidy5.out delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/tidy5_v6.out delete mode 100644 tools/report-converter/tests/unit/tidy_output_test_files/tidy6.out delete mode 100644 
tools/report-converter/tests/unit/ubsan_output_test_files/empty1.out delete mode 100644 tools/report-converter/tests/unit/ubsan_output_test_files/empty2.out create mode 100644 tools/report-converter/tests/unit/util/__init__.py rename {web/server/tests/unit => tools/report-converter/tests/unit/util}/newline (100%) rename web/server/tests/unit/test_util_fileread.py => tools/report-converter/tests/unit/util/test_fileread.py (95%) rename {web/server/tests/unit => tools/report-converter/tests/unit/util}/test_trim_path_prefix.py (97%) delete mode 100644 web/server/tests/unit/test_collect_path_events.py delete mode 100644 web/server/tests/unit/test_plist_parser.py delete mode 100644 web/server/tests/unit/test_source_code_comment.py rename web/tests/functional/store/test_proj/{ => divide_zero}/Makefile (66%) rename web/tests/functional/store/test_proj/{ => divide_zero}/divide_zero.cpp (100%) rename web/tests/functional/store/test_proj/{ => divide_zero}/divide_zero.plist (99%) rename web/tests/functional/store/test_proj/{ => divide_zero}/lib.h (100%) rename web/tests/functional/store/test_proj/{ => divide_zero}/project_info.json (100%) create mode 100644 web/tests/functional/store/test_proj/double_suppress/Makefile rename web/tests/functional/store/test_proj/{ => double_suppress}/double_suppress.cpp (100%) rename web/tests/functional/store/test_proj/{ => double_suppress}/double_suppress.plist (100%) create mode 100644 web/tests/functional/store/test_proj/double_suppress/project_info.json diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index beaa4a7209..d07cf53e28 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -22,7 +22,7 @@ jobs: run: make pylint pycodestyle tools: - name: Tools (plist-to-html, report-converter, etc.) + name: Tools (report-converter, etc.) 
runs-on: ubuntu-18.04 steps: @@ -57,18 +57,6 @@ jobs: pip install -r requirements_py/dev/requirements.txt make test - - name: Run codechecker-report-hash tests - working-directory: tools/codechecker_report_hash - run: | - pip install -r requirements_py/dev/requirements.txt - make test - - - name: Run plist-to-html tests - working-directory: tools/plist_to_html - run: | - pip install -r requirements_py/dev/requirements.txt - make test - - name: Run report-converter tests working-directory: tools/report-converter run: | @@ -129,13 +117,9 @@ jobs: run: | pip install -r requirements_py/dev/requirements.txt - - name: Run unit tests - working-directory: codechecker_common/tests - run: make -C unit test_unit - - - name: Run unit tests coverage + - name: Run mypy tests working-directory: codechecker_common/tests - run: make -C unit test_unit_cov + run: make mypy web: name: Web diff --git a/.gitmessage b/.gitmessage index 0f257701d4..1e72ff5ab7 100644 --- a/.gitmessage +++ b/.gitmessage @@ -18,7 +18,6 @@ # gui (Changes in the web GUI) # fix (Bug fix) # feat (New feature) -# plist2html (Changes in the plist-to-html tool) # refactor (Refactoring code) # server (Server related changes) # style (Formatting, missing semi colons, etc; no code change) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 865699ec8d..c2f6c8be85 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -129,7 +129,7 @@ CodeChecker, gerrit integration, debug etc. #### `tools` This folder contains tools which are used by the `analyzer` and `web` part -of the CodeChecker such as `plist-to-html` and `tu_collector`. +of the CodeChecker such as `tu_collector`. #### `web` This folder contains source code of the CodeChecker web server and web client. 
diff --git a/Makefile b/Makefile index 222637f985..bdcc729c73 100644 --- a/Makefile +++ b/Makefile @@ -112,8 +112,7 @@ clean_venv: rm -rf venv PIP_DEV_DEPS_CMD = make -C $(CC_ANALYZER) pip_dev_deps && \ - make -C $(CC_WEB) pip_dev_deps && \ - make -C $(CC_TOOLS)/plist_to_html pip_dev_deps + make -C $(CC_WEB) pip_dev_deps pip_dev_deps: # Install the depencies for analyze, web and the tools. @@ -128,27 +127,20 @@ clean_venv_dev: rm -rf venv_dev $(MAKE) -C $(CC_ANALYZER) clean_venv_dev $(MAKE) -C $(CC_WEB) clean_venv_dev - $(MAKE) -C $(CC_TOOLS)/plist_to_html clean_venv_dev clean: clean_package $(MAKE) -C $(CC_WEB) clean -clean_package: clean_plist_to_html clean_tu_collector clean_report_converter clean_report_hash clean_statistics_collector +clean_package: clean_tu_collector clean_report_converter clean_statistics_collector rm -rf $(BUILD_DIR) find . -name "*.pyc" -delete -clean_plist_to_html: - $(MAKE) -C $(CC_TOOLS)/plist_to_html clean - clean_tu_collector: $(MAKE) -C $(CC_TOOLS)/tu_collector clean clean_report_converter: $(MAKE) -C $(CC_TOOLS)/report-converter clean -clean_report_hash: - $(MAKE) -C $(CC_TOOLS)/codechecker_report_hash clean - clean_statistics_collector: $(MAKE) -C $(CC_ANALYZER_TOOLS)/statistics_collector clean @@ -168,7 +160,7 @@ pylint_in_env: venv_dev PYCODE_CMD = $(MAKE) -C $(CC_ANALYZER) pycodestyle && \ $(MAKE) -C $(CC_WEB) pycodestyle && \ - pycodestyle bin codechecker_common scripts + pycodestyle bin codechecker_common pycodestyle: $(PYCODE_CMD) @@ -176,15 +168,9 @@ pycodestyle: pycodestyle_in_env: $(ACTIVATE_DEV_VENV) && $(PYCODE_CMD) -test: test_common test_analyzer test_web - -test_in_env: test_common_in_env test_analyzer_in_env test_web_in_env - -test_common: - BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_COMMON)/tests/unit test_unit +test: test_analyzer test_web -test_common_in_env: - BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_COMMON)/tests/unit test_unit_in_env +test_in_env: test_analyzer_in_env test_web_in_env test_analyzer: 
BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_ANALYZER) test @@ -221,12 +207,10 @@ test_web_in_env: test_unit: BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_ANALYZER) test_unit BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_WEB) test_unit - BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_COMMON)/tests/unit test_unit test_unit_cov: BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_ANALYZER) test_unit_cov BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_WEB) test_unit_cov - BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_COMMON)/tests/unit test_unit_cov test_unit_in_env: BUILD_DIR=$(BUILD_DIR) $(MAKE) -C $(CC_ANALYZER) test_unit_in_env diff --git a/analyzer/Makefile b/analyzer/Makefile index 3152249319..241cad89c6 100644 --- a/analyzer/Makefile +++ b/analyzer/Makefile @@ -50,13 +50,6 @@ package_dir_structure: mkdir -p $(CC_BUILD_BIN_DIR) && \ mkdir -p $(CC_BUILD_LIB_DIR) -build_plist_to_html: - $(MAKE) -C $(ROOT)/tools/plist_to_html build - -package_plist_to_html: build_plist_to_html package_dir_structure - # Copy plist-to-html files. - cp -r $(CC_TOOLS)/plist_to_html/build/plist_to_html/plist_to_html $(CC_BUILD_LIB_DIR) - build_tu_collector: $(MAKE) -C $(ROOT)/tools/tu_collector build @@ -67,12 +60,6 @@ package_tu_collector: build_tu_collector package_dir_structure cd $(CC_BUILD_BIN_DIR) && \ ln -sf ../lib/python3/tu_collector/tu_collector.py tu_collector -build_report_hash: - $(MAKE) -C $(CC_TOOLS)/codechecker_report_hash build - -package_report_hash: build_report_hash package_dir_structure - cp -rp $(CC_TOOLS)/codechecker_report_hash/build/codechecker_report_hash/codechecker_report_hash $(CC_BUILD_LIB_DIR) - build_report_converter: $(MAKE) -C $(CC_TOOLS)/report-converter build @@ -86,7 +73,7 @@ build_merge_clang_extdef_mappings: $(MAKE) -C $(CC_ANALYZER)/tools/merge_clang_extdef_mappings build package_merge_clang_extdef_mappings: build_merge_clang_extdef_mappings package_dir_structure - # Copy plist-to-html files. + # Copy files. 
cp -r tools/merge_clang_extdef_mappings/build/merge_clang_extdef_mappings/codechecker_merge_clang_extdef_mappings $(CC_BUILD_LIB_DIR) && \ chmod u+x $(CC_BUILD_LIB_DIR)/codechecker_merge_clang_extdef_mappings/cli.py && \ cd $(CC_BUILD_BIN_DIR) && \ @@ -113,9 +100,8 @@ package_bazel_compile_commands: build_bazel_compile_commands package_dir_structu ln -sf ../lib/python3/bazel_compile_commands/bazel_compile_commands.py bazel-compile-commands # This target should be used from the top level Makefile to build the package -# together with the web part. This way we will not build plist-to-html -# multiple times. -package_analyzer: package_dir_structure package_plist_to_html package_tu_collector package_report_hash package_merge_clang_extdef_mappings package_report_converter package_statistics_collector package_bazel_compile_commands +# together with the web part. This way we will not build tools multiple times. +package_analyzer: package_dir_structure package_tu_collector package_merge_clang_extdef_mappings package_report_converter package_statistics_collector package_bazel_compile_commands package: package_analyzer # Copy libraries. @@ -182,13 +168,10 @@ else endif endif -clean_package: clean_plist_to_html clean_tu_collector clean_bazel_compile_commands +clean_package: clean_tu_collector clean_bazel_compile_commands rm -rf $(BUILD_DIR) find . 
-name "*.pyc" -delete -clean_plist_to_html: - rm -rf $(ROOT)/tools/plist_to_html/build - clean_tu_collector: rm -rf $(ROOT)/tools/tu_collector/build diff --git a/analyzer/codechecker_analyzer/analysis_manager.py b/analyzer/codechecker_analyzer/analysis_manager.py index e874156f45..127cc4dc81 100644 --- a/analyzer/codechecker_analyzer/analysis_manager.py +++ b/analyzer/codechecker_analyzer/analysis_manager.py @@ -25,7 +25,6 @@ import psutil from codechecker_analyzer import env -from codechecker_common import plist_parser from codechecker_common.logger import get_logger from codechecker_statistics_collector.collectors.special_return_value import \ @@ -233,18 +232,13 @@ def handle_success(rh, result_file, result_base, skip_handler, save_output(os.path.join(success_dir, result_base), rh.analyzer_stdout, rh.analyzer_stderr) - rh.postprocess_result() + rh.postprocess_result(skip_handler) + # Generated reports will be handled separately at store. save_metadata(result_file, rh.analyzer_result_file, rh.analyzed_source_file) - if skip_handler: - # We need to check the plist content because skipping - # reports in headers can be done only this way. 
- plist_parser.skip_report_from_plist(result_file, - skip_handler) - def handle_reproducer(source_analyzer, rh, zip_file, actions_map): """ @@ -321,7 +315,9 @@ def handle_reproducer(source_analyzer, rh, zip_file, actions_map): LOG.debug("ZIP file written at '%s'", zip_file) -def handle_failure(source_analyzer, rh, zip_file, result_base, actions_map): +def handle_failure( + source_analyzer, rh, zip_file, result_base, actions_map, skip_handler +): """ If the analysis fails a debug zip is packed together which contains build, analysis information and source files to be able to @@ -335,7 +331,7 @@ def handle_failure(source_analyzer, rh, zip_file, result_base, actions_map): checks = source_analyzer.config_handler.checks() state = checks.get('clang-diagnostic-error', (CheckerState.default, ''))[0] if state != CheckerState.disabled: - rh.postprocess_result() + rh.postprocess_result(skip_handler) # Remove files that successfully analyzed earlier on. plist_file = result_base + ".plist" @@ -611,7 +607,7 @@ def handle_analysis_result(success, zip_file=zip_file): elif not generate_reproducer: handle_failure(source_analyzer, rh, os.path.join(failed_dir, zip_file), - result_base, actions_map) + result_base, actions_map, skip_handler) if rh.analyzer_returncode == 0: handle_analysis_result(success=True) @@ -679,12 +675,6 @@ def handle_analysis_result(success, zip_file=zip_file): collect_ctu_involved_files(rh, source_analyzer, output_dirs['ctu_connections']) - if skip_handler and os.path.exists(result_file): - # We need to check the plist content because skipping - # reports in headers can be done only this way. 
- plist_parser.skip_report_from_plist(result_file, - skip_handler) - if not quiet_output_on_stdout: if rh.analyzer_returncode: LOG.error('\n%s', rh.analyzer_stdout) diff --git a/analyzer/codechecker_analyzer/analyzer_context.py b/analyzer/codechecker_analyzer/analyzer_context.py index deafab2a22..993c4036c5 100644 --- a/analyzer/codechecker_analyzer/analyzer_context.py +++ b/analyzer/codechecker_analyzer/analyzer_context.py @@ -16,10 +16,11 @@ import os import sys +from codechecker_report_converter.util import load_json_or_empty + from codechecker_common import logger from codechecker_common.checker_labels import CheckerLabels from codechecker_common.singleton import Singleton -from codechecker_common.util import load_json_or_empty from . import env @@ -231,7 +232,8 @@ def logger_lib_name(self): @property def path_plist_to_html_dist(self): - return os.path.join(self._lib_dir_path, 'plist_to_html', 'static') + return os.path.join(self._lib_dir_path, 'codechecker_report_converter', + 'report', 'output', 'html', 'static') @property def path_env_extra(self): diff --git a/analyzer/codechecker_analyzer/analyzers/clangsa/analyzer.py b/analyzer/codechecker_analyzer/analyzers/clangsa/analyzer.py index 0e89bad663..1064e3c919 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangsa/analyzer.py +++ b/analyzer/codechecker_analyzer/analyzers/clangsa/analyzer.py @@ -30,7 +30,7 @@ from . import config_handler from . import ctu_triple_arch from . import version -from .result_handler import ResultHandlerClangSA +from .result_handler import ClangSAResultHandler LOG = get_logger('analyzer') @@ -410,7 +410,7 @@ def construct_result_handler(self, buildaction, report_output, """ See base class for docs. 
""" - res_handler = ResultHandlerClangSA(buildaction, report_output, + res_handler = ClangSAResultHandler(buildaction, report_output, self.config_handler.report_hash) res_handler.checker_labels = checker_labels diff --git a/analyzer/codechecker_analyzer/analyzers/clangsa/result_handler.py b/analyzer/codechecker_analyzer/analyzers/clangsa/result_handler.py index 15c1a947c7..cd1d5cf221 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangsa/result_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/clangsa/result_handler.py @@ -11,30 +11,49 @@ import os +from typing import Optional + +from codechecker_report_converter.report.parser.base import AnalyzerInfo +from codechecker_report_converter.report import report_file +from codechecker_report_converter.report.hash import get_report_hash, HashType from codechecker_common.logger import get_logger -from codechecker_report_hash.hash import HashType, replace_report_hash +from codechecker_common.skiplist_handler import SkipListHandler from ..result_handler_base import ResultHandler LOG = get_logger('report') -class ResultHandlerClangSA(ResultHandler): +class ClangSAResultHandler(ResultHandler): """ - Use context free hash if enabled. + Create analyzer result file for Clang Static Analyzer output. """ - def postprocess_result(self): + def __init__(self, *args, **kwargs): + self.analyzer_info = AnalyzerInfo(name='clangsa') + + super(ClangSAResultHandler, self).__init__(*args, **kwargs) + + def postprocess_result(self, skip_handler: Optional[SkipListHandler]): """ - Override the context sensitive issue hash in the plist files to - context insensitive if it is enabled during analysis. + Generate analyzer result output file which can be parsed and stored + into the database. 
""" if os.path.exists(self.analyzer_result_file): + reports = report_file.get_reports( + self.analyzer_result_file, self.checker_labels) + reports = [r for r in reports if not r.skip(skip_handler)] + + hash_type = None if self.report_hash_type in ['context-free', 'context-free-v2']: - replace_report_hash( - self.analyzer_result_file, - HashType.CONTEXT_FREE) + hash_type = HashType.CONTEXT_FREE elif self.report_hash_type == 'diagnostic-message': - replace_report_hash( - self.analyzer_result_file, - HashType.DIAGNOSTIC_MESSAGE) + hash_type = HashType.DIAGNOSTIC_MESSAGE + + if hash_type is not None: + for report in reports: + report.report_hash = get_report_hash(report, hash_type) + + report_file.create( + self.analyzer_result_file, reports, self.checker_labels, + self.analyzer_info) diff --git a/analyzer/codechecker_analyzer/analyzers/clangtidy/analyzer.py b/analyzer/codechecker_analyzer/analyzers/clangtidy/analyzer.py index b76b986ecb..3cd768ffae 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangtidy/analyzer.py +++ b/analyzer/codechecker_analyzer/analyzers/clangtidy/analyzer.py @@ -327,9 +327,8 @@ def construct_result_handler(self, buildaction, report_output, See base class for docs. 
""" report_hash = self.config_handler.report_hash - res_handler = result_handler.ClangTidyPlistToFile(buildaction, - report_output, - report_hash) + res_handler = result_handler.ClangTidyResultHandler( + buildaction, report_output, report_hash) res_handler.checker_labels = checker_labels res_handler.skiplist_handler = skiplist_handler diff --git a/analyzer/codechecker_analyzer/analyzers/clangtidy/output_converter.py b/analyzer/codechecker_analyzer/analyzers/clangtidy/output_converter.py deleted file mode 100644 index 98b3d7b371..0000000000 --- a/analyzer/codechecker_analyzer/analyzers/clangtidy/output_converter.py +++ /dev/null @@ -1,399 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- -""" -This module is responsible for parsing clang-tidy output and generating plist -for the plist_parser module. -""" - - -import copy -import json -import os -import plistlib -import re - -from codechecker_common.logger import get_logger -from codechecker_report_hash.hash import get_report_hash, HashType - -LOG = get_logger('analyzer.tidy') - - -class Note: - """ - Represents a note and also this is the base class of Message. - """ - - def __init__(self, path, line, column, message): - self.path = path - self.line = line - self.column = column - self.message = message - - def __eq__(self, other): - return self.path == other.path and \ - self.line == other.line and \ - self.column == other.column and \ - self.message == other.message - - def __str__(self): - return 'path=%s, line=%d, column=%s, message=%s' % \ - (self.path, self.line, self.column, self.message) - - -class Message(Note): - """ - Represents a clang-tidy message with an optional fixit message. 
- """ - - def __init__(self, path, line, column, message, checker, fixits=None, - notes=None): - super(Message, self).__init__(path, line, column, message) - self.checker = checker - self.fixits = fixits if fixits else [] - self.notes = notes if notes else [] - - def __eq__(self, other): - return super(Message, self).__eq__(other) and \ - self.checker == other.checker and \ - self.fixits == other.fixits and \ - self.notes == other.notes - - def __str__(self): - return '%s, checker=%s, fixits=%s, notes=%s' % \ - (super(Message, self).__str__(), self.checker, - [str(fixit) for fixit in self.fixits], - [str(note) for note in self.notes]) - - -class OutputParser: - """ - Parser for clang-tidy console output. - """ - - # Regex for parsing a clang-tidy message. - message_line_re = re.compile( - # File path followed by a ':'. - r'^(?P[\S ]+?):' - # Line number followed by a ':'. - r'(?P\d+?):' - # Column number followed by a ':' and a space. - r'(?P\d+?): ' - # Severity followed by a ':'. - r'(?P(error|warning)):' - # Checker message. - r'(?P[\S \t]+)\s*' - # Checker name. - r'\[(?P.*)\]') - - # Matches a note. - note_line_re = re.compile( - # File path followed by a ':'. - r'^(?P[\S ]+?):' - # Line number followed by a ':'. - r'(?P\d+?):' - # Column number followed by a ':' and a space. - r'(?P\d+?): ' - # Severity == note. - r'note:' - # Checker message. - r'(?P.*)') - - def __init__(self): - self.messages = [] - - def parse_messages_from_file(self, path): - """ - Parse clang-tidy output dump (redirected output). - """ - - with open(path, 'r', encoding="utf-8", errors="ignore") as file: - return self.parse_messages(file) - - def parse_messages(self, tidy_out): - """ - Parse the given clang-tidy output. This method calls iter(tidy_out). - The iterator should return lines. 
- - Parameters: - tidy_out: something iterable (e.g.: a file object) - """ - - titer = iter(tidy_out) - try: - next_line = next(titer) - while True: - message, next_line = self._parse_message(titer, next_line) - if message is not None: - self.messages.append(message) - except StopIteration: - pass - - return self.messages - - def _parse_message(self, titer, line): - """ - Parse the given line. Returns a (message, next_line) pair or throws a - StopIteration. The message could be None. - - Parameters: - titer: clang-tidy output iterator - line: the current line - """ - - match = OutputParser.message_line_re.match(line) - if match is None: - return None, next(titer) - - message = Message( - os.path.abspath(match.group('path')), - int(match.group('line')), - int(match.group('column')), - match.group('message').strip(), - match.group('checker').strip()) - - try: - line = next(titer) - line = self._parse_code(message, titer, line) - line = self._parse_fixits(message, titer, line) - line = self._parse_notes(message, titer, line) - - return message, line - except StopIteration: - return message, '' - - @staticmethod - def _parse_code(message, titer, line): - # Eat code line. - if OutputParser.note_line_re.match(line) or \ - OutputParser.message_line_re.match(line): - LOG.debug("Unexpected line: %s. Expected a code line!", line) - return line - - # Eat arrow line. - # FIXME: range support? - line = next(titer) - if '^' not in line: - LOG.debug("Unexpected line: %s. 
Expected an arrow line!", line) - return line - return next(titer) - - @staticmethod - def _parse_fixits(message, titer, line): - """Parses fixit messages.""" - - while OutputParser.message_line_re.match(line) is None and \ - OutputParser.note_line_re.match(line) is None: - message_text = line.strip() - - if message_text != '': - message.fixits.append(Note(message.path, message.line, - line.find(message_text) + 1, - message_text)) - line = next(titer) - return line - - def _parse_notes(self, message, titer, line): - """Parses note messages.""" - - while OutputParser.message_line_re.match(line) is None: - match = OutputParser.note_line_re.match(line) - if match is None: - LOG.debug("Unexpected line: %s", line) - return next(titer) - - message.notes.append(Note(os.path.abspath(match.group('path')), - int(match.group('line')), - int(match.group('column')), - match.group('message').strip())) - line = next(titer) - line = self._parse_code(message, titer, line) - return line - - -class PListConverter: - """ - Clang-tidy messages to plist converter. - """ - - def __init__(self): - self.plist = { - 'files': [], - 'diagnostics': [] - } - - def _add_files_from_messages(self, messages): - """ - Adds the new files from the given message array to the plist's "files" - key, and returns a path to file index dictionary. - """ - - fmap = {} - for message in messages: - try: - # This file is already in the plist. - idx = self.plist['files'].index(message.path) - fmap[message.path] = idx - except ValueError: - # New file. - fmap[message.path] = len(self.plist['files']) - self.plist['files'].append(message.path) - - # Collect file paths from the message notes. - for nt in message.notes: - try: - # This file is already in the plist. - idx = self.plist['files'].index(nt.path) - fmap[nt.path] = idx - except ValueError: - # New file. 
- fmap[nt.path] = len(self.plist['files']) - self.plist['files'].append(nt.path) - - return fmap - - def _add_diagnostics(self, messages, files): - """ - Adds the messages to the plist as diagnostics. - """ - - fmap = self._add_files_from_messages(messages) - for message in messages: - diagnostics = PListConverter._create_diags(message, fmap, files) - self.plist['diagnostics'].extend(diagnostics) - - @staticmethod - def _get_checker_category(checker): - """ - Returns the check's category. - """ - - parts = checker.split('-') - if not parts: - # I don't know if it's possible. - return 'unknown' - else: - return parts[0] - - @staticmethod - def _create_diags(message, fmap, files): - """ - Creates new plist diagnostics from a single clang-tidy message. - """ - diagnostics = [] - - checker_names = sorted(message.checker.split(',')) - for checker_name in checker_names: - diag = {'location': PListConverter._create_location(message, fmap), - 'check_name': checker_name, - 'description': message.message, - 'category': PListConverter._get_checker_category( - checker_name), - 'type': 'clang-tidy', - 'path': []} - - PListConverter._add_fixits(diag, message, fmap) - PListConverter._add_notes(diag, message, fmap) - - # The original message should be the last part of the path. This is - # displayed by quick check, and this is the main event displayed by - # the web interface. FIXME: notes and fixits should not be events. 
- diag['path'].append(PListConverter._create_event_from_note(message, - fmap)) - - source_file = files[diag['location']['file']] - diag['issue_hash_content_of_line_in_context'] \ - = get_report_hash(diag, source_file, HashType.PATH_SENSITIVE) - - diagnostics.append(diag) - - return diagnostics - - @staticmethod - def _create_location(note, fmap): - return { - 'line': note.line, - 'col': note.column, - 'file': fmap[note.path] - } - - @staticmethod - def _create_event_from_note(note, fmap): - return { - 'kind': 'event', - 'location': PListConverter._create_location(note, fmap), - 'depth': 0, # I don't know WTF is this. - 'message': note.message - } - - @staticmethod - def _create_edge(start_note, end_note, fmap): - start_loc = PListConverter._create_location(start_note, fmap) - end_loc = PListConverter._create_location(end_note, fmap) - return { - 'start': [start_loc, start_loc], - 'end': [end_loc, end_loc] - } - - @staticmethod - def _add_fixits(diag, message, fmap): - """ - Adds fixits as events to the diagnostics. - """ - - for fixit in message.fixits: - mf = copy.deepcopy(fixit) - mf.message = '%s (fixit)' % fixit.message - diag['path'].append(PListConverter._create_event_from_note( - mf, fmap)) - - @staticmethod - def _add_notes(diag, message, fmap): - """ - Adds notes as events to the diagnostics. It also creates edges between - the notes. - """ - - edges = [] - last = None - for note in message.notes: - if last is not None: - edges.append(PListConverter._create_edge(last, note, fmap)) - diag['path'].append(PListConverter._create_event_from_note( - note, fmap)) - last = note - - # Add control items only if there is any. - if edges: - diag['path'].append({ - 'kind': 'control', - 'edges': edges - }) - - def add_messages(self, messages): - """ - Adds the given clang-tidy messages to the plist. - """ - - self._add_diagnostics(messages, self.plist['files']) - - def write_to_file(self, path): - """ - Writes out the plist XML to the given path. 
- """ - - with open(path, 'wb') as file: - self.write(file) - - def write(self, file): - """ - Writes out the plist XML using the given file object. - """ - plistlib.dump(self.plist, file) - - def __str__(self): - return str(json.dumps(self.plist, indent=4, separators=(',', ': '))) diff --git a/analyzer/codechecker_analyzer/analyzers/clangtidy/result_handler.py b/analyzer/codechecker_analyzer/analyzers/clangtidy/result_handler.py index cda1936e0b..03b5a34b56 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangtidy/result_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/clangtidy/result_handler.py @@ -9,45 +9,43 @@ Result handler for Clang Tidy. """ +from typing import Optional + +from codechecker_report_converter.analyzers.clang_tidy.analyzer_result import \ + AnalyzerResult +from codechecker_report_converter.analyzers.clang_tidy.parser import Parser +from codechecker_report_converter.report.parser.base import AnalyzerInfo +from codechecker_report_converter.report import report_file +from codechecker_report_converter.report.hash import get_report_hash, HashType from codechecker_common.logger import get_logger -from codechecker_report_hash.hash import HashType, replace_report_hash +from codechecker_common.skiplist_handler import SkipListHandler from ..result_handler_base import ResultHandler -from . import output_converter - LOG = get_logger('report') -def generate_plist_from_tidy_result(output_file, tidy_stdout): +class ClangTidyResultHandler(ResultHandler): """ - Generate a plist file from the clang tidy analyzer results. + Create analyzer result file for Clang Tidy output. 
""" - parser = output_converter.OutputParser() - - messages = parser.parse_messages(tidy_stdout) - - plist_converter = output_converter.PListConverter() - plist_converter.add_messages(messages) - - plist_converter.write_to_file(output_file) + def __init__(self, *args, **kwargs): + self.analyzer_info = AnalyzerInfo(name=AnalyzerResult.TOOL_NAME) -class ClangTidyPlistToFile(ResultHandler): - """ - Create a plist file from clang-tidy results. - """ + super(ClangTidyResultHandler, self).__init__(*args, **kwargs) - def postprocess_result(self): + def postprocess_result(self, skip_handler: Optional[SkipListHandler]): """ - Generate plist file which can be parsed and processed for - results which can be stored into the database. + Generate analyzer result output file which can be parsed and stored + into the database. """ - output_file = self.analyzer_result_file LOG.debug_analyzer(self.analyzer_stdout) tidy_stdout = self.analyzer_stdout.splitlines() - generate_plist_from_tidy_result(output_file, tidy_stdout) + + reports = Parser().get_reports_from_iter(tidy_stdout) + reports = [r for r in reports if not r.skip(skip_handler)] # In the earlier versions of CodeChecker Clang Tidy never used context # free hash even if we enabled it with '--report-hash context-free' @@ -55,7 +53,15 @@ def postprocess_result(self): # automatically when using this option we introduced a new choice for # --report-hash option ('context-free-v2') and we still do not use # context free hash for 'context-free' choice. 
+ hash_type = HashType.PATH_SENSITIVE if self.report_hash_type == 'context-free-v2': - replace_report_hash(output_file, HashType.CONTEXT_FREE) + hash_type = HashType.CONTEXT_FREE elif self.report_hash_type == 'diagnostic-message': - replace_report_hash(output_file, HashType.DIAGNOSTIC_MESSAGE) + hash_type = HashType.DIAGNOSTIC_MESSAGE + + for report in reports: + report.report_hash = get_report_hash(report, hash_type) + + report_file.create( + self.analyzer_result_file, reports, self.checker_labels, + self.analyzer_info) diff --git a/analyzer/codechecker_analyzer/analyzers/result_handler_base.py b/analyzer/codechecker_analyzer/analyzers/result_handler_base.py index db23ec9732..fc5e6acedd 100644 --- a/analyzer/codechecker_analyzer/analyzers/result_handler_base.py +++ b/analyzer/codechecker_analyzer/analyzers/result_handler_base.py @@ -9,13 +9,16 @@ Result handlers to manage the output of the static analyzers. """ - -from abc import ABCMeta import hashlib import os import shlex +from abc import ABCMeta +from typing import Optional + from codechecker_common.logger import get_logger +from codechecker_common.skiplist_handler import SkipListHandler + LOG = get_logger('analyzer') @@ -167,7 +170,7 @@ def clean_results(self): # There might be no result file if analysis failed. LOG.debug(oserr) - def postprocess_result(self): + def postprocess_result(self, skip_handler: Optional[SkipListHandler]): """ Postprocess result if needed. Should be called after the analyses finished. 
diff --git a/analyzer/codechecker_analyzer/buildlog/log_parser.py b/analyzer/codechecker_analyzer/buildlog/log_parser.py index b7807b1809..4beed3557e 100644 --- a/analyzer/codechecker_analyzer/buildlog/log_parser.py +++ b/analyzer/codechecker_analyzer/buildlog/log_parser.py @@ -22,10 +22,11 @@ import tempfile import traceback +from codechecker_report_converter.util import load_json_or_empty + from codechecker_analyzer.analyzers import clangsa from codechecker_common.logger import get_logger -from codechecker_common.util import load_json_or_empty from .. import gcc_toolchain from .build_action import BuildAction diff --git a/analyzer/codechecker_analyzer/cmd/analyze.py b/analyzer/codechecker_analyzer/cmd/analyze.py index 24c14e2631..8e09168da4 100644 --- a/analyzer/codechecker_analyzer/cmd/analyze.py +++ b/analyzer/codechecker_analyzer/cmd/analyze.py @@ -19,13 +19,14 @@ import shutil import sys +from codechecker_report_converter.util import load_json_or_empty + from codechecker_analyzer import analyzer, analyzer_context, env from codechecker_analyzer.analyzers import analyzer_types, clangsa from codechecker_analyzer.arg import OrderedCheckersAction from codechecker_analyzer.buildlog import log_parser from codechecker_common import arg, logger, skiplist_handler, cmd_config -from codechecker_common.util import load_json_or_empty LOG = logger.get_logger('system') @@ -743,7 +744,7 @@ def add_arguments_to_parser(parser): func=main, func_process_config_file=cmd_config.process_config_file) -def __get_skip_handler(args): +def __get_skip_handler(args) -> skiplist_handler.SkipListHandler: """ Initialize and return a skiplist handler if there is a skip list file in the arguments or files options is provided. 
diff --git a/analyzer/codechecker_analyzer/cmd/analyzer_version.py b/analyzer/codechecker_analyzer/cmd/analyzer_version.py index 5addeb31b3..c733424b90 100644 --- a/analyzer/codechecker_analyzer/cmd/analyzer_version.py +++ b/analyzer/codechecker_analyzer/cmd/analyzer_version.py @@ -15,8 +15,10 @@ from codechecker_analyzer import analyzer_context +from codechecker_report_converter import twodim + from codechecker_common import logger -from codechecker_common.output import USER_FORMATS, twodim +from codechecker_common.output import USER_FORMATS def get_argparser_ctor_args(): diff --git a/analyzer/codechecker_analyzer/cmd/analyzers.py b/analyzer/codechecker_analyzer/cmd/analyzers.py index d878617b9e..5e0f900736 100644 --- a/analyzer/codechecker_analyzer/cmd/analyzers.py +++ b/analyzer/codechecker_analyzer/cmd/analyzers.py @@ -15,12 +15,14 @@ import subprocess import sys +from codechecker_report_converter import twodim + from codechecker_analyzer import analyzer_context from codechecker_analyzer import env from codechecker_analyzer.analyzers import analyzer_types from codechecker_common import logger -from codechecker_common.output import twodim, USER_FORMATS +from codechecker_common.output import USER_FORMATS LOG = logger.get_logger('system') diff --git a/analyzer/codechecker_analyzer/cmd/check.py b/analyzer/codechecker_analyzer/cmd/check.py index 4e7269f4ef..0e23b8ca7a 100644 --- a/analyzer/codechecker_analyzer/cmd/check.py +++ b/analyzer/codechecker_analyzer/cmd/check.py @@ -23,7 +23,8 @@ from codechecker_analyzer.arg import OrderedCheckersAction from codechecker_common import arg, cmd_config, logger -from codechecker_common.source_code_comment_handler import REVIEW_STATUS_VALUES +from codechecker_report_converter.source_code_comment_handler import \ + REVIEW_STATUS_VALUES from codechecker_analyzer.cmd.analyze import \ epilog_env_var as analyzer_epilog_env_var, \ diff --git a/analyzer/codechecker_analyzer/cmd/checkers.py 
b/analyzer/codechecker_analyzer/cmd/checkers.py index bafc0fe529..52068a0d84 100644 --- a/analyzer/codechecker_analyzer/cmd/checkers.py +++ b/analyzer/codechecker_analyzer/cmd/checkers.py @@ -18,13 +18,15 @@ from collections import defaultdict from typing import Dict, Iterable, Tuple +from codechecker_report_converter import twodim + from codechecker_analyzer import analyzer_context from codechecker_analyzer.analyzers import analyzer_types from codechecker_analyzer.analyzers.clangsa.analyzer import ClangSA from codechecker_analyzer.analyzers.clangtidy.analyzer import ClangTidy from codechecker_common import arg, logger -from codechecker_common.output import USER_FORMATS, twodim +from codechecker_common.output import USER_FORMATS from codechecker_common.checker_labels import CheckerLabels from codechecker_analyzer import env from codechecker_analyzer.analyzers.config_handler import CheckerState diff --git a/analyzer/codechecker_analyzer/cmd/fixit.py b/analyzer/codechecker_analyzer/cmd/fixit.py index 3ad4564cb3..83167ed773 100644 --- a/analyzer/codechecker_analyzer/cmd/fixit.py +++ b/analyzer/codechecker_analyzer/cmd/fixit.py @@ -18,8 +18,10 @@ import tempfile import yaml +from codechecker_report_converter.util import get_last_mod_time + from codechecker_analyzer import analyzer_context -from codechecker_common import arg, logger, util +from codechecker_common import arg, logger LOG = logger.get_logger('system') @@ -163,7 +165,7 @@ def check_modification_and_collect(file_path): Return True if the file was not modified after the given timestamp otherwise collect it to a set named modified_files. 
""" - if util.get_last_mod_time(file_path) > modification_time: + if get_last_mod_time(file_path) > modification_time: modified_files.add(file_path) return False return True @@ -232,7 +234,7 @@ def list_fixits(inputs, checker_names, file_paths, interactive, reports): fixit_file = os.path.join(fixit_dir, fixit_file) with open(fixit_file, encoding='utf-8', errors='ignore') as f: content = yaml.load(f, Loader=yaml.BaseLoader) - fixit_mtime = util.get_last_mod_time(fixit_file) + fixit_mtime = get_last_mod_time(fixit_file) existing, not_existing, modified = clang_tidy_fixit_filter( content, checker_names, file_paths, reports, fixit_mtime, @@ -281,7 +283,7 @@ def apply_fixits(inputs, checker_names, file_paths, interactive, reports): with open(os.path.join(fixit_dir, fixit_file), encoding='utf-8', errors='ignore') as f: content = yaml.load(f, Loader=yaml.BaseLoader) - fixit_mtime = util.get_last_mod_time( + fixit_mtime = get_last_mod_time( os.path.join(fixit_dir, fixit_file)) existing, not_existing, modified = clang_tidy_fixit_filter( diff --git a/analyzer/codechecker_analyzer/cmd/parse.py b/analyzer/codechecker_analyzer/cmd/parse.py index 7a420a5aa4..2c3480b41c 100644 --- a/analyzer/codechecker_analyzer/cmd/parse.py +++ b/analyzer/codechecker_analyzer/cmd/parse.py @@ -11,33 +11,39 @@ """ -from collections import defaultdict import argparse -import json -import math import os -from operator import itemgetter import sys -import traceback -from typing import Callable, Dict, List, Optional, Set, Tuple, Union +from typing import Dict, Optional, Set + +from codechecker_report_converter.util import dump_json_output, \ + load_json_or_empty +from codechecker_report_converter.report import report_file, \ + reports as reports_helper +from codechecker_report_converter.report.output import baseline, codeclimate, \ + gerrit, json as report_to_json, plaintext +from codechecker_report_converter.report.output.html import \ + html as report_to_html +from 
codechecker_report_converter.report.statistics import Statistics +from codechecker_report_converter.source_code_comment_handler import \ + REVIEW_STATUS_VALUES -from plist_to_html import PlistToHtml from codechecker_analyzer import analyzer_context, suppress_handler -from codechecker_common import arg, logger, plist_parser, util, cmd_config -from codechecker_common.checker_labels import CheckerLabels -from codechecker_common.output import baseline, json as out_json, twodim, \ - codeclimate, gerrit +from codechecker_common import arg, logger, cmd_config from codechecker_common.skiplist_handler import SkipListHandler -from codechecker_common.source_code_comment_handler import \ - REVIEW_STATUS_VALUES, SourceCodeCommentHandler, SpellException -from codechecker_common.report import Report -from codechecker_report_hash.hash import get_report_path_hash LOG = logger.get_logger('system') + +def init_logger(level, stream=None, logger_name='system'): + logger.setup_logger(level, stream) + global LOG + LOG = logger.get_logger(logger_name) + + EXPORT_TYPES = ['html', 'json', 'codeclimate', 'gerrit', 'baseline'] epilog_env_var = f""" @@ -57,320 +63,6 @@ """ -class PlistToPlaintextFormatter: - """ - Parse and format plist reports to a more human readable format. 
- """ - - def __init__(self, - src_comment_handler, - skip_handler: Callable[[str], bool], - checker_labels, - processed_path_hashes, - trim_path_prefixes, - src_comment_status_filter=None): - - self.__checker_labels = checker_labels - self.print_steps = False - self.src_comment_handler = src_comment_handler - self._skip_handler = skip_handler - self.src_comment_status_filter = src_comment_status_filter - self._processed_path_hashes = processed_path_hashes - self._trim_path_prefixes = trim_path_prefixes - - @staticmethod - def __format_location(event, source_file): - loc = event['location'] - line = util.get_line(source_file, loc['line']) - if line == '': - return line - - marker_line = line[0:(loc['col'] - 1)] - marker_line = ' ' * (len(marker_line) + marker_line.count('\t')) - return '%s%s^' % (line.replace('\t', ' '), marker_line) - - @staticmethod - def __format_bug_event(name, severity, event, source_file, - review_status=None): - - loc = event['location'] - if name: - out = '[%s] %s:%d:%d: %s [%s]' % (severity, - source_file, - loc['line'], - loc['col'], - event['message'], - name) - if review_status: - rw_status = review_status.capitalize().replace('_', ' ') - out = '%s [%s]' % (out, rw_status) - - return out - else: - fname = os.path.basename(source_file) - return '%s:%d:%d: %s' % (fname, - loc['line'], - loc['col'], - event['message']) - - @staticmethod - def __format_bug_note(note, source_file): - """ - Format bug notes. - """ - loc = note['location'] - file_name = os.path.basename(source_file) - return '%s:%d:%d: %s' % (file_name, - loc['line'], - loc['col'], - note['message']) - - @staticmethod - def __format_macro_expansion(macro, source_file): - """ - Format macro expansions. 
- """ - loc = macro['location'] - file_name = os.path.basename(source_file) - return "%s:%d:%d: Macro '%s' expanded to '%s'" % (file_name, - loc['line'], - loc['col'], - macro['name'], - macro['expansion']) - - @staticmethod - def parse(plist_file) -> Tuple[Dict[int, str], List[Report]]: - """ - Parse a plist report file. - """ - files, reports = {}, [] - try: - files, reports = plist_parser.parse_plist_file(plist_file) - except Exception as ex: - traceback.print_stack() - LOG.error('The generated plist is not valid!') - LOG.error(ex) - finally: - return files, reports - - def write(self, - file_report_map: Dict[str, List[Report]], - output=sys.stdout): - """ - Format an already parsed plist report file to a more - human readable format. - The formatted text is written to the output. - During writing the output statistics are collected. - - Write out the bugs to the output and collect report statistics. - """ - - severity_stats = defaultdict(int) - file_stats = defaultdict(int) - report_count = defaultdict(int) - - for file_path in sorted(file_report_map, - key=lambda key: len(file_report_map[key])): - - non_suppressed = 0 - sorted_reports = sorted(file_report_map[file_path], - key=lambda r: r.main['location']['line']) - - for report in sorted_reports: - path_hash = get_report_path_hash(report) - if path_hash in self._processed_path_hashes: - LOG.debug("Not showing report because it is a " - "deduplication of an already processed report!") - LOG.debug("Path hash: %s", path_hash) - LOG.debug(report) - continue - - self._processed_path_hashes.add(path_hash) - - events = [i for i in report.bug_path - if i.get('kind') == 'event'] - f_path = report.files[events[-1]['location']['file']] - if self._skip_handler(f_path): - LOG.debug("Skipped report in '%s'", f_path) - LOG.debug(report) - continue - - last_report_event = report.bug_path[-1] - source_file = \ - report.files[last_report_event['location']['file']] - - report_line = last_report_event['location']['line'] - 
report_hash = \ - report.main['issue_hash_content_of_line_in_context'] - checker_name = report.main['check_name'] - - skip, source_code_comments = \ - skip_report(report_hash, - source_file, - report_line, - checker_name, - self.src_comment_handler, - self.src_comment_status_filter) - - if self.src_comment_handler and source_code_comments: - self.src_comment_handler.store_suppress_bug_id( - report_hash, os.path.basename(source_file), - source_code_comments[0]['message'], - source_code_comments[0]['status']) - - if skip: - continue - - if self._trim_path_prefixes: - report.trim_path_prefixes(self._trim_path_prefixes) - - trimmed_source_file = \ - report.files[last_report_event['location']['file']] - - file_stats[f_path] += 1 - severity = self.__checker_labels.severity(checker_name) - severity_stats[severity] += 1 - report_count["report_count"] += 1 - - review_status = None - if len(source_code_comments) == 1: - review_status = source_code_comments[0]['status'] - - output.write(self.__format_bug_event(checker_name, - severity, - last_report_event, - trimmed_source_file, - review_status)) - output.write('\n') - - # Print source code comments. - for source_code_comment in source_code_comments: - output.write(source_code_comment['line'].rstrip()) - output.write('\n') - - output.write(self.__format_location(last_report_event, - source_file)) - output.write('\n') - - if self.print_steps: - output.write(' Report hash: ' + report_hash + '\n') - - # Print out macros. - macros = report.macro_expansions - if macros: - output.write(' Macro expansions:\n') - - index_format = ' %%%dd, ' % \ - int(math.floor( - math.log10(len(macros))) + 1) - - for index, macro in enumerate(macros): - output.write(index_format % (index + 1)) - source = report.files[ - macro['location']['file']] - output.write(self.__format_macro_expansion(macro, - source)) - output.write('\n') - - # Print out notes. 
- notes = report.notes - if notes: - output.write(' Notes:\n') - - index_format = ' %%%dd, ' % \ - int(math.floor( - math.log10(len(notes))) + 1) - - for index, note in enumerate(notes): - output.write(index_format % (index + 1)) - source_file = report.files[ - note['location']['file']] - output.write(self.__format_bug_note(note, - source_file)) - output.write('\n') - - output.write(' Steps:\n') - - index_format = ' %%%dd, ' % \ - int(math.floor(math.log10(len(events))) + 1) - - for index, event in enumerate(events): - output.write(index_format % (index + 1)) - source_file = report.files[event['location']['file']] - output.write( - self.__format_bug_event(None, - None, - event, - source_file)) - output.write('\n') - output.write('\n') - - non_suppressed += 1 - - base_file = os.path.basename(file_path) - if non_suppressed == 0: - output.write('Found no defects in %s\n' % base_file) - else: - output.write('Found %d defect(s) in %s\n\n' % - (non_suppressed, base_file)) - - return {"severity": severity_stats, - "files": file_stats, - "reports": report_count} - - -def skip_report(report_hash, source_file, report_line, checker_name, - src_comment_handler=None, src_comment_status_filter=None): - """ - Returns a tuple where the first value will be True if the report was - suppressed in the source code, otherwise False. The second value will be - the list of available source code comments. - """ - bug = {'hash_value': report_hash, 'file_path': source_file} - if src_comment_handler and src_comment_handler.get_suppressed(bug): - LOG.debug("Suppressed by suppress file: %s:%s [%s] %s", source_file, - report_line, checker_name, report_hash) - return True, [] - - sc_handler = SourceCodeCommentHandler() - - src_comment_data = [] - # Check for source code comment. 
- with open(source_file, encoding='utf-8', errors='ignore') as sf: - try: - src_comment_data = sc_handler.filter_source_line_comments( - sf, - report_line, - checker_name) - except SpellException as ex: - LOG.warning("%s contains %s", - os.path.basename(source_file), - str(ex)) - - if not src_comment_data: - skip = True if src_comment_status_filter and \ - 'unreviewed' not in src_comment_status_filter else False - return skip, src_comment_data - - num_of_suppress_comments = len(src_comment_data) - if num_of_suppress_comments == 1: - status = src_comment_data[0]['status'] - - LOG.debug("Suppressed by source code comment.") - - if src_comment_status_filter and \ - status not in src_comment_status_filter: - return True, src_comment_data - - if num_of_suppress_comments > 1: - LOG.error("Multiple source code comment can be found " - "for '%s' checker in '%s' at line %d.", - checker_name, source_file, report_line) - sys.exit(1) - - return False, src_comment_data - - def get_argparser_ctor_args(): """ This method returns a dict containing the kwargs for constructing an @@ -537,175 +229,30 @@ def add_arguments_to_parser(parser): func=main, func_process_config_file=cmd_config.process_config_file) -def parse_with_plt_formatter(plist_file: str, - metadata: Dict, - plist_pltf: PlistToPlaintextFormatter, - file_report_map: Dict[str, List[Report]]) -> Set: - """Parse a plist with plaintext formatter and collect changed source files. - - Returns the report statistics collected by the result handler. 
- """ - - if not plist_file.endswith(".plist"): - LOG.debug("Skipping input file '%s' as it is not a plist.", plist_file) - return set() - - LOG.debug("Parsing input file '%s'", plist_file) - - result_source_files = {} - if 'result_source_files' in metadata: - result_source_files = metadata['result_source_files'] - else: - for tool in metadata.get('tools', {}): - result_src_files = tool.get('result_source_files', {}) - result_source_files.update(result_src_files.items()) - - if plist_file in result_source_files: - analyzed_source_file = \ - result_source_files[plist_file] - - if analyzed_source_file not in file_report_map: - file_report_map[analyzed_source_file] = [] - - files, reports = plist_pltf.parse(plist_file) - plist_mtime = util.get_last_mod_time(plist_file) - - changed_files = set() - for _, source_file in files.items(): - if plist_mtime is None: - # Failed to get the modification time for - # a file mark it as changed. - changed_files.add(source_file) - LOG.warning('%s is missing since the last analysis.', source_file) - continue - - file_mtime = util.get_last_mod_time(source_file) - if not file_mtime: - changed_files.add(source_file) - LOG.warning('%s does not exist.', source_file) - continue - - if file_mtime > plist_mtime: - changed_files.add(source_file) - LOG.warning('%s did change since the last analysis.', source_file) - - if not changed_files: - for report in reports: - file_path = report.file_path - if file_path not in file_report_map: - file_report_map[file_path] = [] - - file_report_map[file_path].append(report) - - return changed_files - - -def _parse_convert_reports( - input_dirs: List[str], - out_format: str, - checker_labels: CheckerLabels, - trim_path_prefixes: Optional[List[str]], - skip_handler: Callable[[str], bool]) \ - -> Tuple[Union[Dict, List], int]: - """Parse and convert the reports from the input dirs to the out_format. 
- - Retuns a dictionary which can be converted to the out_format type of - json to be printed out or saved on the disk. - """ - - assert(out_format in [fmt for fmt in EXPORT_TYPES if fmt != 'html']) - - input_files = set() - for input_path in input_dirs: - input_path = os.path.abspath(input_path) - if os.path.isfile(input_path): - input_files.add(input_path) - elif os.path.isdir(input_path): - _, _, file_names = next(os.walk(input_path), ([], [], [])) - input_paths = [os.path.join(input_path, file_name) for file_name - in file_names] - input_files.update(input_paths) - - all_reports = [] - for input_file in input_files: - if not input_file.endswith('.plist'): - continue - _, reports = plist_parser.parse_plist_file(input_file) - reports = [report for report in reports - if not skip_handler(report.file_path)] - all_reports.extend(reports) - - if trim_path_prefixes: - for report in all_reports: - report.trim_path_prefixes(trim_path_prefixes) - - number_of_reports = len(all_reports) - if out_format == "baseline": - return (baseline.convert(all_reports), number_of_reports) - - if out_format == "codeclimate": - return (codeclimate.convert(all_reports, checker_labels), - number_of_reports) - - if out_format == "gerrit": - return gerrit.convert(all_reports, checker_labels), number_of_reports - - if out_format == "json": - return [out_json.convert_to_parse(r) for r in all_reports], \ - number_of_reports - - -def _generate_json_output( - checker_labels: CheckerLabels, - input_dirs: List[str], - output_type: str, - output_file_path: Optional[str], - trim_path_prefixes: Optional[List[str]], - skip_handler: Callable[[str], bool] -) -> int: - """ - Generates JSON based appearance of analyzing and optionally saves it to - file. - - This function only responsible for saving and returning data. The data - conversion performed by underlying utility function. - - Parameters - ---------- - checker_labels : CheckerLabels - Binary format of a piece of configuration. 
- input_dirs : List[str] - Directories where the underlying analyzer processes have placed the - result of analyzing. - output_type : str - Specifies the type of output. It can be gerrit, json, codeclimate. - output_file_path : Optional[str] - Path of the output file. If it contains file name then generated output - will be written into. - trim_path_prefixes : Optional[List[str]] - A list of path fragments that will be trimmed from beginning of source - file names before file names will be written to the output. - skip_handler : Callable[[str], bool] - A callable that call with a file name and returns a bool that indicates - that the file should skip or not from the output. +def ch_workdir(metadata: Optional[Dict]): + """ Change working directory to the one noted in metadata.json if this + file exists and contains "working_directory". """ + if not metadata or 'working_directory' not in metadata: + return + working_dir = metadata['working_directory'] try: - reports, number_of_reports = _parse_convert_reports( - input_dirs, output_type, checker_labels, trim_path_prefixes, - skip_handler) - output_text = json.dumps(reports) + os.chdir(working_dir) + except OSError as oerr: + LOG.debug(oerr) + LOG.error("Working directory %s is missing.\nCan not parse reports " + "safely.", working_dir) + sys.exit(1) - if output_file_path: - with open(output_file_path, mode='w', encoding='utf-8', - errors="ignore") as output_f: - output_f.write(output_text) - print(output_text) - return 2 if number_of_reports else 0 - except Exception as ex: - LOG.error(ex) - return 1 +def get_metadata(dir_path: str) -> Optional[Dict]: + """ Get metadata from the given dir path or None if not exists. 
""" + metadata_file = os.path.join(dir_path, "metadata.json") + if os.path.exists(metadata_file): + return load_json_or_empty(metadata_file) + + return None def main(args): @@ -713,8 +260,13 @@ def main(args): Entry point for parsing some analysis results and printing them to the stdout in a human-readable format. """ + # If the given output format is not 'table', redirect logger's output to + # the stderr. + stream = None + if 'export' in args and args.export not in [None, 'table', 'html']: + stream = 'stderr' - logger.setup_logger(args.verbose if 'verbose' in args else None) + init_logger(args.verbose if 'verbose' in args else None, stream) try: cmd_config.check_config_file(args) @@ -743,8 +295,6 @@ def main(args): if isinstance(args.input, str): args.input = [args.input] - original_cwd = os.getcwd() - src_comment_status_filter = args.review_status suppr_handler = None @@ -774,19 +324,6 @@ def main(args): "SUPPRESS_FILE' is also given.") sys.exit(1) - processed_path_hashes = set() - - skip_file_content = "" - if 'skipfile' in args: - with open(args.skipfile, 'r', - encoding='utf-8', errors='ignore') as skip_file: - skip_file_content = skip_file.read() - - skip_handler = SkipListHandler(skip_file_content) - - trim_path_prefixes = args.trim_path_prefix if \ - 'trim_path_prefix' in args else None - output_dir_path = None output_file_path = None if 'output_path' in args: @@ -812,7 +349,7 @@ def main(args): if not os.path.exists(output_dir_path): os.makedirs(output_dir_path) - def get_output_file_path(default_file_name) -> Optional[str]: + def get_output_file_path(default_file_name: str) -> Optional[str]: """ Return an output file path. 
""" if output_file_path: return output_file_path @@ -820,190 +357,85 @@ def get_output_file_path(default_file_name) -> Optional[str]: if output_dir_path: return os.path.join(output_dir_path, default_file_name) - if export: - if export == 'baseline': - report_hashes, number_of_reports = _parse_convert_reports( - args.input, export, context.checker_labels, trim_path_prefixes, - skip_handler) - - output_path = get_output_file_path("reports.baseline") - if output_path: - baseline.write(output_path, report_hashes) - - sys.exit(2 if number_of_reports else 0) - - # The HTML part will be handled separately below. - if export != 'html': - output_path = get_output_file_path("reports.json") - sys.exit(_generate_json_output( - context.checker_labels, args.input, export, output_path, - trim_path_prefixes, skip_handler)) - - html_builder = None - report_count = 0 - - def skip_html_report_data_handler(report_hash, source_file, report_line, - checker_name, diag, files): - """ - Report handler which skips bugs which were suppressed by source code - comments. This function will return a tuple. The first element - will decide whether the report should be skipped or not and the second - element will be a list of source code comments related to the actual - report. 
- """ - files_dict = {k: v for k, v in enumerate(files)} - report = Report({'check_name': checker_name}, - diag['path'], - files_dict, - metadata=None) - path_hash = get_report_path_hash(report) - if path_hash in processed_path_hashes: - LOG.debug("Skip report because it is a deduplication of an " - "already processed report!") - LOG.debug("Path hash: %s", path_hash) - LOG.debug(diag) - return True, [] - - skip, source_code_comments = skip_report(report_hash, - source_file, - report_line, - checker_name, - suppr_handler, - src_comment_status_filter) - - if suppr_handler and source_code_comments: - suppr_handler.store_suppress_bug_id( - report_hash, os.path.basename(source_file), - source_code_comments[0]['message'], - source_code_comments[0]['status']) - - skip |= skip_handler(source_file) - - if not skip: - processed_path_hashes.add(path_hash) - nonlocal report_count - report_count += 1 - - return skip, source_code_comments - - file_change = set() - severity_stats = defaultdict(int) - file_stats = defaultdict(int) - - for input_path in args.input: - input_path = os.path.abspath(input_path) - os.chdir(original_cwd) - LOG.debug("Parsing input argument: '%s'", input_path) + skip_file_content = "" + if 'skipfile' in args: + with open(args.skipfile, 'r', + encoding='utf-8', errors='ignore') as skip_file: + skip_file_content = skip_file.read() - if export == 'html': - if not html_builder: - html_builder = \ - PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist, - context.checker_labels) - - LOG.info("Generating html output files:") - PlistToHtml.parse(input_path, - output_dir_path, - context.path_plist_to_html_dist, - skip_html_report_data_handler, - html_builder, - util.TrimPathPrefixHandler(trim_path_prefixes)) - continue - - files = [] - metadata_dict = {} - if os.path.isfile(input_path): - files.append(input_path) - - elif os.path.isdir(input_path): - metadata_file = os.path.join(input_path, "metadata.json") - if os.path.exists(metadata_file): - metadata_dict = 
util.load_json_or_empty(metadata_file) - LOG.debug(metadata_dict) - - if 'working_directory' in metadata_dict: - working_dir = metadata_dict['working_directory'] - try: - os.chdir(working_dir) - except OSError as oerr: - LOG.debug(oerr) - LOG.error("Working directory %s is missing.\n" - "Can not parse reports safely.", working_dir) - sys.exit(1) - - _, _, file_names = next(os.walk(input_path), ([], [], [])) - files = [os.path.join(input_path, file_name) for file_name - in file_names] - - file_report_map = defaultdict(list) - - plist_pltf = PlistToPlaintextFormatter(suppr_handler, - skip_handler, - context.checker_labels, - processed_path_hashes, - trim_path_prefixes, - src_comment_status_filter) - plist_pltf.print_steps = 'print_steps' in args - - for file_path in files: - f_change = parse_with_plt_formatter(file_path, - metadata_dict, - plist_pltf, - file_report_map) - file_change = file_change.union(f_change) - - report_stats = plist_pltf.write(file_report_map) - sev_stats = report_stats.get('severity') - for severity in sev_stats: - severity_stats[severity] += sev_stats[severity] - - f_stats = report_stats.get('files') - for file_path in f_stats: - file_stats[file_path] += f_stats[file_path] - - rep_stats = report_stats.get('reports') - report_count += rep_stats.get("report_count", 0) - - # Create index.html and statistics.html for the generated html files. 
- if html_builder: - html_builder.create_index_html(output_dir_path) - html_builder.create_statistics_html(output_dir_path) - - print('\nTo view statistics in a browser run:\n> firefox {0}'.format( - os.path.join(output_dir_path, 'statistics.html'))) - - print('\nTo view the results in a browser run:\n> firefox {0}'.format( - os.path.join(output_dir_path, 'index.html'))) - else: - print("\n----==== Summary ====----") - if file_stats: - vals = [[os.path.basename(k), v] for k, v in - dict(file_stats).items()] - vals.sort(key=itemgetter(0)) - keys = ['Filename', 'Report count'] - table = twodim.to_str('table', keys, vals, 1, True) - print(table) - - if severity_stats: - vals = [[k, v] for k, v in dict(severity_stats).items()] - vals.sort(key=itemgetter(0)) - keys = ['Severity', 'Report count'] - table = twodim.to_str('table', keys, vals, 1, True) - print(table) - - print("----=================----") - print("Total number of reports: {}".format(report_count)) - print("----=================----") - - if file_change: - changed_files = '\n'.join([' - ' + f for f in file_change]) - LOG.warning("The following source file contents changed since the " - "latest analysis:\n%s\nMultiple reports were not " - "shown and skipped from the statistics. Please " - "analyze your project again to update the " - "reports!", changed_files) - - os.chdir(original_cwd) - - if report_count != 0: + skip_handler = SkipListHandler(skip_file_content) + + trim_path_prefixes = args.trim_path_prefix if \ + 'trim_path_prefix' in args else None + + all_reports = [] + statistics = Statistics() + file_cache = {} # For memory effiency. 
+ changed_files: Set[str] = set() + processed_path_hashes = set() + processed_file_paths = set() + html_builder: Optional[report_to_html.HtmlBuilder] = None + print_steps = 'print_steps' in args + + for dir_path, file_paths in report_file.analyzer_result_files(args.input): + metadata = get_metadata(dir_path) + for file_path in file_paths: + reports = report_file.get_reports( + file_path, context.checker_labels, file_cache) + + reports = reports_helper.skip( + reports, processed_path_hashes, skip_handler, suppr_handler, + src_comment_status_filter) + + statistics.num_of_analyzer_result_files += 1 + for report in reports: + if report.changed_files: + changed_files.update(report.changed_files) + + statistics.add_report(report) + + if trim_path_prefixes: + report.trim_path_prefixes(trim_path_prefixes) + + all_reports.extend(reports) + + # Print reports continously. + if not export: + file_report_map = plaintext.get_file_report_map( + reports, file_path, metadata) + plaintext.convert( + file_report_map, processed_file_paths, print_steps) + elif export == 'html': + if not html_builder: + html_builder = report_to_html.HtmlBuilder( + context.path_plist_to_html_dist, + context.checker_labels) + + print(f"Parsing input file '{file_path}'.") + report_to_html.convert( + file_path, reports, output_dir_path, + html_builder) + + if export is None: # Plain text output + statistics.write() + elif export == 'html': + html_builder.finish(output_dir_path, statistics) + elif export == 'json': + data = report_to_json.convert(all_reports) + dump_json_output(data, get_output_file_path("reports.json")) + elif export == 'codeclimate': + data = codeclimate.convert(all_reports) + dump_json_output(data, get_output_file_path("reports.json")) + elif export == 'gerrit': + data = gerrit.convert(all_reports) + dump_json_output(data, get_output_file_path("reports.json")) + elif export == 'baseline': + data = baseline.convert(all_reports) + output_path = get_output_file_path("reports.baseline") + if 
output_path: + baseline.write(output_path, data) + + reports_helper.dump_changed_files(changed_files) + + if statistics.num_of_reports: sys.exit(2) diff --git a/analyzer/codechecker_analyzer/suppress_file_handler.py b/analyzer/codechecker_analyzer/suppress_file_handler.py index 1a8f606f68..54fcbd09b5 100644 --- a/analyzer/codechecker_analyzer/suppress_file_handler.py +++ b/analyzer/codechecker_analyzer/suppress_file_handler.py @@ -24,7 +24,7 @@ import re from codechecker_common.logger import get_logger -from codechecker_common.source_code_comment_handler import \ +from codechecker_report_converter.source_code_comment_handler import \ SourceCodeCommentHandler LOG = get_logger('system') diff --git a/analyzer/codechecker_analyzer/suppress_handler.py b/analyzer/codechecker_analyzer/suppress_handler.py index e8d0653a40..63fa7ae489 100644 --- a/analyzer/codechecker_analyzer/suppress_handler.py +++ b/analyzer/codechecker_analyzer/suppress_handler.py @@ -9,11 +9,9 @@ Handler for suppressing a bug. """ - -import os +from codechecker_report_converter.report import Report from codechecker_analyzer import suppress_file_handler - from codechecker_common.logger import get_logger # Warning! this logger should only be used in this module. @@ -36,7 +34,6 @@ def __init__(self, suppress_file, allow_write, src_comment_status_filter): self.__revalidate_suppress_data() else: self.__have_memory_backend = False - self.__arrow_write = False if allow_write: raise ValueError("Can't create allow_write=True suppress " @@ -79,16 +76,16 @@ def store_suppress_bug_id(self, bug_id, file_name, comment, status): self.__revalidate_suppress_data() return ret - def skip_suppress_status(self, status): + def skip_suppress_status(self, status) -> bool: """ Returns True if the given status should be skipped. 
""" if not self.src_comment_status_filter: return False return status not in self.src_comment_status_filter - def get_suppressed(self, bug): - + def get_suppressed(self, report: Report) -> bool: + """ True if the given report is suppressed. """ return any([suppress for suppress in self.__suppress_info - if suppress[0] == bug['hash_value'] and - suppress[1] == os.path.basename(bug['file_path']) and + if suppress[0] == report.report_hash and + suppress[1] == report.file.name and self.skip_suppress_status(suppress[3])]) diff --git a/analyzer/tests/functional/__init__.py b/analyzer/tests/functional/__init__.py index f96c89876a..ba8fe8b555 100644 --- a/analyzer/tests/functional/__init__.py +++ b/analyzer/tests/functional/__init__.py @@ -19,3 +19,4 @@ PKG_ROOT = os.path.join(REPO_ROOT, 'build', 'CodeChecker') sys.path.append(os.path.join(REPO_ROOT)) +sys.path.append(os.path.join(PKG_ROOT, 'lib', 'python3')) diff --git a/analyzer/tests/functional/analyze/test_analyze.py b/analyzer/tests/functional/analyze/test_analyze.py index 34b96a1cb1..b3f1ceed31 100644 --- a/analyzer/tests/functional/analyze/test_analyze.py +++ b/analyzer/tests/functional/analyze/test_analyze.py @@ -970,9 +970,9 @@ def test_analyzer_and_checker_config(self): out, _ = process.communicate() print(out) - # First it's printed as the member of enabled checkers at the beginning - # of the output. Second it is printed as a found report. - self.assertEqual(out.count('hicpp-use-nullptr'), 1) + + # It's printed as a found report and in the checker statistics. + self.assertEqual(out.count('hicpp-use-nullptr'), 2) analyze_cmd = [self._codechecker_cmd, "check", "-l", build_json, "--analyzers", "clang-tidy", "-o", self.report_dir, @@ -991,9 +991,9 @@ def test_analyzer_and_checker_config(self): errors="ignore") out, _ = process.communicate() - # First it's printed as the member of enabled checkers at the beginning - # of the output. Second and third it is printed as a found report. 
- self.assertEqual(out.count('hicpp-use-nullptr'), 2) + # It's printed as the member of enabled checkers at the beginning + # of the output, a found report and in the checker statistics. + self.assertEqual(out.count('hicpp-use-nullptr'), 3) analyze_cmd = [self._codechecker_cmd, "check", "-l", build_json, "--analyzers", "clangsa", "-o", self.report_dir, @@ -1011,9 +1011,9 @@ def test_analyzer_and_checker_config(self): errors="ignore") out, _ = process.communicate() print(out) - # First it's printed as the member of enabled checkers at the beginning - # of the output. Second it is printed as a found report. - self.assertEqual(out.count('UninitializedObject'), 2) + # It's printed as the member of enabled checkers at the beginning + # of the output, a found report and in the checker statistics. + self.assertEqual(out.count('UninitializedObject'), 3) analyze_cmd = [self._codechecker_cmd, "check", "-l", build_json, "--analyzers", "clangsa", "-o", self.report_dir, diff --git a/analyzer/tests/functional/analyze_and_parse/test_analyze_and_parse.py b/analyzer/tests/functional/analyze_and_parse/test_analyze_and_parse.py index 879a5789ed..12107c7e1d 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_analyze_and_parse.py +++ b/analyzer/tests/functional/analyze_and_parse/test_analyze_and_parse.py @@ -24,7 +24,7 @@ from libtest import project from libtest.codechecker import call_command -from codechecker_common.output import baseline +from codechecker_report_converter.report.output import baseline class AnalyzeParseTestCaseMeta(type): @@ -249,17 +249,16 @@ def test_json_output_for_macros(self): self.assertEqual(result, 2, "Parsing not found any issue.") res = json.loads(out) - self.assertEqual(len(res), 1) - res = res[0] - - self.assertIn('check_name', res) - self.assertIn('issue_hash_content_of_line_in_context', res) + reports = res["reports"] + self.assertEqual(len(reports), 1) + res = reports[0] - self.assertIn('files', res) - self.assertEqual(len(res['files']), 1) 
+ self.assertIn('checker_name', res) + self.assertIn('report_hash', res) + self.assertIn('file', res) - self.assertIn('path', res) - self.assertTrue(res['path']) + self.assertIn('bug_path_events', res) + self.assertTrue(res['bug_path_events']) self.assertIn('macro_expansions', res) self.assertTrue(res['macro_expansions']) @@ -277,17 +276,16 @@ def test_json_output_for_notes(self): self.assertEqual(result, 2, "Parsing not found any issue.") res = json.loads(out) - self.assertEqual(len(res), 1) - res = res[0] - - self.assertIn('check_name', res) - self.assertIn('issue_hash_content_of_line_in_context', res) + reports = res["reports"] + self.assertEqual(len(reports), 1) + res = reports[0] - self.assertIn('files', res) - self.assertEqual(len(res['files']), 1) + self.assertIn('checker_name', res) + self.assertIn('report_hash', res) + self.assertIn('file', res) - self.assertIn('path', res) - self.assertTrue(res['path']) + self.assertIn('bug_path_events', res) + self.assertTrue(res['bug_path_events']) self.assertIn('notes', res) self.assertTrue(res['notes']) @@ -382,11 +380,10 @@ def test_invalid_plist_file(self): errors="ignore") as invalid_plist_f: invalid_plist_f.write("Invalid plist file.") - extract_cmd = ['CodeChecker', 'parse', - invalid_plist_file] + extract_cmd = ['CodeChecker', 'parse', invalid_plist_file] - out, _, result = call_command(extract_cmd, cwd=self.test_dir, - env=self.env) + out, _, result = call_command( + extract_cmd, cwd=self.test_dir, env=self.env) self.assertEqual(result, 0, "Parsing failed.") self.assertTrue("Invalid plist file" in out) @@ -420,15 +417,15 @@ def test_codeclimate_export(self): test_project_notes, "--output", output_file_path, '--trim-path-prefix', test_project_notes] - out, _, result = call_command(extract_cmd, cwd=self.test_dir, - env=self.env) + _, _, result = call_command(extract_cmd, cwd=self.test_dir, + env=self.env) self.assertEqual(result, 2, "Parsing not found any issue.") - result_from_stdout = json.loads(out) + with 
open(output_file_path, 'r', encoding='utf-8', errors='ignore') \ - as handle: - result_from_file = json.load(handle) + as f: + results = json.load(f) - self.assertEqual(result_from_stdout, result_from_file) + self.assertTrue(results) def test_codeclimate_export_exit_code_when_all_skipped(self): """ Test exporting codeclimate output into the filesystem when all @@ -501,8 +498,10 @@ def test_json_export_exit_code_when_all_skipped(self): standard_output, _, result = call_command( extract_cmd, cwd=self.test_dir, env=self.env) self.assertEqual(result, 0, "Parsing should not found any issue.") - self.assertEqual("[]\n", standard_output, - "Result should be an empty json array.") + + data = json.loads(standard_output) + self.assertEqual(data["version"], 1) + self.assertFalse(data["reports"]) def test_parse_exit_code(self): """ Test exit code of parsing. """ @@ -604,10 +603,10 @@ def test_invalid_baseline_file_extension(self): "CodeChecker", "parse", "-e", "baseline", "-o", out_file_path, test_project_notes] - out, _, result = call_command( + _, err, result = call_command( parse_cmd, cwd=self.test_dir, env=self.env) self.assertEqual(result, 1) - self.assertIn("Baseline files must have '.baseline' extensions", out) + self.assertIn("Baseline files must have '.baseline' extensions", err) # Try to create baseline file in a directory which exists. os.makedirs(output_path) @@ -615,10 +614,10 @@ def test_invalid_baseline_file_extension(self): "CodeChecker", "parse", "-e", "baseline", "-o", output_path, test_project_notes] - out, _, result = call_command( + _, err, result = call_command( parse_cmd, cwd=self.test_dir, env=self.env) self.assertEqual(result, 1) - self.assertIn("Please provide a file path instead of a directory", out) + self.assertIn("Please provide a file path instead of a directory", err) def test_custom_baseline_file(self): """ Test parse baseline custom output file. 
""" diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_error.output b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_error.output index 3051792ac4..da6525d596 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_error.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_error.output @@ -23,17 +23,33 @@ NORMAL#CodeChecker parse $OUTPUT$ Found 1 defect(s) in compiler_error.cpp -----==== Summary ====---- ---------------------------------- -Filename | Report count ---------------------------------- -compiler_error.cpp | 1 ---------------------------------- ------------------------ -Severity | Report count ------------------------ -CRITICAL | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +CRITICAL | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +----------------------------------------------------- +Checker name | Severity | Number of reports +----------------------------------------------------- +clang-diagnostic-error | CRITICAL | 1 +----------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +-------------------------------------- +File name | Number of reports +-------------------------------------- +compiler_error.cpp | 1 +-------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_error_disabled.output b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_error_disabled.output index 9752dae8a6..d90ab1c80c 
100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_error_disabled.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_error_disabled.output @@ -17,7 +17,9 @@ NORMAL#CodeChecker parse $OUTPUT$ [] - See --help and the user guide for further options about parsing and storing the reports. [] - ----=================---- -----==== Summary ====---- -----=================---- -Total number of reports: 0 +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 0 +Number of analyzer reports | 0 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_no_warn.output b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_no_warn.output index 579c8bdb20..6d31170cc5 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_no_warn.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_no_warn.output @@ -19,7 +19,9 @@ CHECK#CodeChecker check --build "make compiler_warning_simple" --output $OUTPUT$ [] - ----=================---- Found no defects in compiler_warning.cpp -----==== Summary ====---- -----=================---- -Total number of reports: 0 +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 0 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_simple.output b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_simple.output index a6c526c749..85f9c0867d 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_simple.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_simple.output @@ -24,17 
+24,33 @@ CHECK#CodeChecker check --build "make compiler_warning_simple" --output $OUTPUT$ Found 1 defect(s) in compiler_warning.cpp -----==== Summary ====---- ------------------------------------ -Filename | Report count ------------------------------------ -compiler_warning.cpp | 1 ------------------------------------ ------------------------ -Severity | Report count ------------------------ -MEDIUM | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +MEDIUM | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +--------------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------------- +clang-diagnostic-unused-variable | MEDIUM | 1 +--------------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +---------------------------------------- +File name | Number of reports +---------------------------------------- +compiler_warning.cpp | 1 +---------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_group.output b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_group.output index 3554e2749f..5f3218f7ed 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_group.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_group.output @@ -24,17 +24,33 @@ CHECK#CodeChecker check --build "make 
compiler_warning_wno_group" --output $OUTP Found 1 defect(s) in compiler_warning.cpp -----==== Summary ====---- ------------------------------------ -Filename | Report count ------------------------------------ -compiler_warning.cpp | 1 ------------------------------------ ------------------------ -Severity | Report count ------------------------ -MEDIUM | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +MEDIUM | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +--------------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------------- +clang-diagnostic-unused-variable | MEDIUM | 1 +--------------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +---------------------------------------- +File name | Number of reports +---------------------------------------- +compiler_warning.cpp | 1 +---------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_simple1.output b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_simple1.output index a55d4e7eb0..42cad1d0b2 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_simple1.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_simple1.output @@ -24,17 +24,33 @@ CHECK#CodeChecker check --build "make compiler_warning_wno_simple" --output $OUT Found 1 defect(s) in 
compiler_warning.cpp -----==== Summary ====---- ------------------------------------ -Filename | Report count ------------------------------------ -compiler_warning.cpp | 1 ------------------------------------ ------------------------ -Severity | Report count ------------------------ -MEDIUM | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +MEDIUM | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +--------------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------------- +clang-diagnostic-unused-variable | MEDIUM | 1 +--------------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +---------------------------------------- +File name | Number of reports +---------------------------------------- +compiler_warning.cpp | 1 +---------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_simple2.output b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_simple2.output index c1ed532ab6..1384ac094a 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_simple2.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wno_simple2.output @@ -19,7 +19,9 @@ CHECK#CodeChecker check --build "make compiler_warning_unused" --output $OUTPUT$ [] - ----=================---- Found no defects in compiler_warning.cpp -----==== Summary 
====---- -----=================---- -Total number of reports: 0 +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 0 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wunused.output b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wunused.output index 726f1dff65..8cd8758bcf 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wunused.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/compiler_warning_wunused.output @@ -19,7 +19,9 @@ CHECK#CodeChecker check --build "make compiler_warning_unused" --output $OUTPUT$ [] - ----=================---- Found no defects in compiler_warning.cpp -----==== Summary ====---- -----=================---- -Total number of reports: 0 +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 0 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_clang_tidy.output b/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_clang_tidy.output index fa6c7a9c93..94cdd24320 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_clang_tidy.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_clang_tidy.output @@ -41,17 +41,33 @@ std::memset(buf, 0, sizeof(BUFLEN)); // sizeof(42) ==> sizeof(int) Found 3 defect(s) in context_hash.cpp -----==== Summary ====---- -------------------------------- -Filename | Report count -------------------------------- -context_hash.cpp | 3 -------------------------------- ------------------------ -Severity | Report count ------------------------ 
-HIGH | 3 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 3 +---------------------------- ----=================---- -Total number of reports: 3 + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 3 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------------ +File name | Number of reports +------------------------------------ +context_hash.cpp | 3 +------------------------------------ +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 3 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_clangsa.output b/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_clangsa.output index 7e970a6da4..4f2e499ca8 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_clangsa.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_clangsa.output @@ -41,17 +41,33 @@ CHECK#CodeChecker check --build "make context_hash" --output $OUTPUT$ --quiet -- Found 3 defect(s) in context_hash.cpp -----==== Summary ====---- -------------------------------- -Filename | Report count -------------------------------- -context_hash.cpp | 3 -------------------------------- ------------------------ -Severity | Report count ------------------------ -LOW | 3 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports 
+---------------------------- +LOW | 3 +---------------------------- ----=================---- -Total number of reports: 3 + +----==== Checker Statistics ====---- +-------------------------------------------------- +Checker name | Severity | Number of reports +-------------------------------------------------- +deadcode.DeadStores | LOW | 3 +-------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------------ +File name | Number of reports +------------------------------------ +context_hash.cpp | 3 +------------------------------------ +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 3 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_v2_clang_tidy.output b/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_v2_clang_tidy.output index cc1aa36226..4f69d4ee55 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_v2_clang_tidy.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_v2_clang_tidy.output @@ -41,17 +41,33 @@ std::memset(buf, 0, sizeof(BUFLEN)); // sizeof(42) ==> sizeof(int) Found 3 defect(s) in context_hash.cpp -----==== Summary ====---- -------------------------------- -Filename | Report count -------------------------------- -context_hash.cpp | 3 -------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 3 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 3 +---------------------------- ----=================---- -Total number of reports: 3 + +----==== Checker Statistics ====---- 
+--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 3 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------------ +File name | Number of reports +------------------------------------ +context_hash.cpp | 3 +------------------------------------ +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 3 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_v2_clangsa.output b/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_v2_clangsa.output index 833e822cb1..2ef599e6ad 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_v2_clangsa.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/context_free_hash_v2_clangsa.output @@ -41,17 +41,33 @@ CHECK#CodeChecker check --build "make context_hash" --output $OUTPUT$ --quiet -- Found 3 defect(s) in context_hash.cpp -----==== Summary ====---- -------------------------------- -Filename | Report count -------------------------------- -context_hash.cpp | 3 -------------------------------- ------------------------ -Severity | Report count ------------------------ -LOW | 3 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +LOW | 3 +---------------------------- ----=================---- -Total number of reports: 3 + +----==== Checker Statistics ====---- +-------------------------------------------------- +Checker name | Severity | Number of reports 
+-------------------------------------------------- +deadcode.DeadStores | LOW | 3 +-------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------------ +File name | Number of reports +------------------------------------ +context_hash.cpp | 3 +------------------------------------ +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 3 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/context_sensitive_hash_clang.output b/analyzer/tests/functional/analyze_and_parse/test_files/context_sensitive_hash_clang.output index f3cf8afae0..eb666cc8f1 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/context_sensitive_hash_clang.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/context_sensitive_hash_clang.output @@ -41,17 +41,33 @@ CHECK#CodeChecker check --build "make context_hash" --output $OUTPUT$ --quiet -- Found 3 defect(s) in context_hash.cpp -----==== Summary ====---- -------------------------------- -Filename | Report count -------------------------------- -context_hash.cpp | 3 -------------------------------- ------------------------ -Severity | Report count ------------------------ -LOW | 3 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +LOW | 3 +---------------------------- ----=================---- -Total number of reports: 3 + +----==== Checker Statistics ====---- +-------------------------------------------------- +Checker name | Severity | Number of reports +-------------------------------------------------- +deadcode.DeadStores | LOW | 3 +-------------------------------------------------- +----=================---- + 
+----==== File Statistics ====---- +------------------------------------ +File name | Number of reports +------------------------------------ +context_hash.cpp | 3 +------------------------------------ +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 3 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/context_sensitive_hash_clang_tidy.output b/analyzer/tests/functional/analyze_and_parse/test_files/context_sensitive_hash_clang_tidy.output index 4ee0c35ac4..753724c25a 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/context_sensitive_hash_clang_tidy.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/context_sensitive_hash_clang_tidy.output @@ -41,17 +41,33 @@ std::memset(buf, 0, sizeof(BUFLEN)); // sizeof(42) ==> sizeof(int) Found 3 defect(s) in context_hash.cpp -----==== Summary ====---- -------------------------------- -Filename | Report count -------------------------------- -context_hash.cpp | 3 -------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 3 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 3 +---------------------------- ----=================---- -Total number of reports: 3 + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 3 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------------ +File name | Number of reports 
+------------------------------------ +context_hash.cpp | 3 +------------------------------------ +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 3 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/diagnostic_message_hash_clang_tidy.output b/analyzer/tests/functional/analyze_and_parse/test_files/diagnostic_message_hash_clang_tidy.output index b0009c2a59..47895c580d 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/diagnostic_message_hash_clang_tidy.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/diagnostic_message_hash_clang_tidy.output @@ -41,17 +41,33 @@ std::memset(buf, 0, sizeof(BUFLEN)); // sizeof(42) ==> sizeof(int) Found 3 defect(s) in context_hash.cpp -----==== Summary ====---- -------------------------------- -Filename | Report count -------------------------------- -context_hash.cpp | 3 -------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 3 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 3 +---------------------------- ----=================---- -Total number of reports: 3 + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 3 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------------ +File name | Number of reports +------------------------------------ +context_hash.cpp | 3 +------------------------------------ 
+----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 3 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/diagnostic_message_hash_clangsa.output b/analyzer/tests/functional/analyze_and_parse/test_files/diagnostic_message_hash_clangsa.output index 2bac064ee5..f3d6cf4183 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/diagnostic_message_hash_clangsa.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/diagnostic_message_hash_clangsa.output @@ -41,17 +41,33 @@ CHECK#CodeChecker check --build "make context_hash" --output $OUTPUT$ --quiet -- Found 3 defect(s) in context_hash.cpp -----==== Summary ====---- -------------------------------- -Filename | Report count -------------------------------- -context_hash.cpp | 3 -------------------------------- ------------------------ -Severity | Report count ------------------------ -LOW | 3 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +LOW | 3 +---------------------------- ----=================---- -Total number of reports: 3 + +----==== Checker Statistics ====---- +-------------------------------------------------- +Checker name | Severity | Number of reports +-------------------------------------------------- +deadcode.DeadStores | LOW | 3 +-------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------------ +File name | Number of reports +------------------------------------ +context_hash.cpp | 3 +------------------------------------ +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result 
files | 1 +Number of analyzer reports | 3 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/macros.output b/analyzer/tests/functional/analyze_and_parse/test_files/macros.output index f8fdb65823..473269a343 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/macros.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/macros.output @@ -13,17 +13,33 @@ NORMAL#CodeChecker parse $WORKSPACE$/test_files/macros/macros.plist --print-step Found 1 defect(s) in macros.cpp -----==== Summary ====---- -------------------------- -Filename | Report count -------------------------- -macros.cpp | 1 -------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +--------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------- +core.NullDereference | HIGH | 1 +--------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------ +File name | Number of reports +------------------------------ +macros.cpp | 1 +------------------------------ +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en1.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en1.output index 
20282f48ea..62e6c8c891 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en1.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en1.output @@ -24,17 +24,33 @@ CHECK#CodeChecker check --build "make multi_error" --output $OUTPUT$ --quiet --a Found 1 defect(s) in multi_error.cpp -----==== Summary ====---- ------------------------------- -Filename | Report count ------------------------------- -multi_error.cpp | 1 ------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +---------------------------------------------- +Checker name | Severity | Number of reports +---------------------------------------------- +core.DivideZero | HIGH | 1 +---------------------------------------------- +----=================---- + +----==== File Statistics ====---- +----------------------------------- +File name | Number of reports +----------------------------------- +multi_error.cpp | 1 +----------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en2.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en2.output index fa171c2858..ca4b87ed46 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en2.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en2.output @@ -24,17 +24,33 @@ CHECK#CodeChecker 
check --build "make multi_error" --output $OUTPUT$ --quiet --a Found 1 defect(s) in multi_error.cpp -----==== Summary ====---- ------------------------------- -Filename | Report count ------------------------------- -multi_error.cpp | 1 ------------------------------- ------------------------ -Severity | Report count ------------------------ -LOW | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +LOW | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +-------------------------------------------------- +Checker name | Severity | Number of reports +-------------------------------------------------- +deadcode.DeadStores | LOW | 1 +-------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +----------------------------------- +File name | Number of reports +----------------------------------- +multi_error.cpp | 1 +----------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en3.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en3.output index 932b63795d..635a826ccd 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en3.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.en3.output @@ -24,17 +24,33 @@ CHECK#CodeChecker check --build "make multi_error" --output $OUTPUT$ --quiet --a Found 1 defect(s) in multi_error.cpp -----==== Summary ====---- ------------------------------- -Filename | Report count ------------------------------- 
-multi_error.cpp | 1 ------------------------------- ------------------------ -Severity | Report count ------------------------ -LOW | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +LOW | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +-------------------------------------------------- +Checker name | Severity | Number of reports +-------------------------------------------------- +deadcode.DeadStores | LOW | 1 +-------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +----------------------------------- +File name | Number of reports +----------------------------------- +multi_error.cpp | 1 +----------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.output index 9b1c4709f6..7beb8dd564 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.output @@ -28,18 +28,35 @@ CHECK#CodeChecker check --build "make multi_error" --output $OUTPUT$ --quiet --a Found 2 defect(s) in multi_error.cpp -----==== Summary ====---- ------------------------------- -Filename | Report count ------------------------------- -multi_error.cpp | 2 ------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 -LOW | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- 
+Severity | Number of reports +---------------------------- +HIGH | 1 +LOW | 1 +---------------------------- ----=================---- -Total number of reports: 2 + +----==== Checker Statistics ====---- +-------------------------------------------------- +Checker name | Severity | Number of reports +-------------------------------------------------- +core.DivideZero | HIGH | 1 +deadcode.DeadStores | LOW | 1 +-------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +----------------------------------- +File name | Number of reports +----------------------------------- +multi_error.cpp | 2 +----------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 2 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.steps.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.steps.output index 8b87d1b894..cb687d4db8 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.steps.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error.steps.output @@ -36,18 +36,35 @@ CHECK#CodeChecker check --build "make multi_error" --output $OUTPUT$ --quiet --a Found 2 defect(s) in multi_error.cpp -----==== Summary ====---- ------------------------------- -Filename | Report count ------------------------------- -multi_error.cpp | 2 ------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 -LOW | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +LOW | 1 +---------------------------- ----=================---- -Total number of reports: 2 + 
+----==== Checker Statistics ====---- +-------------------------------------------------- +Checker name | Severity | Number of reports +-------------------------------------------------- +core.DivideZero | HIGH | 1 +deadcode.DeadStores | LOW | 1 +-------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +----------------------------------- +File name | Number of reports +----------------------------------- +multi_error.cpp | 2 +----------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 2 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_skipped.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_skipped.output index 6c5deda807..72f992ef04 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_skipped.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_skipped.output @@ -25,17 +25,33 @@ CHECK#CodeChecker check --build "make multi_error simple1" --output $OUTPUT$ --q Found 1 defect(s) in simple1.cpp -----==== Summary ====---- --------------------------- -Filename | Report count --------------------------- -simple1.cpp | 1 --------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +---------------------------------------------- +Checker name | Severity | Number of reports +---------------------------------------------- +core.DivideZero | 
HIGH | 1 +---------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------- +File name | Number of reports +------------------------------- +simple1.cpp | 1 +------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_skipped_in_cmd.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_skipped_in_cmd.output index 087bf8a677..6409af2062 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_skipped_in_cmd.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_skipped_in_cmd.output @@ -24,32 +24,48 @@ CHECK#CodeChecker check --build "make tidy_check multi_error_skipped_in_cmd" --o Found 1 defect(s) in multi_error_skipped_in_cmd.cpp -[HIGH] multi_error_skipped_in_cmd.HPP:5:3: suspicious usage of 'sizeof(K)'; did you mean 'K'? [bugprone-sizeof-expression] +[HIGH] multi_error_skipped_in_cmd.h:5:3: suspicious usage of 'sizeof(K)'; did you mean 'K'? [bugprone-sizeof-expression] sizeof(42); ^ -Found 1 defect(s) in multi_error_skipped_in_cmd.HPP +Found 1 defect(s) in multi_error_skipped_in_cmd.h -[HIGH] multi_error_skipped_in_cmd.h:5:3: suspicious usage of 'sizeof(K)'; did you mean 'K'? [bugprone-sizeof-expression] +[HIGH] multi_error_skipped_in_cmd.HPP:5:3: suspicious usage of 'sizeof(K)'; did you mean 'K'? 
[bugprone-sizeof-expression] sizeof(42); ^ -Found 1 defect(s) in multi_error_skipped_in_cmd.h +Found 1 defect(s) in multi_error_skipped_in_cmd.HPP -----==== Summary ====---- ---------------------------------------------- -Filename | Report count +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 3 +---------------------------- +----=================---- + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 3 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +-------------------------------------------------- +File name | Number of reports +-------------------------------------------------- +multi_error_skipped_in_cmd.HPP | 1 +multi_error_skipped_in_cmd.cpp | 1 +multi_error_skipped_in_cmd.h | 1 +-------------------------------------------------- +----=================---- + +----======== Summary ========---- --------------------------------------------- -multi_error_skipped_in_cmd.HPP | 1 -multi_error_skipped_in_cmd.cpp | 1 -multi_error_skipped_in_cmd.h | 1 +Number of processed analyzer result files | 1 +Number of analyzer reports | 3 --------------------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 3 ------------------------ -----=================---- -Total number of reports: 3 ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress.output index d4597a9568..381f7b93b1 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress.output +++ 
b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress.output @@ -24,17 +24,33 @@ CHECK#CodeChecker check --build "make multi_error_suppress" --output $OUTPUT$ -- Found 1 defect(s) in multi_error_suppress.cpp -----==== Summary ====---- ---------------------------------------- -Filename | Report count ---------------------------------------- -multi_error_suppress.cpp | 1 ---------------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +---------------------------------------------- +Checker name | Severity | Number of reports +---------------------------------------------- +core.DivideZero | HIGH | 1 +---------------------------------------------- +----=================---- + +----==== File Statistics ====---- +-------------------------------------------- +File name | Number of reports +-------------------------------------------- +multi_error_suppress.cpp | 1 +-------------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress_cstyle.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress_cstyle.output index ee884fb07f..ad4a77e34d 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress_cstyle.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress_cstyle.output @@ -24,17 +24,33 @@ 
CHECK#CodeChecker check --build "make multi_error_suppress_cstyle" --output $OUT Found 1 defect(s) in multi_error_suppress_cstyle.cpp -----==== Summary ====---- +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- +----=================---- + +----==== Checker Statistics ====---- ---------------------------------------------- -Filename | Report count +Checker name | Severity | Number of reports ---------------------------------------------- -multi_error_suppress_cstyle.cpp | 1 +core.DivideZero | HIGH | 1 ---------------------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ ----=================---- -Total number of reports: 1 + +----==== File Statistics ====---- +--------------------------------------------------- +File name | Number of reports +--------------------------------------------------- +multi_error_suppress_cstyle.cpp | 1 +--------------------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress_typo.output b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress_typo.output index 62d1caf975..e9e00d49f5 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress_typo.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multi_error_suppress_typo.output @@ -29,18 +29,35 @@ CHECK#CodeChecker check --build "make multi_error_suppress_typo" --output $OUTPU Found 2 defect(s) in multi_error_suppress_typo.cpp -----==== Summary ====---- 
--------------------------------------------- -Filename | Report count --------------------------------------------- -multi_error_suppress_typo.cpp | 2 --------------------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 -LOW | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +LOW | 1 +---------------------------- ----=================---- -Total number of reports: 2 + +----==== Checker Statistics ====---- +-------------------------------------------------- +Checker name | Severity | Number of reports +-------------------------------------------------- +core.DivideZero | HIGH | 1 +deadcode.DeadStores | LOW | 1 +-------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------------------------- +File name | Number of reports +------------------------------------------------- +multi_error_suppress_typo.cpp | 2 +------------------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 2 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/multiple_input.output b/analyzer/tests/functional/analyze_and_parse/test_files/multiple_input.output index 6030fac84c..a92e3cec8e 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/multiple_input.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/multiple_input.output @@ -13,19 +13,36 @@ int max(int a, int b) { // expected-warning{{Duplicate code detected}} Found 1 defect(s) in notes.cpp -----==== Summary ====---- -------------------------- -Filename | Report count -------------------------- 
-macros.cpp | 1 -notes.cpp | 1 -------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 -LOW | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +LOW | 1 +---------------------------- ----=================---- -Total number of reports: 2 + +----==== Checker Statistics ====---- +------------------------------------------------------- +Checker name | Severity | Number of reports +------------------------------------------------------- +core.NullDereference | HIGH | 1 +alpha.clone.CloneChecker | LOW | 1 +------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------ +File name | Number of reports +------------------------------ +macros.cpp | 1 +notes.cpp | 1 +------------------------------ +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 2 +Number of analyzer reports | 2 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/nofail.output b/analyzer/tests/functional/analyze_and_parse/test_files/nofail.output index 73b2977260..7c93c5ad30 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/nofail.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/nofail.output @@ -19,7 +19,9 @@ CHECK#CodeChecker check --build "make nofail" --output $OUTPUT$ --quiet --analyz [] - ----=================---- Found no defects in nofail.cpp -----==== Summary ====---- -----=================---- -Total number of reports: 0 +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 0 
+--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/nofail.steps.output b/analyzer/tests/functional/analyze_and_parse/test_files/nofail.steps.output index e3698c2e18..cb97756bab 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/nofail.steps.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/nofail.steps.output @@ -19,7 +19,9 @@ CHECK#CodeChecker check --build "make nofail" --output $OUTPUT$ --quiet --analyz [] - ----=================---- Found no defects in nofail.cpp -----==== Summary ====---- -----=================---- -Total number of reports: 0 +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 0 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/notes.output b/analyzer/tests/functional/analyze_and_parse/test_files/notes.output index 7932d87b50..22307377bd 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/notes.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/notes.output @@ -12,17 +12,33 @@ int max(int a, int b) { // expected-warning{{Duplicate code detected}} Found 1 defect(s) in notes.cpp -----==== Summary ====---- ------------------------- -Filename | Report count ------------------------- -notes.cpp | 1 ------------------------- ------------------------ -Severity | Report count ------------------------ -LOW | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +LOW | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +------------------------------------------------------- +Checker name | Severity | Number of reports 
+------------------------------------------------------- +alpha.clone.CloneChecker | LOW | 1 +------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +----------------------------- +File name | Number of reports +----------------------------- +notes.cpp | 1 +----------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/saargs_forward.noforward.output b/analyzer/tests/functional/analyze_and_parse/test_files/saargs_forward.noforward.output index 6621f55020..0a305e7b8d 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/saargs_forward.noforward.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/saargs_forward.noforward.output @@ -19,7 +19,9 @@ CHECK#CodeChecker check --build "make saargs_forward" --output $OUTPUT$ --quiet [] - ----=================---- Found no defects in saargs_forward.cpp -----==== Summary ====---- -----=================---- -Total number of reports: 0 +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 0 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/saargs_forward.output b/analyzer/tests/functional/analyze_and_parse/test_files/saargs_forward.output index cf0a8faddc..3901f4b29e 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/saargs_forward.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/saargs_forward.output @@ -24,17 +24,33 @@ CHECK#CodeChecker check --build "make saargs_forward" --output $OUTPUT$ --quiet Found 1 defect(s) 
in saargs_forward.cpp -----==== Summary ====---- ---------------------------------- -Filename | Report count ---------------------------------- -saargs_forward.cpp | 1 ---------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +--------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------- +core.NullDereference | HIGH | 1 +--------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +-------------------------------------- +File name | Number of reports +-------------------------------------- +saargs_forward.cpp | 1 +-------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/simple1.deduplication.output b/analyzer/tests/functional/analyze_and_parse/test_files/simple1.deduplication.output index 92c45e5303..3095094c9c 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/simple1.deduplication.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/simple1.deduplication.output @@ -25,17 +25,33 @@ CHECK#CodeChecker check --build "make deduplication" --output $OUTPUT$ --quiet - Found 1 defect(s) in simple1.cpp -----==== Summary ====---- --------------------------- -Filename | Report count --------------------------- -simple1.cpp | 1 
--------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +---------------------------------------------- +Checker name | Severity | Number of reports +---------------------------------------------- +core.DivideZero | HIGH | 1 +---------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------- +File name | Number of reports +------------------------------- +simple1.cpp | 1 +------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 2 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/simple1.output b/analyzer/tests/functional/analyze_and_parse/test_files/simple1.output index b634712fb2..a32fcbee79 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/simple1.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/simple1.output @@ -24,17 +24,33 @@ CHECK#CodeChecker check --build "make simple1" --output $OUTPUT$ --quiet --analy Found 1 defect(s) in simple1.cpp -----==== Summary ====---- --------------------------- -Filename | Report count --------------------------- -simple1.cpp | 1 --------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 
+---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +---------------------------------------------- +Checker name | Severity | Number of reports +---------------------------------------------- +core.DivideZero | HIGH | 1 +---------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------- +File name | Number of reports +------------------------------- +simple1.cpp | 1 +------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/simple1.steps.output b/analyzer/tests/functional/analyze_and_parse/test_files/simple1.steps.output index b82adcf824..ad1a7ca82d 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/simple1.steps.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/simple1.steps.output @@ -33,17 +33,33 @@ CHECK#CodeChecker check --build "make simple1" --output $OUTPUT$ --quiet --analy Found 1 defect(s) in simple1.cpp -----==== Summary ====---- --------------------------- -Filename | Report count --------------------------- -simple1.cpp | 1 --------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +---------------------------------------------- +Checker name | Severity | Number of reports +---------------------------------------------- +core.DivideZero 
| HIGH | 1 +---------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------- +File name | Number of reports +------------------------------- +simple1.cpp | 1 +------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/simple2.output b/analyzer/tests/functional/analyze_and_parse/test_files/simple2.output index 18550c9d3d..1dce8ceba9 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/simple2.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/simple2.output @@ -24,17 +24,33 @@ CHECK#CodeChecker check --build "make simple2" --output $OUTPUT$ --quiet --analy Found 1 defect(s) in simple2.cpp -----==== Summary ====---- --------------------------- -Filename | Report count --------------------------- -simple2.cpp | 1 --------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +---------------------------------------------- +Checker name | Severity | Number of reports +---------------------------------------------- +core.DivideZero | HIGH | 1 +---------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------- +File name | Number of reports +------------------------------- +simple2.cpp | 1 +------------------------------- +----=================---- + 
+----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/simple2.steps.output b/analyzer/tests/functional/analyze_and_parse/test_files/simple2.steps.output index f3658e1951..3fbc87065b 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/simple2.steps.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/simple2.steps.output @@ -33,17 +33,33 @@ CHECK#CodeChecker check --build "make simple2" --output $OUTPUT$ --quiet --analy Found 1 defect(s) in simple2.cpp -----==== Summary ====---- --------------------------- -Filename | Report count --------------------------- -simple2.cpp | 1 --------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +---------------------------------------------- +Checker name | Severity | Number of reports +---------------------------------------------- +core.DivideZero | HIGH | 1 +---------------------------------------------- +----=================---- + +----==== File Statistics ====---- +------------------------------- +File name | Number of reports +------------------------------- +simple2.cpp | 1 +------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git 
a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments.output b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments.output index 869f11aabf..39f76537ed 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments.output @@ -29,17 +29,33 @@ CHECK#CodeChecker check --build "make source_code_comments" --output $OUTPUT$ -- Found 2 defect(s) in source_code_comments.cpp -----==== Summary ====---- ---------------------------------------- -Filename | Report count ---------------------------------------- -source_code_comments.cpp | 2 ---------------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 2 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 2 +---------------------------- ----=================---- -Total number of reports: 2 + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 2 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +-------------------------------------------- +File name | Number of reports +-------------------------------------------- +source_code_comments.cpp | 2 +-------------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 2 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_all.output 
b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_all.output index 8466e133be..2a59853c90 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_all.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_all.output @@ -44,17 +44,33 @@ CHECK#CodeChecker check --build "make source_code_comments" --output $OUTPUT$ -- Found 5 defect(s) in source_code_comments.cpp -----==== Summary ====---- ---------------------------------------- -Filename | Report count ---------------------------------------- -source_code_comments.cpp | 5 ---------------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 5 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 5 +---------------------------- ----=================---- -Total number of reports: 5 + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 5 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +-------------------------------------------- +File name | Number of reports +-------------------------------------------- +source_code_comments.cpp | 5 +-------------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 5 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_all_empty_filter.output 
b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_all_empty_filter.output index 2e37c4db09..c9a2c8bd49 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_all_empty_filter.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_all_empty_filter.output @@ -44,17 +44,33 @@ CHECK#CodeChecker check --build "make source_code_comments" --output $OUTPUT$ -- Found 5 defect(s) in source_code_comments.cpp -----==== Summary ====---- ---------------------------------------- -Filename | Report count ---------------------------------------- -source_code_comments.cpp | 5 ---------------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 5 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 5 +---------------------------- ----=================---- -Total number of reports: 5 + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 5 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +-------------------------------------------- +File name | Number of reports +-------------------------------------------- +source_code_comments.cpp | 5 +-------------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 5 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_confirmed.output 
b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_confirmed.output index a59612e4ba..785f76023e 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_confirmed.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_confirmed.output @@ -25,17 +25,33 @@ CHECK#CodeChecker check --build "make source_code_comments" --output $OUTPUT$ -- Found 1 defect(s) in source_code_comments.cpp -----==== Summary ====---- ---------------------------------------- -Filename | Report count ---------------------------------------- -source_code_comments.cpp | 1 ---------------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 1 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +-------------------------------------------- +File name | Number of reports +-------------------------------------------- +source_code_comments.cpp | 1 +-------------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_false_positive.output 
b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_false_positive.output index d1d4fa2d27..356134b499 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_false_positive.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/source_code_comments_false_positive.output @@ -30,17 +30,33 @@ CHECK#CodeChecker check --build "make source_code_comments" --output $OUTPUT$ -- Found 2 defect(s) in source_code_comments.cpp -----==== Summary ====---- ---------------------------------------- -Filename | Report count ---------------------------------------- -source_code_comments.cpp | 2 ---------------------------------------- ------------------------ -Severity | Report count ------------------------ -HIGH | 2 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 2 +---------------------------- ----=================---- -Total number of reports: 2 + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 2 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +-------------------------------------------- +File name | Number of reports +-------------------------------------------- +source_code_comments.cpp | 2 +-------------------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 2 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/tidy_alias.output 
b/analyzer/tests/functional/analyze_and_parse/test_files/tidy_alias.output index 0d71f4804c..1f04e77d3f 100644 --- a/analyzer/tests/functional/analyze_and_parse/test_files/tidy_alias.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/tidy_alias.output @@ -28,17 +28,34 @@ CHECK#CodeChecker check --build "make tidy_alias" --output $OUTPUT$ --quiet --an Found 2 defect(s) in tidy_alias.cpp -----==== Summary ====---- ------------------------------ -Filename | Report count ------------------------------ -tidy_alias.cpp | 2 ------------------------------ ------------------------ -Severity | Report count ------------------------ -STYLE | 2 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +STYLE | 2 +---------------------------- ----=================---- -Total number of reports: 2 + +----==== Checker Statistics ====---- +-------------------------------------------------------------------- +Checker name | Severity | Number of reports +-------------------------------------------------------------------- +cppcoreguidelines-avoid-magic-numbers | STYLE | 1 +readability-magic-numbers | STYLE | 1 +-------------------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +---------------------------------- +File name | Number of reports +---------------------------------- +tidy_alias.cpp | 2 +---------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 2 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/functional/analyze_and_parse/test_files/tidy_check.output b/analyzer/tests/functional/analyze_and_parse/test_files/tidy_check.output index b0aa11f148..880f74148f 100644 --- 
a/analyzer/tests/functional/analyze_and_parse/test_files/tidy_check.output +++ b/analyzer/tests/functional/analyze_and_parse/test_files/tidy_check.output @@ -24,17 +24,33 @@ CHECK#CodeChecker check --build "make tidy_check" --output $OUTPUT$ --quiet --an Found 1 defect(s) in tidy_check.cpp -----==== Summary ====---- ------------------------------ -Filename | Report count ------------------------------ -tidy_check.cpp | 1 ------------------------------ ------------------------ -Severity | Report count ------------------------ -HIGH | 1 ------------------------ +----==== Severity Statistics ====---- +---------------------------- +Severity | Number of reports +---------------------------- +HIGH | 1 +---------------------------- ----=================---- -Total number of reports: 1 + +----==== Checker Statistics ====---- +--------------------------------------------------------- +Checker name | Severity | Number of reports +--------------------------------------------------------- +bugprone-sizeof-expression | HIGH | 1 +--------------------------------------------------------- +----=================---- + +----==== File Statistics ====---- +---------------------------------- +File name | Number of reports +---------------------------------- +tidy_check.cpp | 1 +---------------------------------- +----=================---- + +----======== Summary ========---- +--------------------------------------------- +Number of processed analyzer result files | 1 +Number of analyzer reports | 1 +--------------------------------------------- ----=================---- diff --git a/analyzer/tests/unit/__init__.py b/analyzer/tests/unit/__init__.py index 5e173885e2..e9961e3287 100644 --- a/analyzer/tests/unit/__init__.py +++ b/analyzer/tests/unit/__init__.py @@ -17,6 +17,6 @@ PKG_ROOT = os.path.join(REPO_ROOT, 'build', 'CodeChecker') sys.path.append(REPO_ROOT) -sys.path.append(os.path.join(REPO_ROOT, 'tools', 'codechecker_report_hash')) -sys.path.append(os.path.join(REPO_ROOT, 
'analyzer', 'tools', - 'statistics_collector')) +sys.path.append(os.path.join( + REPO_ROOT, 'analyzer', 'tools', 'statistics_collector')) +sys.path.append(os.path.join(REPO_ROOT, 'tools', 'report-converter')) diff --git a/analyzer/tests/unit/test_log_parser.py b/analyzer/tests/unit/test_log_parser.py index 447b97f989..ee7185ff22 100644 --- a/analyzer/tests/unit/test_log_parser.py +++ b/analyzer/tests/unit/test_log_parser.py @@ -15,8 +15,9 @@ import tempfile import unittest +from codechecker_report_converter.util import load_json_or_empty + from codechecker_analyzer.buildlog import log_parser -from codechecker_common.util import load_json_or_empty from codechecker_common import skiplist_handler diff --git a/analyzer/tests/unit/test_remove_report_from_plist.py b/analyzer/tests/unit/test_remove_report_from_plist.py index 652cf2cec2..7a22dede0f 100644 --- a/analyzer/tests/unit/test_remove_report_from_plist.py +++ b/analyzer/tests/unit/test_remove_report_from_plist.py @@ -11,8 +11,11 @@ import os import unittest -from codechecker_common import skiplist_handler -from codechecker_common.plist_parser import remove_report_from_plist +from codechecker_report_converter.report import report_file, \ + reports as reports_handler + +from codechecker_common.skiplist_handler import SkipListHandler + OLD_PWD = None @@ -34,44 +37,43 @@ def teardown_module(): class TestRemoveReportFromPlist(unittest.TestCase): """ Test skipping header files. """ + def __test_skip_reports( + self, + plist_file_path: str, + expected_plist_file_path: str, + skip_handler: SkipListHandler + ): + """ Test skipping reports from a plist file. """ + reports = report_file.get_reports(plist_file_path) + reports = reports_handler.skip(reports, skip_handler=skip_handler) + + expected_reports = report_file.get_reports(expected_plist_file_path) + + self.assertEqual(reports, expected_reports) + def test_skip_x_header(self): """ Test skipping a header file. 
""" with open('skip_x_header.txt', encoding="utf-8", errors="ignore") as skip_file: - skip_handler = skiplist_handler.SkipListHandler(skip_file.read()) - - with open('x.plist', 'rb') as plist_data: - data = remove_report_from_plist(plist_data, skip_handler) + skip_handler = SkipListHandler(skip_file.read()) - with open('skip_x_header.expected.plist', 'rb') as plist_file: - expected = plist_file.read() - - self.assertEqual(data, expected) + self.__test_skip_reports( + 'x.plist', 'skip_x_header.expected.plist', skip_handler) def test_skip_all_header(self): """ Test skipping all header files. """ with open('skip_all_header.txt', encoding="utf-8", errors="ignore") as skip_file: - skip_handler = skiplist_handler.SkipListHandler(skip_file.read()) - - with open('x.plist', 'rb') as plist_data: - data = remove_report_from_plist(plist_data, skip_handler) - - with open('skip_all_header.expected.plist', 'rb') as plist_file: - expected = plist_file.read() + skip_handler = SkipListHandler(skip_file.read()) - self.assertEqual(data, expected) + self.__test_skip_reports( + 'x.plist', 'skip_all_header.expected.plist', skip_handler) def test_keep_only_empty(self): """ Test skipping all files except empty. 
""" with open('keep_only_empty.txt', encoding="utf-8", errors="ignore") as skip_file: - skip_handler = skiplist_handler.SkipListHandler(skip_file.read()) - - with open('x.plist', 'rb') as plist_data: - data = remove_report_from_plist(plist_data, skip_handler) - - with open('keep_only_empty.expected.plist', 'rb') as plist_file: - expected = plist_file.read() + skip_handler = SkipListHandler(skip_file.read()) - self.assertEqual(data, expected) + self.__test_skip_reports( + 'x.plist', 'keep_only_empty.expected.plist', skip_handler) diff --git a/analyzer/tests/unit/test_tidy_output_converter.py b/analyzer/tests/unit/test_tidy_output_converter.py deleted file mode 100644 index 725f10f841..0000000000 --- a/analyzer/tests/unit/test_tidy_output_converter.py +++ /dev/null @@ -1,425 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -""" -This module tests the correctness of the OutputParser and PListConverter, which -used in sequence transform a Clang Tidy output file to a plist file. 
-""" -import copy -import os -import unittest - -import io - -import codechecker_analyzer.analyzers.clangtidy.output_converter as \ - tidy_out_conv - -OLD_PWD = None - - -def setup_module(): - """Setup the test tidy reprs for the test classes in the module.""" - global OLD_PWD - OLD_PWD = os.getcwd() - os.chdir(os.path.join(os.path.dirname(__file__), 'tidy_output_test_files')) - - # tidy1.out Message/Note representation - tidy1_repr = [ - tidy_out_conv.Message( - os.path.abspath('files/test.cpp'), - 8, 12, - 'Division by zero', - 'clang-analyzer-core.DivideZero', - None, - [tidy_out_conv.Note( - os.path.abspath('files/test.cpp'), - 8, 12, - 'Division by zero')]), - tidy_out_conv.Message( - os.path.abspath('files/test.cpp'), - 8, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero') - ] - - # tidy2.out Message/Note representation - tidy2_repr = [ - tidy_out_conv.Message( - os.path.abspath('files/test2.cpp'), - 5, 7, - "unused variable 'y'", - 'clang-diagnostic-unused-variable'), - tidy_out_conv.Message( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'Division by zero', - 'clang-analyzer-core.DivideZero', - None, - [ - tidy_out_conv.Note( - os.path.abspath('files/test2.cpp'), - 9, 7, - "Left side of '||' is false"), - tidy_out_conv.Note( - os.path.abspath('files/test2.cpp'), - 9, 3, - 'Taking false branch'), - tidy_out_conv.Note( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'Division by zero') - ]), - tidy_out_conv.Message( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero'), - ] - - # tidy2_v6.out Message/Note representation - tidy2_v6_repr = [ - tidy_out_conv.Message( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'Division by zero', - 'clang-analyzer-core.DivideZero', - None, - [ - tidy_out_conv.Note( - os.path.abspath('files/test2.cpp'), - 9, 7, - "Left side of '||' is false"), - tidy_out_conv.Note( - os.path.abspath('files/test2.cpp'), - 9, 16, - "Assuming 
'x' is 0"), - tidy_out_conv.Note( - os.path.abspath('files/test2.cpp'), - 9, 3, - 'Taking false branch'), - tidy_out_conv.Note( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'Division by zero') - ]), - tidy_out_conv.Message( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero'), - ] - - # tidy3.out Message/Note representation - tidy3_repr = [ - tidy_out_conv.Message( - os.path.abspath('files/test3.cpp'), - 4, 12, - 'use nullptr', - 'modernize-use-nullptr', - [tidy_out_conv.Note( - os.path.abspath('files/test3.cpp'), - 4, 12, - 'nullptr')]), - tidy_out_conv.Message( - os.path.abspath('files/test3.hh'), - 6, 6, - "Dereference of null pointer (loaded from variable 'x')", - 'clang-analyzer-core.NullDereference', - None, - [ - tidy_out_conv.Note( - os.path.abspath('files/test3.cpp'), - 4, 3, - "'x' initialized to a null pointer value"), - tidy_out_conv.Note( - os.path.abspath('files/test3.cpp'), - 6, 11, - "Assuming 'argc' is > 3"), - tidy_out_conv.Note( - os.path.abspath('files/test3.cpp'), - 6, 3, - 'Taking true branch'), - tidy_out_conv.Note( - os.path.abspath('files/test3.cpp'), - 7, 9, - "Passing null pointer value via 1st parameter 'x'"), - tidy_out_conv.Note( - os.path.abspath('files/test3.cpp'), - 7, 5, - "Calling 'bar'"), - tidy_out_conv.Note( - os.path.abspath('files/test3.hh'), - 6, 6, - "Dereference of null pointer (loaded from variable 'x')") - ]) - ] - - # tidy5.out Message/Note representation - tidy5_repr = [ - tidy_out_conv.Message( - os.path.abspath('files/test4.cpp'), - 3, 26, - 'identifier after literal will be treated ' - 'as a reserved user-defined literal suffix in C++11', - 'clang-diagnostic-c++11-compat-reserved-user-defined-literal', - None, None), - tidy_out_conv.Message( - os.path.abspath('files/test4.cpp'), - 10, 12, - 'Division by zero', - 'clang-analyzer-core.DivideZero', - None, - [tidy_out_conv.Note( - os.path.abspath('files/test4.cpp'), - 10, 12, - 'Division by 
zero')]), - tidy_out_conv.Message( - os.path.abspath('files/test4.cpp'), - 10, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero') - ] - - # tidy5_v6.out Message/Note representation - tidy5_v6_repr = [ - tidy_out_conv.Message( - os.path.abspath('files/test4.cpp'), - 3, 26, - 'invalid suffix on literal; C++11 requires a space ' - 'between literal and identifier', - 'clang-diagnostic-reserved-user-defined-literal', - None, None), - tidy_out_conv.Message( - os.path.abspath('files/test4.cpp'), - 10, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero') - ] - - # tidy6.out Message/Note representation - tidy6_repr = [ - tidy_out_conv.Message( - os.path.abspath('files/test5.cpp'), - 10, 9, - 'no matching function for call to \'get_type\'', - 'clang-diagnostic-error', - None, - [ - tidy_out_conv.Note( - os.path.abspath('files/test5.cpp'), - 2, 18, - 'candidate template ignored: substitution failure ' - '[with T = int *]: type \'int *\' cannot be used prior to ' - '\'::\' because it has no members'), - tidy_out_conv.Note( - os.path.abspath('files/test5.cpp'), - 5, 6, - 'candidate template ignored: substitution failure ' - '[with T = int]: array size is negative'), - ] - )] - - TidyOutputParserTestCase.tidy1_repr = tidy1_repr - TidyOutputParserTestCase.tidy2_repr = tidy2_repr - TidyOutputParserTestCase.tidy2_v6_repr = tidy2_v6_repr - TidyOutputParserTestCase.tidy3_repr = tidy3_repr - TidyOutputParserTestCase.tidy5_repr = tidy5_repr - TidyOutputParserTestCase.tidy6_repr = tidy6_repr - TidyOutputParserTestCase.tidy5_v6_repr = tidy5_v6_repr - TidyPListConverterTestCase.tidy1_repr = tidy1_repr - TidyPListConverterTestCase.tidy2_repr = tidy2_repr - TidyPListConverterTestCase.tidy3_repr = tidy3_repr - - -def teardown_module(): - """Restore environment after tests have ran.""" - global OLD_PWD - os.chdir(OLD_PWD) - - -class TidyOutputParserTestCase(unittest.TestCase): - """ - Tests the output of the OutputParser, which converts a 
Clang Tidy output - file to zero or more tidy_output_converter.Message. - """ - - def setUp(self): - """Setup the OutputParser.""" - self.parser = tidy_out_conv.OutputParser() - - def test_absolute_path(self): - """Test for absolute paths in Messages.""" - for tfile in ['abs.out', 'tidy1.out']: - messages = self.parser.parse_messages_from_file(tfile) - self.assertNotEqual(len(messages), 0) - for message in messages: - self.assertTrue(os.path.isabs(message.path)) - - def test_empty1(self): - """Test an empty ClangTidy output file.""" - messages = self.parser.parse_messages_from_file('empty1.out') - self.assertEqual(messages, []) - - def test_empty2(self): - """Test a ClangTidy output file that only contains empty lines.""" - messages = self.parser.parse_messages_from_file('empty2.out') - self.assertEqual(messages, []) - - def test_tidy1(self): - """Test the generated Messages of tidy1.out ClangTidy output file.""" - messages = self.parser.parse_messages_from_file('tidy1.out') - self.assertEqual(len(messages), len(self.tidy1_repr)) - for message in messages: - self.assertIn(message, self.tidy1_repr) - - def test_tidy1_v6(self): - """Test the generated Messages of tidy1.out ClangTidy v6 output - file.""" - messages = self.parser.parse_messages_from_file('tidy1_v6.out') - self.assertEqual(len(messages), len(self.tidy1_repr)) - for message in messages: - self.assertIn(message, self.tidy1_repr) - - def test_tidy2(self): - """Test the generated Messages of tidy2.out ClangTidy output file.""" - messages = self.parser.parse_messages_from_file('tidy2.out') - self.assertEqual(len(messages), len(self.tidy2_repr)) - for message in messages: - self.assertIn(message, self.tidy2_repr) - - def test_tidy2_v6(self): - """Test the generated Messages of tidy2.out ClangTidy v6 output - file.""" - messages = self.parser.parse_messages_from_file('tidy2_v6.out') - self.assertEqual(len(messages), len(self.tidy2_v6_repr)) - for message in messages: - self.assertIn(message, 
self.tidy2_v6_repr) - - def test_tidy3(self): - """Test the generated Messages of tidy3.out ClangTidy output file.""" - messages = self.parser.parse_messages_from_file('tidy3.out') - self.assertEqual(len(messages), len(self.tidy3_repr)) - for message in messages: - self.assertIn(message, self.tidy3_repr) - - def test_tidy4(self): - """ - Test the generated Messages of tidy4.out ClangTidy output file. - This is an uncomplete file which is equal with tidy1.out except it's - missing the last two lines. - """ - messages = self.parser.parse_messages_from_file('tidy4.out') - self.assertEqual(len(messages), len(self.tidy1_repr)) - for message in messages: - self.assertIn(message, self.tidy1_repr) - - def test_tidy5(self): - """ - Test the grenerated Messages of tidy5.out ClangTidy output file. - This is an uncomplete file which is equal with tidy1.out except it's - missing the last two lines. - """ - messages = self.parser.parse_messages_from_file('tidy5.out') - for message in messages: - self.assertIn(message, self.tidy5_repr) - - def test_tidy5_v6(self): - """ - Test the grenerated Messages of tidy5_v6.out ClangTidy output file. - This is an uncomplete file which is equal with tidy1.out except it's - missing the last two lines. - """ - messages = self.parser.parse_messages_from_file('tidy5_v6.out') - for message in messages: - self.assertIn(message, self.tidy5_v6_repr) - - def test_tidy6(self): - """ - Test the generated Messages of tidy6.out ClangTidy output file. - """ - messages = self.parser.parse_messages_from_file('tidy6.out') - for message in messages: - self.assertIn(message, self.tidy6_repr) - - -class TidyPListConverterTestCase(unittest.TestCase): - """ - Test the output of the PListConverter, which converts Messages to plist - format. 
- """ - - def setUp(self): - """Setup the PListConverter.""" - self.plist_conv = tidy_out_conv.PListConverter() - - def test_empty(self): - """Test for empty Messages.""" - orig_plist = copy.deepcopy(self.plist_conv.plist) - - self.plist_conv.add_messages([]) - self.assertDictEqual(orig_plist, self.plist_conv.plist) - - output = io.BytesIO() - self.plist_conv.write(output) - - with open('empty.plist', 'rb') as pfile: - exp = pfile.read() - print(exp.decode('utf-8')) - print(output.getvalue().decode('utf-8')) - self.assertEqual(exp, output.getvalue()) - - output.close() - - def test_tidy1(self): - """Test for the tidy1.plist file.""" - self.plist_conv.add_messages(self.tidy1_repr) - - # use relative path for this test - self.plist_conv.plist['files'][0] = 'files/test.cpp' - - output = io.BytesIO() - self.plist_conv.write(output) - - with open('tidy1.plist', 'rb') as pfile: - exp = pfile.read() - self.assertEqual(exp, output.getvalue()) - - output.close() - - def test_tidy2(self): - """Test for the tidy2.plist file.""" - self.plist_conv.add_messages(self.tidy2_repr) - - # use relative path for this test - self.plist_conv.plist['files'][0] = 'files/test2.cpp' - - output = io.BytesIO() - self.plist_conv.write(output) - - with open('tidy2.plist', 'rb') as pfile: - exp = pfile.read() - self.assertEqual(exp, output.getvalue()) - - output.close() - - def test_tidy3(self): - """Test for the tidy3.plist file.""" - self.plist_conv.add_messages(self.tidy3_repr) - - # use relative path for this test - self.plist_conv.plist['files'][0] = 'files/test3.cpp' - self.plist_conv.plist['files'][1] = 'files/test3.hh' - - output = io.BytesIO() - self.plist_conv.write(output) - - with open('tidy3.plist', 'rb') as pfile: - exp = pfile.read() - self.assertEqual(exp, output.getvalue()) - - output.close() diff --git a/analyzer/tests/unit/tidy_output_test_files/empty.plist b/analyzer/tests/unit/tidy_output_test_files/empty.plist deleted file mode 100644 index 1c2afac6f6..0000000000 --- 
a/analyzer/tests/unit/tidy_output_test_files/empty.plist +++ /dev/null @@ -1,10 +0,0 @@ - - - - - diagnostics - - files - - - diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy1.plist b/analyzer/tests/unit/tidy_output_test_files/tidy1.plist deleted file mode 100644 index c7af19b172..0000000000 --- a/analyzer/tests/unit/tidy_output_test_files/tidy1.plist +++ /dev/null @@ -1,112 +0,0 @@ - - - - - diagnostics - - - category - clang - check_name - clang-analyzer-core.DivideZero - description - Division by zero - issue_hash_content_of_line_in_context - 79cc349883cacbd7d2a4301ddb79bc68 - location - - col - 12 - file - 0 - line - 8 - - path - - - depth - 0 - kind - event - location - - col - 12 - file - 0 - line - 8 - - message - Division by zero - - - depth - 0 - kind - event - location - - col - 12 - file - 0 - line - 8 - - message - Division by zero - - - type - clang-tidy - - - category - clang - check_name - clang-diagnostic-division-by-zero - description - remainder by zero is undefined - issue_hash_content_of_line_in_context - 9785900b66c87f89edcc9daaab59647a - location - - col - 12 - file - 0 - line - 8 - - path - - - depth - 0 - kind - event - location - - col - 12 - file - 0 - line - 8 - - message - remainder by zero is undefined - - - type - clang-tidy - - - files - - files/test.cpp - - - diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy2.plist b/analyzer/tests/unit/tidy_output_test_files/tidy2.plist deleted file mode 100644 index bf2c7dcb46..0000000000 --- a/analyzer/tests/unit/tidy_output_test_files/tidy2.plist +++ /dev/null @@ -1,274 +0,0 @@ - - - - - diagnostics - - - category - clang - check_name - clang-diagnostic-unused-variable - description - unused variable 'y' - issue_hash_content_of_line_in_context - 0afd6d1f5bf7f1856d9f01e4ac92534e - location - - col - 7 - file - 0 - line - 5 - - path - - - depth - 0 - kind - event - location - - col - 7 - file - 0 - line - 5 - - message - unused variable 'y' - - - type - clang-tidy - - - 
category - clang - check_name - clang-analyzer-core.DivideZero - description - Division by zero - issue_hash_content_of_line_in_context - 7cc9ef198741b4dff762b1a8578b4546 - location - - col - 12 - file - 0 - line - 13 - - path - - - depth - 0 - kind - event - location - - col - 7 - file - 0 - line - 9 - - message - Left side of '||' is false - - - depth - 0 - kind - event - location - - col - 3 - file - 0 - line - 9 - - message - Taking false branch - - - depth - 0 - kind - event - location - - col - 12 - file - 0 - line - 13 - - message - Division by zero - - - edges - - - end - - - col - 3 - file - 0 - line - 9 - - - col - 3 - file - 0 - line - 9 - - - start - - - col - 7 - file - 0 - line - 9 - - - col - 7 - file - 0 - line - 9 - - - - - end - - - col - 12 - file - 0 - line - 13 - - - col - 12 - file - 0 - line - 13 - - - start - - - col - 3 - file - 0 - line - 9 - - - col - 3 - file - 0 - line - 9 - - - - - kind - control - - - depth - 0 - kind - event - location - - col - 12 - file - 0 - line - 13 - - message - Division by zero - - - type - clang-tidy - - - category - clang - check_name - clang-diagnostic-division-by-zero - description - remainder by zero is undefined - issue_hash_content_of_line_in_context - cac9b3630bea66ab98dfefacc2a8f6d0 - location - - col - 12 - file - 0 - line - 13 - - path - - - depth - 0 - kind - event - location - - col - 12 - file - 0 - line - 13 - - message - remainder by zero is undefined - - - type - clang-tidy - - - files - - files/test2.cpp - - - diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy3.plist b/analyzer/tests/unit/tidy_output_test_files/tidy3.plist deleted file mode 100644 index e9753e0798..0000000000 --- a/analyzer/tests/unit/tidy_output_test_files/tidy3.plist +++ /dev/null @@ -1,422 +0,0 @@ - - - - - diagnostics - - - category - modernize - check_name - modernize-use-nullptr - description - use nullptr - issue_hash_content_of_line_in_context - 881bd0b5111814b3eaab59f4229eccba - location - - col - 12 - file 
- 0 - line - 4 - - path - - - depth - 0 - kind - event - location - - col - 12 - file - 0 - line - 4 - - message - nullptr (fixit) - - - depth - 0 - kind - event - location - - col - 12 - file - 0 - line - 4 - - message - use nullptr - - - type - clang-tidy - - - category - clang - check_name - clang-analyzer-core.NullDereference - description - Dereference of null pointer (loaded from variable 'x') - issue_hash_content_of_line_in_context - 42a599e2e32611711ec3ffce39cd61ce - location - - col - 6 - file - 1 - line - 6 - - path - - - depth - 0 - kind - event - location - - col - 3 - file - 0 - line - 4 - - message - 'x' initialized to a null pointer value - - - depth - 0 - kind - event - location - - col - 11 - file - 0 - line - 6 - - message - Assuming 'argc' is > 3 - - - depth - 0 - kind - event - location - - col - 3 - file - 0 - line - 6 - - message - Taking true branch - - - depth - 0 - kind - event - location - - col - 9 - file - 0 - line - 7 - - message - Passing null pointer value via 1st parameter 'x' - - - depth - 0 - kind - event - location - - col - 5 - file - 0 - line - 7 - - message - Calling 'bar' - - - depth - 0 - kind - event - location - - col - 6 - file - 1 - line - 6 - - message - Dereference of null pointer (loaded from variable 'x') - - - edges - - - end - - - col - 11 - file - 0 - line - 6 - - - col - 11 - file - 0 - line - 6 - - - start - - - col - 3 - file - 0 - line - 4 - - - col - 3 - file - 0 - line - 4 - - - - - end - - - col - 3 - file - 0 - line - 6 - - - col - 3 - file - 0 - line - 6 - - - start - - - col - 11 - file - 0 - line - 6 - - - col - 11 - file - 0 - line - 6 - - - - - end - - - col - 9 - file - 0 - line - 7 - - - col - 9 - file - 0 - line - 7 - - - start - - - col - 3 - file - 0 - line - 6 - - - col - 3 - file - 0 - line - 6 - - - - - end - - - col - 5 - file - 0 - line - 7 - - - col - 5 - file - 0 - line - 7 - - - start - - - col - 9 - file - 0 - line - 7 - - - col - 9 - file - 0 - line - 7 - - - - - end - - - col - 6 - file 
- 1 - line - 6 - - - col - 6 - file - 1 - line - 6 - - - start - - - col - 5 - file - 0 - line - 7 - - - col - 5 - file - 0 - line - 7 - - - - - kind - control - - - depth - 0 - kind - event - location - - col - 6 - file - 1 - line - 6 - - message - Dereference of null pointer (loaded from variable 'x') - - - type - clang-tidy - - - files - - files/test3.cpp - files/test3.hh - - - diff --git a/analyzer/tools/merge_clang_extdef_mappings/README.md b/analyzer/tools/merge_clang_extdef_mappings/README.md index 878fe2046f..a9206eeed9 100644 --- a/analyzer/tools/merge_clang_extdef_mappings/README.md +++ b/analyzer/tools/merge_clang_extdef_mappings/README.md @@ -17,7 +17,7 @@ tool into a global one. make venv source $PWD/venv/bin/activate -# Build and install plist-to-html package. +# Build and install merge-clang-extdef-mappings package. make package ``` diff --git a/analyzer/tools/statistics_collector/README.md b/analyzer/tools/statistics_collector/README.md index 2bffcecd33..0a533fcf04 100644 --- a/analyzer/tools/statistics_collector/README.md +++ b/analyzer/tools/statistics_collector/README.md @@ -11,7 +11,7 @@ can be parsed by statistics checkers. make venv source $PWD/venv/bin/activate -# Build and install plist-to-html package. +# Build and install post-process-stats package. make package ``` diff --git a/codechecker_common/checker_labels.py b/codechecker_common/checker_labels.py index ff644b9872..6093ceb222 100644 --- a/codechecker_common/checker_labels.py +++ b/codechecker_common/checker_labels.py @@ -1,8 +1,10 @@ import os from collections import defaultdict -from typing import Any, DefaultDict, Dict, Iterable, List, Optional, Set, \ - Tuple, Union, cast -from codechecker_common.util import load_json_or_empty + +from typing import Any, cast, DefaultDict, Dict, Iterable, List, Optional, \ + Set, Tuple, Union + +from codechecker_report_converter.util import load_json_or_empty # TODO: Most of the methods of this class get an optional analyzer name. 
If diff --git a/codechecker_common/cmd_config.py b/codechecker_common/cmd_config.py index a625bd0be0..fea12413b9 100644 --- a/codechecker_common/cmd_config.py +++ b/codechecker_common/cmd_config.py @@ -9,7 +9,8 @@ from typing import List -from codechecker_common.util import load_json_or_empty +from codechecker_report_converter.util import load_json_or_empty + from codechecker_common import logger LOG = logger.get_logger('system') diff --git a/codechecker_common/logger.py b/codechecker_common/logger.py index 36a7d60612..d1d733b68d 100644 --- a/codechecker_common/logger.py +++ b/codechecker_common/logger.py @@ -27,7 +27,7 @@ CMDLINE_LOG_LEVELS = ['info', 'debug_analyzer', 'debug'] -DEBUG_ANALYZER = logging.DEBUG_ANALYZER = 15 +DEBUG_ANALYZER = logging.DEBUG_ANALYZER = 15 # type: ignore logging.addLevelName(DEBUG_ANALYZER, 'DEBUG_ANALYZER') diff --git a/codechecker_common/output/json.py b/codechecker_common/output/json.py deleted file mode 100644 index c602d49568..0000000000 --- a/codechecker_common/output/json.py +++ /dev/null @@ -1,24 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- -"""Helper and converter functions for json output format.""" - -from typing import Dict -from codechecker_common.report import Report - - -def convert_to_parse(report: Report) -> Dict: - """Converts to a special json format for the parse command. - - This format is used by the parse command when the reports are printed - to the stdout in json format. 
- """ - ret = report.main - ret["path"] = report.bug_path - ret["files"] = [v for k, v in report.files.items()] - - return ret diff --git a/codechecker_common/plist_parser.py b/codechecker_common/plist_parser.py deleted file mode 100644 index b2d57bcc98..0000000000 --- a/codechecker_common/plist_parser.py +++ /dev/null @@ -1,375 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- -""" -Parse the plist output of an analyzer and convert it to a report for -further processing. - -With the newer clang releases more information is available in the plist files. - -* Before Clang v3.7: - - Checker name is misssing (tried to detect based on the description) - - Report hash is not avilable (generated based on the report path elemens - see report handling and plist parsing modules for more details. - -* Clang v3.7: - - Checker name is available in the plist - - Report hash is still missing (hash is generated as before) - -* After Clang v3.8: - - Checker name is available - - Report hash is available - -* Clang-tidy: - - No plist format is provided in the available releases (v3.9 and before) - - Checker name can be parsed from the output - - Report hash is generated based on the report path elements the same way as - for Clang versions before v3.7 - -""" -import importlib -import os -import sys -import traceback -import plistlib -from typing import List, Dict, Tuple -from xml.parsers.expat import ExpatError - -from codechecker_common.logger import get_logger -from codechecker_common.report import Report -from codechecker_report_hash.hash import get_report_hash, HashType - -LOG = get_logger('report') - - -class LXMLPlistEventHandler: - """ - Basic lxml event handler. 
- """ - def start(self, tag, attrib): - pass - - def end(self, tag): - pass - - def data(self, data): - pass - - def comment(self, text): - pass - - def close(self): - return "closed!" - - -class LXMLPlistParser(plistlib._PlistParser): - """ - Plist parser which uses the lxml library to parse XML data. - - The benefit of this library that this is faster than other libraries so it - will improve the performance of the plist parsing. - """ - def __init__(self, dict_type=dict): - # Since Python 3.9 plistlib._PlistParser.__init__ has changed: - # https://github.com/python/cpython/commit/ce81a925ef - # To be backward compatible with old interpreters we need to call this - # function based on conditions: - params = plistlib._PlistParser.__init__.__code__.co_varnames - if len(params) == 3 and "use_builtin_types" in params: - # Before 3.9 interpreter. - plistlib._PlistParser.__init__(self, True, dict_type) - else: - plistlib._PlistParser.__init__( # pylint: disable=E1120 - self, dict_type) - - self.event_handler = LXMLPlistEventHandler() - self.event_handler.start = self.handle_begin_element - self.event_handler.end = self.handle_end_element - self.event_handler.data = self.handle_data - - from lxml.etree import XMLParser - self.parser = XMLParser(target=self.event_handler) - - def parse(self, fileobj): - from lxml.etree import parse, XMLSyntaxError - - try: - parse(fileobj, self.parser) - except XMLSyntaxError as ex: - LOG.error("Invalid plist file '%s': %s", fileobj.name, ex) - return - - return self.root - - -def parse_plist(plist_file_obj): - """ - Read a .plist file. Return the unpacked root object (which usually is a - dictionary). - - Use 'lxml' library to read the given plist file if it is available, - otherwise use 'plistlib' library. 
- """ - try: - importlib.import_module('lxml') - parser = LXMLPlistParser() - return parser.parse(plist_file_obj) - except (ExpatError, TypeError, AttributeError) as err: - LOG.warning('Invalid plist file') - LOG.warning(err) - return - except ImportError: - LOG.debug("lxml library is not available. Use plistlib to parse plist " - "files.") - - try: - return plistlib.load(plist_file_obj) - except (ExpatError, TypeError, AttributeError, ValueError, - plistlib.InvalidFileException) as err: - LOG.warning('Invalid plist file') - LOG.warning(err) - return - - -def get_checker_name(diagnostic, path=""): - """ - Check if checker name is available in the report. - Checker name was not available in older clang versions before 3.7. - """ - checker_name = diagnostic.get('check_name') - if not checker_name: - LOG.warning("Check name wasn't found in the plist file '%s'. It is " - "available since 'Clang v3.7'.", path) - checker_name = "unknown" - return checker_name - - -def parse_plist_file(path: str, - allow_plist_update=True) \ - -> Tuple[Dict[int, str], List[Report]]: - """ - Parse the reports from a plist file. - One plist file can contain multiple reports. - """ - LOG.debug("Parsing plist: %s", path) - - reports = [] - source_files = {} - - try: - plist = None - with open(path, 'rb') as plist_file_obj: - plist = parse_plist(plist_file_obj) - - if not plist: - LOG.error("Failed to parse plist %s", path) - return {}, [] - - metadata = plist.get('metadata') - - mentioned_files = plist.get('files', []) - - # file index to filepath that bugpath events refer to - source_files = \ - {i: filepath for i, filepath in enumerate(mentioned_files)} - diag_changed = False - for diag in plist.get('diagnostics', []): - - available_keys = list(diag.keys()) - - main_section = {} - for key in available_keys: - # Skip path it is handled separately. 
- if key != 'path': - main_section.update({key: diag[key]}) - - # We need to extend information for plist files generated - # by older clang version (before 3.7). - main_section['check_name'] = get_checker_name(diag, path) - - report_hash = diag.get('issue_hash_content_of_line_in_context') - - if not report_hash: - file_path = os.path.join( - os.path.dirname(path), - mentioned_files[diag['location']['file']]) - - # Generate hash value if it is missing from the report. - report_hash = get_report_hash(diag, file_path, - HashType.PATH_SENSITIVE) - - main_section['issue_hash_content_of_line_in_context'] = \ - report_hash - - if 'issue_hash_content_of_line_in_context' not in diag: - # If the report hash was not in the plist, we set it in the - # diagnostic section for later update. - diag['issue_hash_content_of_line_in_context'] = report_hash - diag_changed = True - - bug_path_items = [item for item in diag['path']] - reports.append(Report(main_section, - bug_path_items, - source_files, - metadata)) - - if diag_changed and allow_plist_update: - # If the diagnostic section has changed we update the plist file. - # This way the client will always send a plist file where the - # report hash field is filled. - with open(path, 'wb') as plist_file: - plistlib.dump(plist, plist_file) - except IndexError as iex: - LOG.warning('Indexing error during processing plist file %s', path) - LOG.warning(type(iex)) - LOG.warning(repr(iex)) - _, _, exc_traceback = sys.exc_info() - traceback.print_tb(exc_traceback, limit=1, file=sys.stdout) - except Exception as ex: - LOG.warning('Error during processing reports from the plist file: %s', - path) - traceback.print_exc() - LOG.warning(type(ex)) - LOG.warning(ex) - finally: - return source_files, reports - - -def fids_in_range(rng): - """ - Get the file ids from a range. - """ - fids = [] - for r in rng: - for line in r: - fids.append(line['file']) - return fids - - -def fids_in_edge(edges): - """ - Get the file ids from an edge. 
- """ - fids = [] - for e in edges: - start = e['start'] - end = e['end'] - for line in start: - fids.append(line['file']) - for line in end: - fids.append(line['file']) - return fids - - -def transform_diag_element(element, file_ids_to_remove, new_file_ids): - """ - This function will update every file attribute of the given diagnostic - element. - On the first call it will get a diagnostic section dictionary and - recursively traverse all children of it. If the child element is a file - attribute it will update it by using the 'new_file_ids' dictionary. - - It will return False if one of the file attribute is in the removable file - list. Otherwise it will return True. - """ - if isinstance(element, dict): - for k, v in element.items(): - if k == 'file': - if v in file_ids_to_remove: - return False - else: - element['file'] = new_file_ids[v] - else: - if not transform_diag_element(v, file_ids_to_remove, - new_file_ids): - return False - elif isinstance(element, list) or isinstance(element, tuple): - for v in element: - if not transform_diag_element(v, file_ids_to_remove, new_file_ids): - return False - - return True - - -def get_kept_report_data(report_data, file_ids_to_remove): - """ - This function will iterate over the diagnostic section of the given - report data and returns the list of diagnostics and files which should - be kept. 
- """ - kept_files = [] - new_file_ids = {} - all_files = report_data['files'] - for idx, file in enumerate(all_files): - if idx not in file_ids_to_remove: - new_file_ids[idx] = len(kept_files) - kept_files.append(file) - - kept_diagnostics = [] - for diag in report_data['diagnostics']: - if transform_diag_element(diag, file_ids_to_remove, new_file_ids): - kept_diagnostics.append(diag) - - return kept_diagnostics, kept_files - - -def remove_report_from_plist(plist_file_obj, skip_handler): - """ - Parse the original plist content provided by the analyzer - and return a new plist content where reports were removed - if they should be skipped. If the remove failed for some reason None - will be returned. - - WARN !!!! - If the 'files' array in the plist is modified all of the - diagnostic section (control, event ...) nodes should be - re indexed to use the proper file array indexes!!! - """ - report_data = None - try: - report_data = parse_plist(plist_file_obj) - if not report_data: - return - except Exception as ex: - LOG.error("Plist parsing error") - LOG.error(ex) - return - - file_ids_to_remove = [] - - try: - for i, f in enumerate(report_data['files']): - if skip_handler.should_skip(f): - file_ids_to_remove.append(i) - - kept_diagnostics, kept_files = get_kept_report_data(report_data, - file_ids_to_remove) - report_data['diagnostics'] = kept_diagnostics - report_data['files'] = kept_files if kept_diagnostics else [] - - return plistlib.dumps(report_data) - - except KeyError: - LOG.error("Failed to modify plist content, " - "keeping the original version") - return - - -def skip_report_from_plist(plist_file, skip_handler): - """ - Rewrites the provided plist file where reports - were removed if they should be skipped. 
- """ - new_plist_content = None - with open(plist_file, 'rb') as plist: - new_plist_content = remove_report_from_plist(plist, - skip_handler) - if new_plist_content: - with open(plist_file, 'wb') as plist: - plist.write(new_plist_content) - else: - LOG.error("Failed to skip report from the plist file: %s", plist_file) diff --git a/codechecker_common/report.py b/codechecker_common/report.py deleted file mode 100644 index 0875637f03..0000000000 --- a/codechecker_common/report.py +++ /dev/null @@ -1,189 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- -""" -Parsers for the analyzer output formats (plist ...) should create this -type of Report which will be printed or stored. -""" - -from typing import Dict, List -import json -import os - -from codechecker_common.logger import get_logger -from codechecker_common.source_code_comment_handler import \ - SourceCodeCommentHandler, SpellException -from codechecker_common import util - -LOG = get_logger('report') - - -class Report: - """Represents an analyzer report. - - The main section is where the analyzer reported the issue. - The bugpath contains additional locations (and messages) which lead to - the main section. - """ - - def __init__(self, - main: Dict, - bugpath: Dict, - files: Dict[int, str], - metadata: Dict[str, str]): - - # Dictionary containing checker name, report hash, - # main report position, report message ... - self.__main = main - - # Dictionary containing bug path related data - # with control, event ... sections. - self.__bug_path = bugpath - - # Dictionary fileid to filepath that bugpath events refer to - self.__files = files - - # Can contain the source line where the main section was reported. 
- self.__source_line = "" - - # Dictionary containing metadata information (analyzer name, version). - self.__metadata = metadata - - self.__source_code_comments = None - self.__sc_handler = SourceCodeCommentHandler() - - @property - def line(self) -> int: - return self.__main['location']['line'] - - @property - def col(self) -> int: - return self.__main['location']['col'] - - @property - def description(self) -> str: - return self.__main['description'] - - @property - def main(self) -> Dict: - return self.__main - - @property - def report_hash(self) -> str: - return self.__main['issue_hash_content_of_line_in_context'] - - @property - def check_name(self) -> str: - return self.__main['check_name'] - - @property - def bug_path(self) -> Dict: - return self.__bug_path - - @property - def notes(self) -> List[str]: - return self.__main.get('notes', []) - - @property - def macro_expansions(self) -> List[str]: - return self.__main.get('macro_expansions', []) - - @property - def files(self) -> Dict[int, str]: - return self.__files - - @property - def file_path(self) -> str: - """ Get the filepath for the main report location. """ - return self.files[self.__main['location']['file']] - - @property - def source_line(self) -> str: - """Get the source line for the main location. - - If the source line is already set returns that - if not tries to read it from the disk. - """ - if not self.__source_line: - self.__source_line = util.get_line(self.file_path, self.line) - - return self.__source_line - - @source_line.setter - def source_line(self, sl): - self.__source_line = sl - - @property - def metadata(self) -> Dict: - return self.__metadata - - @property - def source_code_comments(self): - """ - Get source code comments for the report. - It will read the source file only once. 
- """ - if self.__source_code_comments is not None: - return self.__source_code_comments - - self.__source_code_comments = [] - - if not os.path.exists(self.file_path): - return self.__source_code_comments - - with open(self.file_path, encoding='utf-8', errors='ignore') as sf: - try: - self.__source_code_comments = \ - self.__sc_handler.filter_source_line_comments( - sf, self.line, self.check_name) - except SpellException as ex: - LOG.warning("%s contains %s", os.path.basename(self.file_path), - str(ex)) - - if len(self.__source_code_comments) == 1: - LOG.debug("Report %s is suppressed in code. file: %s Line %s", - self.report_hash, self.file_path, self.line) - elif len(self.__source_code_comments) > 1: - LOG.warning( - "Multiple source code comment can be found " - "for '%s' checker in '%s' at line %s. " - "This bug will not be suppressed!", - self.check_name, self.file_path, self.line) - - return self.__source_code_comments - - def check_source_code_comments(self, comment_types: List[str]): - """ - True if it doesn't have a source code comment or if every comments have - specified comment types. - """ - if not self.source_code_comments: - return True - - return all(c['status'] in comment_types - for c in self.source_code_comments) - - def __str__(self): - msg = json.dumps(self.__main, sort_keys=True, indent=2) - msg += str(self.__files) - return msg - - def trim_path_prefixes(self, path_prefixes=None): - """ Removes the longest matching leading path from the file paths. """ - self.__files = {i: util.trim_path_prefixes(file_path, path_prefixes) - for i, file_path in self.__files.items()} - - def to_json(self): - """Converts to a special json format. 
- - This format is used by the parse command when the reports are printed - to the stdout in json format.""" - ret = self.__main - ret["path"] = self.bug_path - ret["files"] = self.files.values() - - return ret diff --git a/codechecker_common/requirements_py/dev/requirements.txt b/codechecker_common/requirements_py/dev/requirements.txt index d8111d9143..29c533892b 100644 --- a/codechecker_common/requirements_py/dev/requirements.txt +++ b/codechecker_common/requirements_py/dev/requirements.txt @@ -1,3 +1,5 @@ nose==1.3.7 portalocker==2.2.1 coverage==5.5.0 +mypy==0.812 +mypy_extensions==0.4.3 diff --git a/codechecker_common/singleton.py b/codechecker_common/singleton.py index ab21047c03..44a39ae21a 100644 --- a/codechecker_common/singleton.py +++ b/codechecker_common/singleton.py @@ -1,7 +1,18 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +from typing import Dict + + class Singleton(type): """ Helper type to create singleton classes. """ - _instances = {} + _instances: Dict = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: diff --git a/codechecker_common/tests/Makefile b/codechecker_common/tests/Makefile new file mode 100644 index 0000000000..676ae18bf8 --- /dev/null +++ b/codechecker_common/tests/Makefile @@ -0,0 +1,16 @@ +# Environment variables to run tests. + +CURRENT_DIR = ${CURDIR} +# Root of the repository. +REPO_ROOT ?= $(CURRENT_DIR)/../.. + +# Nose test runner configuration options. 
+NOSECFG = --config .noserc + +MYPY_CMD = mypy --ignore-missing-imports $(REPO_ROOT)/codechecker_common + +mypy: + $(MYPY_CMD) + +mypy_in_env: venv_dev + $(ACTIVATE_DEV_VENV) && $(MYPY_CMD) diff --git a/codechecker_common/tests/unit/Makefile b/codechecker_common/tests/unit/Makefile deleted file mode 100644 index 5a893c8454..0000000000 --- a/codechecker_common/tests/unit/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -# Environment variables to run tests. - -CURRENT_DIR = ${CURDIR} -# Root of the repository. -REPO_ROOT ?= REPO_ROOT=$(CURRENT_DIR)/../../../ - -# Nose test runner configuration options. -NOSECFG = --config .noserc - -test_in_env: test_unit_in_env - -test: test_unit - -UNIT_TEST_CMD = $(REPO_ROOT) nosetests $(NOSECFG) . -UNIT_TEST_COV_CMD = $(REPO_ROOT) coverage run -m nose $(NOSECFG) . && coverage report && coverage html - -test_unit: - $(UNIT_TEST_CMD) - -test_unit_cov: - $(UNIT_TEST_COV_CMD) - -test_unit_in_env: venv_dev - $(ACTIVATE_DEV_VENV) && $(UNIT_TEST_CMD) - -test_unit_cov_in_env: venv_dev - $(ACTIVATE_DEV_VENV) && $(UNIT_TEST_COV_CMD) diff --git a/codechecker_common/tests/unit/test_gerrit_converter.py b/codechecker_common/tests/unit/test_gerrit_converter.py deleted file mode 100644 index 419a6d583a..0000000000 --- a/codechecker_common/tests/unit/test_gerrit_converter.py +++ /dev/null @@ -1,314 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. 
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -"""Tests for gerrit output conversion.""" - - -import os -import unittest -import tempfile -import json - -from codechecker_common.output import gerrit -from codechecker_common.report import Report - - -class TestReportToGerrit(unittest.TestCase): - @classmethod - def setup_class(cls): - class CheckerLabels: - def severity(self, checker): - if checker == 'my_checker': - return 'LOW' - - # This assertion warns when a new test-case in the future - # intends to query the severity of another checker. The - # original behavior of this function is to return the - # defult 'UNSPECIFIED' value by defult when the severity is - # not provided in the config file. - assert False, \ - 'Currently no test-case quieries other labels for ' \ - 'other checkers.' - - cls.checker_labels = CheckerLabels() - - def test_report_to_gerrit_conversion(self): - """Conversion without directory path just the source filename.""" - - main = { - "location": {"file": 0, "line": 3, "col": 3}, - "description": "some description", - "check_name": "my_checker", - "issue_hash_content_of_line_in_context": "dummy_hash", - "notes": [], - "macro_expansions": [], - } - bugpath = {} - files = {0: "test_files/main.cpp"} - metadata = {} - - report_to_convert = Report(main, bugpath, files, metadata) - - got = gerrit.convert([report_to_convert], self.checker_labels) - expected = { - "tag": "jenkins", - "message": "CodeChecker found 1 issue(s) in the code.", - "labels": {"Code-Review": -1, "Verified": -1}, - "comments": { - "test_files/main.cpp": [ - { - "range": { - "start_line": 3, - "start_character": 3, - "end_line": 3, - "end_character": 3, - }, - "message": "[LOW] test_files/main.cpp:3:3: " - "some description [my_checker]\n sizeof(42);\n", - } - ] - }, - } - self.assertEquals(got, expected) - - def test_report_to_gerrit_conversion_abs_filepath(self): - """Conversion 
report with absolute filepath""" - - main = { - "location": { - "file": 0, - "line": 3, - "col": 3, - }, - "description": "some description", - "check_name": "my_checker", - "issue_hash_content_of_line_in_context": "dummy_hash", - "notes": [], - "macro_expansions": [], - } - - bugpath = {} - metadata = {} - - file_path = os.path.abspath("test_files/main.cpp") - files = {0: file_path} - - report_to_convert = Report(main, bugpath, files, metadata) - - got = gerrit.convert([report_to_convert], self.checker_labels) - expected = { - "tag": "jenkins", - "message": "CodeChecker found 1 issue(s) in the code.", - "labels": {"Code-Review": -1, "Verified": -1}, - "comments": { - file_path: [ - { - "range": { - "start_line": 3, - "start_character": 3, - "end_line": 3, - "end_character": 3, - }, - "message": "[LOW] {0}:3:3: some description " - "[my_checker]\n sizeof(42);\n".format( - file_path), - } - ] - }, - } - self.assertEquals(got, expected) - - def test_report_to_gerrit_conversion_repo_dir(self): - """Conversion report with absolute filepath and CC_REPO_DIR env""" - - main = { - "location": { - "file": 0, - "line": 3, - "col": 3, - }, - "description": "some description", - "check_name": "my_checker", - "issue_hash_content_of_line_in_context": "dummy_hash", - "notes": [], - "macro_expansions": [], - } - bugpath = {} - metadata = {} - - file_path = os.path.abspath("test_files/main.cpp") - files = {0: file_path} - - report_to_convert = Report(main, bugpath, files, metadata) - os.environ["CC_REPO_DIR"] = os.path.dirname(os.path.realpath(__file__)) - - got = gerrit.convert([report_to_convert], self.checker_labels) - os.environ.pop("CC_REPO_DIR") - - expected = { - "tag": "jenkins", - "message": "CodeChecker found 1 issue(s) in the code.", - "labels": {"Code-Review": -1, "Verified": -1}, - "comments": { - "test_files/main.cpp": [ - { - "range": { - "start_line": 3, - "start_character": 3, - "end_line": 3, - "end_character": 3, - }, - "message": "[LOW] test_files/main.cpp:3:3: 
" - "some description [my_checker]\n" - " sizeof(42);\n".format( - file_path), - } - ] - }, - } - self.assertEquals(got, expected) - - def test_report_to_gerrit_conversion_report_url(self): - """Conversion report with absolute filepath and CC_REPORT_URL env""" - - main = { - "location": { - "file": 0, - "line": 3, - "col": 3, - }, - "description": "some description", - "check_name": "my_checker", - "issue_hash_content_of_line_in_context": "dummy_hash", - "notes": [], - "macro_expansions": [], - } - bugpath = {} - files = {0: "test_files/main.cpp"} - metadata = {} - - report_to_convert = Report(main, bugpath, files, metadata) - os.environ["CC_REPORT_URL"] = "localhost:8080/index.html" - got = gerrit.convert([report_to_convert], self.checker_labels) - - # Remove environment variable not to influence the other tests. - os.environ.pop("CC_REPORT_URL") - - expected = { - "tag": "jenkins", - "message": "CodeChecker found 1 issue(s) in the code. " - "See: 'localhost:8080/index.html'", - "labels": {"Code-Review": -1, "Verified": -1}, - "comments": { - "test_files/main.cpp": [ - { - "range": { - "start_line": 3, - "start_character": 3, - "end_line": 3, - "end_character": 3, - }, - "message": "[LOW] test_files/main.cpp:3:3: " - "some description [my_checker]\n sizeof(42);\n", - } - ] - }, - } - self.assertEquals(got, expected) - - def test_report_to_gerrit_conversion_filter_changed_files(self): - """Conversion report with changed files filter. - - Reports from the lib.cpp file should be not in the converted list. - """ - - reports_to_convert = [] - - # Empty for all reports. 
- bugpath = {} - metadata = {} - - main = { - "location": { - "file": 0, - "line": 3, - "col": 3, - }, - "description": "some description", - "check_name": "my_checker", - "issue_hash_content_of_line_in_context": "dummy_hash", - "notes": [], - "macro_expansions": [], - } - - main_file_path = os.path.abspath("test_files/main.cpp") - files = {0: main_file_path} - - main_report = Report(main, bugpath, files, metadata) - reports_to_convert.append(main_report) - reports_to_convert.append(main_report) - - main = { - "location": { - "file": 0, - "line": 3, - "col": 3, - }, - "description": "some description", - "check_name": "my_checker", - "issue_hash_content_of_line_in_context": "dummy_hash", - "notes": [], - "macro_expansions": [], - } - - lib_file_path = os.path.abspath("test_files/lib.cpp") - files = {0: lib_file_path} - - lib_report = Report(main, bugpath, files, metadata) - reports_to_convert.append(lib_report) - - dummy_changed_files_content = { - "/COMMIT_MSG": { - "status": "A", - "lines_inserted": 1, - "size_delta": 1, - "size": 100, - }, - "main.cpp": { - "lines_inserted": 1, - "lines_deleted": 1, - "size_delta": 1, - "size": 100, - } - } - fd, changed_files_file = tempfile.mkstemp() - os.write(fd, json.dumps(dummy_changed_files_content).encode("utf-8")) - os.close(fd) - - os.environ["CC_CHANGED_FILES"] = changed_files_file - - got = gerrit.convert(reports_to_convert, self.checker_labels) - os.remove(os.environ["CC_CHANGED_FILES"]) - - # Remove environment variable not to influence the other tests. - os.environ.pop("CC_CHANGED_FILES") - - review_comments = got["comments"] - - # Reports were found in two source files. - self.assertEquals(len(review_comments), 1) - - # Two reports in the main.cpp file. 
- self.assertEquals(len(review_comments[main_file_path]), 2) - - self.assertIn( - "CodeChecker found 3 issue(s) in the code.", got["message"]) - self.assertIn( - "following reports are introduced in files which are not changed", - got["message"]) - self.assertIn(lib_file_path, got["message"]) diff --git a/codechecker_common/util.py b/codechecker_common/util.py index 6eff8f8ae4..b59ab6b40d 100644 --- a/codechecker_common/util.py +++ b/codechecker_common/util.py @@ -11,10 +11,6 @@ import itertools -import json -import os -import portalocker -from typing import List from codechecker_common.logger import get_logger @@ -34,138 +30,6 @@ def arg_match(options, args): return matched_args -def get_linef(fp, line_no): - """'fp' should be (readable) file object. - Return the line content at line_no or an empty line - if there is less lines than line_no. - """ - fp.seek(0) - for line in fp: - line_no -= 1 - if line_no == 0: - return line - return '' - - -def get_line(file_name, line_no, errors='ignore'): - """ - Return the given line from the file. If line_no is larger than the number - of lines in the file then empty string returns. - If the file can't be opened for read, the function also returns empty - string. - - Try to encode every file as utf-8 to read the line content do not depend - on the platform settings. By default locale.getpreferredencoding() is used - which depends on the platform. - - Changing the encoding error handling can influence the hash content! - """ - try: - with open(file_name, mode='r', - encoding='utf-8', - errors=errors) as source_file: - for line in source_file: - line_no -= 1 - if line_no == 0: - return line - return '' - except IOError: - LOG.error("Failed to open file %s", file_name) - return '' - - -def load_json_or_empty(path, default=None, kind=None, lock=False): - """ - Load the contents of the given file as a JSON and return it's value, - or default if the file can't be loaded. 
- """ - - ret = default - try: - with open(path, 'r', encoding='utf-8', errors='ignore') as handle: - if lock: - portalocker.lock(handle, portalocker.LOCK_SH) - - ret = json.loads(handle.read()) - - if lock: - portalocker.unlock(handle) - except IOError as ex: - LOG.warning("Failed to open %s file: %s", - kind if kind else 'json', - path) - LOG.warning(ex) - except OSError as ex: - LOG.warning("Failed to open %s file: %s", - kind if kind else 'json', - path) - LOG.warning(ex) - except ValueError as ex: - LOG.warning("'%s' is not a valid %s file.", - kind if kind else 'json', - path) - LOG.warning(ex) - except TypeError as ex: - LOG.warning('Failed to process json file: %s', path) - LOG.warning(ex) - - return ret - - -def get_last_mod_time(file_path): - """ - Return the last modification time of a file. - """ - try: - return os.stat(file_path).st_mtime - except OSError as oerr: - LOG.debug(oerr) - LOG.debug("File is missing") - return None - - -def trim_path_prefixes(path: str, prefixes: List[str]) -> bool: - """ - Removes the longest matching leading path from the file path. - """ - - # If no prefixes are specified. - if not prefixes: - return path - - # Find the longest matching prefix in the path. - longest_matching_prefix = None - for prefix in prefixes: - if not prefix.endswith('/'): - prefix += '/' - - if path.startswith(prefix) and (not longest_matching_prefix or - longest_matching_prefix < prefix): - longest_matching_prefix = prefix - - # If no prefix found or the longest prefix is the root do not trim the - # path. - if not longest_matching_prefix or longest_matching_prefix == '/': - return path - - return path[len(longest_matching_prefix):] - - -class TrimPathPrefixHandler: - """ - Functor to remove the longest matching leading path from the file path. 
- """ - def __init__(self, prefixes: List[str]): - self.__prefixes = prefixes - - def __call__(self, source_file_path: str) -> str: - """ - Callback to trim_path_prefixes to prevent module dependency - of plist_to_html. - """ - return trim_path_prefixes(source_file_path, self.__prefixes) - - def chunks(iterator, n): """ Yield the next chunk if an iterable object. A chunk consists of maximum n diff --git a/config/logger.conf b/config/logger.conf index 61b69ffaa5..192f659b22 100644 --- a/config/logger.conf +++ b/config/logger.conf @@ -43,6 +43,10 @@ "system": { "level": "INFO", "handlers": ["console"] + }, + "report-converter": { + "level": "INFO", + "handlers": ["console"] } }, "handlers": { diff --git a/docs/tools/codechecker_report_hash.md b/docs/tools/codechecker_report_hash.md deleted file mode 100644 index 79250a3943..0000000000 --- a/docs/tools/codechecker_report_hash.md +++ /dev/null @@ -1,59 +0,0 @@ - -## Report hash generation module -Multiple hash types are available: -- [`CONTEXT_FREE`](#generate-path-sensitive-report-hash) -- [`PATH_SENSITIVE`](#generate-context-sensitive-report-hash) -- [`DIAGNOSTIC_MESSAGE`](#generate-diagnostic-message-hash) - -You can use this library to generate report hash for these types by using the -`get_report_hash` function. - -### Generate path sensitive report hash -`get_report_hash` function can be used to generate report hash with bug path -if the hash type parameter is `PATH_SENSITIVE`. - -High level overview of the hash content: -* `file_name` from the main diag section. -* `checker name`. -* `checker message`. -* `line content` from the source file if can be read up. -* `column numbers` from the *main diag section*. -* `range column numbers` only from the control diag sections if column number - in the range is not the same as the previous control diag section number in - the bug path. If there are no control sections event section column numbers - are used. 
- -*Note*: as the *main diagnostic section* the last element from the bug path is -used. - -### Generate context sensitive report hash -`get_report_hash` function can be used to generate report hash without bug path -if the hash type parameter is `CONTEXT_FREE`. - -High level overview of the hash content: -* `file_name` from the main diag section. -* `checker message`. -* `line content` from the source file if can be read up. All the whitespaces - from the source content are removed. -* `column numbers` from the main diag sections location. - -### Generate diagnostic message hash -`get_report_hash` function can be used to generate report hash with bug event -messages if the hash type parameter is `DIAGNOSTIC_MESSAGE`. - -High level overview of the hash content: -* Same as `context-free-v2` (*file name*, *checker message* etc.) -* `bug step messages` from all events. - -**Note**: this is an experimental hash and it is not recommended to use it on -your project because this hash can change very easily for example on variable / -function renames. - -### Generate path hash -`get_report_path_hash` can be used to get path hash for the given bug path -which can be used to filter deduplications of multiple reports. - -## License - -The project is licensed under Apache License v2.0 with LLVM Exceptions. -See LICENSE.TXT for details. \ No newline at end of file diff --git a/docs/tools/plist_to_html.md b/docs/tools/plist_to_html.md deleted file mode 100644 index 472889124c..0000000000 --- a/docs/tools/plist_to_html.md +++ /dev/null @@ -1,45 +0,0 @@ -# plist-to-html -`plist-to-html` is a python tool which parses and creates HTML files from one -or more `.plist` result files. - -## Install guide -```sh -# Create a Python virtualenv and set it as your environment. -make venv -source $PWD/venv/bin/activate - -# Build and install plist-to-html package. -make package -``` - -## Usage -
- - $ plist-to-html --help (click to expand) - - -``` -usage: plist-to-html [-h] -o OUTPUT_DIR [-l LAYOUT_DIR] - file/folder [file/folder ...] - -Parse and create HTML files from one or more '.plist' result files. - -positional arguments: - file/folder The plist files and/or folders containing analysis - results which should be parsed. - -optional arguments: - -h, --help show this help message and exit - -o OUTPUT_DIR, --output OUTPUT_DIR - Generate HTML output files in the given folder. - (default: None) - -l LAYOUT_DIR, --layout LAYOUT_DIR - Directory which contains dependency HTML, CSS and - JavaScript files. (default: plist_to_html/../static) -``` -
- -## License - -The project is licensed under Apache License v2.0 with LLVM Exceptions. -See LICENSE.TXT for details. \ No newline at end of file diff --git a/docs/tools/report-converter.md b/docs/tools/report-converter.md index 723b39f4f8..0a1bf3991a 100644 --- a/docs/tools/report-converter.md +++ b/docs/tools/report-converter.md @@ -6,27 +6,35 @@ a CodeChecker server. ## Table of Contents * [Install guide](#install-guide) * [Usage](#usage) -* [Sanitizers](#sanitizers) - * [Undefined Behaviour Sanitizer](#undefined-behaviour-sanitizer) - * [Address Sanitizer](#address-sanitizer) - * [Memory Sanitizer](#memory-sanitizer) - * [Thread Sanitizer](#thread-sanitizer) - * [Leak Sanitizer](#leak-sanitizer) -* [Cppcheck](#cppcheck) -* [Spotbugs](#spotbugs) -* [Facebook Infer](#facebook-infer) -* [ESLint](#eslint) -* [Pylint](#pylint) -* [TSLint](#tslint) -* [Golint](#golint) -* [Pyflakes](#pyflakes) -* [Markdownlint](#markdownlint) -* [Coccinelle](#coccinelle) -* [Smatch](#smatch) -* [Kernel-Doc](#kernel-doc) -* [Sphinx](#sphinx) -* [Sparse](#sparse) -* [cpplint](#cpplint) +* [Supported analyzer outputs](#supported-analyzer-outputs) + * [Sanitizers](#sanitizers) + * [Undefined Behaviour Sanitizer](#undefined-behaviour-sanitizer) + * [Address Sanitizer](#address-sanitizer) + * [Memory Sanitizer](#memory-sanitizer) + * [Thread Sanitizer](#thread-sanitizer) + * [Leak Sanitizer](#leak-sanitizer) + * [Cppcheck](#cppcheck) + * [Spotbugs](#spotbugs) + * [Facebook Infer](#facebook-infer) + * [ESLint](#eslint) + * [Pylint](#pylint) + * [TSLint](#tslint) + * [Golint](#golint) + * [Pyflakes](#pyflakes) + * [Markdownlint](#markdownlint) + * [Coccinelle](#coccinelle) + * [Smatch](#smatch) + * [Kernel-Doc](#kernel-doc) + * [Sphinx](#sphinx) + * [Sparse](#sparse) + * [cpplint](#cpplint) +* [Plist to html tool](plist-to-html) + * [Usage](#plist-to-html-usage) +* [Report hash generation module](#report-hash-generation-module) + * [Generate path sensitive report 
hash](#generate-path-sensitive-report-hash) + * [Generate context sensitive report hash](#generate-context-sensitive-report-hash) + * [Generate diagnostic message hash](#generate-diagnostic-message-hash) + * [Generate path hash](#generate-path-hash) * [License](#license) ## Install guide @@ -113,8 +121,9 @@ Supported analyzers: ``` -## Sanitizers -### [Undefined Behaviour Sanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html) +## Supported analyzer outputs +### Sanitizers +#### [Undefined Behaviour Sanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html) - Compile with `-g` and `-fno-omit-frame-pointer` to get proper debug information in your binary. - Run your program with environment variable @@ -138,7 +147,7 @@ UBSAN_SYMBOLIZER_PATH=/usr/lib/llvm-6.0/bin/llvm-symbolizer \ report-converter -t ubsan -o ./ubsan_results ubsan.output ``` -### [Address Sanitizer](https://clang.llvm.org/docs/AddressSanitizer.html) +#### [Address Sanitizer](https://clang.llvm.org/docs/AddressSanitizer.html) - Compile with `-g` and `-fno-omit-frame-pointer` to get proper debug information in your binary. - Set the `ASAN_SYMBOLIZER_PATH` environment variable to point to the @@ -156,7 +165,7 @@ ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-6.0/bin/llvm-symbolizer \ report-converter -t asan -o ./asan_results asan.output ``` -### [Memory Sanitizer](https://clang.llvm.org/docs/MemorySanitizer.html) +#### [Memory Sanitizer](https://clang.llvm.org/docs/MemorySanitizer.html) - Compile with `-g` and `-fno-omit-frame-pointer` to get proper debug information in your binary. 
- Set the `MSAN_SYMBOLIZER_PATH` environment variable to point to the @@ -174,7 +183,7 @@ MSAN_SYMBOLIZER_PATH=/usr/lib/llvm-6.0/bin/llvm-symbolizer \ report-converter -t msan -o ./msan_results msan.output ``` -### [Thread Sanitizer](https://clang.llvm.org/docs/ThreadSanitizer.html) +#### [Thread Sanitizer](https://clang.llvm.org/docs/ThreadSanitizer.html) - Compile with `-g` to get proper debug information in your binary. ```sh @@ -188,7 +197,7 @@ clang++ -fsanitize=thread -g tsan.cpp report-converter -t tsan -o ./tsan_results tsan.output ``` -### [Leak Sanitizer](https://clang.llvm.org/docs/LeakSanitizer.html) +#### [Leak Sanitizer](https://clang.llvm.org/docs/LeakSanitizer.html) - Compile with `-g` and `-fsanitize=address` to get proper debug information in your binary. ```sh # Compile your program. @@ -201,7 +210,7 @@ ASAN_OPTIONS=detect_leaks=1 ./a.out > lsan.output 2>&1 report-converter -t lsan -o ./lsan_results lsan.output ``` -## [Cppcheck](http://cppcheck.sourceforge.net/) +### [Cppcheck](http://cppcheck.sourceforge.net/) [Cppcheck](http://cppcheck.sourceforge.net/) is a static analysis tool for `C/C++` code. @@ -242,7 +251,7 @@ CppCheck: `analysis statistics`, `analysis duration`, `cppcheck command` etc. For more information about logging checkout the log section in the [user guide](/docs/usage.md). -## [Spotbugs](https://spotbugs.github.io/) +### [Spotbugs](https://spotbugs.github.io/) [Spotbugs](https://spotbugs.github.io/) is a static analysis tool for `Java` code. @@ -265,7 +274,7 @@ report-converter -t spotbugs -o ./codechecker_spotbugs_reports ./bugs.xml CodeChecker store ./codechecker_spotbugs_reports -n spotbugs ``` -## [Facebook Infer](https://fbinfer.com/) +### [Facebook Infer](https://fbinfer.com/) [Facebook Infer](https://fbinfer.com/) is a static analysis tool developed by Facebook which supports multiple programming languages such as `C/C++`, `Java` etc. 
@@ -289,7 +298,7 @@ report-converter -t fbinfer -o ./codechecker_fbinfer_reports ./infer-out CodeChecker store ./codechecker_fbinfer_reports -n fbinfer ``` -## [ESLint](https://eslint.org) +### [ESLint](https://eslint.org) [ESLint](https://eslint.org) is a static analysis tool for `JavaScript`. The recommended way of running the ESLint tool is to generate a json output @@ -310,7 +319,7 @@ report-converter -t eslint -o ./codechecker_eslint_reports ./eslint_reports.json CodeChecker store ./codechecker_eslint_reports -n eslint ``` -## [Pylint](https://www.pylint.org) +### [Pylint](https://www.pylint.org) [Pylint](https://www.pylint.org) is a static analysis tool for `Python`. The recommended way of running the Pylint tool is to generate a `json` output @@ -331,7 +340,7 @@ report-converter -t pylint -o ./codechecker_pylint_reports ./pylint_reports.json CodeChecker store ./codechecker_pylint_reports -n pylint ``` -## [Pyflakes](https://github.com/PyCQA/pyflakes) +### [Pyflakes](https://github.com/PyCQA/pyflakes) [Pyflakes](https://github.com/PyCQA/pyflakes) is a static analysis tool for `Python` code. @@ -353,7 +362,7 @@ report-converter -t pyflakes -o ./codechecker_pyflakes_reports ./pyflakes_report CodeChecker store ./codechecker_pyflakes_reports -n pyflakes ``` -## [TSLint](https://palantir.github.io/tslint) +### [TSLint](https://palantir.github.io/tslint) [TSLint](https://palantir.github.io/tslint) is a static analysis tool for `TypeScript`. @@ -375,7 +384,7 @@ report-converter -t tslint -o ./codechecker_tslint_reports ./tslint_reports.json CodeChecker store ./codechecker_tslint_reports -n tslint ``` -## [Golint](https://github.com/golang/lint) +### [Golint](https://github.com/golang/lint) [Golint](https://github.com/golang/lint) is a static analysis tool for `Go` code. 
@@ -397,7 +406,7 @@ report-converter -t golint -o ./codechecker_golint_reports ./golint_reports.out CodeChecker store ./codechecker_golint_reports -n golint ``` -## [Markdownlint](https://github.com/markdownlint/markdownlint) +### [Markdownlint](https://github.com/markdownlint/markdownlint) [Markdownlint](https://github.com/markdownlint/markdownlint) is a static analysis tool for markdown files. @@ -419,7 +428,7 @@ report-converter -t mdl -o ./codechecker_mdl_reports ./mdl_reports.out CodeChecker store ./codechecker_mdl_reports -n mdl ``` -## [Coccinelle](https://github.com/coccinelle/coccinelle) +### [Coccinelle](https://github.com/coccinelle/coccinelle) [Coccinelle](https://github.com/coccinelle/coccinelle) allows programmers to easily write some complex style-preserving source-to-source transformations on C source code, like for instance to perform some refactorings. @@ -446,7 +455,7 @@ report-converter -t coccinelle -o ./codechecker_coccinelle_reports ./coccinelle_ CodeChecker store ./codechecker_coccinelle_reports -n coccinelle ``` -## [Smatch](https://repo.or.cz/w/smatch.git) +### [Smatch](https://repo.or.cz/w/smatch.git) [Smatch](https://repo.or.cz/w/smatch.git) is a static analysis tool for C that is used on the kernel. The recommended way of running Smatch is to redirect the output to a file and @@ -470,7 +479,7 @@ report-converter -t smatch -o ./codechecker_smatch_reports ./smatch_warns.txt CodeChecker store ./codechecker_smatch_reports -n smatch ``` -## [Kernel-Doc](https://github.com/torvalds/linux/blob/master/scripts/kernel-doc) +### [Kernel-Doc](https://github.com/torvalds/linux/blob/master/scripts/kernel-doc) [Kernel-Doc](https://github.com/torvalds/linux/blob/master/scripts/kernel-doc) structure is extracted from the comments, and proper Sphinx C Domain function and type descriptions with anchors are generated from them. The descriptions are filtered for special kernel-doc highlights and cross-references. 
@@ -497,7 +506,7 @@ report-converter -t kernel-doc -o ./codechecker_kernel_doc_reports ./kernel-docs CodeChecker store ./codechecker_kernel_doc_reports -n kernel-doc ``` -## [Sphinx](https://github.com/sphinx-doc/sphinx) +### [Sphinx](https://github.com/sphinx-doc/sphinx) [Sphinx](https://github.com/sphinx-doc/sphinx) Sphinx is a documentation generator or a tool that translates a set of plain text source files into various output formats, automatically producing cross-references, indices, etc. @@ -525,7 +534,7 @@ report-converter -t sphinx -o ./codechecker_sphinx_reports ./sphinx.out CodeChecker store ./codechecker_sphinx_reports -n sphinx ``` -## [Sparse](https://git.kernel.org/pub/scm/devel/sparse/sparse.git) +### [Sparse](https://git.kernel.org/pub/scm/devel/sparse/sparse.git) [Sparse](https://git.kernel.org/pub/scm/devel/sparse/sparse.git) is a semantic checker for C programs; it can be used to find a number of potential problems with kernel code. @@ -550,7 +559,7 @@ report-converter -t sparse -o ./codechecker_sparse_reports ./sparse.out CodeChecker store ./codechecker_sparse_reports -n sparse ``` -## [cpplint](https://github.com/cpplint/cpplint) +### [cpplint](https://github.com/cpplint/cpplint) [cpplint](https://github.com/cpplint/cpplint) is a lint-like tool which checks C++ code against [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). @@ -575,6 +584,100 @@ report-converter -t cpplint -o ./codechecker_cpplint_reports ./sample.out CodeChecker store ./codechecker_cpplint_reports -n cpplint ``` +## Plist to html tool +`plist-to-html` is a python tool which parses and creates HTML files from one +or more `.plist` result files. + +### Usage +
+ + $ plist-to-html --help (click to expand) + + +``` +usage: plist-to-html [-h] -o OUTPUT_DIR [-l LAYOUT_DIR] + file/folder [file/folder ...] + +Parse and create HTML files from one or more '.plist' result files. + +positional arguments: + file/folder The plist files and/or folders containing analysis + results which should be parsed. + +optional arguments: + -h, --help show this help message and exit + -o OUTPUT_DIR, --output OUTPUT_DIR + Generate HTML output files in the given folder. + (default: None) + -l LAYOUT_DIR, --layout LAYOUT_DIR + Directory which contains dependency HTML, CSS and + JavaScript files. (default: plist_to_html/../static) +``` +
+ +## Report hash generation module +A report hash identifies a specific bug in the analyzed code. For example if +a function contains some bug and this function is called from several parts of +the program then essentially these are the same bug. These bugs get the same +report hash which indicates a connection between them. CodeChecker web +interface also helps to group these findings. However, in some special cases +the hash should be built from specific information which makes bug +identification sensitive on some different things (for example indentation of +the code). We recommend using `CONTEXT_FREE` which works in most cases. + +Multiple hash types are available: +- [`PATH_SENSITIVE`](#generate-path-sensitive-report-hash) +- [`CONTEXT_FREE`](#generate-context-free-report-hash) +- [`DIAGNOSTIC_MESSAGE`](#generate-diagnostic-message-hash) + +You can use this library to generate report hash for these types by using the +`get_report_hash` function. + +### Generate path sensitive report hash +`get_report_hash` function can be used to generate report hash with bug path +if the hash type parameter is `PATH_SENSITIVE`. + +High level overview of the hash content: +* `file_name` from the main diag section. +* `checker name`. +* `checker message`. +* `line content` from the source file if can be read up. +* `column numbers` from the *main diag section*. +* `range column numbers` only from the control diag sections if column number + in the range is not the same as the previous control diag section number in + the bug path. If there are no control sections event section column numbers + are used. + +*Note*: as the *main diagnostic section* the last element from the bug path is +used. + +### Generate context free report hash +`get_report_hash` function can be used to generate report hash without bug path +if the hash type parameter is `CONTEXT_FREE`. + +High level overview of the hash content: +* `file_name` from the main diag section. +* `checker message`. 
+* `line content` from the source file if can be read up. All the whitespaces + from the source content are removed. +* `column numbers` from the main diag sections location. + +### Generate diagnostic message hash +`get_report_hash` function can be used to generate report hash with bug event +messages if the hash type parameter is `DIAGNOSTIC_MESSAGE`. + +High level overview of the hash content: +* Same as `CONTEXT_FREE` (*file name*, *checker message* etc.) +* `bug step messages` from all events. + +**Note**: this is an experimental hash and it is not recommended to use it on +your project because this hash can change very easily for example on variable / +function renames. + +### Generate path hash +`get_report_path_hash` can be used to get path hash for the given bug path +which can be used to filter deduplications of multiple reports. + ## License The project is licensed under Apache License v2.0 with LLVM Exceptions. diff --git a/tools/bazel/requirements_py/dev/requirements.txt b/tools/bazel/requirements_py/dev/requirements.txt index 020e4a30e5..73168800e3 100644 --- a/tools/bazel/requirements_py/dev/requirements.txt +++ b/tools/bazel/requirements_py/dev/requirements.txt @@ -1,3 +1,5 @@ nose==1.3.7 pycodestyle==2.7.0 pylint==2.8.2 +mypy==0.812 +mypy_extensions==0.4.3 diff --git a/tools/codechecker_report_hash/.gitignore b/tools/codechecker_report_hash/.gitignore deleted file mode 100644 index d7047d4f7f..0000000000 --- a/tools/codechecker_report_hash/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -build/ -dist/ -codechecker_report_hash.egg-info diff --git a/tools/codechecker_report_hash/.noserc b/tools/codechecker_report_hash/.noserc deleted file mode 100644 index 512f4e1a08..0000000000 --- a/tools/codechecker_report_hash/.noserc +++ /dev/null @@ -1,13 +0,0 @@ -[nosetests] - -# increase verbosity level -verbosity=3 - -# more detailed error messages on failed asserts -detailed-errors=1 - -# stop running tests on first error -stop=1 - -# do not capture stdout 
-#nocapture=1 diff --git a/tools/codechecker_report_hash/.pylintrc b/tools/codechecker_report_hash/.pylintrc deleted file mode 100644 index bdefcf1e21..0000000000 --- a/tools/codechecker_report_hash/.pylintrc +++ /dev/null @@ -1,377 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Use multiple processes to speed up Pylint. -jobs=1 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= - -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. -optimize-ast=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Disable the message, report, category or checker with the given id(s). 
You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=all - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. -enable=logging-format-interpolation,old-style-class,unused-wildcard-import,unused-import,unused-variable,len-as-condition,bad-indentation,unpacking-in-except,import-star-module-level,parameter-unpacking,long-suffix,old-octal-literal,old-ne-operator,backtick,old-raise-syntax,print-statement,unpacking-in-except,import-star-module-level,parameter-unpacking,long-suffix,old-octal-literal,old-ne-operator,backtick,old-raise-syntax,print-statement,not-in-loop,function-redefined,continue-in-finally,abstract-class-instantiated,sstar-needs-assignment-target,duplicate-argument-name,too-many-star-expressions,nonlocal-and-global,return-outside-function,return-arg-in-generator,invalid-star-assignment-target,bad-reversed-sequence,nonexistent-operator,yield-outside-function,init-is-generator,nonlocal-without-binding,invalid-unary-operand-type,unsupported-binary-operation,no-member,not-callable,redundant-keyword-arg,assignment-from-no-return,assignment-from-none,not-context-manager,repeated-keyword,missing-kwoa,no-value-for-parameter,invalid-sequence-index,invalid-slice-index,too-many-function-args,unexpected-keyword-arg,unsupported-membership-test,unsubscriptable-object,unpacking-non-sequence,inval
id-all-object,no-name-in-module,unbalanced-tuple-unpacking,undefined-variable,undefined-all-variable,used-before-assignment,format-needs-mapping,truncated-format-string,missing-format-string-key,mixed-format-string,too-few-format-args,bad-str-strip-call,too-many-format-args,bad-format-character,access-member-before-definition,method-hidden,assigning-non-slot,duplicate-bases,inconsistent-mro,inherit-non-class,invalid-slots,invalid-slots-object,no-method-argument,no-self-argument,unexpected-special-method-signature,non-iterator-returned,invalid-length-returned,cyclic-import,consider-iterating-dictionary - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -msg-template=[{msg_id}] {path}:{line:3d}:{column}: {msg} - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. 
-spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=80 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator - -# Maximum number of lines in a module -max-module-lines=2000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -#notes=FIXME,XXX,TODO - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_$|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. 
-additional-builtins= - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - - -[BASIC] - -# List of builtins function names that should not be used, separated by a comma -bad-functions= - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint for inline iteration names 
-inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=50 - - -[ELIF] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= - -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). This supports can work -# with qualified names. -ignored-classes= - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. 
-ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=8 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=20 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. 
-valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/tools/codechecker_report_hash/.pypirc b/tools/codechecker_report_hash/.pypirc deleted file mode 100644 index 52d57ec25f..0000000000 --- a/tools/codechecker_report_hash/.pypirc +++ /dev/null @@ -1,10 +0,0 @@ -[distutils] -index-servers = - pypi - testpypi - -[pypi] -repository: https://upload.pypi.org/legacy/ - -[testpypi] -repository: https://test.pypi.org/legacy/ diff --git a/tools/codechecker_report_hash/LICENSE.txt b/tools/codechecker_report_hash/LICENSE.txt deleted file mode 100644 index bd8b243dfa..0000000000 --- a/tools/codechecker_report_hash/LICENSE.txt +++ /dev/null @@ -1,218 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - ---- LLVM Exceptions to the Apache 2.0 License ---- - -As an exception, if, as a result of your compiling your source code, portions -of this Software are embedded into an Object form of such source code, you -may redistribute such embedded portions in such Object form without complying -with the conditions of Sections 4(a), 4(b) and 4(d) of the License. - -In addition, if you combine or link compiled forms of this Software with -software that is licensed under the GPLv2 ("Combined Software") and if a -court of competent jurisdiction determines that the patent provision (Section -3), the indemnity provision (Section 9) or other Section of the License -conflicts with the conditions of the GPLv2, you may retroactively and -prospectively choose to deem waived or otherwise exclude such Section(s) of -the License, but only in their entirety and only with respect to the Combined -Software. 
diff --git a/tools/codechecker_report_hash/MANIFEST.in b/tools/codechecker_report_hash/MANIFEST.in deleted file mode 100644 index c1ebcaeab4..0000000000 --- a/tools/codechecker_report_hash/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include README.md -include *.txt diff --git a/tools/codechecker_report_hash/Makefile b/tools/codechecker_report_hash/Makefile deleted file mode 100644 index 461b993ee7..0000000000 --- a/tools/codechecker_report_hash/Makefile +++ /dev/null @@ -1,65 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -CURRENT_DIR = ${CURDIR} -ROOT = $(CURRENT_DIR) - -BUILD_DIR = $(CURRENT_DIR)/build -PYTHON_BIN ?= python3 -CC_REPORT_HASH_DIR = $(BUILD_DIR)/codechecker_report_hash - -ACTIVATE_DEV_VENV ?= . venv_dev/bin/activate -ACTIVATE_RUNTIME_VENV ?= . venv/bin/activate - -VENV_DEV_REQ_FILE ?= requirements_py/dev/requirements.txt - -default: all - -all: package - -venv: - # Create a virtual environment which can be used to run the build package. - python3 -m venv venv && $(ACTIVATE_RUNTIME_VENV) - -venv_dev: - # Create a virtual environment for development. - python3 -m venv venv_dev && \ - $(ACTIVATE_DEV_VENV) && pip3 install -r $(VENV_DEV_REQ_FILE) - -clean_venv_dev: - rm -rf venv_dev - -include tests/Makefile - -package: - # Install package in 'development mode'. - ${PYTHON_BIN} setup.py develop - -build: - ${PYTHON_BIN} setup.py build --build-purelib $(CC_REPORT_HASH_DIR) - -dist: - # Create a source distribution. - ${PYTHON_BIN} setup.py sdist - -upload_test: dist - # Upload package to the TestPyPI repository. 
- $(eval PKG_NAME := $(shell ${PYTHON_BIN} setup.py --name)) - $(eval PKG_VERSION := $(shell ${PYTHON_BIN} setup.py --version)) - twine upload -r testpypi dist/$(PKG_NAME)-$(PKG_VERSION).tar.gz - -upload: dist - # Upload package to the PyPI repository. - $(eval PKG_NAME := $(shell ${PYTHON_BIN} setup.py --name)) - $(eval PKG_VERSION := $(shell ${PYTHON_BIN} setup.py --version)) - twine upload -r pypi dist/$(PKG_NAME)-$(PKG_VERSION).tar.gz - -clean: - rm -rf $(BUILD_DIR) - rm -rf dist - rm -rf codechecker_report_hash.egg-info diff --git a/tools/codechecker_report_hash/codechecker_report_hash/hash.py b/tools/codechecker_report_hash/codechecker_report_hash/hash.py deleted file mode 100644 index 025888c6f3..0000000000 --- a/tools/codechecker_report_hash/codechecker_report_hash/hash.py +++ /dev/null @@ -1,399 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- -""" CodeChecker hash generation algorithms. 
""" - -import hashlib -import logging -import os -import plistlib -import sys -import traceback - -from enum import Enum - -from typing import List, Optional, Tuple - -if sys.version_info >= (3, 8): - from typing import TypedDict # pylint: disable=no-name-in-module -else: - from mypy_extensions import TypedDict - -LOG = logging.getLogger('codechecker_report_hash') - -handler = logging.StreamHandler() -formatter = logging.Formatter('[%(levelname)s] - %(message)s') -handler.setFormatter(formatter) - -LOG.setLevel(logging.INFO) -LOG.addHandler(handler) - - -class DiagLoc(TypedDict): - line: int - col: int - - -class DiagEdge(TypedDict): - start: Tuple[DiagLoc, DiagLoc] - end: Tuple[DiagLoc, DiagLoc] - - -class DiagPath(TypedDict): - kind: str - message: str - location: DiagLoc - edges: List[DiagEdge] - - -class Diag(TypedDict): - description: str - check_name: str - location: DiagLoc - path: List[DiagPath] - - -class HashType(Enum): - """ Report hash types. """ - CONTEXT_FREE = 1 - PATH_SENSITIVE = 2 - DIAGNOSTIC_MESSAGE = 3 - - -def __get_line(file_path: str, line_no: int, errors: str = 'ignore') -> str: - """ Return the given line from the file. - - If line_no is larger than the number of lines in the file then empty - string returns. If the file can't be opened for read, the function also - returns empty string. - - Try to encode every file as utf-8 to read the line content do not depend - on the platform settings. By default locale.getpreferredencoding() is used - which depends on the platform. - - Changing the encoding error handling can influence the hash content! - """ - try: - with open(file_path, mode='r', - encoding='utf-8', errors=errors) as f: - for line in f: - line_no -= 1 - if line_no == 0: - return line - return '' - except IOError: - LOG.error("Failed to open file %s", file_path) - return '' - - -def __str_to_hash(string_to_hash: str, errors: str = 'ignore') -> str: - """ Encodes the given string and generates a hash from it. 
""" - string_hash = string_to_hash.encode(encoding="utf-8", errors=errors) - return hashlib.md5(string_hash).hexdigest() - - -def _remove_whitespace(line_content: str, old_col: int) -> Tuple[str, int]: - """ - This function removes white spaces from the line content parameter and - calculates the new line location. - Returns the line content without white spaces and the new column number. - E.g.: - line_content = " int foo = 17; sizeof(43); " - ^ - |- bug_col = 18 - content_begin = " int foo = 17; " - content_begin_strip = "intfoo=17;" - line_strip_len = 18 - 10 => 8 - ''.join(line_content.split()) => "intfoo=17;sizeof(43);" - ^ - |- until_col - line_strip_len - 18 - 8 - = 10 - """ - content_begin = line_content[:old_col] - content_begin_strip = ''.join(content_begin.split()) - line_strip_len = len(content_begin) - len(content_begin_strip) - - return ''.join(line_content.split()), \ - old_col - line_strip_len - - -def __get_report_hash_path_sensitive(diag: Diag, file_path: str) -> List[str]: - """ Report hash generation from the given diagnostic. - - Hash generation algorithm for older plist versions where no - issue hash was generated or for the plists generated - from Clang Tidy where the issue hash generation feature - is still missing. - - As the main diagnostic section the last element from the bug path is used. - - High level overview of the hash content: - * 'file_name' from the main diag section. - * 'checker name' - * 'checker message' - * 'line content' from the source file if can be read up - * 'column numbers' from the main diag section - * 'range column numbers' only from the control diag sections if - column number in the range is not the same as the previous - control diag section number in the bug path. If there are no control - sections event section column numbers are used. 
- """ - def compare_ctrl_sections( - curr: DiagPath, - prev: DiagPath - ) -> Optional[Tuple[int, int]]: - """ - Compare two sections and return column numbers which - should be included in the path hash or None if the - two compared sections ranges are identical. - """ - curr_edges = curr['edges'] - curr_start_range_begin = curr_edges[0]['start'][0] - curr_start_range_end = curr_edges[0]['start'][1] - - prev_edges = prev['edges'] - prev_end_range_begin = prev_edges[0]['end'][0] - prev_end_range_end = prev_edges[0]['end'][1] - - if curr_start_range_begin != prev_end_range_begin and \ - curr_start_range_end != prev_end_range_end: - return (curr_start_range_begin['col'], - curr_start_range_end['col']) - - return None - - path = diag['path'] - - # The last diag section from the bug path used as a main - # diagnostic section. - try: - ctrl_sections = [x for x in path if x.get('kind') == 'control'] - - main_section = path[-1] - - m_loc = main_section.get('location', {}) - source_line = m_loc.get('line', -1) - - from_col = m_loc.get('col', -1) - until_col = m_loc.get('col', -1) - - # WARNING!!! Changing the error handling type for encoding errors - # can influence the hash content! 
- line_content = __get_line(file_path, source_line, errors='ignore') - - if line_content == '' and not os.path.isfile(file_path): - LOG.error("Failed to generate report hash.") - LOG.error('%s does not exists!', file_path) - - file_name = os.path.basename(file_path) - msg = main_section.get('message', '') - - hash_content = [file_name, - diag.get('check_name', 'unknown'), - msg, - line_content, - str(from_col), - str(until_col)] - - hash_from_ctrl_section = True - for i, section in enumerate(ctrl_sections): - edges = section['edges'] - - try: - start_range_begin = edges[0]['start'][0] - start_range_end = edges[0]['start'][1] - - end_range_begin = edges[0]['end'][0] - end_range_end = edges[0]['end'][1] - - if i > 0: - prev = ctrl_sections[i-1] - col_to_append = compare_ctrl_sections(section, prev) - if col_to_append: - begin_col, end_col = col_to_append - hash_content.append(str(begin_col)) - hash_content.append(str(end_col)) - else: - hash_content.append(str(start_range_begin['col'])) - hash_content.append(str(start_range_end['col'])) - - hash_content.append(str(end_range_begin['col'])) - hash_content.append(str(end_range_end['col'])) - except IndexError: - # Edges might be empty. - hash_from_ctrl_section = False - - # Hash generation from the control sections failed for some reason - # use event section positions for hash generation. - if not hash_from_ctrl_section: - event_sections = [x for x in path if x.get('kind') == 'event'] - - for i, section in enumerate(event_sections): - loc = section['location'] - col_num = loc['col'] - hash_content.append(str(col_num)) - - return hash_content - except Exception as ex: - LOG.error("Hash generation failed") - LOG.error(ex) - return [] - - -def __get_report_hash_context_free(diag: Diag, file_path: str) -> List[str]: - """ Generate report hash without bug path. - - !!! NOT Compatible with the old hash generation method - - High level overview of the hash content: - * 'file_name' from the main diag section. 
- * 'checker message'. - * 'line content' from the source file if can be read up. All the - whitespaces from the source content are removed. - * 'column numbers' from the main diag sections location. - """ - try: - m_loc = diag.get('location', {}) - source_line = m_loc.get('line', -1) - - from_col = m_loc.get('col', -1) - until_col = m_loc.get('col', -1) - - # WARNING!!! Changing the error handling type for encoding errors - # can influence the hash content! - line_content = __get_line(file_path, source_line, errors='ignore') - - # Remove whitespaces so the hash will be independet of the - # source code indentation. - line_content, new_col = _remove_whitespace(line_content, from_col) - - # Update the column number in sync with the - # removed whitespaces. - until_col = until_col - (from_col - new_col) - from_col = new_col - - if line_content == '' and not os.path.isfile(file_path): - LOG.error("Failed to include soruce line in the report hash.") - LOG.error('%s does not exists!', file_path) - - file_name = os.path.basename(file_path) - msg = diag.get('description', '') - - hash_content = [file_name, - msg, - line_content, - str(from_col), - str(until_col)] - - return hash_content - except Exception as ex: - LOG.error("Hash generation failed") - LOG.error(ex) - return [] - - -def __get_report_hash_diagnostic_message( - diag: Diag, - file_path: str -) -> List[str]: - """ Generate report hash with bug path messages. - - The hash will contain the same information as the CONTEXT_FREE hash + - 'bug step messages' from events. - """ - try: - hash_content = __get_report_hash_context_free(diag, file_path) - - # Add bug step messages to the hash. 
- for event in [x for x in diag['path'] if x.get('kind') == 'event']: - hash_content.append(event['message']) - - return hash_content - except Exception as ex: - LOG.error("Hash generation failed") - LOG.error(ex) - return [] - - -def get_report_hash(diag: Diag, file_path: str, hash_type: HashType) -> str: - """ Get report hash for the given diagnostic. """ - hash_content = None - - if hash_type == HashType.CONTEXT_FREE: - hash_content = __get_report_hash_context_free(diag, file_path) - elif hash_type == HashType.PATH_SENSITIVE: - hash_content = __get_report_hash_path_sensitive(diag, file_path) - elif hash_type == HashType.DIAGNOSTIC_MESSAGE: - hash_content = __get_report_hash_diagnostic_message(diag, file_path) - else: - raise Exception("Invalid report hash type: " + str(hash_type)) - - return __str_to_hash('|||'.join(hash_content)) - - -def get_report_path_hash(report) -> str: - """ Returns path hash for the given bug path. - - This can be used to filter deduplications of multiple reports. - - report type should be codechecker_common.Report - """ - report_path_hash = '' - events = [i for i in report.bug_path if i.get('kind') == 'event'] - for event in events: - file_name = \ - os.path.basename(report.files.get(event['location']['file'])) - line = str(event['location']['line'] if 'location' in event else 0) - col = str(event['location']['col'] if 'location' in event else 0) - - report_path_hash += line + '|' + col + '|' + event['message'] + \ - file_name - - report_path_hash += report.check_name - - if not report_path_hash: - LOG.error('Failed to generate report path hash!') - LOG.error(report.bug_path) - - LOG.debug(report_path_hash) - return __str_to_hash(report_path_hash) - - -def replace_report_hash(plist_file: str, hash_type=HashType.CONTEXT_FREE): - """ Override hash in the given file by using the given version hash. 
""" - try: - with open(plist_file, 'rb+') as f: - plist = plistlib.load(f) - f.seek(0) - f.truncate() - files = plist['files'] - - for diag in plist['diagnostics']: - file_path = files[diag['location']['file']] - report_hash = get_report_hash(diag, file_path, hash_type) - diag['issue_hash_content_of_line_in_context'] = report_hash - - plistlib.dump(plist, f) - - except (TypeError, AttributeError, plistlib.InvalidFileException) as err: - LOG.warning('Failed to process plist file: %s wrong file format?', - plist_file) - LOG.warning(err) - except IndexError as iex: - LOG.warning('Indexing error during processing plist file %s', - plist_file) - LOG.warning(type(iex)) - LOG.warning(repr(iex)) - _, _, exc_traceback = sys.exc_info() - traceback.print_tb(exc_traceback, limit=1, file=sys.stdout) - except Exception as ex: - LOG.warning('Error during processing reports from the plist file: %s', - plist_file) - traceback.print_exc() - LOG.warning(type(ex)) - LOG.warning(ex) diff --git a/tools/codechecker_report_hash/requirements_py/dev/requirements.txt b/tools/codechecker_report_hash/requirements_py/dev/requirements.txt deleted file mode 100644 index 73168800e3..0000000000 --- a/tools/codechecker_report_hash/requirements_py/dev/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -nose==1.3.7 -pycodestyle==2.7.0 -pylint==2.8.2 -mypy==0.812 -mypy_extensions==0.4.3 diff --git a/tools/codechecker_report_hash/setup.py b/tools/codechecker_report_hash/setup.py deleted file mode 100644 index 21eb94f885..0000000000 --- a/tools/codechecker_report_hash/setup.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import setuptools - -readme_file_path = os.path.join( - "..", "..", "docs", "tools", "codechecker_report_hash.md") - -with open(readme_file_path, "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="codechecker_report_hash", - version="0.1.0", - author='CodeChecker Team (Ericsson)', - description="Module to generate report 
hash for CodeChecker.", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/Ericsson/CodeChecker", - keywords=['plist', 'report', 'hash'], - license='LICENSE.txt', - packages=setuptools.find_packages(), - include_package_data=True, - classifiers=[ - "Environment :: Console", - "Intended Audience :: Developers", - "Operating System :: MacOS", - "Operating System :: POSIX", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - ] -) diff --git a/tools/codechecker_report_hash/tests/Makefile b/tools/codechecker_report_hash/tests/Makefile deleted file mode 100644 index d8452f8e04..0000000000 --- a/tools/codechecker_report_hash/tests/Makefile +++ /dev/null @@ -1,47 +0,0 @@ -# Environment variables to run tests. - -# Test project configuration, tests are run on these files. -TEST_PROJECT ?= TEST_PROJ=$(CURRENT_DIR)/tests/projects - -REPO_ROOT ?= REPO_ROOT=$(ROOT) - -# Nose test runner configuration options. 
-NOSECFG = --config .noserc - -test: mypy pycodestyle pylint test_unit - -test_in_env: pycodestyle_in_env pylint_in_env test_unit_in_env - -MYPY_TEST_CMD = mypy --ignore-missing-imports codechecker_report_hash tests - -mypy: - $(MYPY_TEST_CMD) - -mypy_in_env: venv_dev - $(ACTIVATE_DEV_VENV) && $(MYPY_TEST_CMD) - -PYCODESTYLE_TEST_CMD = pycodestyle codechecker_report_hash tests - -pycodestyle: - $(PYCODESTYLE_TEST_CMD) - -pycodestyle_in_env: venv_dev - $(ACTIVATE_DEV_VENV) && $(PYCODESTYLE_TEST_CMD) - -PYLINT_TEST_CMD = PYLINTRC=$(ROOT)/.pylintrc \ - pylint -j0 ./codechecker_report_hash ./tests/** - -pylint: - $(PYLINT_TEST_CMD) - -pylint_in_env: venv - $(ACTIVATE_DEV_VENV) && $(PYLINT_TEST_CMD) - -UNIT_TEST_CMD = $(REPO_ROOT) $(TEST_PROJECT) \ - nosetests $(NOSECFG) tests/unit - -test_unit: - $(UNIT_TEST_CMD) - -test_unit_in_env: venv_dev - $(ACTIVATE_DEV_VENV) && $(UNIT_TEST_CMD) diff --git a/tools/plist_to_html/.gitignore b/tools/plist_to_html/.gitignore deleted file mode 100644 index 988d68d77b..0000000000 --- a/tools/plist_to_html/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -build/ -dist/ -plist_to_html.egg-info -plist_to_html/static/vendor diff --git a/tools/plist_to_html/.noserc b/tools/plist_to_html/.noserc deleted file mode 100644 index 512f4e1a08..0000000000 --- a/tools/plist_to_html/.noserc +++ /dev/null @@ -1,13 +0,0 @@ -[nosetests] - -# increase verbosity level -verbosity=3 - -# more detailed error messages on failed asserts -detailed-errors=1 - -# stop running tests on first error -stop=1 - -# do not capture stdout -#nocapture=1 diff --git a/tools/plist_to_html/.pypirc b/tools/plist_to_html/.pypirc deleted file mode 100644 index 52d57ec25f..0000000000 --- a/tools/plist_to_html/.pypirc +++ /dev/null @@ -1,10 +0,0 @@ -[distutils] -index-servers = - pypi - testpypi - -[pypi] -repository: https://upload.pypi.org/legacy/ - -[testpypi] -repository: https://test.pypi.org/legacy/ diff --git a/tools/plist_to_html/LICENSE.txt b/tools/plist_to_html/LICENSE.txt 
deleted file mode 100644 index bd8b243dfa..0000000000 --- a/tools/plist_to_html/LICENSE.txt +++ /dev/null @@ -1,218 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - ---- LLVM Exceptions to the Apache 2.0 License ---- - -As an exception, if, as a result of your compiling your source code, portions -of this Software are embedded into an Object form of such source code, you -may redistribute such embedded portions in such Object form without complying -with the conditions of Sections 4(a), 4(b) and 4(d) of the License. 
- -In addition, if you combine or link compiled forms of this Software with -software that is licensed under the GPLv2 ("Combined Software") and if a -court of competent jurisdiction determines that the patent provision (Section -3), the indemnity provision (Section 9) or other Section of the License -conflicts with the conditions of the GPLv2, you may retroactively and -prospectively choose to deem waived or otherwise exclude such Section(s) of -the License, but only in their entirety and only with respect to the Combined -Software. diff --git a/tools/plist_to_html/MANIFEST.in b/tools/plist_to_html/MANIFEST.in deleted file mode 100644 index 48bd99b93c..0000000000 --- a/tools/plist_to_html/MANIFEST.in +++ /dev/null @@ -1,3 +0,0 @@ -include README.md -include *.txt -recursive-include plist_to_html/static * diff --git a/tools/plist_to_html/Makefile b/tools/plist_to_html/Makefile deleted file mode 100644 index d1205b5063..0000000000 --- a/tools/plist_to_html/Makefile +++ /dev/null @@ -1,115 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -CODEMIRROR = https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.30.0 - -PYTHON_BIN ?= python3 -CURRENT_DIR = ${CURDIR} -ROOT = $(CURRENT_DIR) - -BUILD_DIR = $(CURRENT_DIR)/build -PLIST_TO_HTML_DIR = $(BUILD_DIR)/plist_to_html - -STATIC_DIR = $(CURRENT_DIR)/plist_to_html/static -VENDOR_DIR = $(STATIC_DIR)/vendor -CODEMIRROR_DIR = $(VENDOR_DIR)/codemirror - - -ACTIVATE_DEV_VENV ?= . venv_dev/bin/activate -ACTIVATE_RUNTIME_VENV ?= . venv/bin/activate - -VENV_DEV_REQ_FILE ?= requirements_py/dev/requirements.txt - -default: all - -all: package - -venv: - # Create a virtual environment which can be used to run the build package. 
- python3 -m venv venv && $(ACTIVATE_RUNTIME_VENV) - -pip_dev_deps: - pip3 install -r $(VENV_DEV_REQ_FILE) - -venv_dev: - # Create a virtual environment for development. - python3 -m venv venv_dev && \ - $(ACTIVATE_DEV_VENV) && pip3 install -r $(VENV_DEV_REQ_FILE) - -clean_venv_dev: - rm -rf venv_dev - -include tests/Makefile - -package: dep - # Install package in 'development mode'. - ${PYTHON_BIN} setup.py develop - -build: dep - ${PYTHON_BIN} setup.py build --build-purelib $(PLIST_TO_HTML_DIR) - -dist: dep - # Create a source distribution. - ${PYTHON_BIN} setup.py sdist - -upload_test: dist - # Upload package to the TestPyPI repository. - $(eval PKG_NAME := $(shell ${PYTHON_BIN} setup.py --name)) - $(eval PKG_VERSION := $(shell ${PYTHON_BIN} setup.py --version)) - twine upload -r testpypi dist/$(PKG_NAME)-$(PKG_VERSION).tar.gz - -upload: dist - # Upload package to the PyPI repository. - $(eval PKG_NAME := $(shell ${PYTHON_BIN} setup.py --name)) - $(eval PKG_VERSION := $(shell ${PYTHON_BIN} setup.py --version)) - twine upload -r pypi dist/$(PKG_NAME)-$(PKG_VERSION).tar.gz - -vendor_dir: - mkdir -p $(VENDOR_DIR) - -codemirror_dir: vendor_dir - mkdir -p $ $(CODEMIRROR_DIR) - -dep: codemirror - -codemirror: $(CODEMIRROR_DIR)/codemirror.min.js -codemirror: $(CODEMIRROR_DIR)/codemirror.min.css -codemirror: $(CODEMIRROR_DIR)/codemirror.LICENSE -codemirror: $(CODEMIRROR_DIR)/clike.min.js - -$(CODEMIRROR_DIR)/codemirror.min.js: codemirror_dir - [ -f $(CODEMIRROR_DIR)/codemirror.min.js ] && : || \ - curl -sSfLk --get $(CODEMIRROR)/codemirror.min.js \ - -z $(CODEMIRROR_DIR)/codemirror.min.js \ - -o $(CODEMIRROR_DIR)/codemirror.min.js - -$(CODEMIRROR_DIR)/codemirror.min.css: codemirror_dir - [ -f $(CODEMIRROR_DIR)/codemirror.min.css ] && : || \ - curl -sSfLk --get $(CODEMIRROR)/codemirror.min.css \ - -z $(CODEMIRROR_DIR)/codemirror.min.css \ - -o $(CODEMIRROR_DIR)/codemirror.min.css - -$(CODEMIRROR_DIR)/codemirror.LICENSE: codemirror_dir - [ -f 
$(CODEMIRROR_DIR)/codemirror.LICENSE ] && : || \ - curl -sSfLk --get https://raw.githubusercontent.com/codemirror/CodeMirror/master/LICENSE \ - -z $(CODEMIRROR_DIR)/codemirror.LICENSE \ - -o $(CODEMIRROR_DIR)/codemirror.LICENSE - -$(CODEMIRROR_DIR)/clike.min.js: codemirror_dir - [ -f $(CODEMIRROR_DIR)/clike.min.js ] && : || \ - curl -sSfLk --get $(CODEMIRROR)/mode/clike/clike.min.js \ - -z $(CODEMIRROR_DIR)/clike.min.js \ - -o $(CODEMIRROR_DIR)/clike.min.js - -clean: - rm -rf $(BUILD_DIR) - rm -rf dist - rm -rf plist_to_html.egg-info - -clean_vendor: - rm -rf $(VENDOR_DIR) diff --git a/tools/plist_to_html/plist_to_html/PlistToHtml.py b/tools/plist_to_html/plist_to_html/PlistToHtml.py deleted file mode 100755 index f829c0f08e..0000000000 --- a/tools/plist_to_html/plist_to_html/PlistToHtml.py +++ /dev/null @@ -1,697 +0,0 @@ -#!/usr/bin/env python3 -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. 
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -import argparse -import io -import json -import os -import plistlib -import shutil -import sys - -from collections import defaultdict -from string import Template -from typing import Callable, Dict, List, Optional, Set, Tuple -from xml.parsers.expat import ExpatError - -if sys.version_info >= (3, 8): - from typing import TypedDict # pylint: disable=no-name-in-module -else: - from mypy_extensions import TypedDict - - -SkipReportHandler = Callable[ - [str, str, int, str, dict, Dict[int, str]], - Tuple[bool, list] -] - -TrimPathPrefixHandler = Callable[[str], str] -SeverityMap = Dict[str, str] - - -class Location(TypedDict): - col: int - file: int - line: int - - -class Event(TypedDict): - location: Location - message: str - - -class Macro(TypedDict): - location: Location - expansion: str - name: str - - -class Note(TypedDict): - location: Location - message: str - - -Events = List[Event] -Macros = List[Macro] -Notes = List[Note] - - -class Report(TypedDict): - events: Events - macros: Macros - notes: Notes - path: str - reportHash: str - checkerName: str - reviewStatus: Optional[str] - severity: Optional[str] - - -Reports = List[Report] - - -class FileSource(TypedDict): - id: int - path: str - content: str - - -FileSources = Dict[int, FileSource] - - -class ReportData(TypedDict): - files: FileSources - reports: Reports - - -class HtmlReport(TypedDict): - html_file: str - report: Report - - -def get_last_mod_time(file_path: str) -> int: - """ Return the last modification time of a file. """ - return os.stat(file_path)[9] - - -def get_file_content(file_path: str) -> str: - """ Return file content of the given file. 
""" - with open(file_path, 'r', encoding='utf-8', errors='replace') as f: - return f.read() - - -def twodim_to_table( - lines: List[List[str]], - separate_head: bool = True, - separate_footer: bool = False -) -> Optional[str]: - """ Pretty-prints the given two-dimensional array's lines. """ - - str_parts = [] - - # Count the column width. - widths: List[int] = [] - for line in lines: - for i, size in enumerate([len(str(x)) for x in line]): - while i >= len(widths): - widths.append(0) - if size > widths[i]: - widths[i] = size - - # Generate the format string to pad the columns. - print_string = "" - for i, width in enumerate(widths): - print_string += "{" + str(i) + ":" + str(width) + "} | " - - if not print_string: - return "" - - print_string = print_string[:-3] - - # Print the actual data. - str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1))) - for i, line in enumerate(lines): - try: - str_parts.append(print_string.format(*line)) - except IndexError: - raise TypeError("One of the rows have a different number of " - "columns than the others") - if i == 0 and separate_head: - str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1))) - if separate_footer and i == len(lines) - 2: - str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1))) - - str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1))) - - return '\n'.join(str_parts) - - -class HtmlBuilder: - """ - Helper class to create html file from a report data. - """ - def __init__( - self, - layout_dir: str, - checker_labels=None # : Optional[CheckerLabels] = None - ): - self._checker_labels = checker_labels - self.layout_dir = layout_dir - self.generated_html_reports: Dict[str, Reports] = {} - - css_dir = os.path.join(self.layout_dir, 'css') - js_dir = os.path.join(self.layout_dir, 'js') - codemirror_dir = os.path.join(self.layout_dir, 'vendor', - 'codemirror') - - # Mapping layout tags to files. 
- self._layout_tag_files = { - 'style_css': os.path.join(css_dir, 'style.css'), - 'buglist_css': os.path.join(css_dir, 'buglist.css'), - 'bugview_css': os.path.join(css_dir, 'bugview.css'), - 'statistics_css': os.path.join(css_dir, 'statistics.css'), - 'icon_css': os.path.join(css_dir, 'icon.css'), - 'table_css': os.path.join(css_dir, 'table.css'), - 'codemirror_license': os.path.join(codemirror_dir, - 'codemirror.LICENSE'), - 'codemirror_css': os.path.join(codemirror_dir, - 'codemirror.min.css'), - 'codemirror_js': os.path.join(codemirror_dir, 'codemirror.min.js'), - 'clike_js': os.path.join(codemirror_dir, 'clike.min.js'), - 'bug_viewer': os.path.join(js_dir, 'bugviewer.js'), - 'bug_list': os.path.join(js_dir, 'buglist.js'), - 'browser_support': os.path.join(js_dir, 'browsersupport.js') - } - - # Get the HTML layout file content. - self._layout = Template(get_file_content( - os.path.join(self.layout_dir, 'layout.html'))) - - self._index = Template(get_file_content( - os.path.join(self.layout_dir, 'index.html'))) - - self._statistics = Template(get_file_content( - os.path.join(self.layout_dir, 'statistics.html'))) - - # Get the content of the HTML layout dependencies. - self._tag_contents = {} - for tag in self._layout_tag_files: - self._tag_contents[tag] = get_file_content( - self._layout_tag_files[tag]) - - def get_severity(self, checker: str) -> str: - return self._checker_labels.severity(checker) \ - if self._checker_labels else 'UNSPECIFIED' - - def create(self, output_path: str, report_data: ReportData): - """ - Create html file with the given report data to the output path. - """ - # Add severity levels for reports. 
- for report in report_data['reports']: - checker = report['checkerName'] - report['severity'] = self.get_severity(checker) - - self.generated_html_reports[output_path] = report_data['reports'] - - substitute_data = self._tag_contents - substitute_data.update({'report_data': json.dumps(report_data)}) - - content = self._layout.substitute(substitute_data) - - with open(output_path, 'w+', encoding='utf-8', - errors='replace') as html_output: - html_output.write(content) - - def create_index_html(self, output_dir: str): - """ - Creates an index.html file which lists all available bugs which was - found in the processed plist files. This also creates a link for each - bug to the created html file where the bug can be found. - """ - - # Sort reports based on file path levels. - report_data: List[HtmlReport] = [] - for html_file in self.generated_html_reports: - for report in self.generated_html_reports[html_file]: - report_data.append({'html_file': html_file, 'report': report}) - report_data = sorted(report_data, - key=lambda d: d['report']['path']) - - with io.StringIO() as table_reports: - # Create table header. - table_reports.write(''' - -   - File - Severity - Checker name - Message - Bug path length - Review status - ''') - - # Create table lines. 
- for i, data in enumerate(report_data): - html_file = os.path.basename(data['html_file']) - report = data['report'] - - events = report['events'] - checker = report['checkerName'] - severity = report['severity'].lower() \ - if 'severity' in report \ - and report['severity'] is not None \ - else '' - - review_status = report['reviewStatus'] \ - if 'reviewStatus' in report and \ - report['reviewStatus'] is not None \ - else '' - - line = events[-1]['location']['line'] - rs = review_status.lower().replace(' ', '-') - - table_reports.write(f''' - - {i + 1} - - - {report['path']} @ Line {line} - - - - - - {checker} - {events[-1]['message']} - {len(events)} - - {review_status} - - ''') - - substitute_data = self._tag_contents - substitute_data.update({'table_reports': table_reports.getvalue()}) - - content = self._index.substitute(substitute_data) - output_path = os.path.join(output_dir, 'index.html') - with open(output_path, 'w+', encoding='utf-8', - errors='replace') as html_output: - html_output.write(content) - - def create_statistics_html(self, output_dir: str): - """ - Creates an statistics.html file which contains statistics information - from the HTML generation process. - """ - def severity_order(severity: str) -> int: - """ - This function determines in which order severities should be - printed to the output. This function can be given via "key" - attribute to sort() function. 
- """ - severities = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'STYLE', - 'UNSPECIFIED'] - return severities.index(severity) - - num_of_plist_files = len(self.generated_html_reports) - - num_of_reports = 0 - for html_file in self.generated_html_reports: - num_of_reports += len(self.generated_html_reports[html_file]) - - checker_statistics: Dict[str, int] = defaultdict(int) - for html_file in self.generated_html_reports: - for report in self.generated_html_reports[html_file]: - checker = report['checkerName'] - checker_statistics[checker] += 1 - - checker_rows: List[List[str]] = [] - severity_statistics: Dict[str, int] = defaultdict(int) - - with io.StringIO() as string: - for checker_name in sorted(checker_statistics): - severity = self.get_severity(checker_name) - string.write(''' - - {0} - - - - {2} - - '''.format(checker_name, severity.lower(), - checker_statistics[checker_name])) - checker_rows.append([checker_name, severity, - str(checker_statistics[checker_name])]) - severity_statistics[severity] += \ - checker_statistics[checker_name] - checker_statistics_content = string.getvalue() - - severity_rows: List[List[str]] = [] - - with io.StringIO() as string: - for severity in sorted(severity_statistics, key=severity_order): - num = severity_statistics[severity] - string.write(''' - - - - - {1} - - '''.format(severity.lower(), num)) - severity_rows.append([severity, str(num)]) - severity_statistics_content = string.getvalue() - - substitute_data = self._tag_contents - substitute_data.update({ - 'number_of_plist_files': str(num_of_plist_files), - 'number_of_reports': str(num_of_reports), - 'checker_statistics': checker_statistics_content, - 'severity_statistics': severity_statistics_content}) - - content = self._statistics.substitute(substitute_data) - - output_path = os.path.join(output_dir, 'statistics.html') - with open(output_path, 'w+', encoding='utf-8', - errors='ignore') as html_output: - html_output.write(content) - - print("\n----==== Summary ====----") - - 
print("----=================----") - print("Total number of reports: {}".format(num_of_reports)) - print("----=================----") - - print("\n----======== Statistics ========----") - statistics_rows = [ - ["Number of processed plist files", str(num_of_plist_files)], - ["Number of analyzer reports", str(num_of_reports)]] - print(twodim_to_table(statistics_rows, False)) - - print("\n----==== Checker Statistics ====----") - header = ["Checker name", "Severity", "Number of reports"] - print(twodim_to_table([header] + checker_rows)) - - print("\n----==== Severity Statistics ====----") - header = ["Severity", "Number of reports"] - print(twodim_to_table([header] + severity_rows)) - - -def get_report_data_from_plist( - plist: dict, - skip_report_handler: Optional[SkipReportHandler] = None, - trim_path_prefixes_handler: Optional[TrimPathPrefixHandler] = None -): - """ - Returns a dictionary with the source file contents and the reports parsed - from the plist. - """ - files = plist['files'] - reports: Reports = [] - file_sources: FileSources = {} - - def update_source_file(file_id: int): - """ - Updates file source data by file id if the given file hasn't been - processed. 
- """ - if file_id not in file_sources: - file_path = files[file_id] - with open(file_path, 'r', encoding='utf-8', - errors='ignore') as source_data: - # trim path prefixes after file loading - if trim_path_prefixes_handler: - file_path = trim_path_prefixes_handler(file_path) - file_sources[file_id] = {'id': file_id, - 'path': file_path, - 'content': source_data.read()} - - for diag in plist['diagnostics']: - bug_path_items = [item for item in diag['path']] - - source_file = files[diag['location']['file']] - report_line = diag['location']['line'] - report_hash = diag['issue_hash_content_of_line_in_context'] - checker_name = diag['check_name'] - source_code_comments: list = [] - - if skip_report_handler: - skip, source_code_comments = skip_report_handler(report_hash, - source_file, - report_line, - checker_name, - diag, - files) - if skip: - continue - - # Processing bug path events. - events: Events = [] - for path in bug_path_items: - kind = path.get('kind') - if kind == 'event': - events.append({'location': path['location'], - 'message': path['message']}) - else: - continue - - update_source_file(path['location']['file']) - - # Processing macro expansions. - macros: Macros = [] - for macro in diag.get('macro_expansions', []): - macros.append({'location': macro['location'], - 'expansion': macro['expansion'], - 'name': macro['name']}) - - update_source_file(macro['location']['file']) - - # Processing notes. 
- notes: Notes = [] - for note in diag.get('notes', []): - notes.append({'location': note['location'], - 'message': note['message']}) - - update_source_file(note['location']['file']) - - # trim path prefixes after skip_report_handler filtering - if trim_path_prefixes_handler: - source_file = trim_path_prefixes_handler(source_file) - - reviewStatus = None - if len(source_code_comments) == 1: - reviewStatus = source_code_comments[0]['status'] \ - .capitalize().replace('_', ' ') - - reports.append({'events': events, - 'macros': macros, - 'notes': notes, - 'path': source_file, - 'reportHash': report_hash, - 'checkerName': checker_name, - 'reviewStatus': reviewStatus, - 'severity': None}) - - return {'files': file_sources, - 'reports': reports} - - -def plist_to_html( - file_path: str, - output_path: str, - html_builder: HtmlBuilder, - skip_report_handler: Optional[SkipReportHandler] = None, - trim_path_prefixes_handler: Optional[TrimPathPrefixHandler] = None -) -> Tuple[Optional[str], Set[str]]: - """ - Prints the results in the given file to HTML file. - - Returns the skipped plist files because of source - file content change. 
- """ - changed_source: Set[str] = set() - if not file_path.endswith(".plist"): - print("\nSkipping input file {0} as it is not a plist.".format( - file_path)) - return file_path, changed_source - - print("\nParsing input file '" + file_path + "'") - try: - plist = {} - with open(file_path, 'rb') as plist_file: - plist = plistlib.load(plist_file) - - report_data = get_report_data_from_plist(plist, - skip_report_handler, - trim_path_prefixes_handler) - - plist_mtime = get_last_mod_time(file_path) - - source_changed = False - - for sf in plist.get('files', []): - sf_mtime = get_last_mod_time(sf) - if sf_mtime > plist_mtime: - source_changed = True - changed_source.add(sf) - - if source_changed: - return file_path, changed_source - - if report_data is None or not report_data['reports']: - print('No report data in {0} file.'.format(file_path)) - return file_path, changed_source - - html_filename = os.path.basename(file_path) + '.html' - html_output_path = os.path.join(output_path, html_filename) - html_builder.create(html_output_path, report_data) - - print('Html file was generated: {0}'.format(html_output_path)) - return None, changed_source - - except (ExpatError, plistlib.InvalidFileException) as err: - print('Failed to process plist file: ' + file_path + - ' wrong file format?', err) - return file_path, changed_source - except AttributeError as ex: - print('Failed to get important report data from plist.', ex) - return file_path, changed_source - except IndexError as iex: - print('Indexing error during processing plist file ' + file_path, iex) - return file_path, changed_source - except Exception as ex: - print('Error during processing reports from the plist file: ' + - file_path, ex) - return file_path, changed_source - - -def parse( - input_path: str, - output_path: str, - layout_dir: str, - skip_report_handler: Optional[SkipReportHandler] = None, - html_builder: Optional[HtmlBuilder] = None, - trim_path_prefixes_handler: Optional[TrimPathPrefixHandler] = None -) 
-> Set[str]: - """ - Parses plist files from the given input directory to the output directory. - Return a set of changed files. - """ - files = [] - input_path = os.path.abspath(input_path) - output_dir = os.path.abspath(output_path) - - if os.path.exists(output_path): - print("Previous analysis results in '{0}' have been removed, " - "overwriting with current results.".format(output_dir)) - shutil.rmtree(output_path) - - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - if os.path.isfile(input_path): - files.append(input_path) - elif os.path.isdir(input_path): - _, _, file_names = next(os.walk(input_path), ([], [], [])) - files = [os.path.join(input_path, file_name) for file_name - in file_names] - - # Skipped plist reports from html generation because it is not a - # plist file or there are no reports in it. - skipped_report = set() - - # Source files which modification time changed since the last analysis. - changed_source_files: Set[str] = set() - - if not html_builder: - html_builder = HtmlBuilder(layout_dir) - - for file_path in files: - sr, changed_source = plist_to_html(file_path, - output_path, - html_builder, - skip_report_handler, - trim_path_prefixes_handler) - if changed_source: - changed_source_files = changed_source_files.union(changed_source) - if sr: - skipped_report.add(sr) - - return changed_source_files - - -def __add_arguments_to_parser(parser: argparse.ArgumentParser): - parser.add_argument('input', - type=str, - nargs='+', - metavar='file/folder', - help="The plist files and/or folders containing " - "analysis results which should be parsed.") - - parser.add_argument('-o', '--output', - dest="output_dir", - required=True, - help="Generate HTML output files in the given folder.") - - curr_file_dir = os.path.dirname(os.path.realpath(__file__)) - parser.add_argument('-l', '--layout', - dest="layout_dir", - required=False, - default=os.path.join(curr_file_dir, - '..', 'plist_to_html', 'static'), - help="Directory which contains 
dependency HTML, CSS " - "and JavaScript files.") - - -def main(): - """ Plist parser main command line. """ - parser = argparse.ArgumentParser( - prog="plist-to-html", - description="Parse and create HTML files from one or more '.plist' " - "result files.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - __add_arguments_to_parser(parser) - - args = parser.parse_args() - - if isinstance(args.input, str): - args.input = [args.input] - - # Source files which modification time changed since the last analysis. - changed_source_files = set() - - html_builder = HtmlBuilder(args.layout_dir) - for input_path in args.input: - changed_files = parse(input_path, args.output_dir, args.layout_dir, - None, html_builder) - changed_source_files.union(changed_files) - - html_builder.create_index_html(args.output_dir) - html_builder.create_statistics_html(args.output_dir) - - print('\nTo view statistics in a browser run:\n> firefox {0}'.format( - os.path.join(args.output_dir, 'statistics.html'))) - - print('\nTo view the results in a browser run:\n> firefox {0}'.format( - os.path.join(args.output_dir, 'index.html'))) - - if changed_source_files: - changed_files = '\n'.join([' - ' + f for f in changed_source_files]) - print("\nThe following source file contents changed since the " - "latest analysis:\n{0}\nPlease analyze your project again to " - "update the reports!".format(changed_files)) - - -if __name__ == "__main__": - main() diff --git a/tools/plist_to_html/requirements_py/dev/requirements.txt b/tools/plist_to_html/requirements_py/dev/requirements.txt deleted file mode 100644 index 73168800e3..0000000000 --- a/tools/plist_to_html/requirements_py/dev/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -nose==1.3.7 -pycodestyle==2.7.0 -pylint==2.8.2 -mypy==0.812 -mypy_extensions==0.4.3 diff --git a/tools/plist_to_html/setup.py b/tools/plist_to_html/setup.py deleted file mode 100644 index 95f19e918c..0000000000 --- a/tools/plist_to_html/setup.py +++ /dev/null @@ -1,39 
+0,0 @@ -#!/usr/bin/env python3 - -import os -import setuptools - -readme_file_path = os.path.join( - "..", "..", "docs", "tools", "plist_to_html.md") - -with open(readme_file_path, "r", encoding="utf-8", errors="ignore") as fh: - long_description = fh.read() - -setuptools.setup( - name="plist-to-html", - version="0.1.0", - author='CodeChecker Team (Ericsson)', - description="Parse and create HTML files from one or more '.plist' " - "result files.", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/Ericsson/CodeChecker", - keywords=['clang', 'report-converter', 'plist-to-html', 'plist', 'html', - 'static-analysis', 'analysis'], - license='LICENSE.txt', - packages=setuptools.find_packages(), - include_package_data=True, - classifiers=[ - "Environment :: Console", - "Intended Audience :: Developers", - "Operating System :: MacOS", - "Operating System :: POSIX", - "Programming Language :: Python :: 3", - ], - python_requires='>=3.6', - entry_points={ - 'console_scripts': [ - 'plist-to-html = plist_to_html.PlistToHtml:main' - ] - }, -) diff --git a/tools/plist_to_html/tests/Makefile b/tools/plist_to_html/tests/Makefile deleted file mode 100644 index a9bb150bd7..0000000000 --- a/tools/plist_to_html/tests/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# Environment variables to run tests. - -# Test project configuration, tests are run on these files. -TEST_PROJECT ?= TEST_PROJ=$(CURRENT_DIR)/tests/projects - -REPO_ROOT ?= REPO_ROOT=$(ROOT) - -LAYOUT_DIR ?= LAYOUT_DIR=$(STATIC_DIR) - -# Nose test runner configuration options. 
-NOSECFG = --config .noserc - -test: mypy pycodestyle pylint test_unit - -test_in_env: pycodestyle_in_env pylint_in_env test_unit_in_env - -MYPY_TEST_CMD = mypy plist_to_html tests - -mypy: - $(MYPY_TEST_CMD) - -mypy_in_env: venv_dev - $(ACTIVATE_DEV_VENV) && $(MYPY_TEST_CMD) - -PYCODESTYLE_TEST_CMD = pycodestyle plist_to_html tests - -pycodestyle: - $(PYCODESTYLE_TEST_CMD) - -pycodestyle_in_env: venv_dev - $(ACTIVATE_DEV_VENV) && $(PYCODESTYLE_TEST_CMD) - -PYLINT_TEST_CMD = pylint -j0 ./plist_to_html ./tests \ - --disable=all \ - --enable=logging-format-interpolation,old-style-class,unused-wildcard-import,unused-import,unused-variable,len-as-condition,bad-indentation - -pylint: - $(PYLINT_TEST_CMD) - -pylint_in_env: venv - $(ACTIVATE_DEV_VENV) && $(PYLINT_TEST_CMD) - -UNIT_TEST_CMD = $(REPO_ROOT) $(TEST_PROJECT) $(LAYOUT_DIR) \ - nosetests $(NOSECFG) tests/unit - -test_unit: dep - $(UNIT_TEST_CMD) - -test_unit_in_env: venv_dev dep - $(ACTIVATE_DEV_VENV) && $(UNIT_TEST_CMD) diff --git a/tools/report-converter/.gitignore b/tools/report-converter/.gitignore index 35a3a8c7ba..8882435063 100644 --- a/tools/report-converter/.gitignore +++ b/tools/report-converter/.gitignore @@ -1,3 +1,4 @@ build/ report_converter.egg-info *.class +codechecker_report_converter/report/output/html/static/vendor diff --git a/tools/report-converter/MANIFEST.in b/tools/report-converter/MANIFEST.in new file mode 100644 index 0000000000..80fb6f108b --- /dev/null +++ b/tools/report-converter/MANIFEST.in @@ -0,0 +1,3 @@ +include README.md +include *.txt +recursive-include codechecker_report_converter/report/output/html/static * diff --git a/tools/report-converter/Makefile b/tools/report-converter/Makefile index 2b8d9f2e62..5c7320cbf0 100644 --- a/tools/report-converter/Makefile +++ b/tools/report-converter/Makefile @@ -5,6 +5,8 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- +CODEMIRROR = 
https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.30.0 + CURRENT_DIR = ${CURDIR} ROOT = $(CURRENT_DIR) @@ -12,6 +14,10 @@ BUILD_DIR = $(CURRENT_DIR)/build PYTHON_BIN ?= python3 REPORT_CONVERTER_DIR = $(BUILD_DIR)/report_converter +STATIC_DIR = $(CURRENT_DIR)/codechecker_report_converter/report/output/html/static +VENDOR_DIR = $(STATIC_DIR)/vendor +CODEMIRROR_DIR = $(VENDOR_DIR)/codemirror + ACTIVATE_DEV_VENV ?= . venv_dev/bin/activate ACTIVATE_RUNTIME_VENV ?= . venv/bin/activate @@ -38,14 +44,14 @@ clean_venv_dev: include tests/Makefile -package: +package: dep # Install package in 'development mode'. - ${PYTHON_BIN} -m pip install -e . -e ../codechecker_report_hash/ + ${PYTHON_BIN} setup.py develop -build: +build: dep ${PYTHON_BIN} setup.py build --build-purelib $(REPORT_CONVERTER_DIR) -dist: +dist: dep # Create a source distribution. ${PYTHON_BIN} setup.py sdist @@ -61,6 +67,46 @@ upload: $(eval PKG_VERSION := $(shell ${PYTHON_BIN} setup.py --version)) twine upload -r pypi dist/$(PKG_NAME)-$(PKG_VERSION).tar.gz +vendor_dir: + mkdir -p $(VENDOR_DIR) + +codemirror_dir: vendor_dir + mkdir -p $ $(CODEMIRROR_DIR) + +dep: codemirror + +codemirror: $(CODEMIRROR_DIR)/codemirror.min.js +codemirror: $(CODEMIRROR_DIR)/codemirror.min.css +codemirror: $(CODEMIRROR_DIR)/codemirror.LICENSE +codemirror: $(CODEMIRROR_DIR)/clike.min.js + +$(CODEMIRROR_DIR)/codemirror.min.js: codemirror_dir + [ -f $(CODEMIRROR_DIR)/codemirror.min.js ] && : || \ + curl -sSfLk --get $(CODEMIRROR)/codemirror.min.js \ + -z $(CODEMIRROR_DIR)/codemirror.min.js \ + -o $(CODEMIRROR_DIR)/codemirror.min.js + +$(CODEMIRROR_DIR)/codemirror.min.css: codemirror_dir + [ -f $(CODEMIRROR_DIR)/codemirror.min.css ] && : || \ + curl -sSfLk --get $(CODEMIRROR)/codemirror.min.css \ + -z $(CODEMIRROR_DIR)/codemirror.min.css \ + -o $(CODEMIRROR_DIR)/codemirror.min.css + +$(CODEMIRROR_DIR)/codemirror.LICENSE: codemirror_dir + [ -f $(CODEMIRROR_DIR)/codemirror.LICENSE ] && : || \ + curl -sSfLk --get 
https://raw.githubusercontent.com/codemirror/CodeMirror/master/LICENSE \ + -z $(CODEMIRROR_DIR)/codemirror.LICENSE \ + -o $(CODEMIRROR_DIR)/codemirror.LICENSE + +$(CODEMIRROR_DIR)/clike.min.js: codemirror_dir + [ -f $(CODEMIRROR_DIR)/clike.min.js ] && : || \ + curl -sSfLk --get $(CODEMIRROR)/mode/clike/clike.min.js \ + -z $(CODEMIRROR_DIR)/clike.min.js \ + -o $(CODEMIRROR_DIR)/clike.min.js + clean: rm -rf $(BUILD_DIR) rm -rf report_converter.egg-info + +clean_vendor: + rm -rf $(VENDOR_DIR) diff --git a/tools/report-converter/codechecker_report_converter/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzer_result.py deleted file mode 100644 index 37707a9ff8..0000000000 --- a/tools/report-converter/codechecker_report_converter/analyzer_result.py +++ /dev/null @@ -1,162 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from abc import ABCMeta, abstractmethod -import json -import logging -import os -import plistlib - -from codechecker_report_hash.hash import get_report_hash, HashType - -from . import __title__, __version__ - -LOG = logging.getLogger('ReportConverter') - - -class AnalyzerResult(metaclass=ABCMeta): - """ Base class to transform analyzer result. """ - - # Short name of the analyzer. - TOOL_NAME = None - - # Full name of the analyzer. - NAME = None - - # Link to the official analyzer website. - URL = None - - def transform(self, analyzer_result, output_dir, - file_name="{source_file}_{analyzer}", metadata=None): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - analyzer_result = os.path.abspath(analyzer_result) - plist_objs = self.parse(analyzer_result) - if not plist_objs: - LOG.info("No '%s' results can be found in the given code analyzer " - "output.", self.TOOL_NAME) - return False - - self._post_process_result(plist_objs) - - self._write(plist_objs, output_dir, file_name) - - if metadata: - self._save_metadata(metadata, output_dir) - else: - LOG.warning("Use '--meta' option to provide extra information " - "to the CodeChecker server such as analyzer version " - "and analysis command when storing the results to it. " - "For more information see the --help.") - - return True - - @abstractmethod - def parse(self, analyzer_result): - """ Creates plist objects from the given analyzer result. - - Returns a list of plist objects. - """ - raise NotImplementedError("Subclasses should implement this!") - - def _save_metadata(self, metadata, output_dir): - """ Save metadata.json file to the output directory which will be used - by CodeChecker. - """ - meta_info = { - "version": 2, - "num_of_report_dir": 1, - "tools": [] - } - - tool = {"name": self.TOOL_NAME} - - if "analyzer_version" in metadata: - tool["version"] = metadata["analyzer_version"] - - if "analyzer_command" in metadata: - tool["command"] = metadata["analyzer_command"] - - meta_info["tools"].append(tool) - - metadata_file = os.path.join(output_dir, 'metadata.json') - with open(metadata_file, 'w', - encoding="utf-8", errors="ignore") as metafile: - json.dump(meta_info, metafile) - - def _post_process_result(self, plist_objs): - """ Post process the parsed result. - - By default it will add report hashes and metada information for the - diagnostics. - """ - for plist_obj in plist_objs: - self._add_report_hash(plist_obj) - self._add_metadata(plist_obj) - - def _add_report_hash(self, plist_obj): - """ Generate report hash for the given plist data. 
""" - files = plist_obj['files'] - for diag in plist_obj['diagnostics']: - report_hash = get_report_hash( - diag, files[diag['location']['file']], HashType.CONTEXT_FREE) - - diag['issue_hash_content_of_line_in_context'] = report_hash - - def _add_metadata(self, plist_obj): - """ Add metada information to the given plist data. """ - plist_obj['metadata'] = { - 'analyzer': { - 'name': self.TOOL_NAME - }, - 'generated_by': { - 'name': __title__, - 'version': __version__ - } - } - - def _get_analyzer_result_file_content(self, result_file): - """ Return the content of the given file. """ - if not os.path.exists(result_file): - LOG.error("Result file does not exists: %s", result_file) - return - - if os.path.isdir(result_file): - LOG.error("Directory is given instead of a file: %s", result_file) - return - - with open(result_file, 'r', encoding='utf-8', - errors='replace') as analyzer_result: - return analyzer_result.readlines() - - def _write(self, plist_objs, output_dir, file_name): - """ Creates plist files from the parse result to the given output. - - It will generate a context free hash for each diagnostics. 
- """ - output_dir = os.path.abspath(output_dir) - for plist_data in plist_objs: - source_file = os.path.basename(plist_data['files'][0]) - - out_file_name = file_name \ - .replace("{source_file}", source_file) \ - .replace("{analyzer}", self.TOOL_NAME) - out_file_name = '{0}.plist'.format(out_file_name) - out_file = os.path.join(output_dir, out_file_name) - - LOG.info("Create/modify plist file: '%s'.", out_file) - LOG.debug(plist_data) - - try: - with open(out_file, 'wb') as plist_file: - plistlib.dump(plist_data, plist_file) - except TypeError as err: - LOG.error('Failed to write plist file: %s', out_file) - LOG.error(err) diff --git a/tools/codechecker_report_hash/codechecker_report_hash/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/__init__.py similarity index 100% rename from tools/codechecker_report_hash/codechecker_report_hash/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/analyzer_result.py new file mode 100644 index 0000000000..018caedc65 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/analyzer_result.py @@ -0,0 +1,162 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import json +import logging +import os + +from abc import ABCMeta, abstractmethod +from collections import defaultdict +from typing import Dict, List, Optional + +from codechecker_report_converter.report import Report, report_file +from codechecker_report_converter.report.hash import get_report_hash, HashType +from codechecker_report_converter.report.parser.base import AnalyzerInfo + + +LOG = logging.getLogger('report-converter') + + +class AnalyzerResultBase(metaclass=ABCMeta): + """ Base class to transform analyzer result. """ + + # Short name of the analyzer. + TOOL_NAME: str = '' + + # Full name of the analyzer. + NAME: str = '' + + # Link to the official analyzer website. + URL: str = '' + + def transform( + self, + analyzer_result_file_path: str, + output_dir_path: str, + export_type: str, + file_name: str = "{source_file}_{analyzer}", + metadata: Optional[Dict[str, str]] = None + ) -> bool: + """ + Converts the given analyzer result to the output directory in the given + output type. 
+ """ + parser = report_file.get_parser(f".{export_type}") + if not parser: + LOG.error("The given output type '%s' is not supported!", + export_type) + return False + + analyzer_result_file_path = os.path.abspath(analyzer_result_file_path) + reports = self.get_reports(analyzer_result_file_path) + if not reports: + LOG.info("No '%s' results can be found in the given code analyzer " + "output.", self.TOOL_NAME) + return False + + self._post_process_result(reports) + + for report in reports: + report.analyzer_result_file_path = analyzer_result_file_path + + if not report.checker_name: + report.checker_name = self.TOOL_NAME + + self._write( + reports, output_dir_path, parser, export_type, file_name) + + if metadata: + self._save_metadata(metadata, output_dir_path) + else: + LOG.warning("Use '--meta' option to provide extra information " + "to the CodeChecker server such as analyzer version " + "and analysis command when storing the results to it. " + "For more information see the --help.") + + return True + + @abstractmethod + def get_reports(self, analyzer_result_file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. """ + raise NotImplementedError("Subclasses should implement this!") + + def _save_metadata(self, metadata, output_dir): + """ Save metadata.json file to the output directory which will be used + by CodeChecker. + """ + meta_info = { + "version": 2, + "num_of_report_dir": 1, + "tools": [] + } + + tool = {"name": self.TOOL_NAME} + + if "analyzer_version" in metadata: + tool["version"] = metadata["analyzer_version"] + + if "analyzer_command" in metadata: + tool["command"] = metadata["analyzer_command"] + + meta_info["tools"].append(tool) + + metadata_file = os.path.join(output_dir, 'metadata.json') + with open(metadata_file, 'w', + encoding="utf-8", errors="ignore") as metafile: + json.dump(meta_info, metafile) + + def _post_process_result(self, reports: List[Report]): + """ Post process the parsed result. 
+ + By default it will add report hashes and metada information. + """ + for report in reports: + self._add_report_hash(report) + self._add_metadata(report) + + def _add_report_hash(self, report: Report): + """ Generate report hash for the given plist data. """ + report.report_hash = get_report_hash(report, HashType.CONTEXT_FREE) + + def _add_metadata(self, report: Report): + """ Add metada information to the given plist data. """ + report.analyzer_name = self.TOOL_NAME + + def _write( + self, + reports: List[Report], + output_dir_path: str, + parser, + export_type: str, + file_name: str + ): + """ Creates plist files from the parse result to the given output. + + It will generate a context free hash for each diagnostics. + """ + output_dir = os.path.abspath(output_dir_path) + + file_to_report: Dict[str, List[Report]] = defaultdict(list) + for report in reports: + file_to_report[report.file.original_path].append(report) + + analyzer_info = AnalyzerInfo(name=self.TOOL_NAME) + for file_path, file_reports in file_to_report.items(): + source_file = os.path.basename(file_path) + + out_file_name = file_name \ + .replace("{source_file}", source_file) \ + .replace("{analyzer}", self.TOOL_NAME) + out_file_name = f"{out_file_name}.{export_type}" + out_file_path = os.path.join(output_dir, out_file_name) + + LOG.info("Create/modify plist file: '%s'.", out_file_path) + LOG.debug(file_reports) + + data = parser.convert(file_reports, analyzer_info) + parser.write(data, out_file_path) diff --git a/tools/codechecker_report_hash/tests/unit/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/__init__.py similarity index 100% rename from tools/codechecker_report_hash/tests/unit/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/analyzer_result.py 
b/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/analyzer_result.py new file mode 100644 index 0000000000..f3dad2b73a --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +from typing import List + +from codechecker_report_converter.report import Report + +from ..analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Clang Tidy. """ + + TOOL_NAME = 'clang-tidy' + NAME = 'Clang Tidy' + URL = 'https://clang.llvm.org/extra/clang-tidy' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. """ + return Parser().get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/parser.py new file mode 100644 index 0000000000..ade28393bf --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/parser.py @@ -0,0 +1,175 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import logging +import os +import re + +from copy import deepcopy +from typing import Iterator, List, Tuple + +from codechecker_report_converter.report import BugPathEvent, \ + get_or_create_file, Report +from ..parser import BaseParser + + +LOG = logging.getLogger('report-converter') + + +class Parser(BaseParser): + """ Parser for clang-tidy console output. """ + + def __init__(self): + super(Parser, self).__init__() + + # Regex for parsing a clang-tidy message. + self.message_line_re = re.compile( + # File path followed by a ':'. + r'^(?P[\S ]+?):' + # Line number followed by a ':'. + r'(?P\d+?):' + # Column number followed by a ':' and a space. + r'(?P\d+?): ' + # Severity followed by a ':'. + r'(?P(error|warning)):' + # Checker message. + r'(?P[\S \t]+)\s*' + # Checker name. + r'\[(?P.*)\]') + + # Matches a note. + self.note_line_re = re.compile( + # File path followed by a ':'. + r'^(?P[\S ]+?):' + # Line number followed by a ':'. + r'(?P\d+?):' + # Column number followed by a ':' and a space. + r'(?P\d+?): ' + # Severity == note. + r'note:' + # Checker message. + r'(?P.*)') + + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. 
""" + match = self.message_line_re.match(line) + if match is None: + return [], next(it) + + checker_names = match.group('checker').strip().split(",") + report = Report( + get_or_create_file( + os.path.abspath(match.group('path')), self._file_cache), + int(match.group('line')), + int(match.group('column')), + match.group('message').strip(), + checker_names[0], + bug_path_events=[]) + + try: + line = next(it) + line = self._parse_code(it, line) + line = self._parse_fixits(report, it, line) + line = self._parse_notes(report, it, line) + except StopIteration: + line = '' + finally: + report.bug_path_events.append(BugPathEvent( + report.message, + report.file, + report.line, + report.column)) + + # When a checker name and the alias of this checker is turned on, + # Clang Tidy (>v11) will generate only one report where the checker + # names are concatenated with ',' mark. With this we will generate + # multiple reports for each checker name / alias. + reports = [] + for checker_name in checker_names: + r = deepcopy(report) + r.checker_name = checker_name + r.category = self._get_category(checker_name) + + reports.append(r) + + return reports, line + + def _get_category(self, checker_name: str) -> str: + """ Get category for Clang-Tidy checker. """ + parts = checker_name.split('-') + return parts[0] if parts else 'unknown' + + def _parse_code( + self, + it: Iterator[str], + line: str + ) -> str: + # Eat code line. + if self.note_line_re.match(line) or self.message_line_re.match(line): + LOG.debug("Unexpected line: %s. Expected a code line!", line) + return line + + # Eat arrow line. + # FIXME: range support? + line = next(it) + if '^' not in line: + LOG.debug("Unexpected line: %s. Expected an arrow line!", line) + return line + + return next(it) + + def _parse_fixits( + self, + report: Report, + it: Iterator[str], + line: str + ) -> str: + """ Parses fixit messages. 
""" + + while self.message_line_re.match(line) is None and \ + self.note_line_re.match(line) is None: + message_text = line.strip() + + if message_text != '': + report.bug_path_events.append(BugPathEvent( + f"{message_text} (fixit)", + report.file, + report.line, + line.find(message_text) + 1)) + + line = next(it) + return line + + def _parse_notes( + self, + report: Report, + it: Iterator[str], + line: str + ) -> str: + """ Parses note messages. """ + + while self.message_line_re.match(line) is None: + match = self.note_line_re.match(line) + if match is None: + LOG.debug("Unexpected line: %s", line) + return next(it) + + report.bug_path_events.append(BugPathEvent( + match.group('message').strip(), + get_or_create_file( + os.path.abspath(match.group('path')), self._file_cache), + int(match.group('line')), + int(match.group('column')))) + + line = next(it) + line = self._parse_code(it, line) + return line diff --git a/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/__init__.py new file mode 100644 index 0000000000..4494ed481c --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# --------------------------------------------------------------------- diff --git a/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/analyzer_result.py new file mode 100644 index 0000000000..0132cb63cf --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +from typing import List + +from codechecker_report_converter.report import Report + +from ..analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Coccinelle. """ + + TOOL_NAME = 'coccinelle' + NAME = 'Coccinelle' + URL = 'https://github.com/coccinelle/coccinelle' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. 
""" + return Parser(file_path).get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/coccinelle/output_parser.py b/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/parser.py similarity index 69% rename from tools/report-converter/codechecker_report_converter/coccinelle/output_parser.py rename to tools/report-converter/codechecker_report_converter/analyzers/coccinelle/parser.py index 848b3c19d0..c1cd5890eb 100644 --- a/tools/report-converter/codechecker_report_converter/coccinelle/output_parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/parser.py @@ -10,20 +10,25 @@ import os import re -from ..output_parser import BaseParser, Message -LOG = logging.getLogger('ReportConverter') +from typing import Iterator, List, Tuple +from codechecker_report_converter.report import get_or_create_file, Report +from ..parser import BaseParser -class CoccinelleParser(BaseParser): + +LOG = logging.getLogger('report-converter') + + +class Parser(BaseParser): """ Parser for Coccinelle Output """ - def __init__(self, analyzer_result): - super(CoccinelleParser, self).__init__() + def __init__(self, analyzer_result: str): + super(Parser, self).__init__() self.analyzer_result = analyzer_result - self.checker_name = None + self.checker_name: str = '' self.checker_name_re = re.compile( r'^Processing (?P[\S ]+)\.cocci$' @@ -39,10 +44,12 @@ def __init__(self, analyzer_result): # Message. r'(?P[\S \t]+)\s*') - def parse_message(self, it, line): - """ - Actual Parsing function for the given line - """ + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. 
""" match = self.message_line_re.match(line) checker_match = self.checker_name_re.match(line) @@ -51,20 +58,20 @@ def parse_message(self, it, line): checker_match.group('checker_name') if match is None: - return None, next(it) + return [], next(it) file_path = os.path.normpath( os.path.join(os.path.dirname(self.analyzer_result), match.group('path'))) - message = Message( - file_path, + report = Report( + get_or_create_file(file_path, self._file_cache), int(match.group('line')), int(match.group('column')), match.group('message').strip(), self.checker_name) try: - return message, next(it) + return [report], next(it) except StopIteration: - return message, '' + return [report], '' diff --git a/tools/plist_to_html/plist_to_html/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/cppcheck/__init__.py similarity index 100% rename from tools/plist_to_html/plist_to_html/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/cppcheck/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/cppcheck/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/cppcheck/analyzer_result.py new file mode 100644 index 0000000000..59b7e0da56 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/cppcheck/analyzer_result.py @@ -0,0 +1,53 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + + +import glob +import logging +import os + +from typing import Dict, List + +from codechecker_report_converter.report import File, Report, report_file + +from ..analyzer_result import AnalyzerResultBase + + +LOG = logging.getLogger('report-converter') + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Cppcheck. """ + + TOOL_NAME = 'cppcheck' + NAME = 'Cppcheck' + URL = 'http://cppcheck.sourceforge.net' + + def get_reports(self, analyzer_result_path: str) -> List[Report]: + """ Get reports from the given analyzer result. """ + reports: List[Report] = [] + + plist_files = [] + if os.path.isdir(analyzer_result_path): + plist_files = glob.glob(os.path.join( + analyzer_result_path, "*.plist")) + elif os.path.isfile(analyzer_result_path) and \ + analyzer_result_path.endswith(".plist"): + plist_files = [analyzer_result_path] + else: + LOG.error("The given input should be an existing CppCheck result " + "directory or a plist file.") + return reports + + file_cache: Dict[str, File] = {} + for plist_file in plist_files: + plist_reports = report_file.get_reports( + plist_file, None, file_cache) + reports.extend(plist_reports) + + return reports diff --git a/tools/plist_to_html/tests/libtest/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/cpplint/__init__.py similarity index 100% rename from tools/plist_to_html/tests/libtest/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/cpplint/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/cpplint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/cpplint/analyzer_result.py new file mode 100644 index 0000000000..b52fc7ff25 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/cpplint/analyzer_result.py @@ -0,0 
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------

from typing import List

from codechecker_report_converter.report import Report

from ..analyzer_result import AnalyzerResultBase
from .parser import Parser


class AnalyzerResult(AnalyzerResultBase):
    """ Transform analyzer result of cpplint. """

    TOOL_NAME = 'cpplint'
    NAME = 'cpplint'
    URL = 'https://github.com/cpplint/cpplint'

    def get_reports(self, file_path: str) -> List[Report]:
        """ Parse the cpplint output file and return the reports in it. """
        # The parser receives the result file path twice: once to resolve
        # source paths relative to it and once as the file to read.
        parser = Parser(file_path)
        return parser.get_reports(file_path)
self.analyzer_result = analyzer_result @@ -34,14 +38,16 @@ def __init__(self, analyzer_result): # Checker name followed by a whitespace r'(?P\S+)\]\s') - def parse_message(self, it, line): - """ - Actual Parsing function for the given line - """ + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. """ match = self.message_line_re.match(line) if match is None: - return None, next(it) + return [], next(it) file_path = os.path.normpath( os.path.join(os.path.dirname(self.analyzer_result), @@ -49,14 +55,14 @@ def parse_message(self, it, line): column = 0 - message = Message( - file_path, + report = Report( + get_or_create_file(file_path, self._file_cache), int(match.group('line')), column, match.group('message').strip(), match.group('checker_name')) try: - return message, next(it) + return [report], next(it) except StopIteration: - return message, '' + return [report], '' diff --git a/tools/plist_to_html/tests/unit/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/eslint/__init__.py similarity index 100% rename from tools/plist_to_html/tests/unit/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/eslint/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/eslint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/eslint/analyzer_result.py new file mode 100644 index 0000000000..56dbf29de9 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/eslint/analyzer_result.py @@ -0,0 +1,66 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------

import json
import logging
import os

from typing import Dict, List

from codechecker_report_converter.report import File, get_or_create_file, \
    Report

from ..analyzer_result import AnalyzerResultBase


LOG = logging.getLogger('report-converter')


class AnalyzerResult(AnalyzerResultBase):
    """ Transform analyzer result of the ESLint analyzer. """

    TOOL_NAME = 'eslint'
    NAME = 'ESLint'
    URL = 'https://eslint.org/'

    def get_reports(self, file_path: str) -> List[Report]:
        """ Get reports from the given analyzer result.

        'file_path' is an ESLint JSON output file; source file paths inside
        it are resolved relative to this file's directory.
        """
        reports: List[Report] = []

        if not os.path.exists(file_path):
            LOG.error("Report file does not exist: %s", file_path)
            return reports

        try:
            with open(file_path, 'r', encoding="utf-8", errors="ignore") as f:
                diagnostics = json.load(f)
        except (IOError, json.decoder.JSONDecodeError):
            LOG.error("Failed to parse the given analyzer result '%s'. Please "
                      "give a valid json file generated by ESLint.",
                      file_path)
            return reports

        # FIX: the loop below used to rebind 'file_path' itself, so from the
        # second diagnostic on, source paths were resolved against the
        # previous *source file's* directory instead of the result file's.
        result_dir = os.path.dirname(file_path)

        file_cache: Dict[str, File] = {}
        for diag in diagnostics:
            source_file_path = os.path.join(result_dir, diag.get('filePath'))

            if not os.path.exists(source_file_path):
                LOG.warning("Source file does not exist: %s",
                            source_file_path)
                continue

            for bug in diag.get('messages', []):
                reports.append(Report(
                    get_or_create_file(
                        os.path.abspath(source_file_path), file_cache),
                    int(bug['line']),
                    int(bug['column']),
                    bug['message'],
                    bug['ruleId']))

        return reports
""" + + TOOL_NAME = 'golint' + NAME = 'Golint' + URL = 'https://github.com/golang/lint' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. """ + return Parser(file_path).get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/golint/output_parser.py b/tools/report-converter/codechecker_report_converter/analyzers/golint/parser.py similarity index 67% rename from tools/report-converter/codechecker_report_converter/golint/output_parser.py rename to tools/report-converter/codechecker_report_converter/analyzers/golint/parser.py index 51dfb07063..28053ed222 100644 --- a/tools/report-converter/codechecker_report_converter/golint/output_parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/golint/parser.py @@ -10,16 +10,20 @@ import os import re -from ..output_parser import Message, BaseParser +from typing import Iterator, List, Tuple -LOG = logging.getLogger('ReportConverter') +from codechecker_report_converter.report import get_or_create_file, Report +from ..parser import BaseParser -class GolintParser(BaseParser): +LOG = logging.getLogger('report-converter') + + +class Parser(BaseParser): """ Parser for Golint output. """ def __init__(self, analyzer_result): - super(GolintParser, self).__init__() + super(Parser, self).__init__() self.analyzer_result = analyzer_result @@ -34,26 +38,27 @@ def __init__(self, analyzer_result): # Message. r'(?P[\S \t]+)\s*') - def parse_message(self, it, line): - """Parse the given line. - - Returns a (message, next_line) pair or throws a StopIteration. - The message could be None. - """ + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. 
""" match = self.message_line_re.match(line) if match is None: - return None, next(it) + return [], next(it) file_path = os.path.join(os.path.dirname(self.analyzer_result), match.group('path')) - message = Message( - file_path, + + report = Report( + get_or_create_file(file_path, self._file_cache), int(match.group('line')), int(match.group('column')), match.group('message').strip(), - None) + '') try: - return message, next(it) + return [report], next(it) except StopIteration: - return message, '' + return [report], '' diff --git a/tools/report-converter/codechecker_report_converter/coccinelle/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/infer/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/coccinelle/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/infer/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/infer/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/infer/analyzer_result.py new file mode 100644 index 0000000000..0341ce18c0 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/infer/analyzer_result.py @@ -0,0 +1,132 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import json +import logging +import os + +from typing import Dict, List, Optional + +from codechecker_report_converter.report import BugPathEvent, File, \ + get_or_create_file, Report + +from ..analyzer_result import AnalyzerResultBase + + +LOG = logging.getLogger('report-converter') + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of the FB Infer. 
""" + + TOOL_NAME = 'fbinfer' + NAME = 'Facebook Infer' + URL = 'https://fbinfer.com' + + def __init__(self): + super(AnalyzerResult, self).__init__() + self.__infer_out_parent_dir = None + self.__file_cache: Dict[str, File] = {} + + def get_reports(self, result_file_path: str) -> List[Report]: + """ Parse the given analyzer result. """ + reports: List[Report] = [] + + if os.path.isdir(result_file_path): + report_file = os.path.join(result_file_path, "report.json") + self.__infer_out_parent_dir = os.path.dirname(result_file_path) + else: + report_file = result_file_path + self.__infer_out_parent_dir = os.path.dirname( + os.path.dirname(result_file_path)) + + if not os.path.exists(report_file): + LOG.error("Report file does not exist: %s", report_file) + return reports + + try: + with open(report_file, 'r', + encoding="utf-8", errors="ignore") as f: + bugs = json.load(f) + except IOError: + LOG.error("Failed to parse the given analyzer result '%s'. Please " + "give a infer output directory which contains a valid " + "'report.json' file.", result_file_path) + return reports + + for bug in bugs: + report = self.__parse_report(bug) + if report: + reports.append(report) + + return reports + + def __get_abs_path(self, source_path): + """ Returns full path of the given source path. + It will try to find the given source path relative to the given + analyzer report directory (infer-out). + """ + if os.path.exists(source_path): + return os.path.abspath(source_path) + + full_path = os.path.join(self.__infer_out_parent_dir, source_path) + if os.path.exists(full_path): + return full_path + + LOG.warning("No source file found: %s", source_path) + + def __parse_report(self, bug) -> Optional[Report]: + """ Parse the given report and create a message from them. 
""" + report_hash = bug['hash'] + checker_name = bug['bug_type'] + + message = bug['qualifier'] + line = int(bug['line']) + col = int(bug['column']) + if col < 0: + col = 0 + + source_path = self.__get_abs_path(bug['file']) + if not source_path: + return None + + report = Report( + get_or_create_file( + os.path.abspath(source_path), self.__file_cache), + line, col, message, checker_name, + report_hash=report_hash, + bug_path_events=[]) + + for bug_trace in bug['bug_trace']: + event = self.__parse_bug_trace(bug_trace) + + if event: + report.bug_path_events.append(event) + + report.bug_path_events.append(BugPathEvent( + report.message, report.file, report.line, report.column)) + + return report + + def __parse_bug_trace(self, bug_trace) -> Optional[BugPathEvent]: + """ Creates event from a bug trace element. """ + source_path = self.__get_abs_path(bug_trace['filename']) + if not source_path: + return None + + message = bug_trace['description'] + line = int(bug_trace['line_number']) + col = int(bug_trace['column_number']) + if col < 0: + col = 0 + + return BugPathEvent( + message, + get_or_create_file(source_path, self.__file_cache), + line, + col) diff --git a/tools/report-converter/codechecker_report_converter/cppcheck/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/cppcheck/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/analyzer_result.py new file mode 100644 index 0000000000..f2994d15c5 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part 
# of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------

from typing import List

from codechecker_report_converter.report import Report

from ..analyzer_result import AnalyzerResultBase
from .parser import Parser


class AnalyzerResult(AnalyzerResultBase):
    """ Transform analyzer result of kernel-docs. """

    TOOL_NAME = 'kernel-doc'
    NAME = 'Kernel-Doc'
    URL = 'https://github.com/torvalds/linux/blob/master/scripts/kernel-doc'

    def get_reports(self, file_path: str) -> List[Report]:
        """ Parse the kernel-doc output file and return its reports. """
        # The parser receives the result file path twice: once to resolve
        # source paths relative to it and once as the file to read.
        parser = Parser(file_path)
        return parser.get_reports(file_path)
self.analyzer_result = analyzer_result @@ -32,30 +36,29 @@ def __init__(self, analyzer_result): # Message. r'(?P[\S \t]+)\s*') - def parse_message(self, it, line): - """ - Actual Parsing function for the given line - It is expected that each line contains a seperate report - """ + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. """ match = self.message_line_re.match(line) if match is None: - return None, next(it) + return [], next(it) file_path = os.path.normpath( os.path.join(os.path.dirname(self.analyzer_result), match.group('path'))) - checker_name = None - - message = Message( - file_path, + report = Report( + get_or_create_file(file_path, self._file_cache), int(match.group('line')), 0, match.group('message').strip(), - checker_name) + '') try: - return message, next(it) + return [report], next(it) except StopIteration: - return message, '' + return [report], '' diff --git a/tools/report-converter/codechecker_report_converter/cpplint/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/markdownlint/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/cpplint/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/markdownlint/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/markdownlint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/markdownlint/analyzer_result.py new file mode 100644 index 0000000000..2d387371fb --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/markdownlint/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------

from typing import List

from codechecker_report_converter.report import Report

from ..analyzer_result import AnalyzerResultBase
from .parser import Parser


class AnalyzerResult(AnalyzerResultBase):
    """ Transform analyzer result of Markdownlint. """

    TOOL_NAME = 'mdl'
    NAME = 'Markdownlint'
    URL = 'https://github.com/markdownlint/markdownlint'

    def get_reports(self, file_path: str) -> List[Report]:
        """ Parse the mdl output file and return the reports in it. """
        # The parser receives the result file path twice: once to resolve
        # source paths relative to it and once as the file to read.
        parser = Parser(file_path)
        return parser.get_reports(file_path)
def get_next(it):
    """ Return the next item from the iterator, or '' when exhausted.

    Uses the two-argument form of the built-in next() instead of a
    hand-rolled try/except StopIteration block.
    """
    return next(it, '')
""" + + def __init__(self): + self.reports: List[Report] = [] + self._file_cache: Dict[str, File] = {} + + def get_reports(self, file_path: str) -> List[Report]: + """ Parse the given output. """ + lines = self._get_analyzer_result_file_content(file_path) + if not lines: + return self.reports + + return self.get_reports_from_iter(lines) + + def get_reports_from_iter(self, lines: Iterable[str]) -> List[Report]: + """ Parse the given output lines. """ + it = iter(lines) + try: + next_line = next(it) + while True: + reports, next_line = self._parse_line(it, next_line) + if reports: + self.reports.extend(reports) + except StopIteration: + pass + + return self.reports + + def _get_analyzer_result_file_content( + self, + result_file_path: str + ) -> Optional[List[str]]: + """ Return the content of the given file. """ + if not os.path.exists(result_file_path): + LOG.error("Result file does not exists: %s", result_file_path) + return None + + if os.path.isdir(result_file_path): + LOG.error("Directory is given instead of a file: %s", + result_file_path) + return None + + with open(result_file_path, 'r', encoding='utf-8', + errors='replace') as analyzer_result: + return analyzer_result.readlines() + + @abstractmethod + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. 
""" + raise NotImplementedError("Subclasses should implement this!") diff --git a/tools/report-converter/codechecker_report_converter/eslint/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/eslint/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/pyflakes/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/analyzer_result.py new file mode 100644 index 0000000000..8c26495b6f --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +from typing import List + +from codechecker_report_converter.report import Report + +from ..analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Pyflakes. """ + + TOOL_NAME = 'pyflakes' + NAME = 'Pyflakes' + URL = 'https://github.com/PyCQA/pyflakes' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. 
""" + return Parser(file_path).get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/pyflakes/output_parser.py b/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/parser.py similarity index 64% rename from tools/report-converter/codechecker_report_converter/pyflakes/output_parser.py rename to tools/report-converter/codechecker_report_converter/analyzers/pyflakes/parser.py index ef6d5a7158..d250a1d3d2 100644 --- a/tools/report-converter/codechecker_report_converter/pyflakes/output_parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/parser.py @@ -10,16 +10,20 @@ import os import re -from ..output_parser import Message, BaseParser +from typing import Iterator, List, Tuple -LOG = logging.getLogger('ReportConverter') +from codechecker_report_converter.report import get_or_create_file, Report +from ..parser import BaseParser -class PyflakesParser(BaseParser): +LOG = logging.getLogger('report-converter') + + +class Parser(BaseParser): """ Parser for Pyflakes output. """ def __init__(self, analyzer_result): - super(PyflakesParser, self).__init__() + super(Parser, self).__init__() self.analyzer_result = analyzer_result @@ -32,29 +36,27 @@ def __init__(self, analyzer_result): # Message. r'(?P[\S \t]+)\s*') - def parse_message(self, it, line): - """Parse the given line. - - Returns a (message, next_line) pair or throws a StopIteration. - The message could be None. - """ + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. 
""" match = self.message_line_re.match(line) if match is None: - return None, next(it) + return [], next(it) file_path = os.path.join(os.path.dirname(self.analyzer_result), match.group('path')) - column = 0 - checker_name = None - message = Message( - file_path, + report = Report( + get_or_create_file(file_path, self._file_cache), int(match.group('line')), - column, + 0, match.group('message').strip(), - checker_name) + '') try: - return message, next(it) + return [report], next(it) except StopIteration: - return message, '' + return [report], '' diff --git a/tools/report-converter/codechecker_report_converter/golint/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/pylint/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/golint/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/pylint/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/pylint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/pylint/analyzer_result.py new file mode 100644 index 0000000000..60b1a8c21c --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/pylint/analyzer_result.py @@ -0,0 +1,64 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import json +import logging +import os + +from typing import Dict, List + +from codechecker_report_converter.report import File, get_or_create_file, \ + Report + +from ..analyzer_result import AnalyzerResultBase + + +LOG = logging.getLogger('report-converter') + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of the Pylint analyzer. 
""" + + TOOL_NAME = 'pylint' + NAME = 'Pylint' + URL = 'https://www.pylint.org' + + def get_reports(self, result_file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. """ + reports: List[Report] = [] + + if not os.path.exists(result_file_path): + LOG.error("Report file does not exist: %s", result_file_path) + return reports + + try: + with open(result_file_path, 'r', + encoding="utf-8", errors="ignore") as f: + bugs = json.load(f) + except (IOError, json.decoder.JSONDecodeError): + LOG.error("Failed to parse the given analyzer result '%s'. Please " + "give a valid json file generated by Pylint.", + result_file_path) + return reports + + file_cache: Dict[str, File] = {} + for bug in bugs: + file_path = os.path.join( + os.path.dirname(result_file_path), bug.get('path')) + if not os.path.exists(file_path): + LOG.warning("Source file does not exists: %s", file_path) + continue + + reports.append(Report( + get_or_create_file(os.path.abspath(file_path), file_cache), + int(bug['line']), + int(bug['column']), + bug['message'], + bug['symbol'])) + + return reports diff --git a/tools/report-converter/codechecker_report_converter/infer/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/infer/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/sanitizers/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/kerneldoc/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/address/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/kerneldoc/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/sanitizers/address/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/address/analyzer_result.py 
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------

from typing import List

from codechecker_report_converter.report import Report

from ...analyzer_result import AnalyzerResultBase
from .parser import Parser


class AnalyzerResult(AnalyzerResultBase):
    """ Transform analyzer result of Clang AddressSanitizer. """

    TOOL_NAME = 'asan'
    NAME = 'AddressSanitizer'
    URL = 'https://clang.llvm.org/docs/AddressSanitizer.html'

    def get_reports(self, file_path: str) -> List[Report]:
        """ Parse the sanitizer console output file and return its
        reports. """
        # The sanitizer parsers take no constructor argument; the output
        # location is only needed when reading.
        return Parser().get_reports(file_path)
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import logging +import re + +from ..parser import SANParser + +LOG = logging.getLogger('report-converter') + + +class Parser(SANParser): + """ Parser for Clang AddressSanitizer console outputs. """ + + checker_name = "AddressSanitizer" + + # Regex for parsing AddressSanitizer output message. + line_re = re.compile( + # Error code + r'==(?P\d+)==(ERROR|WARNING): AddressSanitizer: ' + # Checker message. + r'(?P[\S \t]+)') diff --git a/tools/report-converter/codechecker_report_converter/markdownlint/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/markdownlint/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/analyzer_result.py new file mode 100644 index 0000000000..21032d2452 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/analyzer_result.py @@ -0,0 +1,27 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + + +from typing import List + +from codechecker_report_converter.report import Report + +from ...analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Clang LeakSanitizer. 
""" + + TOOL_NAME = 'lsan' + NAME = 'LeakSanitizer' + URL = 'https://clang.llvm.org/docs/LeakSanitizer.html' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. """ + return Parser().get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/parser.py new file mode 100644 index 0000000000..7a28da6382 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/leak/parser.py @@ -0,0 +1,27 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import logging +import re + +from ..parser import SANParser + +LOG = logging.getLogger('report-converter') + + +class Parser(SANParser): + """ Parser for Clang LeakSanitizer console outputs. """ + + checker_name = "LeakSanitizer" + + # Regex for parsing MemorySanitizer output message. + line_re = re.compile( + # Error code + r'==(?P\d+)==(ERROR|WARNING): LeakSanitizer: ' + # Checker message. 
+ r'(?P[\S \t]+)') diff --git a/tools/report-converter/codechecker_report_converter/pyflakes/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/pyflakes/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/analyzer_result.py new file mode 100644 index 0000000000..714683f498 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/analyzer_result.py @@ -0,0 +1,27 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + + +from typing import List + +from codechecker_report_converter.report import Report + +from ...analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Clang MemorySanitizer. """ + + TOOL_NAME = 'msan' + NAME = 'MemorySanitizer' + URL = 'https://clang.llvm.org/docs/MemorySanitizer.html' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. 
""" + return Parser().get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/parser.py new file mode 100644 index 0000000000..bfe7dbc001 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/memory/parser.py @@ -0,0 +1,29 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + + +import logging +import re + +from ..parser import SANParser + + +LOG = logging.getLogger('report-converter') + + +class Parser(SANParser): + """ Parser for Clang MemorySanitizer console outputs. """ + + checker_name = "MemorySanitizer" + + # Regex for parsing MemorySanitizer output message. + line_re = re.compile( + # Error code + r'==(?P\d+)==(ERROR|WARNING): MemorySanitizer: ' + # Checker message. + r'(?P[\S \t]+)') diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/parser.py new file mode 100644 index 0000000000..1162cdb352 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/parser.py @@ -0,0 +1,146 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import logging +import os +import re + +from typing import Iterator, List, Optional, Tuple + +from codechecker_report_converter.report import BugPathEvent, File, \ + get_or_create_file, Report + +from ..parser import get_next, BaseParser + +LOG = logging.getLogger('report-converter') + + +class SANParser(BaseParser): + """ Parser for Clang UndefinedBehaviourSanitizer console outputs. + + Example output + /a/b/main.cpp:13:10: runtime error: load of value 7... + """ + + checker_name: str = "" + main_event_index = -1 + line_re = re.compile(r'') + + def __init__(self): + super(SANParser, self).__init__() + + # Regex for parsing stack trace line. + # It has the following format: + # #1 0x42a51d in main /dummy/main.cpp:24:2 + self.stack_trace_re = re.compile(r'^\s+#\d+') + + self.file_re = re.compile( + r'(?P[\S]+?):(?P\d+)(:(?P\d+))?') + + def parse_sanitizer_message( + self, + it: Iterator[str], + line: str + ) -> Tuple[Optional[Report], str]: + """ Parses ThreadSanitizer output message. """ + match = self.line_re.match(line) + if not match: + return None, line + + line = get_next(it) + stack_traces, events, line = self.parse_stack_trace(it, line) + + if not events: + return None, line + + main_event = events[self.main_event_index] + + report = self.create_report( + events, main_event.file, main_event.line, main_event.column, + match.group('message').strip(), stack_traces) + + return report, line + + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. """ + report, next_line = self.parse_sanitizer_message(it, line) + if report: + return [report], next_line + + return [], next(it) + + def parse_stack_trace_line(self, line: str) -> Optional[BugPathEvent]: + """ Parse the given stack trace line. 
+ + Return an event if the file in the stack trace line exists otherwise + it returns None. + """ + file_match = self.file_re.search(line) + if not file_match: + return None + + file_path = file_match.group('path') + if file_path and os.path.exists(file_path): + col = file_match.group('column') + return BugPathEvent( + line.rstrip(), + get_or_create_file( + os.path.abspath(file_path), self._file_cache), + int(file_match.group('line')), + int(col) if col else 0) + + return None + + def create_report( + self, + events: List[BugPathEvent], + file: File, + line: int, + column: int, + message: str, + stack_traces: List[str] + ) -> Report: + """ Create a report for the sanitizer output. """ + # The original message should be the last part of the path. This is + # displayed by quick check, and this is the main event displayed by + # the web interface. + events.append(BugPathEvent(message, file, line, column)) + + notes = None + if stack_traces: + notes = [BugPathEvent(''.join(stack_traces), file, line, column)] + + return Report( + file, line, column, message, self.checker_name, + bug_path_events=events, + notes=notes) + + def parse_stack_trace( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[str], List[BugPathEvent], str]: + """ Iterate over lines and parse stack traces. 
""" + events: List[BugPathEvent] = [] + stack_traces: List[str] = [] + + while line.strip(): + event = self.parse_stack_trace_line(line) + if event: + events.append(event) + + stack_traces.append(line) + line = get_next(it) + + events.reverse() + + return stack_traces, events, line diff --git a/tools/report-converter/codechecker_report_converter/pylint/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/pylint/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/analyzer_result.py new file mode 100644 index 0000000000..d234dce0c4 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +from typing import List + +from codechecker_report_converter.report import Report + +from ...analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Clang ThreadSanitizer. """ + + TOOL_NAME = 'tsan' + NAME = 'ThreadSanitizer' + URL = 'https://clang.llvm.org/docs/ThreadSanitizer.html' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. 
""" + return Parser().get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/parser.py new file mode 100644 index 0000000000..022906fdb0 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/parser.py @@ -0,0 +1,29 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import logging +import re + +from ..parser import SANParser + + +LOG = logging.getLogger('report-converter') + + +class Parser(SANParser): + """ Parser for Clang AddressSanitizer console outputs. """ + + checker_name = "ThreadSanitizer" + main_event_index = 0 + + # Regex for parsing ThreadSanitizer output message. + line_re = re.compile( + # Error code + r'==(?P\d+)==(ERROR|WARNING): ThreadSanitizer: ' + # Checker message. 
+ r'(?P[\S \t]+)') diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/ub/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/sanitizers/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/sanitizers/ub/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/ub/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/ub/analyzer_result.py new file mode 100644 index 0000000000..4d9f1cb5ac --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/ub/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +from typing import List + +from codechecker_report_converter.report import Report + +from ...analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Clang UndefinedBehaviourSanitizer. """ + + TOOL_NAME = 'ubsan' + NAME = 'UndefinedBehaviorSanitizer' + URL = 'https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. 
""" + return Parser().get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/ub/output_parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/ub/parser.py similarity index 50% rename from tools/report-converter/codechecker_report_converter/sanitizers/ub/output_parser.py rename to tools/report-converter/codechecker_report_converter/analyzers/sanitizers/ub/parser.py index 15b95d18b3..3eeae1834d 100644 --- a/tools/report-converter/codechecker_report_converter/sanitizers/ub/output_parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/ub/parser.py @@ -6,37 +6,40 @@ # # ------------------------------------------------------------------------- - import logging import os import re -from ...output_parser import get_next, Message, Event -from ..output_parser import SANParser +from typing import Iterable, Optional, Tuple + +from codechecker_report_converter.report import get_or_create_file, Report + +from ...parser import get_next +from ..parser import SANParser -LOG = logging.getLogger('ReportConverter') +LOG = logging.getLogger('report-converter') -class UBSANParser(SANParser): + +class Parser(SANParser): """ Parser for Clang UndefinedBehaviourSanitizer console outputs. Example output /a/b/main.cpp:13:10: runtime error: load of value 7... """ - def __init__(self): - super(UBSANParser, self).__init__() + checker_name = "UndefinedBehaviorSanitizer" - # Regex for parsing UndefinedBehaviorSanitizer output message. - self.ub_line_re = re.compile( - # File path followed by a ':'. - r'^(?P[\S ]+?):' - # Line number followed by a ':'. - r'(?P\d+?):' - # Column number followed by a ':' and a space. - r'(?P\d+?): runtime error: ' - # Checker message. - r'(?P[\S \t]+)') + # Regex for parsing UndefinedBehaviorSanitizer output message. + line_re = re.compile( + # File path followed by a ':'. + r'^(?P[\S ]+?):' + # Line number followed by a ':'. 
+ r'(?P\d+?):' + # Column number followed by a ':' and a space. + r'(?P\d+?): runtime error: ' + # Checker message. + r'(?P[\S \t]+)') def parse_stack_trace(self, it, line): """ Iterate over lines and parse stack traces. """ @@ -55,28 +58,26 @@ def parse_stack_trace(self, it, line): return stack_traces, events, line - def parse_sanitizer_message(self, it, line): + def parse_sanitizer_message( + self, + it: Iterable[str], + line: str + ) -> Tuple[Optional[Report], str]: """ Parses UndefinedBehaviorSanitizer output message. """ - match = self.ub_line_re.match(line) + match = self.line_re.match(line) if not match: return None, line - report_file = os.path.abspath(match.group('path')) + report_file = get_or_create_file( + os.path.abspath(match.group('path')), self._file_cache) report_line = int(match.group('line')) report_col = int(match.group('column')) line = get_next(it) stack_traces, events, line = self.parse_stack_trace(it, line) - notes = [] - if stack_traces: - notes.append(Event(report_file, report_line, report_col, - ''.join(stack_traces))) - - return Message(report_file, - report_line, - report_col, - match.group('message').strip(), - 'UndefinedBehaviorSanitizer', - events, - notes), line + report = self.create_report( + events, report_file, report_line, report_col, + match.group('message').strip(), stack_traces) + + return report, line diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/address/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/smatch/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/sanitizers/address/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/smatch/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/smatch/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/smatch/analyzer_result.py new file mode 100644 index 0000000000..c1867f6bdf --- 
/dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/smatch/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +from typing import List + +from codechecker_report_converter.report import Report + +from ..analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Smatch. """ + + TOOL_NAME = 'smatch' + NAME = 'Smatch' + URL = 'https://repo.or.cz/w/smatch.git' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. """ + return Parser(file_path).get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/smatch/output_parser.py b/tools/report-converter/codechecker_report_converter/analyzers/smatch/parser.py similarity index 69% rename from tools/report-converter/codechecker_report_converter/smatch/output_parser.py rename to tools/report-converter/codechecker_report_converter/analyzers/smatch/parser.py index bdb46ff91c..8c3b5ca4d6 100644 --- a/tools/report-converter/codechecker_report_converter/smatch/output_parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/smatch/parser.py @@ -10,17 +10,22 @@ import os import re -from ..output_parser import BaseParser, Message -LOG = logging.getLogger('ReportConverter') +from typing import Iterator, List, Tuple +from codechecker_report_converter.report import get_or_create_file, Report +from ..parser import BaseParser -class SmatchParser(BaseParser): + +LOG = logging.getLogger('report-converter') + + +class Parser(BaseParser): """ Parser for Smatch Output """ def __init__(self, 
analyzer_result): - super(SmatchParser, self).__init__() + super(Parser, self).__init__() self.analyzer_result = analyzer_result @@ -36,29 +41,28 @@ def __init__(self, analyzer_result): # Message. r'(?P[\S \t]+)\s*') - def parse_message(self, it, line): - """ - Actual Parsing function for the given line - It is expected that each line contains a seperate report - """ + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. """ match = self.message_line_re.match(line) if match is None: - return None, next(it) + return [], next(it) file_path = os.path.normpath( os.path.join(os.path.dirname(self.analyzer_result), match.group('path'))) - column = 0 - - message = Message( - file_path, + report = Report( + get_or_create_file(file_path, self._file_cache), int(match.group('line')), - column, + 0, match.group('message').strip(), match.group('checker_name')) try: - return message, next(it) + return [report], next(it) except StopIteration: - return message, '' + return [report], '' diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/leak/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/sparse/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/sanitizers/leak/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/sparse/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sparse/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/sparse/analyzer_result.py new file mode 100644 index 0000000000..4c6d5099fb --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sparse/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. 
See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +from typing import List + +from codechecker_report_converter.report import Report + +from ..analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Sparse. """ + + TOOL_NAME = 'sparse' + NAME = 'Sparse' + URL = 'https://git.kernel.org/pub/scm/devel/sparse/sparse.git' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. """ + return Parser(file_path).get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/sparse/output_parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sparse/parser.py similarity index 63% rename from tools/report-converter/codechecker_report_converter/sparse/output_parser.py rename to tools/report-converter/codechecker_report_converter/analyzers/sparse/parser.py index 590b5ec197..59894446b8 100644 --- a/tools/report-converter/codechecker_report_converter/sparse/output_parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/sparse/parser.py @@ -10,17 +10,23 @@ import os import re -from ..output_parser import BaseParser, Message, Event -LOG = logging.getLogger('ReportConverter') +from typing import Iterator, List, Tuple +from codechecker_report_converter.report import BugPathEvent, \ + get_or_create_file, Report +from ..parser import BaseParser -class SparseParser(BaseParser): + +LOG = logging.getLogger('report-converter') + + +class Parser(BaseParser): """ Parser for Sparse Output """ def __init__(self, analyzer_result): - super(SparseParser, self).__init__() + super(Parser, self).__init__() self.analyzer_result = analyzer_result @@ -45,28 +51,30 @@ def __init__(self, analyzer_result): r'(?P[\S \t]+)\s*' ) - def parse_message(self, it, line): - """ 
- Actual Parsing function for the given line - It is expected that each line contains a seperate report - """ + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. """ match = self.message_line_re.match(line) if (match is None): - return None, next(it) - - checker_name = None + return [], next(it) file_path = os.path.normpath( os.path.join(os.path.dirname(self.analyzer_result), match.group('path'))) - message = Message( - file_path, + + report = Report( + get_or_create_file(file_path, self._file_cache), int(match.group('line')), int(match.group('column')), match.group('message').strip(), - checker_name) + '', + bug_path_events=[]) + line = '' try: line = next(it) note_match = self.note_line_re.match(line) @@ -74,15 +82,22 @@ def parse_message(self, it, line): file_path = os.path.normpath( os.path.join(os.path.dirname(self.analyzer_result), note_match.group('path'))) - message.events.append(Event(file_path, - int(note_match.group('line')), - int(note_match - .group('column')), - note_match.group('message') - .strip())) + + report.bug_path_events.append(BugPathEvent( + note_match.group('message').strip(), + get_or_create_file(file_path, self._file_cache), + int(note_match.group('line')), + int(note_match.group('column')))) + line = next(it) note_match = self.note_line_re.match(line) - return message, line - except StopIteration: - return message, '' + line = '' + finally: + report.bug_path_events.append(BugPathEvent( + report.message, + report.file, + report.line, + report.column)) + + return [report], line diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/memory/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/sphinx/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/sanitizers/memory/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/sphinx/__init__.py diff --git 
a/tools/report-converter/codechecker_report_converter/analyzers/sphinx/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/sphinx/analyzer_result.py new file mode 100644 index 0000000000..ec22a4c2fc --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/sphinx/analyzer_result.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +from typing import List + +from codechecker_report_converter.report import Report + +from ..analyzer_result import AnalyzerResultBase +from .parser import Parser + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of Sphinx. """ + + TOOL_NAME = 'sphinx' + NAME = 'Sphinx' + URL = 'https://github.com/sphinx-doc/sphinx' + + def get_reports(self, file_path: str) -> List[Report]: + """ Get reports from the given analyzer result. 
""" + return Parser(file_path).get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/sphinx/output_parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sphinx/parser.py similarity index 65% rename from tools/report-converter/codechecker_report_converter/sphinx/output_parser.py rename to tools/report-converter/codechecker_report_converter/analyzers/sphinx/parser.py index 13240b457c..816aa0aa15 100644 --- a/tools/report-converter/codechecker_report_converter/sphinx/output_parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/sphinx/parser.py @@ -10,17 +10,22 @@ import os import re -from ..output_parser import BaseParser, Message -LOG = logging.getLogger('ReportConverter') +from typing import Iterator, List, Tuple +from codechecker_report_converter.report import get_or_create_file, Report +from ..parser import BaseParser -class SphinxParser(BaseParser): + +LOG = logging.getLogger('report-converter') + + +class Parser(BaseParser): """ Parser for Sphinx Output """ def __init__(self, analyzer_result): - super(SphinxParser, self).__init__() + super(Parser, self).__init__() self.analyzer_result = analyzer_result @@ -32,30 +37,29 @@ def __init__(self, analyzer_result): # Message. r'(?P[\S \t]+)\s*') - def parse_message(self, it, line): - """ - Actual Parsing function for the given line - It is expected that each line contains a seperate report - """ + def _parse_line( + self, + it: Iterator[str], + line: str + ) -> Tuple[List[Report], str]: + """ Parse the given line. 
""" match = self.message_line_re.match(line) if match is None: - return None, next(it) + return [], next(it) file_path = os.path.normpath( os.path.join(os.path.dirname(self.analyzer_result), match.group('path'))) - checker_name = None - - message = Message( - file_path, + report = Report( + get_or_create_file(file_path, self._file_cache), int(match.group('line')), 0, match.group('message').strip(), - checker_name) + '') try: - return message, next(it) + return [report], next(it) except StopIteration: - return message, '' + return [report], '' diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/thread/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/spotbugs/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/sanitizers/thread/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/spotbugs/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/spotbugs/output_parser.py b/tools/report-converter/codechecker_report_converter/analyzers/spotbugs/analyzer_result.py similarity index 64% rename from tools/report-converter/codechecker_report_converter/spotbugs/output_parser.py rename to tools/report-converter/codechecker_report_converter/analyzers/spotbugs/analyzer_result.py index 6f2f2c8df6..5d5193d68c 100644 --- a/tools/report-converter/codechecker_report_converter/spotbugs/output_parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/spotbugs/analyzer_result.py @@ -10,56 +10,47 @@ import os import xml.etree.ElementTree as ET -from ..output_parser import Message, Event, BaseParser +from typing import Dict, List, Optional -LOG = logging.getLogger('ReportConverter') +from codechecker_report_converter.report import BugPathEvent, \ + File, get_or_create_file, Report +from ..analyzer_result import AnalyzerResultBase -class SpotBugsMessage(Message): - """ Represents a message with an optional event, fixit and 
note messages. - This will be a diagnostic section in the plist which represents a report. - """ +LOG = logging.getLogger('report-converter') - def __init__(self, path, line, column, message, checker, report_hash, - events=None, notes=None, fixits=None): - super(SpotBugsMessage, self).__init__(path, line, column, message, - checker, events, notes, fixits) - self.report_hash = report_hash - def __eq__(self, other): - return super(SpotBugsMessage, self).__eq__(other) and \ - self.report_hash == other.report_hash +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of SpotBugs. """ - def __str__(self): - return '%s, report_hash=%s' % \ - (super(SpotBugsMessage, self).__str__(), self.report_hash) - - -class SpotBugsParser(BaseParser): - """ Parser for SpotBugs output. """ + TOOL_NAME = 'spotbugs' + NAME = 'spotbugs' + URL = 'https://spotbugs.github.io' def __init__(self): - super(SpotBugsParser, self).__init__() - self.project_paths = [] + super(AnalyzerResult, self).__init__() + self.__project_paths = [] + self.__file_cache: Dict[str, File] = {} - def parse_messages(self, analyzer_result): + def get_reports(self, file_path: str) -> List[Report]: """ Parse the given analyzer result. """ + reports: List[Report] = [] - root = self.__parse_analyzer_result(analyzer_result) + root = self.__parse_analyzer_result(file_path) if root is None: - return + return reports - self.project_paths = self.__get_project_paths(root) + self.__project_paths = self.__get_project_paths(root) for bug in root.findall('BugInstance'): - message = self.__parse_bug(bug) - if message: - self.messages.append(message) + report = self.__parse_bug(bug) + if report: + reports.append(report) - return self.messages + return reports - def __get_abs_path(self, source_path): + def __get_abs_path(self, source_path: str): """ Returns full path of the given source path. 
It will try to find the given source path in the project paths and @@ -68,14 +59,14 @@ def __get_abs_path(self, source_path): if os.path.exists(source_path): return source_path - for project_path in self.project_paths: + for project_path in self.__project_paths: full_path = os.path.join(project_path, source_path) if os.path.exists(full_path): return full_path LOG.warning("No source file found: %s", source_path) - def __parse_analyzer_result(self, analyzer_result): + def __parse_analyzer_result(self, analyzer_result: str): """ Parse the given analyzer result xml file. Returns the root element of the parsed tree or None if something goes @@ -138,41 +129,60 @@ def __parse_bug(self, bug): if line is None: line = next((e.line for e in reversed(events) if e.line > 0), 0) - return SpotBugsMessage(source_path, int(line), col, long_message, - checker_name, report_hash, events) + report = Report( + get_or_create_file(source_path, self.__file_cache), + int(line), + col, + long_message, + checker_name, + report_hash=report_hash, + bug_path_events=events) + + report.bug_path_events.append(BugPathEvent( + report.message, report.file, report.line, report.column)) + + return report - def __event_from_class(self, element): + def __event_from_class(self, element) -> Optional[BugPathEvent]: """ Creates event from a Class element. 
""" message = element.find('Message').text source_line = element.find('SourceLine') if source_line is None: - return + return None source_path = source_line.attrib.get('sourcepath') source_path = self.__get_abs_path(source_path) if not source_path: - return + return None - line = source_line.attrib.get('start', 0) + line = int(source_line.attrib.get('start', 0)) col = 0 - return Event(source_path, int(line), col, message) + return BugPathEvent( + message, + get_or_create_file(source_path, self.__file_cache), + line, + col) - def __event_from_method(self, element): + def __event_from_method(self, element) -> Optional[BugPathEvent]: """ Creates event from a Method element. """ message = element.find('Message').text source_line = element.find('SourceLine') if source_line is None: - return + return None source_path = source_line.attrib.get('sourcepath') source_path = self.__get_abs_path(source_path) if not source_path: - return + return None - line = source_line.attrib.get('start', 0) + line = int(source_line.attrib.get('start', 0)) col = 0 - return Event(source_path, int(line), col, message) + return BugPathEvent( + message, + get_or_create_file(source_path, self.__file_cache), + line, + col) diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/ub/__init__.py b/tools/report-converter/codechecker_report_converter/analyzers/tslint/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/sanitizers/ub/__init__.py rename to tools/report-converter/codechecker_report_converter/analyzers/tslint/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py new file mode 100644 index 0000000000..7f2a45cc54 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py @@ -0,0 +1,67 @@ +# 
------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import logging +import os +import json + +from typing import Dict, List + +from codechecker_report_converter.report import File, get_or_create_file, \ + Report + +from ..analyzer_result import AnalyzerResultBase + + +LOG = logging.getLogger('report-converter') + + +class AnalyzerResult(AnalyzerResultBase): + """ Transform analyzer result of the TSLint analyzer. """ + + TOOL_NAME = 'tslint' + NAME = 'TSLint' + URL = 'https://palantir.github.io/tslint' + + def get_reports(self, result_file_path: str) -> List[Report]: + """ Parse the given analyzer result. """ + reports: List[Report] = [] + + if not os.path.exists(result_file_path): + LOG.error("Report file does not exist: %s", result_file_path) + return reports + + try: + with open(result_file_path, 'r', + encoding="utf-8", errors="ignore") as report_f: + bugs = json.load(report_f) + except (IOError, json.decoder.JSONDecodeError): + LOG.error("Failed to parse the given analyzer result '%s'. 
Please " + "give a valid json file generated by TSLint.", + result_file_path) + return reports + + file_cache: Dict[str, File] = {} + for bug in bugs: + file_path = os.path.join( + os.path.dirname(result_file_path), bug.get('name')) + + if not os.path.exists(file_path): + LOG.warning("Source file does not exists: %s", file_path) + continue + + end_pos = bug['startPosition'] + line = int(end_pos['line'] + 1) + col = int(end_pos['character'] + 1) + + reports.append(Report( + get_or_create_file(os.path.abspath(file_path), file_cache), + line, col, bug['failure'], bug['ruleName'] + )) + + return reports diff --git a/tools/report-converter/codechecker_report_converter/clang_tidy/analyzer_result.py b/tools/report-converter/codechecker_report_converter/clang_tidy/analyzer_result.py deleted file mode 100644 index 97c9f35cab..0000000000 --- a/tools/report-converter/codechecker_report_converter/clang_tidy/analyzer_result.py +++ /dev/null @@ -1,37 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import ClangTidyParser -from .plist_converter import ClangTidyPlistConverter - - -class ClangTidyAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Clang Tidy. """ - - TOOL_NAME = 'clang-tidy' - NAME = 'Clang Tidy' - URL = 'https://clang.llvm.org/extra/clang-tidy' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = ClangTidyParser() - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = ClangTidyPlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/clang_tidy/output_parser.py b/tools/report-converter/codechecker_report_converter/clang_tidy/output_parser.py deleted file mode 100644 index e0e7f615b5..0000000000 --- a/tools/report-converter/codechecker_report_converter/clang_tidy/output_parser.py +++ /dev/null @@ -1,124 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -import logging -import os -import re - -from ..output_parser import Message, Event, BaseParser - -LOG = logging.getLogger('ReportConverter') - - -class ClangTidyParser(BaseParser): - """ Parser for clang-tidy console output. """ - - def __init__(self): - super(ClangTidyParser, self).__init__() - - # Regex for parsing a clang-tidy message. - self.message_line_re = re.compile( - # File path followed by a ':'. - r'^(?P[\S ]+?):' - # Line number followed by a ':'. - r'(?P\d+?):' - # Column number followed by a ':' and a space. - r'(?P\d+?): ' - # Severity followed by a ':'. - r'(?P(error|warning)):' - # Checker message. - r'(?P[\S \t]+)\s*' - # Checker name. - r'\[(?P.*)\]') - - # Matches a note. - self.note_line_re = re.compile( - # File path followed by a ':'. - r'^(?P[\S ]+?):' - # Line number followed by a ':'. - r'(?P\d+?):' - # Column number followed by a ':' and a space. - r'(?P\d+?): ' - # Severity == note. - r'note:' - # Checker message. 
- r'(?P.*)') - - def parse_message(self, it, line): - """Parse the given line. - - Returns a (message, next_line) pair or throws a StopIteration. - The message could be None. - """ - match = self.message_line_re.match(line) - if match is None: - return None, next(it) - - message = Message( - os.path.abspath(match.group('path')), - int(match.group('line')), - int(match.group('column')), - match.group('message').strip(), - match.group('checker').strip()) - - try: - line = next(it) - line = self._parse_code(message, it, line) - line = self._parse_fixits(message, it, line) - line = self._parse_notes(message, it, line) - - return message, line - except StopIteration: - return message, '' - - def _parse_code(self, message, it, line): - # Eat code line. - if self.note_line_re.match(line) or self.message_line_re.match(line): - LOG.debug("Unexpected line: %s. Expected a code line!", line) - return line - - # Eat arrow line. - # FIXME: range support? - line = next(it) - if '^' not in line: - LOG.debug("Unexpected line: %s. Expected an arrow line!", line) - return line - - return next(it) - - def _parse_fixits(self, message, it, line): - """ Parses fixit messages. """ - - while self.message_line_re.match(line) is None and \ - self.note_line_re.match(line) is None: - message_text = line.strip() - - if message_text != '': - message.fixits.append(Event(message.path, message.line, - line.find(message_text) + 1, - message_text)) - line = next(it) - return line - - def _parse_notes(self, message, it, line): - """ Parses note messages. 
""" - - while self.message_line_re.match(line) is None: - match = self.note_line_re.match(line) - if match is None: - LOG.debug("Unexpected line: %s", line) - return next(it) - - message.events.append(Event(os.path.abspath(match.group('path')), - int(match.group('line')), - int(match.group('column')), - match.group('message').strip())) - line = next(it) - line = self._parse_code(message, it, line) - return line diff --git a/tools/report-converter/codechecker_report_converter/cli.py b/tools/report-converter/codechecker_report_converter/cli.py index 74c7e35ec3..9d26ba0b3c 100755 --- a/tools/report-converter/codechecker_report_converter/cli.py +++ b/tools/report-converter/codechecker_report_converter/cli.py @@ -9,11 +9,16 @@ import argparse +import glob +import importlib import logging import os import shutil import sys +from typing import Dict, Optional, Tuple + + # If we run this script in an environment where 'codechecker_report_converter' # module is not available we should add the grandparent directory of this file # to the system path. @@ -22,53 +27,14 @@ # dependencies. 
if __name__ == '__main__': current_dir = os.path.dirname(os.path.realpath(__file__)) - os.sys.path.insert(0, os.path.dirname(current_dir)) - -from codechecker_report_converter.clang_tidy.analyzer_result import \ - ClangTidyAnalyzerResult # noqa -from codechecker_report_converter.cppcheck.analyzer_result import \ - CppcheckAnalyzerResult # noqa -from codechecker_report_converter.infer.analyzer_result import \ - InferAnalyzerResult # noqa -from codechecker_report_converter.sanitizers.address.analyzer_result import \ - ASANAnalyzerResult # noqa -from codechecker_report_converter.sanitizers.memory.analyzer_result import \ - MSANAnalyzerResult # noqa -from codechecker_report_converter.sanitizers.thread.analyzer_result import \ - TSANAnalyzerResult # noqa -from codechecker_report_converter.sanitizers.ub.analyzer_result import \ - UBSANAnalyzerResult # noqa -from codechecker_report_converter.spotbugs.analyzer_result import \ - SpotBugsAnalyzerResult # noqa -from codechecker_report_converter.eslint.analyzer_result import \ - ESLintAnalyzerResult # noqa -from codechecker_report_converter.pylint.analyzer_result import \ - PylintAnalyzerResult # noqa -from codechecker_report_converter.tslint.analyzer_result import \ - TSLintAnalyzerResult # noqa -from codechecker_report_converter.golint.analyzer_result import \ - GolintAnalyzerResult # noqa -from codechecker_report_converter.pyflakes.analyzer_result import \ - PyflakesAnalyzerResult # noqa -from codechecker_report_converter.markdownlint.analyzer_result import \ - MarkdownlintAnalyzerResult # noqa -from codechecker_report_converter.coccinelle.analyzer_result import \ - CoccinelleAnalyzerResult # noqa -from codechecker_report_converter.smatch.analyzer_result import \ - SmatchAnalyzerResult # noqa -from codechecker_report_converter.kerneldoc.analyzer_result import \ - KernelDocAnalyzerResult # noqa -from codechecker_report_converter.sphinx.analyzer_result import \ - SphinxAnalyzerResult # noqa -from 
codechecker_report_converter.sparse.analyzer_result import \ - SparseAnalyzerResult # noqa -from codechecker_report_converter.cpplint.analyzer_result import \ - CpplintAnalyzerResult # noqa -from codechecker_report_converter.sanitizers.leak.analyzer_result import \ - LSANAnalyzerResult # noqa - - -LOG = logging.getLogger('ReportConverter') + sys.path.insert(0, os.path.dirname(current_dir)) + + +from codechecker_report_converter.report.report_file import \ + SUPPORTED_ANALYZER_EXTENSIONS +from codechecker_report_converter.report.parser import plist + +LOG = logging.getLogger('report-converter') msg_formatter = logging.Formatter('[%(levelname)s] - %(message)s') log_handler = logging.StreamHandler(sys.stdout) @@ -86,35 +52,41 @@ class RawDescriptionDefaultHelpFormatter( pass -supported_converters = { - ClangTidyAnalyzerResult.TOOL_NAME: ClangTidyAnalyzerResult, - CppcheckAnalyzerResult.TOOL_NAME: CppcheckAnalyzerResult, - InferAnalyzerResult.TOOL_NAME: InferAnalyzerResult, - GolintAnalyzerResult.TOOL_NAME: GolintAnalyzerResult, - ASANAnalyzerResult.TOOL_NAME: ASANAnalyzerResult, - ESLintAnalyzerResult.TOOL_NAME: ESLintAnalyzerResult, - MSANAnalyzerResult.TOOL_NAME: MSANAnalyzerResult, - PylintAnalyzerResult.TOOL_NAME: PylintAnalyzerResult, - PyflakesAnalyzerResult.TOOL_NAME: PyflakesAnalyzerResult, - TSANAnalyzerResult.TOOL_NAME: TSANAnalyzerResult, - TSLintAnalyzerResult.TOOL_NAME: TSLintAnalyzerResult, - UBSANAnalyzerResult.TOOL_NAME: UBSANAnalyzerResult, - SpotBugsAnalyzerResult.TOOL_NAME: SpotBugsAnalyzerResult, - MarkdownlintAnalyzerResult.TOOL_NAME: MarkdownlintAnalyzerResult, - CoccinelleAnalyzerResult.TOOL_NAME: CoccinelleAnalyzerResult, - SmatchAnalyzerResult.TOOL_NAME: SmatchAnalyzerResult, - KernelDocAnalyzerResult.TOOL_NAME: KernelDocAnalyzerResult, - SphinxAnalyzerResult.TOOL_NAME: SphinxAnalyzerResult, - SparseAnalyzerResult.TOOL_NAME: SparseAnalyzerResult, - CpplintAnalyzerResult.TOOL_NAME: CpplintAnalyzerResult, - LSANAnalyzerResult.TOOL_NAME: 
LSANAnalyzerResult -} +# Load supported converters dynamically. +supported_converters = {} +analyzers_dir_path = os.path.join(os.path.dirname( + os.path.realpath(__file__)), "analyzers") + +analyzers = sorted(glob.glob(os.path.join( + analyzers_dir_path, '**', 'analyzer_result.py'), recursive=True)) +for analyzer_path in analyzers: + analyzer_module = '.'.join(os.path.relpath( + os.path.splitext(analyzer_path)[0], + analyzers_dir_path).split(os.path.sep)) + module_name = f"codechecker_report_converter.analyzers.{analyzer_module}" + + try: + module = importlib.import_module(module_name) + + if hasattr(module, "AnalyzerResult"): + analyzer_result = getattr(module, "AnalyzerResult") + supported_converters[analyzer_result.TOOL_NAME] = analyzer_result + except ModuleNotFoundError: + pass + supported_metadata_keys = ["analyzer_command", "analyzer_version"] -def output_to_plist(analyzer_result, parser_type, output_dir, file_name, - clean=False, metadata=None): +def transform_output( + analyzer_result: str, + parser_type: str, + output_dir: str, + file_name: str, + export_type: str, + clean: bool = False, + metadata: Optional[Dict[str, str]] = None +): """ Creates .plist files from the given output to the given output dir. """ if clean and os.path.isdir(output_dir): LOG.info("Previous analysis results in '%s' have been removed, " @@ -125,10 +97,11 @@ def output_to_plist(analyzer_result, parser_type, output_dir, file_name, os.makedirs(output_dir) parser = supported_converters[parser_type]() - parser.transform(analyzer_result, output_dir, file_name, metadata) + parser.transform( + analyzer_result, output_dir, export_type, file_name, metadata) -def process_metadata(metadata): +def process_metadata(metadata) -> Tuple[Dict[str, str], Dict[str, str]]: """ Returns a tuple of valid and invalid metadata values. 
""" if not metadata: return {}, {} @@ -172,6 +145,17 @@ def __add_arguments_to_parser(parser): "Currently supported output types are: " + ', '.join(sorted(supported_converters)) + ".") + parser.add_argument('-e', '--export', + type=str, + dest='export', + metavar='EXPORT', + choices=SUPPORTED_ANALYZER_EXTENSIONS, + default=plist.EXTENSION, + help="Specify the export format of the converted " + "reports. Currently supported export types " + "are: " + ', '.join(sorted( + SUPPORTED_ANALYZER_EXTENSIONS)) + ".") + parser.add_argument('--meta', nargs='*', dest='meta', @@ -248,8 +232,9 @@ def main(): ', '.join(supported_metadata_keys)) sys.exit(1) - return output_to_plist(args.input, args.type, args.output_dir, - args.filename, args.clean, valid_metadata_values) + return transform_output( + args.input, args.type, args.output_dir, args.filename, args.export, + args.clean, valid_metadata_values) if __name__ == "__main__": diff --git a/tools/report-converter/codechecker_report_converter/coccinelle/analyzer_result.py b/tools/report-converter/codechecker_report_converter/coccinelle/analyzer_result.py deleted file mode 100644 index a729ea4072..0000000000 --- a/tools/report-converter/codechecker_report_converter/coccinelle/analyzer_result.py +++ /dev/null @@ -1,36 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import CoccinelleParser -from ..plist_converter import PlistConverter - - -class CoccinelleAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Coccinelle. 
""" - - TOOL_NAME = 'coccinelle' - NAME = 'Coccinelle' - URL = 'https://github.com/coccinelle/coccinelle' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. - """ - parser = CoccinelleParser(analyzer_result) - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/cppcheck/analyzer_result.py b/tools/report-converter/codechecker_report_converter/cppcheck/analyzer_result.py deleted file mode 100644 index a015012746..0000000000 --- a/tools/report-converter/codechecker_report_converter/cppcheck/analyzer_result.py +++ /dev/null @@ -1,109 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -import glob -import logging -import os -import plistlib - -from xml.parsers.expat import ExpatError - -from codechecker_report_hash.hash import get_report_hash, HashType - -from codechecker_report_converter.analyzer_result import AnalyzerResult - - -LOG = logging.getLogger('ReportConverter') - - -class CppcheckAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Cppcheck. """ - - TOOL_NAME = 'cppcheck' - NAME = 'Cppcheck' - URL = 'http://cppcheck.sourceforge.net' - - def parse(self, analyzer_result): - """ Creates plist objects from the given analyzer result. - - Returns a list of plist objects. 
- """ - plist_files = [] - if os.path.isdir(analyzer_result): - plist_files = glob.glob(os.path.join(analyzer_result, "*.plist")) - elif os.path.isfile(analyzer_result) and \ - analyzer_result.endswith(".plist"): - plist_files = [analyzer_result] - else: - LOG.error("The given input should be an existing CppCheck result " - "directory or a plist file.") - return None - - file_to_plist_data = {} - for f in plist_files: - plist_file = os.path.basename(f) - file_name = '{0}_{1}.plist'.format(os.path.splitext(plist_file)[0], - self.TOOL_NAME) - - with open(f, 'rb') as plist_file: - try: - file_to_plist_data[file_name] = plistlib.load(plist_file) - except ExpatError: - LOG.error("Failed to parse '%s'! Skipping...", file_name) - - return file_to_plist_data - - def _post_process_result(self, file_to_plist_data): - """ Post process the parsed result. - - By default it will add report hashes and metada information for the - diagnostics. - """ - for file_name, plist_data in file_to_plist_data.items(): - try: - self._add_report_hash(plist_data) - self._add_metadata(plist_data) - except IndexError: - LOG.warning("Failed to update '%s' while generating a report " - "hash! Skipping...", file_name) - file_to_plist_data[file_name] = None - - def _add_report_hash(self, plist_data): - """ Generate report hash for the given plist data - - It will generate a context free hash for each diagnostics. - """ - files = plist_data['files'] - for diag in plist_data['diagnostics']: - report_hash = diag.get('issue_hash_content_of_line_in_context') - if not report_hash or report_hash == '0': - report_hash = get_report_hash( - diag, files[diag['location']['file']], - HashType.CONTEXT_FREE) - - diag['issue_hash_content_of_line_in_context'] = report_hash - - def _write(self, file_to_plist_data, output_dir, file_name): - """ Creates plist files from the parse result to the given output. 
""" - output_dir = os.path.abspath(output_dir) - for file_name, plist_data in file_to_plist_data.items(): - if not plist_data: - continue - - out_file = os.path.join(output_dir, file_name) - - LOG.info("Modify plist file: '%s'.", out_file) - LOG.debug(plist_data) - - try: - with open(out_file, 'wb') as plist_file: - plistlib.dump(plist_data, plist_file) - except TypeError as err: - LOG.error('Failed to write plist file: %s', out_file) - LOG.error(err) diff --git a/tools/report-converter/codechecker_report_converter/cpplint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/cpplint/analyzer_result.py deleted file mode 100644 index 4cd71691e1..0000000000 --- a/tools/report-converter/codechecker_report_converter/cpplint/analyzer_result.py +++ /dev/null @@ -1,36 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import CpplintParser -from ..plist_converter import PlistConverter - - -class CpplintAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of cpplint. """ - - TOOL_NAME = 'cpplint' - NAME = 'cpplint' - URL = 'https://github.com/cpplint/cpplint' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = CpplintParser(analyzer_result) - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/eslint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/eslint/analyzer_result.py deleted file mode 100644 index dc558ede49..0000000000 --- a/tools/report-converter/codechecker_report_converter/eslint/analyzer_result.py +++ /dev/null @@ -1,39 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -import logging - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import ESLintParser -from ..plist_converter import PlistConverter - - -LOG = logging.getLogger('ReportConverter') - - -class ESLintAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of the ESLint analyzer. """ - - TOOL_NAME = 'eslint' - NAME = 'ESLint' - URL = 'https://eslint.org/' - - def parse(self, analyzer_result): - """ Creates plist objects from the given analyzer result. - - Returns a list of plist objects. 
- """ - parser = ESLintParser() - messages = parser.parse_messages(analyzer_result) - if not messages: - return - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/eslint/output_parser.py b/tools/report-converter/codechecker_report_converter/eslint/output_parser.py deleted file mode 100644 index 1f20e94e41..0000000000 --- a/tools/report-converter/codechecker_report_converter/eslint/output_parser.py +++ /dev/null @@ -1,59 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -import logging -import os -import json - -from ..output_parser import Message, BaseParser - -LOG = logging.getLogger('ReportConverter') - - -class ESLintParser(BaseParser): - """ Parser for ESLint output. """ - - def parse_messages(self, analyzer_result): - """ Parse the given analyzer result. """ - if not os.path.exists(analyzer_result): - LOG.error("Report file does not exist: %s", analyzer_result) - return - - try: - with open(analyzer_result, 'r', - encoding="utf-8", errors="ignore") as report_f: - diagnostics = json.load(report_f) - except (IOError, json.decoder.JSONDecodeError): - LOG.error("Failed to parse the given analyzer result '%s'. 
Please " - "give a valid json file generated by ESLint.", - analyzer_result) - return - - for diag in diagnostics: - file_path = os.path.join(os.path.dirname(analyzer_result), - diag.get('filePath')) - if not os.path.exists(file_path): - LOG.warning("Source file does not exists: %s", file_path) - continue - - for report in diag.get('messages', []): - message = self.__parse_report(report, file_path) - if message: - self.messages.append(message) - - return self.messages - - def __parse_report(self, bug, file_path): - """ Parse the given report and create a message from them. """ - checker_name = bug['ruleId'] - - message = bug['message'] - line = int(bug['line']) - col = int(bug['column']) - - return Message(file_path, line, col, message, checker_name) diff --git a/tools/report-converter/codechecker_report_converter/golint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/golint/analyzer_result.py deleted file mode 100644 index 4d3bbcf880..0000000000 --- a/tools/report-converter/codechecker_report_converter/golint/analyzer_result.py +++ /dev/null @@ -1,36 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import GolintParser -from ..plist_converter import PlistConverter - - -class GolintAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Golint. """ - - TOOL_NAME = 'golint' - NAME = 'Golint' - URL = 'https://github.com/golang/lint' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = GolintParser(analyzer_result) - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/infer/analyzer_result.py b/tools/report-converter/codechecker_report_converter/infer/analyzer_result.py deleted file mode 100644 index b2cdeab493..0000000000 --- a/tools/report-converter/codechecker_report_converter/infer/analyzer_result.py +++ /dev/null @@ -1,39 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -import logging - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import InferParser -from .plist_converter import InferPlistConverter - - -LOG = logging.getLogger('ReportConverter') - - -class InferAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of the FB Infer. """ - - TOOL_NAME = 'fbinfer' - NAME = 'Facebook Infer' - URL = 'https://fbinfer.com' - - def parse(self, analyzer_result): - """ Creates plist objects from the given analyzer result. - - Returns a list of plist objects. 
- """ - parser = InferParser() - messages = parser.parse_messages(analyzer_result) - if not messages: - return - - plist_converter = InferPlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/infer/output_parser.py b/tools/report-converter/codechecker_report_converter/infer/output_parser.py deleted file mode 100644 index f512877104..0000000000 --- a/tools/report-converter/codechecker_report_converter/infer/output_parser.py +++ /dev/null @@ -1,128 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -import logging -import os -import json - -from ..output_parser import Message, Event, BaseParser - -LOG = logging.getLogger('ReportConverter') - - -class InferMessage(Message): - """ Represents a message with an optional event, fixit and note messages. - - This will be a diagnostic section in the plist which represents a report. - """ - - def __init__(self, path, line, column, message, checker, report_hash, - events=None, notes=None, fixits=None): - super(InferMessage, self).__init__(path, line, column, message, - checker, events, notes, fixits) - self.report_hash = report_hash - - def __eq__(self, other): - return super(InferMessage, self).__eq__(other) and \ - self.report_hash == other.report_hash - - def __str__(self): - return '%s, report_hash=%s' % \ - (super(InferMessage, self).__str__(), self.report_hash) - - -class InferParser(BaseParser): - """ Parser for Infer output. 
""" - - def __init__(self): - super(InferParser, self).__init__() - self.infer_out_parent_dir = None - - def parse_messages(self, analyzer_result): - """ Parse the given analyzer result. """ - if os.path.isdir(analyzer_result): - report_file = os.path.join(analyzer_result, "report.json") - self.infer_out_parent_dir = os.path.dirname(analyzer_result) - else: - report_file = analyzer_result - self.infer_out_parent_dir = os.path.dirname( - os.path.dirname(analyzer_result)) - - if not os.path.exists(report_file): - LOG.error("Report file does not exist: %s", report_file) - return - - try: - with open(report_file, 'r', - encoding="utf-8", errors="ignore") as report_f: - reports = json.load(report_f) - except IOError: - LOG.error("Failed to parse the given analyzer result '%s'. Please " - "give a infer output directory which contains a valid " - "'report.json' file.", analyzer_result) - return - - for report in reports: - message = self.__parse_report(report) - if message: - self.messages.append(message) - - return self.messages - - def __get_abs_path(self, source_path): - """ Returns full path of the given source path. - It will try to find the given source path relative to the given - analyzer report directory (infer-out). - """ - if os.path.exists(source_path): - return source_path - - full_path = os.path.join(self.infer_out_parent_dir, source_path) - if os.path.exists(full_path): - return full_path - - LOG.warning("No source file found: %s", source_path) - - def __parse_report(self, bug): - """ Parse the given report and create a message from them. 
""" - report_hash = bug['hash'] - checker_name = bug['bug_type'] - - message = bug['qualifier'] - line = int(bug['line']) - col = int(bug['column']) - if col < 0: - col = 0 - - source_path = self.__get_abs_path(bug['file']) - if not source_path: - return - - events = [] - for bug_trace in bug['bug_trace']: - event = self.__parse_bug_trace(bug_trace) - - if event: - events.append(event) - - return InferMessage(source_path, line, col, message, checker_name, - report_hash, events) - - def __parse_bug_trace(self, bug_trace): - """ Creates event from a bug trace element. """ - source_path = self.__get_abs_path(bug_trace['filename']) - if not source_path: - return - - message = bug_trace['description'] - line = int(bug_trace['line_number']) - col = int(bug_trace['column_number']) - if col < 0: - col = 0 - - return Event(source_path, line, col, message) diff --git a/tools/report-converter/codechecker_report_converter/infer/plist_converter.py b/tools/report-converter/codechecker_report_converter/infer/plist_converter.py deleted file mode 100644 index 65dc461ffd..0000000000 --- a/tools/report-converter/codechecker_report_converter/infer/plist_converter.py +++ /dev/null @@ -1,22 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from ..plist_converter import PlistConverter - - -class InferPlistConverter(PlistConverter): - """ Infer plist converter. """ - - def _create_diag(self, message, files): - """ Creates a new plist diagnostic from the given message. 
""" - diag = super(InferPlistConverter, self) \ - ._create_diag(message, files) - diag['orig_issue_hash_content_of_line_in_context'] = \ - message.report_hash - - return diag diff --git a/tools/report-converter/codechecker_report_converter/kerneldoc/analyzer_result.py b/tools/report-converter/codechecker_report_converter/kerneldoc/analyzer_result.py deleted file mode 100644 index 80a56d3f11..0000000000 --- a/tools/report-converter/codechecker_report_converter/kerneldoc/analyzer_result.py +++ /dev/null @@ -1,36 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import KernelDocParser -from ..plist_converter import PlistConverter - - -class KernelDocAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of kernel-docs. """ - - TOOL_NAME = 'kernel-doc' - NAME = 'Kernel-Doc' - URL = 'https://github.com/torvalds/linux/blob/master/scripts/kernel-doc' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = KernelDocParser(analyzer_result) - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/markdownlint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/markdownlint/analyzer_result.py deleted file mode 100644 index b48f39a9b5..0000000000 --- a/tools/report-converter/codechecker_report_converter/markdownlint/analyzer_result.py +++ /dev/null @@ -1,34 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import MarkdownlintParser -from ..plist_converter import PlistConverter - - -class MarkdownlintAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Markdownlint. """ - - TOOL_NAME = 'mdl' - NAME = 'Markdownlint' - URL = 'https://github.com/markdownlint/markdownlint' - - def parse(self, analyzer_result): - """ Creates plist data from the given analyzer results. 
""" - parser = MarkdownlintParser(analyzer_result) - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/output_parser.py b/tools/report-converter/codechecker_report_converter/output_parser.py deleted file mode 100644 index d7555e222d..0000000000 --- a/tools/report-converter/codechecker_report_converter/output_parser.py +++ /dev/null @@ -1,97 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from abc import ABCMeta - - -def get_next(it): - """ Returns the next item from the iterator or return an empty string. """ - try: - return next(it) - except StopIteration: - return '' - - -class Event: - """ Represents an event message. """ - - def __init__(self, path, line, column, message): - self.path = path - self.line = line - self.column = column - self.message = message - - def __eq__(self, other): - return self.path == other.path and \ - self.line == other.line and \ - self.column == other.column and \ - self.message == other.message - - def __str__(self): - return 'path={0}, line={1}, column={2}, message={3}'.format( - self.path, self.line, self.column, self.message) - - -class Message(Event): - """ Represents a message with an optional event, fixit and note messages. - - This will be a diagnostic section in the plist which represents a report. 
- """ - - def __init__(self, path, line, column, message, checker, events=None, - notes=None, fixits=None): - super(Message, self).__init__(path, line, column, message) - self.checker = checker - self.events = events if events else [] - self.notes = notes if notes else [] - self.fixits = fixits if fixits else [] - - def __eq__(self, other): - return super(Message, self).__eq__(other) and \ - self.checker == other.checker and \ - self.events == other.events and \ - self.notes == other.notes and \ - self.fixits == other.fixits - - def __str__(self): - return '%s, checker=%s, events=%s, notes=%s, fixits=%s' % \ - (super(Message, self).__str__(), self.checker, - [str(event) for event in self.events], - [str(note) for note in self.notes], - [str(fixit) for fixit in self.fixits]) - - -class BaseParser(metaclass=ABCMeta): - """ Warning message parser. """ - - def __init__(self): - self.messages = [] - - def parse_messages_from_file(self, path): - """ Parse output dump (redirected output). """ - with open(path, 'r', encoding="utf-8", errors="ignore") as file: - return self.parse_messages(file) - - def parse_messages(self, lines): - """ Parse the given output. """ - it = iter(lines) - try: - next_line = next(it) - while True: - message, next_line = self.parse_message(it, next_line) - if message: - self.messages.append(message) - except StopIteration: - pass - - return self.messages - - def parse_message(self, it, line): - """ Parse the given line. 
""" - raise NotImplementedError("Subclasses should implement this!") diff --git a/tools/report-converter/codechecker_report_converter/plist_converter.py b/tools/report-converter/codechecker_report_converter/plist_converter.py deleted file mode 100644 index 9b56b5175b..0000000000 --- a/tools/report-converter/codechecker_report_converter/plist_converter.py +++ /dev/null @@ -1,157 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from abc import ABCMeta -import copy -import json - - -class PlistConverter(metaclass=ABCMeta): - """ Warning messages to plist converter. """ - - def __init__(self, tool_name): - self.tool_name = tool_name - self.path_to_plist = {} - - def get_plist_results(self): - """ Returns a list of plist results. """ - return list(self.path_to_plist.values()) - - def add_messages(self, messages): - """ Adds the given messages to the plist. """ - self._add_diagnostics(messages) - - def _create_location(self, msg, fmap): - """ Create a location section from the message. """ - return {'line': msg.line, - 'col': msg.column, - 'file': fmap[msg.path]} - - def _create_event(self, msg, fmap): - """ Create an event from the given message. """ - return {'kind': 'event', - 'location': self._create_location(msg, fmap), - 'depth': 0, - 'message': msg.message} - - def _create_note(self, msg, fmap): - """ Create a note from the given message. """ - return {'kind': 'note', - 'location': self._create_location(msg, fmap), - 'depth': 0, - 'message': msg.message} - - def _create_edge(self, start_msg, end_msg, fmap): - """ Create an edge between the start and end messages. 
""" - start_loc = self._create_location(start_msg, fmap) - end_loc = self._create_location(end_msg, fmap) - return {'start': [start_loc, start_loc], - 'end': [end_loc, end_loc]} - - def _add_diagnostics(self, messages): - """ Adds the messages to the plist as diagnostics. """ - self._add_files_from_messages(messages) - for message in messages: - plist_data = self.path_to_plist[message.path] - diag = self._create_diag(message, plist_data['files']) - plist_data['diagnostics'].append(diag) - - def _add_files_from_message(self, message, plist_data): - """ Add new file from the given message. """ - try: - plist_data['files'].index(message.path) - except ValueError: - plist_data['files'].append(message.path) - - def _add_files_from_messages(self, messages): - """ Add new file from the given messages. - - Adds the new files from the given message array to the plist's "files" - key, and returns a path to file index dictionary. - """ - for message in messages: - if message.path not in self.path_to_plist: - self.path_to_plist[message.path] = { - 'files': [], - 'diagnostics': []} - - plist_data = self.path_to_plist[message.path] - - self._add_files_from_message(message, plist_data) - - # Collect file paths from the events. - for nt in message.events: - self._add_files_from_message(nt, plist_data) - - def _get_checker_category(self, checker): - """ Returns the check's category.""" - return 'unknown' - - def _get_analyzer_type(self): - """ Returns the analyzer type. """ - return self.tool_name - - def _create_diag(self, message, files): - """ Creates a new plist diagnostic from the given message. 
""" - fmap = {files[i]: i for i in range(0, len(files))} - checker_name = message.checker if message.checker else self.tool_name - diag = {'location': self._create_location(message, fmap), - 'check_name': checker_name, - 'description': message.message, - 'category': self._get_checker_category(message.checker), - 'type': self._get_analyzer_type(), - 'path': []} - - self.__add_fixits(diag, message, fmap) - self.__add_events(diag, message, fmap) - self.__add_notes(diag, message, fmap) - - # The original message should be the last part of the path. This is - # displayed by quick check, and this is the main event displayed by - # the web interface. FIXME: notes and fixits should not be events. - diag['path'].append(self._create_event(message, fmap)) - - return diag - - def __add_fixits(self, diag, message, fmap): - """ Adds fixits as events to the diagnostics. """ - for fixit in message.fixits: - mf = copy.deepcopy(fixit) - mf.message = '%s (fixit)' % fixit.message - diag['path'].append(self._create_event(mf, fmap)) - - def __add_notes(self, diag, message, fmap): - """ Adds notes to the diagnostics. """ - if not message.notes: - return - - diag['notes'] = [self._create_note(n, fmap) for n in message.notes] - - def __add_events(self, diag, message, fmap): - """ Adds events to the diagnostics. - - It also creates edges between the events. - """ - edges = [] - last = None - for event in message.events: - if last is not None: - edges.append(self._create_edge(last, event, fmap)) - - diag['path'].append(self._create_event(event, fmap)) - last = event - - # Add control items only if there is any. 
- if edges: - diag['path'].append({'kind': 'control', 'edges': edges}) - - def __str__(self): - return str(json.dumps(self.path_to_plist, - indent=4, - separators=(',', ': '))) diff --git a/tools/report-converter/codechecker_report_converter/pyflakes/analyzer_result.py b/tools/report-converter/codechecker_report_converter/pyflakes/analyzer_result.py deleted file mode 100644 index 52adfe8e13..0000000000 --- a/tools/report-converter/codechecker_report_converter/pyflakes/analyzer_result.py +++ /dev/null @@ -1,36 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import PyflakesParser -from ..plist_converter import PlistConverter - - -class PyflakesAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Pyflakes. """ - - TOOL_NAME = 'pyflakes' - NAME = 'Pyflakes' - URL = 'https://github.com/PyCQA/pyflakes' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = PyflakesParser(analyzer_result) - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/pylint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/pylint/analyzer_result.py deleted file mode 100644 index ab8ecb942f..0000000000 --- a/tools/report-converter/codechecker_report_converter/pylint/analyzer_result.py +++ /dev/null @@ -1,39 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -import logging - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import PylintParser -from ..plist_converter import PlistConverter - - -LOG = logging.getLogger('ReportConverter') - - -class PylintAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of the Pylint analyzer. """ - - TOOL_NAME = 'pylint' - NAME = 'Pylint' - URL = 'https://www.pylint.org' - - def parse(self, analyzer_result): - """ Creates plist objects from the given analyzer result. - - Returns a list of plist objects. 
- """ - parser = PylintParser() - messages = parser.parse_messages(analyzer_result) - if not messages: - return - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/pylint/output_parser.py b/tools/report-converter/codechecker_report_converter/pylint/output_parser.py deleted file mode 100644 index bc776f1e5e..0000000000 --- a/tools/report-converter/codechecker_report_converter/pylint/output_parser.py +++ /dev/null @@ -1,57 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -import logging -import os -import json - -from ..output_parser import Message, BaseParser - -LOG = logging.getLogger('ReportConverter') - - -class PylintParser(BaseParser): - """ Parser for Pylint output. """ - - def parse_messages(self, analyzer_result): - """ Parse the given analyzer result. """ - if not os.path.exists(analyzer_result): - LOG.error("Report file does not exist: %s", analyzer_result) - return - - try: - with open(analyzer_result, 'r', - encoding="utf-8", errors="ignore") as report_f: - reports = json.load(report_f) - except (IOError, json.decoder.JSONDecodeError): - LOG.error("Failed to parse the given analyzer result '%s'. 
Please " - "give a valid json file generated by Pylint.", - analyzer_result) - return - - for report in reports: - file_path = os.path.join(os.path.dirname(analyzer_result), - report.get('path')) - if not os.path.exists(file_path): - LOG.warning("Source file does not exists: %s", file_path) - continue - - message = self.__parse_report(report, file_path) - if message: - self.messages.append(message) - - return self.messages - - def __parse_report(self, bug, file_path): - """ Parse the given report and create a message from them. """ - checker_name = bug['symbol'] - message = bug['message'] - line = int(bug['line']) - col = int(bug['column']) - - return Message(file_path, line, col, message, checker_name) diff --git a/tools/report-converter/codechecker_report_converter/report.py b/tools/report-converter/codechecker_report_converter/report.py deleted file mode 100644 index a46c0d891e..0000000000 --- a/tools/report-converter/codechecker_report_converter/report.py +++ /dev/null @@ -1,72 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- -""" -Parsers for the analyzer output formats (plist ...) should create this -Report which will be stored. - -Multiple bug identification hash-es can be generated. -All hash generation algorithms should be documented and implemented here. -""" - -import logging - - -LOG = logging.getLogger('ReportConverter') - - -def get_line(file_name, line_no, errors='ignore'): - """ Return the given line from the file. - If line_no is larger than the number of lines in the file then empty - string returns. If the file can't be opened for read, the function also - returns empty string. 
- - Try to encode every file as utf-8 to read the line content do not depend - on the platform settings. By default locale.getpreferredencoding() is used - which depends on the platform. - - Changing the encoding error handling can influence the hash content! - """ - try: - with open(file_name, mode='r', - encoding='utf-8', - errors=errors) as source_file: - for line in source_file: - line_no -= 1 - if line_no == 0: - return line - return '' - except IOError: - LOG.error("Failed to open file %s", file_name) - return '' - - -def remove_whitespace(line_content, old_col): - """ Removes white spaces from the given line content. - - This function removes white spaces from the line content parameter and - calculates the new line location. - Returns the line content without white spaces and the new column number. - E.g.: - line_content = " int foo = 17; sizeof(43); " - ^ - |- bug_col = 18 - content_begin = " int foo = 17; " - content_begin_strip = "intfoo=17;" - line_strip_len = 18 - 10 => 8 - ''.join(line_content.split()) => "intfoo=17;sizeof(43);" - ^ - |- until_col - line_strip_len - 18 - 8 - = 10 - """ - content_begin = line_content[:old_col] - content_begin_strip = ''.join(content_begin.split()) - line_strip_len = len(content_begin) - len(content_begin_strip) - - return ''.join(line_content.split()), \ - old_col - line_strip_len diff --git a/tools/report-converter/codechecker_report_converter/report/__init__.py b/tools/report-converter/codechecker_report_converter/report/__init__.py new file mode 100644 index 0000000000..5648bdce17 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/report/__init__.py @@ -0,0 +1,549 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import builtins +import itertools +import json +import logging +import os + +from typing import Callable, Dict, Iterable, List, Optional, Set + +from .. import util +from ..source_code_comment_handler import SourceCodeCommentHandler, \ + SourceCodeComments, SpellException + + +LOG = logging.getLogger('report-converter') + + +SkipListHandler = Callable[[str], bool] + + +InvalidFileContentMsg: str = \ + "WARNING: source file content is changed or missing. Please re-analyze " \ + "your project to update the reports." + + +class File: + def __init__( + self, + file_path: str, + file_id: Optional[str] = None, + content: Optional[str] = None + ): + self.__id = file_path if file_id is None else file_id + self.__path = file_path + self.__original_path = file_path + self.__content = content + self.__name: Optional[str] = None + + @property + def id(self) -> str: + """ Get unique id. """ + return self.__id + + @property + def path(self) -> str: + """ + If the 'trim' member function is called it will return the trimmed + version of the file path otherwise it will return the original + file path. + """ + return self.__path + + @property + def original_path(self) -> str: + """ Always returns the original file path. """ + return self.__original_path + + @property + def name(self) -> str: + """ Returns the file name. """ + if self.__name is None: + self.__name = os.path.basename(self.__original_path) + + return self.__name + + @property + def content(self) -> str: + """ Get file content. """ + if self.__content is None: + with open(self.original_path, 'r', + encoding='utf-8', errors='replace') as f: + self.__content = f.read() + + return self.__content + + @content.setter + def content(self, content: str): + """ Sets the file content manually if it's not set yet. 
""" + if self.__content is None: + self.__content = content + + def get_line(self, line: int) -> str: + return util.get_line(self.original_path, line) + + def trim(self, path_prefixes: Optional[List[str]] = None) -> str: + """ Removes the longest matching leading path from the file paths. """ + self.__path = util.trim_path_prefixes( + self.__path, path_prefixes) + return self.__path + + def to_json(self) -> Dict: + """ Creates a JSON dictionary. """ + return { + "id": self.id, + "path": self.path, + "original_path": self.original_path} + + def __eq__(self, other) -> bool: + if isinstance(other, File): + return self.id == other.id + + if isinstance(other, str): + return self.id == other + + raise NotImplementedError( + f"Comparison File object with '{type(other)}' is not supported") + + def __hash__(self) -> int: + return builtins.hash(self.id) + + def __repr__(self): + return self.to_json() + + +def get_or_create_file( + file_path: str, + file_cache: Dict[str, File] +) -> File: + """ Get File object for the given file path. """ + if file_path not in file_cache: + file_cache[file_path] = File(file_path) + + return file_cache[file_path] + + +class Range: + def __init__( + self, + start_line: int, + start_col: int, + end_line: int, + end_col: int + ): + self.start_line = start_line + self.start_col = start_col + self.end_line = end_line + self.end_col = end_col + + def to_json(self) -> Dict: + """ Creates a JSON dictionary. 
""" + return { + "start_line": self.start_line, + "start_col": self.start_col, + "end_line": self.end_line, + "end_col": self.end_col} + + def __eq__(self, other): + if isinstance(other, Range): + return self.start_line == other.start_line and \ + self.start_col == other.start_col and \ + self.end_line == other.end_line and \ + self.end_col == other.end_col + + raise NotImplementedError( + f"Comparison Range object with '{type(other)}' is not supported") + + def __repr__(self): + return json.dumps(self.to_json()) + + +class BugPathPosition: + def __init__( + self, + file: File, + range: Optional[Range] + ): + self.file = file + self.range = range + + def to_json(self) -> Dict: + """ Creates a JSON dictionary. """ + return { + "range": self.range.to_json() if self.range else None, + "file": self.file.to_json() + } + + def __eq__(self, other): + if isinstance(other, BugPathPosition): + return self.file == other.file and \ + self.range == other.range + + raise NotImplementedError( + f"Comparison BugPathPosition object with '{type(other)}' is not " + f"supported") + + def __repr__(self): + return json.dumps(self.to_json()) + + +class BugPathEvent(BugPathPosition): + def __init__( + self, + message: str, + file: File, + line: int, + column: int, + range: Optional[Range] = None + ): + super(BugPathEvent, self).__init__(file, range) + + # Range can provide more precise location information than line and + # column. Use that instead of these fields. + self.line = line + self.column = column + + self.message = message + + def to_json(self) -> Dict: + """ Creates a JSON dictionary. 
""" + return { + "file": self.file.to_json(), + "line": self.line, + "column": self.column, + "message": self.message, + "range": self.range.to_json() if self.range else None + } + + def __eq__(self, other): + if isinstance(other, BugPathEvent): + return self.file == other.file and \ + self.line == other.line and \ + self.column == other.column and \ + self.message == other.message and \ + self.range == other.range + + raise NotImplementedError( + f"Comparison BugPathEvent object with '{type(other)}' is not " + f"supported") + + def __repr__(self): + return json.dumps(self.to_json()) + + +class MacroExpansion(BugPathEvent): + def __init__( + self, + message: str, # Expanded message. + name: str, # Macro name which will be expanded. + file: File, + line: int, + column: int, + range: Optional[Range] = None + ): + super(MacroExpansion, self).__init__( + message, file, line, column, range) + + self.name = name + + def to_json(self) -> Dict: + """ Creates a JSON dictionary. """ + return { + "name": self.name, + **super(MacroExpansion, self).to_json() + } + + def __repr__(self): + return json.dumps(self.to_json()) + + +class Report: + """ Represents a report object. 
""" + + def __init__( + self, + file: File, + line: int, + column: int, + message: str, + checker_name: str, + severity: Optional[str] = None, + report_hash: Optional[str] = None, + analyzer_name: Optional[str] = None, + category: Optional[str] = None, + type: Optional[str] = None, + analyzer_result_file_path: Optional[str] = None, + source_line: Optional[str] = None, + bug_path_events: Optional[List[BugPathEvent]] = None, + bug_path_positions: Optional[List[BugPathPosition]] = None, + notes: Optional[List[BugPathEvent]] = None, + macro_expansions: Optional[List[MacroExpansion]] = None + ): + self.analyzer_result_file_path = analyzer_result_file_path + self.file = file + self.line = line + self.column = column + self.message = message + self.checker_name = checker_name + self.severity = severity + self.report_hash = report_hash + self.analyzer_name = analyzer_name + self.category = category + self.type = type + + self.bug_path_events = bug_path_events \ + if bug_path_events is not None else \ + [BugPathEvent(self.message, self.file, self.line, self.column)] + + self.bug_path_positions = bug_path_positions \ + if bug_path_positions is not None else [] + self.notes = notes if notes is not None else [] + self.macro_expansions = macro_expansions \ + if macro_expansions is not None else [] + + self.__source_code_comments: Optional[SourceCodeComments] = None + self.__source_code_comment_warnings: List[str] = [] + self.__sc_handler = SourceCodeCommentHandler() + + self.__source_line: Optional[str] = source_line + self.__files: Optional[Set[str]] = None + self.__changed_files: Optional[Set[str]] = None + + @property + def source_line(self) -> str: + """ Get the source line for the main location. + + If the source line is already set returns that + if not tries to read it from the disk. 
+ """ + if not self.__source_line: + if self.file.original_path in self.changed_files: + self.__source_line = InvalidFileContentMsg + else: + self.__source_line = self.file.get_line(self.line) + + return self.__source_line + + @source_line.setter + def source_line(self, source_line): + """ Sets the source line manually if it's not set yet. """ + if self.__source_line is None: + self.__source_line = source_line + + def trim_path_prefixes(self, path_prefixes: Optional[List[str]] = None): + """ Removes the longest matching leading path from the file paths. """ + self.file.trim(path_prefixes) + + for event in itertools.chain( + self.bug_path_events, + self.bug_path_positions, + self.notes, + self.macro_expansions + ): + event.file.trim(path_prefixes) + + @property + def files(self) -> Set[str]: + """ Returns all referenced file paths. """ + if self.__files is not None: + return self.__files + + self.__files = {self.file.original_path} + + for event in itertools.chain( + self.bug_path_events, + self.bug_path_positions, + self.notes, + self.macro_expansions + ): + self.__files.add(event.file.original_path) + + return self.__files + + @property + def changed_files(self) -> Set[str]: + """ + Returns set of files which are changed or not available compared to the + analyzer result file. + """ + if self.__changed_files is not None: + return self.__changed_files + + self.__changed_files = set() + + if self.analyzer_result_file_path is None: + LOG.warning("No analyzer result file path is set for report: %s", + self) + return self.__changed_files + + analyzer_result_file_mtime = util.get_last_mod_time( + self.analyzer_result_file_path) + + if analyzer_result_file_mtime is None: + # Failed to get the modification time for a file mark it as + # changed. 
+ self.__changed_files.add(self.analyzer_result_file_path) + + for file_path in self.files: + if not os.path.exists(file_path): + self.__changed_files.add(file_path) + continue + + f_mtime = util.get_last_mod_time(file_path) + + if not f_mtime: + self.__changed_files.add(file_path) + continue + + if not analyzer_result_file_mtime or \ + f_mtime > analyzer_result_file_mtime: + self.__changed_files.add(file_path) + + return self.__changed_files + + @changed_files.setter + def changed_files(self, changed_files: Set[str]): + """ Sets the changed files list manually if it's not set yet. """ + if self.__changed_files is None: + self.__changed_files = changed_files + + def __init_source_code_comments(self): + """ + Initialize source code comments and warnings if it is not parsed yet. + """ + if self.__source_code_comments is not None: + return None + + self.__source_code_comments = [] + + if self.file.original_path in self.changed_files: + return None + + if not os.path.exists(self.file.original_path): + return None + + with open(self.file.original_path, + encoding='utf-8', errors='ignore') as f: + try: + self.__source_code_comments = \ + self.__sc_handler.filter_source_line_comments( + f, self.line, self.checker_name) + except SpellException as ex: + self.__source_code_comment_warnings.append( + f"{self.file.name} contains {str(ex)}") + + if len(self.__source_code_comments) == 1: + LOG.debug("Found source code comment for report '%s' in file " + "'%s': %s", + self.report_hash, self.file.path, + self.__source_code_comments) + elif len(self.__source_code_comments) > 1: + self.__source_code_comment_warnings.append( + f"Multiple source code comment can be found for " + f"'{self.checker_name}' checker in '{self.file.path}' at " + f"line {self.line}. This bug will not be suppressed!") + + @property + def source_code_comment_warnings(self) -> List[str]: + """ Get source code comment warnings. 
""" + self.__init_source_code_comments() + return self.__source_code_comment_warnings + + def dump_source_code_comment_warnings(self): + """ Dump source code comments warnings. """ + for warning in self.source_code_comment_warnings: + LOG.warning(warning) + + @property + def source_code_comments(self) -> SourceCodeComments: + """ + Get source code comments for the report. + It will read the source file only once. + """ + self.__init_source_code_comments() + + if self.__source_code_comments is None: + self.__source_code_comments = [] + + return self.__source_code_comments + + @source_code_comments.setter + def source_code_comments(self, source_code_comments: SourceCodeComments): + """ Sets the source code comments manually if it's not set yet. """ + if self.__source_code_comments is None: + self.__source_code_comments = source_code_comments + + def check_source_code_comments(self, comment_types: Iterable[str]) -> bool: + """ + True if it doesn't have a source code comment or if every comments have + specified comment types. + """ + if not self.source_code_comments: + return True + + return all(c.status in comment_types + for c in self.source_code_comments) + + @property + def review_status(self) -> str: + """ Return review status for the given report. """ + if len(self.source_code_comments) == 1: + return self.source_code_comments[0].status \ + .lower().replace('_', ' ') + + return 'unreviewed' + + def skip(self, skip_handler: Optional[SkipListHandler]) -> bool: + """ True if the report should be skipped. """ + if skip_handler: + for file_path in self.files: + if skip_handler(file_path): + return True + + return False + + def to_json(self) -> Dict: + """ Creates a JSON dictionary. 
""" + return { + "analyzer_result_file_path": self.analyzer_result_file_path, + "file": self.file.to_json(), + "line": self.line, + "column": self.column, + "message": self.message, + "checker_name": self.checker_name, + "severity": self.severity, + "report_hash": self.report_hash, + "analyzer_name": self.analyzer_name, + "category": self.category, + "type": self.type, + "source_code_comments": [ + s.to_json() for s in self.source_code_comments], + "review_status": self.review_status, + "bug_path_events": [e.to_json() for e in self.bug_path_events], + "bug_path_positions": [ + p.to_json() for p in self.bug_path_positions], + "notes": [n.to_json() for n in self.notes], + "macro_expansions": [m.to_json() for m in self.macro_expansions] + } + + def __eq__(self, other): + if isinstance(other, Report): + return self.file == other.file and \ + self.line == other.line and \ + self.column == other.column and \ + self.message == other.message and \ + self.checker_name == other.checker_name and \ + self.report_hash == other.report_hash + + raise NotImplementedError( + f"Comparison Range object with '{type(other)}' is not supported") + + def __repr__(self): + return json.dumps(self.to_json()) diff --git a/codechecker_common/tests/unit/__init__.py b/tools/report-converter/codechecker_report_converter/report/checker_labels.py similarity index 69% rename from codechecker_common/tests/unit/__init__.py rename to tools/report-converter/codechecker_report_converter/report/checker_labels.py index 91e10d9461..97831a3ba5 100644 --- a/codechecker_common/tests/unit/__init__.py +++ b/tools/report-converter/codechecker_report_converter/report/checker_labels.py @@ -5,13 +5,9 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- -""" -Setup python modules for the unit tests. 
-""" +from typing import Any, Callable -import os -import sys -REPO_ROOT = os.path.abspath(os.environ['REPO_ROOT']) -sys.path.append(REPO_ROOT) +class CheckerLabels: + severity: Callable[[Any, str], str] diff --git a/tools/report-converter/codechecker_report_converter/report/hash.py b/tools/report-converter/codechecker_report_converter/report/hash.py new file mode 100644 index 0000000000..bc6d6dce9c --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/report/hash.py @@ -0,0 +1,206 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- +""" CodeChecker hash generation algorithms. """ + +import hashlib +import logging +import os + +from enum import Enum + +from typing import List, Tuple + +from codechecker_report_converter.report import Report + +LOG = logging.getLogger('report-converter') + + +class HashType(Enum): + """ Report hash types. """ + CONTEXT_FREE = 1 + PATH_SENSITIVE = 2 + DIAGNOSTIC_MESSAGE = 3 + + +def __str_to_hash(string_to_hash: str, errors: str = 'ignore') -> str: + """ Encodes the given string and generates a hash from it. """ + string_hash = string_to_hash.encode(encoding="utf-8", errors=errors) + return hashlib.md5(string_hash).hexdigest() + + +def _remove_whitespace(line_content: str, old_col: int) -> Tuple[str, int]: + """ + This function removes white spaces from the line content parameter and + calculates the new line location. + Returns the line content without white spaces and the new column number. 
+ E.g.: + line_content = " int foo = 17; sizeof(43); " + ^ + |- bug_col = 18 + content_begin = " int foo = 17; " + content_begin_strip = "intfoo=17;" + line_strip_len = 18 - 10 => 8 + ''.join(line_content.split()) => "intfoo=17;sizeof(43);" + ^ + |- until_col - line_strip_len + 18 - 8 + = 10 + """ + content_begin = line_content[:old_col] + content_begin_strip = ''.join(content_begin.split()) + line_strip_len = len(content_begin) - len(content_begin_strip) + + return ''.join(line_content.split()), \ + old_col - line_strip_len + + +def __get_report_hash_path_sensitive(report: Report) -> List[str]: + """ Report hash generation from the given report. + + High level overview of the hash content: + * 'file_name' from the main diag section. + * 'checker name' + * 'checker message' + * 'line content' from the source file if can be read up + * 'column numbers' from the main diag section + * 'range column numbers' from bug_path_positions. + """ + try: + event = report.bug_path_events[-1] + + from_col = event.column + until_col = event.column + + # WARNING!!! Changing the error handling type for encoding errors + # can influence the hash content! + line_content = report.file.get_line(event.line) + + if line_content == '' and \ + not os.path.isfile(report.file.original_path): + LOG.error("Failed to generate report hash. %s does not exists!", + report.file.original_path) + + hash_content = [report.file.name, + report.checker_name, + event.message, + line_content, + str(from_col), + str(until_col)] + + for p in report.bug_path_positions: + if p.range: + hash_content.append(str(p.range.start_col)) + hash_content.append(str(p.range.end_col)) + + return hash_content + except Exception as ex: + LOG.error("Hash generation failed!") + LOG.error(ex) + return [] + + +def __get_report_hash_context_free(report: Report) -> List[str]: + """ Generate report hash without bug path. + + !!! 
NOT Compatible with the old hash generation method + + High level overview of the hash content: + * 'file_name' from the main diag section. + * 'checker message'. + * 'line content' from the source file if can be read up. All the + whitespaces from the source content are removed. + * 'column numbers' from the main diag sections location. + """ + try: + from_col = report.column + until_col = report.column + + # WARNING!!! Changing the error handling type for encoding errors + # can influence the hash content! + line_content = report.file.get_line(report.line) + + # Remove whitespaces so the hash will be independet of the + # source code indentation. + line_content, new_col = _remove_whitespace(line_content, from_col) + + # Update the column number in sync with the + # removed whitespaces. + until_col = until_col - (from_col - new_col) + from_col = new_col + + if line_content == '' and \ + not os.path.isfile(report.file.original_path): + LOG.error("Failed to include soruce line in the report hash.") + LOG.error('%s does not exists!', report.file.original_path) + + return [ + report.file.name, + report.message, + line_content, + str(from_col), + str(until_col)] + except Exception as ex: + LOG.error("Hash generation failed") + LOG.error(ex) + return [] + + +def __get_report_hash_diagnostic_message(report: Report) -> List[str]: + """ Generate report hash with bug path messages. + + The hash will contain the same information as the CONTEXT_FREE hash + + 'bug step messages' from events. + """ + try: + hash_content = __get_report_hash_context_free(report) + + # Add bug step messages to the hash. + for event in report.bug_path_events: + hash_content.append(event.message) + + return hash_content + except Exception as ex: + LOG.error("Hash generation failed: %s", ex) + return [] + + +def get_report_hash(report: Report, hash_type: HashType) -> str: + """ Get report hash for the given diagnostic. 
""" + hash_content = None + + if hash_type == HashType.CONTEXT_FREE: + hash_content = __get_report_hash_context_free(report) + elif hash_type == HashType.PATH_SENSITIVE: + hash_content = __get_report_hash_path_sensitive(report) + elif hash_type == HashType.DIAGNOSTIC_MESSAGE: + hash_content = __get_report_hash_diagnostic_message(report) + else: + raise Exception("Invalid report hash type: " + str(hash_type)) + + return __str_to_hash('|||'.join(hash_content)) + + +def get_report_path_hash(report: Report) -> str: + """ Returns path hash for the given report. + + This can be used to filter deduplications of multiple reports. + """ + report_path_hash = '' + for event in report.bug_path_events: + line = str(event.line) + col = str(event.column) + + report_path_hash += f"{line}|{col}|{event.message}{event.file.name}" + + report_path_hash += report.checker_name + + if not report_path_hash: + LOG.error('Failed to generate report path hash: %s', report) + + LOG.debug(report_path_hash) + return __str_to_hash(report_path_hash) diff --git a/tools/report-converter/codechecker_report_converter/smatch/__init__.py b/tools/report-converter/codechecker_report_converter/report/output/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/smatch/__init__.py rename to tools/report-converter/codechecker_report_converter/report/output/__init__.py diff --git a/codechecker_common/output/baseline.py b/tools/report-converter/codechecker_report_converter/report/output/baseline.py similarity index 88% rename from codechecker_common/output/baseline.py rename to tools/report-converter/codechecker_report_converter/report/output/baseline.py index 516b13b51d..874fbc9ca2 100644 --- a/codechecker_common/output/baseline.py +++ b/tools/report-converter/codechecker_report_converter/report/output/baseline.py @@ -7,17 +7,16 @@ # ------------------------------------------------------------------------- """ CodeChecker baseline output helpers. 
""" -from io import TextIOWrapper -from typing import Iterable, List, Set +import logging +from typing import Iterable, List, Set, TextIO -from codechecker_common import logger -from codechecker_common.report import Report +from codechecker_report_converter.report import Report -LOG = logger.get_logger('system') +LOG = logging.getLogger('report-converter') -def __get_report_hashes(f: TextIOWrapper) -> List[str]: +def __get_report_hashes(f: TextIO) -> List[str]: """ Get report hashes from the given file. """ return [h.strip() for h in f.readlines() if h] @@ -44,7 +43,8 @@ def convert(reports: Iterable[Report]) -> List[str]: Returns a list of sorted unique report hashes. """ - return sorted(set(r.report_hash for r in reports)) + return sorted(set( + r.report_hash for r in reports if r.report_hash is not None)) def write(file_path: str, report_hashes: Iterable[str]): diff --git a/codechecker_common/output/codeclimate.py b/tools/report-converter/codechecker_report_converter/report/output/codeclimate.py similarity index 63% rename from codechecker_common/output/codeclimate.py rename to tools/report-converter/codechecker_report_converter/report/output/codeclimate.py index 734066a8fb..11fa702afd 100644 --- a/codechecker_common/output/codeclimate.py +++ b/tools/report-converter/codechecker_report_converter/report/output/codeclimate.py @@ -9,11 +9,10 @@ from typing import Dict, List -from codechecker_common.report import Report -from codechecker_common.checker_labels import CheckerLabels +from codechecker_report_converter.report import Report -def convert(reports: List[Report], checker_labels: CheckerLabels) -> Dict: +def convert(reports: List[Report]) -> List[Dict]: """Convert the given reports to codeclimate format. This function will convert the given report to Code Climate format. 
def convert(reports: List[Report]) -> List[Dict]:
    """Convert the given reports to Code Climate format.

    This function will convert the given reports to Code Climate format.

    reports - list of reports which will be converted.

    Returns a list of reports converted to Code Climate format.
    """
    codeclimate_reports = []

    for report in reports:
        codeclimate_reports.append(__to_codeclimate(report))

    return codeclimate_reports


def __to_codeclimate(report: Report) -> Dict:
    """Convert a Report to Code Climate format."""
    # Fall back to 'info' both when the report carries no severity at all
    # AND when the severity has no Code Climate mapping: the original
    # `.get(report.severity)` without a default produced None for unmapped
    # severities, emitting an invalid Code Climate issue.
    severity = __codeclimate_severity_map.get(report.severity, 'info') \
        if report.severity else 'info'

    return {
        "type": "issue",
        "check_name": report.checker_name,
        "description": report.message,
        "categories": ["Bug Risk"],
        "fingerprint": report.report_hash,
        "severity": severity,
        "location": {
            "path": report.file.path,
            "lines": {
                "begin": report.line
            }
        }
    }
codechecker_common.checker_labels import CheckerLabels -from codechecker_common.report import Report +from typing import Dict, List, Union + +from codechecker_report_converter.report import Report -LOG = logger.get_logger('system') +LOG = logging.getLogger('report-converter') -def convert(reports: List[Report], checker_labels: CheckerLabels) -> Dict: + +def convert(reports: List[Report]) -> Dict: """Convert reports to gerrit review format. Process the required environment variables and convert the reports @@ -31,8 +32,7 @@ def convert(reports: List[Report], checker_labels: CheckerLabels) -> Dict: changed_files = __get_changed_files(changed_file_path) return __convert_reports(reports, repo_dir, report_url, - changed_files, changed_file_path, - checker_labels) + changed_files, changed_file_path) def mandatory_env_var_is_set(): @@ -62,8 +62,7 @@ def __convert_reports(reports: List[Report], repo_dir: Union[str, None], report_url: Union[str, None], changed_files: List[str], - changed_file_path: Union[str, None], - checker_labels: CheckerLabels) -> Dict: + changed_file_path: Union[str, None]) -> Dict: """Convert the given reports to gerrit json format. This function will convert the given report to Gerrit json format. 
@@ -76,19 +75,12 @@ def __convert_reports(reports: List[Report], changed_files - list of the changed files checker_labels """ - review_comments = {} + review_comments: Dict[str, List[Dict]] = {} report_count = 0 report_messages_in_unchanged_files = [] for report in reports: - bug_line = report.line - bug_col = report.col - - check_name = report.check_name - severity = checker_labels.severity(check_name) - file_name = report.file_path - check_msg = report.description - source_line = report.source_line + file_name = report.file.path report_count += 1 @@ -97,11 +89,11 @@ def __convert_reports(reports: List[Report], if repo_dir and os.path.dirname(file_name) != "" else file_name checked_file = rel_file_path \ - + ':' + str(bug_line) + ":" + str(bug_col) + + ':' + str(report.line) + ":" + str(report.column) review_comment_msg = \ - f"[{severity}] {checked_file}: {check_msg} [{check_name}]\n" \ - f"{source_line}" + f"[{report.severity}] {checked_file}: {report.message} " \ + f"[{report.checker_name}]\n{report.source_line}" # Skip the report if it is not in the changed files. if changed_file_path and not \ @@ -114,10 +106,10 @@ def __convert_reports(reports: List[Report], review_comments[rel_file_path].append({ "range": { - "start_line": bug_line, - "start_character": bug_col, - "end_line": bug_line, - "end_character": bug_col}, + "start_line": report.line, + "start_character": report.column, + "end_line": report.line, + "end_character": report.column}, "message": review_comment_msg}) message = f"CodeChecker found {report_count} issue(s) in the code." @@ -149,7 +141,7 @@ def __get_changed_files(changed_file_path: Union[None, str]) -> List[str]: The file can contain some garbage values at start, so we use regex to find a json object. 
""" - changed_files = [] + changed_files: List[str] = [] if not changed_file_path or not os.path.exists(changed_file_path): return changed_files diff --git a/tools/report-converter/codechecker_report_converter/sparse/__init__.py b/tools/report-converter/codechecker_report_converter/report/output/html/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/sparse/__init__.py rename to tools/report-converter/codechecker_report_converter/report/output/html/__init__.py diff --git a/tools/report-converter/codechecker_report_converter/report/output/html/cli.py b/tools/report-converter/codechecker_report_converter/report/output/html/cli.py new file mode 100644 index 0000000000..c53d3b33fe --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/report/output/html/cli.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import argparse +import os +import sys + +# If we run this script in an environment where 'codechecker_report_converter' +# module is not available we should add the grandparent directory of this file +# to the system path. 
+if __name__ == '__main__': + sys.path.insert(0, os.path.abspath( + os.path.join(__file__, *[os.path.pardir] * 4))) + +from codechecker_report_converter.report.output.html.html import HtmlBuilder, \ + parse + + +def __add_arguments_to_parser(parser: argparse.ArgumentParser): + parser.add_argument('input', + type=str, + nargs='+', + metavar='file/folder', + help="Analyzer result file(s) or folders containing " + "analysis results which should be parsed.") + + parser.add_argument('-o', '--output', + dest="output_dir", + required=True, + help="Generate HTML output files in the given folder.") + + curr_file_dir = os.path.dirname(os.path.realpath(__file__)) + parser.add_argument('-l', '--layout', + dest="layout_dir", + required=False, + default=os.path.join(curr_file_dir, 'static'), + help="Directory which contains dependency HTML, CSS " + "and JavaScript files.") + + +def main(): + """ Report to HTML main command line. """ + parser = argparse.ArgumentParser( + prog="plist-to-html", + description="Parse and create HTML files from one or more analyzer " + "result files.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + __add_arguments_to_parser(parser) + + args = parser.parse_args() + + if isinstance(args.input, str): + args.input = [args.input] + + # Source files which modification time changed since the last analysis. 
+ changed_source_files = set() + + html_builder = HtmlBuilder(args.layout_dir) + for input_path in args.input: + changed_files = parse(input_path, args.output_dir, args.layout_dir, + html_builder) + changed_source_files.update(changed_files) + + html_builder.create_index_html(args.output_dir) + html_builder.create_statistics_html(args.output_dir) + + print('\nTo view statistics in a browser run:\n> firefox {0}'.format( + os.path.join(args.output_dir, 'statistics.html'))) + + print('\nTo view the results in a browser run:\n> firefox {0}'.format( + os.path.join(args.output_dir, 'index.html'))) + + if changed_source_files: + changed_files = '\n'.join([' - ' + f for f in changed_source_files]) + print("\nThe following source file contents changed since the " + "latest analysis:\n{0}\nPlease analyze your project again to " + "update the reports!".format(changed_files)) + + +if __name__ == "__main__": + main() diff --git a/tools/report-converter/codechecker_report_converter/report/output/html/html.py b/tools/report-converter/codechecker_report_converter/report/output/html/html.py new file mode 100644 index 0000000000..a68bd4c068 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/report/output/html/html.py @@ -0,0 +1,512 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import io +import json +import logging +import os +import shutil +import sys + +from collections import defaultdict +from string import Template +from typing import Callable, Dict, List, Optional, Set, Tuple + +from codechecker_report_converter.report import BugPathEvent, \ + InvalidFileContentMsg, File, MacroExpansion, Report, report_file, \ + reports as reports_helper +from codechecker_report_converter.report.statistics import Statistics +from codechecker_report_converter.report.checker_labels import CheckerLabels + +if sys.version_info >= (3, 8): + from typing import TypedDict # pylint: disable=no-name-in-module +else: + from mypy_extensions import TypedDict + + +LOG = logging.getLogger('report-converter') + + +SkipReportHandler = Callable[ + [str, str, int, str, dict, Dict[int, str]], + Tuple[bool, list] +] + + +class HTMLBugPathEvent(TypedDict): + message: str + fileId: str + line: int + column: int + + +HTMLBugPathEvents = List[HTMLBugPathEvent] + + +class HTMLMacroExpansion(HTMLBugPathEvent): + name: str + + +HTMLMacroExpansions = List[HTMLMacroExpansion] + + +class HTMLReport(TypedDict): + fileId: str + reportHash: Optional[str] + checkerName: str + line: int + column: int + message: str + events: HTMLBugPathEvents + macros: HTMLMacroExpansions + notes: HTMLBugPathEvents + reviewStatus: Optional[str] + severity: Optional[str] + + +HTMLReports = List[HTMLReport] + + +class FileSource(TypedDict): + filePath: str + content: str + + +FileSources = Dict[str, FileSource] + + +class HtmlReportLink(TypedDict): + report: HTMLReport + link: str + + +def get_file_content(file_path: str) -> str: + """ Return file content of the given file. """ + with open(file_path, 'r', encoding='utf-8', errors='replace') as f: + return f.read() + + +class HtmlBuilder: + """ + Helper class to create html file from a report data. 
+ """ + def __init__( + self, + layout_dir: str, + checker_labels: Optional[CheckerLabels] = None + ): + self._checker_labels = checker_labels + self.layout_dir = layout_dir + self.generated_html_reports: Dict[str, HTMLReports] = {} + self.html_reports: HTMLReports = [] + self.files: FileSources = {} + + css_dir = os.path.join(self.layout_dir, 'css') + js_dir = os.path.join(self.layout_dir, 'js') + codemirror_dir = os.path.join( + self.layout_dir, 'vendor', 'codemirror') + + # Mapping layout tags to files. + self._layout_tag_files = { + 'style_css': os.path.join(css_dir, 'style.css'), + 'buglist_css': os.path.join(css_dir, 'buglist.css'), + 'bugview_css': os.path.join(css_dir, 'bugview.css'), + 'statistics_css': os.path.join(css_dir, 'statistics.css'), + 'icon_css': os.path.join(css_dir, 'icon.css'), + 'table_css': os.path.join(css_dir, 'table.css'), + 'codemirror_license': os.path.join(codemirror_dir, + 'codemirror.LICENSE'), + 'codemirror_css': os.path.join(codemirror_dir, + 'codemirror.min.css'), + 'codemirror_js': os.path.join(codemirror_dir, 'codemirror.min.js'), + 'clike_js': os.path.join(codemirror_dir, 'clike.min.js'), + 'bug_viewer': os.path.join(js_dir, 'bugviewer.js'), + 'bug_list': os.path.join(js_dir, 'buglist.js'), + 'browser_support': os.path.join(js_dir, 'browsersupport.js') + } + + # Get the HTML layout file content. + self._layout = Template(get_file_content( + os.path.join(self.layout_dir, 'layout.html'))) + + self._index = Template(get_file_content( + os.path.join(self.layout_dir, 'index.html'))) + + self._statistics = Template(get_file_content( + os.path.join(self.layout_dir, 'statistics.html'))) + + # Get the content of the HTML layout dependencies. + self._tag_contents = {} + for tag in self._layout_tag_files: + self._tag_contents[tag] = get_file_content( + self._layout_tag_files[tag]) + + def get_severity(self, checker_name: str) -> str: + """ Returns severity level for the given checker name. 
""" + return self._checker_labels.severity(checker_name) \ + if self._checker_labels else 'UNSPECIFIED' + + def _add_source_file(self, file: File): + """ + Updates file source data by file id if the given file hasn't been + processed. + """ + if file.id in self.files: + return + + try: + file_content = file.content + except Exception: + file_content = InvalidFileContentMsg + + self.files[file.id] = { + 'filePath': file.path, 'content': file_content} + + def _add_html_reports( + self, + reports: List[Report] + ): + def to_bug_path_events( + events: List[BugPathEvent] + ) -> HTMLBugPathEvents: + """ Converts the given events to html compatible format. """ + html_events: HTMLBugPathEvents = [] + for event in events: + self._add_source_file(event.file) + html_events.append({ + 'message': event.message, + 'fileId': event.file.id, + 'line': event.line, + 'column': event.column, + }) + return html_events + + def to_macro_expansions( + macro_expansions: List[MacroExpansion] + ) -> HTMLMacroExpansions: + """ Converts the given events to html compatible format. 
""" + html_macro_expansions: HTMLMacroExpansions = [] + for macro_expansion in macro_expansions: + self._add_source_file(macro_expansion.file) + html_macro_expansions.append({ + 'message': macro_expansion.message, + 'name': macro_expansion.name, + 'fileId': macro_expansion.file.id, + 'line': macro_expansion.line, + 'column': macro_expansion.column, + }) + return html_macro_expansions + + for report in reports: + self._add_source_file(report.file) + + self.html_reports.append({ + 'fileId': report.file.id, + 'reportHash': report.report_hash, + 'checkerName': report.checker_name, + 'line': report.line, + 'column': report.column, + 'message': report.message, + 'events': to_bug_path_events(report.bug_path_events), + 'macros': to_macro_expansions(report.macro_expansions), + 'notes': to_bug_path_events(report.notes), + 'reviewStatus': report.review_status, + 'severity': self.get_severity(report.checker_name) + }) + + def create( + self, + output_file_path: str, + reports: List[Report] + ) -> Tuple[Optional[HTMLReports], Set[str]]: + """ + Create html file from the given analyzer result file to the output + path. + """ + changed_files = reports_helper.get_changed_files(reports) + + if changed_files: + return None, changed_files + + self._add_html_reports(reports) + + self.generated_html_reports[output_file_path] = self.html_reports + + substitute_data = self._tag_contents + substitute_data.update({ + 'report_data': json.dumps({ + 'files': self.files, + 'reports': self.html_reports + }) + }) + + content = self._layout.substitute(substitute_data) + + with open(output_file_path, 'w+', + encoding='utf-8', errors='replace') as f: + f.write(content) + + return self.html_reports, changed_files + + def create_index_html(self, output_dir: str): + """ + Creates an index.html file which lists all available bugs which was + found in the processed plist files. This also creates a link for each + bug to the created html file where the bug can be found. 
+ """ + # Sort reports based on file path levels. + html_report_links: List[HtmlReportLink] = [] + for html_file, reports in self.generated_html_reports.items(): + for report in reports: + html_report_links.append({'link': html_file, 'report': report}) + + html_report_links.sort( + key=lambda data: self.files[data['report']['fileId']]['filePath']) + + with io.StringIO() as table_reports: + # Create table header. + table_reports.write(''' + +   + File + Severity + Checker name + Message + Bug path length + Review status + ''') + + # Create table lines. + for i, data in enumerate(html_report_links): + html_file = os.path.basename(data['link']) + report = data['report'] + + severity = report['severity'].lower() \ + if 'severity' in report \ + and report['severity'] is not None \ + else '' + + review_status = report['reviewStatus'] \ + if 'reviewStatus' in report and \ + report['reviewStatus'] is not None \ + else '' + + events = report['events'] + if events: + line = events[-1]['line'] + message = events[-1]['message'] + bug_path_length = len(events) + else: + line = report['line'] + message = report['message'] + bug_path_length = 1 + + rs = review_status.lower().replace(' ', '-') + file_path = self.files[report['fileId']]['filePath'] + + table_reports.write(f''' + + {i + 1} + + + {file_path} @ Line {line} + + + + + + {report['checkerName']} + {message} + {bug_path_length} + + {review_status} + + ''') + + substitute_data = self._tag_contents + substitute_data.update({'table_reports': table_reports.getvalue()}) + + content = self._index.substitute(substitute_data) + output_path = os.path.join(output_dir, 'index.html') + with open(output_path, 'w+', encoding='utf-8', + errors='replace') as html_output: + html_output.write(content) + + def create_statistics_html(self, output_dir: str): + """ + Creates an statistics.html file which contains statistics information + from the HTML generation process. 
+ """ + def severity_order(severity: str) -> int: + """ + This function determines in which order severities should be + printed to the output. This function can be given via "key" + attribute to sort() function. + """ + severities = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'STYLE', + 'UNSPECIFIED'] + return severities.index(severity) + + num_of_analyzer_result_files = len(self.generated_html_reports) + + num_of_reports = 0 + for html_file in self.generated_html_reports: + num_of_reports += len(self.generated_html_reports[html_file]) + + checker_statistics: Dict[str, int] = defaultdict(int) + for html_file in self.generated_html_reports: + for report in self.generated_html_reports[html_file]: + checker = report['checkerName'] + checker_statistics[checker] += 1 + + checker_rows: List[List[str]] = [] + severity_statistics: Dict[str, int] = defaultdict(int) + + with io.StringIO() as string: + for checker_name in sorted(checker_statistics): + severity = self.get_severity(checker_name) + string.write(''' + + {0} + + + + {2} + + '''.format(checker_name, severity.lower(), + checker_statistics[checker_name])) + checker_rows.append([checker_name, severity, + str(checker_statistics[checker_name])]) + severity_statistics[severity] += \ + checker_statistics[checker_name] + checker_statistics_content = string.getvalue() + + severity_rows: List[List[str]] = [] + + with io.StringIO() as string: + for severity in sorted(severity_statistics, key=severity_order): + num = severity_statistics[severity] + string.write(''' + + + + + {1} + + '''.format(severity.lower(), num)) + severity_rows.append([severity, str(num)]) + severity_statistics_content = string.getvalue() + + substitute_data = self._tag_contents + substitute_data.update({ + 'num_of_analyzer_result_files': str(num_of_analyzer_result_files), + 'number_of_reports': str(num_of_reports), + 'checker_statistics': checker_statistics_content, + 'severity_statistics': severity_statistics_content}) + + content = 
self._statistics.substitute(substitute_data) + + output_path = os.path.join(output_dir, 'statistics.html') + with open(output_path, 'w+', encoding='utf-8', + errors='ignore') as html_output: + html_output.write(content) + + def finish(self, output_dir_path: str, statistics: Statistics): + """ Creates common html files and print summary messages. """ + self.create_index_html(output_dir_path) + self.create_statistics_html(output_dir_path) + statistics.write() + + print(f"\nTo view statistics in a browser run:\n> firefox " + f"{os.path.join(output_dir_path, 'statistics.html')}") + + print(f"\nTo view the results in a browser run:\n> firefox " + f"{os.path.join(output_dir_path, 'index.html')}") + + +def convert( + file_path: str, + reports: List[Report], + output_dir_path: str, + html_builder: HtmlBuilder +) -> Set[str]: + """ + Prints the results in the given file to HTML file. + + Returns the skipped analyzer result files because of source + file content change. + """ + html_filename = f"{os.path.basename(file_path)}.html" + html_output_path = os.path.join(output_dir_path, html_filename) + html_reports, changed_files = html_builder.create( + html_output_path, reports) + + if changed_files: + return changed_files + + if not html_reports: + LOG.info(f'No report data in {file_path} file.') + return changed_files + + LOG.info(f"Html file was generated: {html_output_path}") + return changed_files + + +def parse( + input_path: str, + output_path: str, + layout_dir: str, + html_builder: Optional[HtmlBuilder] = None +) -> Set[str]: + """ + Parses analyzer result files from the given input directory to the output + directory. + + Return a set of changed files. 
+ """ + files = [] + input_path = os.path.abspath(input_path) + output_dir = os.path.abspath(output_path) + + if os.path.exists(output_path): + LOG.info("Previous analysis results in '%s' have been removed, " + "overwriting with current results.", output_dir) + shutil.rmtree(output_path) + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + if os.path.isfile(input_path): + files.append(input_path) + elif os.path.isdir(input_path): + _, _, file_names = next(os.walk(input_path), ([], [], [])) + files = [os.path.join(input_path, file_name) for file_name + in file_names] + + # Source files which modification time changed since the last analysis. + changed_source_files: Set[str] = set() + + if not html_builder: + html_builder = HtmlBuilder(layout_dir) + + for file_path in files: + if not report_file.is_supported(file_path): + LOG.info("\nSkipping input file %s as it is not supported " + "analyzer result file.", file_path) + continue + + LOG.info(f"\nParsing input file '%s'", file_path) + + reports = report_file.get_reports(file_path) + changed_source = convert(file_path, reports, output_path, html_builder) + + if changed_source: + changed_source_files.update(changed_source) + + return changed_source_files diff --git a/tools/plist_to_html/plist_to_html/static/css/buglist.css b/tools/report-converter/codechecker_report_converter/report/output/html/static/css/buglist.css similarity index 100% rename from tools/plist_to_html/plist_to_html/static/css/buglist.css rename to tools/report-converter/codechecker_report_converter/report/output/html/static/css/buglist.css diff --git a/tools/plist_to_html/plist_to_html/static/css/bugview.css b/tools/report-converter/codechecker_report_converter/report/output/html/static/css/bugview.css similarity index 100% rename from tools/plist_to_html/plist_to_html/static/css/bugview.css rename to tools/report-converter/codechecker_report_converter/report/output/html/static/css/bugview.css diff --git 
a/tools/plist_to_html/plist_to_html/static/css/icon.css b/tools/report-converter/codechecker_report_converter/report/output/html/static/css/icon.css similarity index 100% rename from tools/plist_to_html/plist_to_html/static/css/icon.css rename to tools/report-converter/codechecker_report_converter/report/output/html/static/css/icon.css diff --git a/tools/plist_to_html/plist_to_html/static/css/statistics.css b/tools/report-converter/codechecker_report_converter/report/output/html/static/css/statistics.css similarity index 100% rename from tools/plist_to_html/plist_to_html/static/css/statistics.css rename to tools/report-converter/codechecker_report_converter/report/output/html/static/css/statistics.css diff --git a/tools/plist_to_html/plist_to_html/static/css/style.css b/tools/report-converter/codechecker_report_converter/report/output/html/static/css/style.css similarity index 100% rename from tools/plist_to_html/plist_to_html/static/css/style.css rename to tools/report-converter/codechecker_report_converter/report/output/html/static/css/style.css diff --git a/tools/plist_to_html/plist_to_html/static/css/table.css b/tools/report-converter/codechecker_report_converter/report/output/html/static/css/table.css similarity index 100% rename from tools/plist_to_html/plist_to_html/static/css/table.css rename to tools/report-converter/codechecker_report_converter/report/output/html/static/css/table.css diff --git a/tools/plist_to_html/plist_to_html/static/index.html b/tools/report-converter/codechecker_report_converter/report/output/html/static/index.html similarity index 100% rename from tools/plist_to_html/plist_to_html/static/index.html rename to tools/report-converter/codechecker_report_converter/report/output/html/static/index.html diff --git a/tools/plist_to_html/plist_to_html/static/js/browsersupport.js b/tools/report-converter/codechecker_report_converter/report/output/html/static/js/browsersupport.js similarity index 100% rename from 
tools/plist_to_html/plist_to_html/static/js/browsersupport.js rename to tools/report-converter/codechecker_report_converter/report/output/html/static/js/browsersupport.js diff --git a/tools/plist_to_html/plist_to_html/static/js/buglist.js b/tools/report-converter/codechecker_report_converter/report/output/html/static/js/buglist.js similarity index 100% rename from tools/plist_to_html/plist_to_html/static/js/buglist.js rename to tools/report-converter/codechecker_report_converter/report/output/html/static/js/buglist.js diff --git a/tools/plist_to_html/plist_to_html/static/js/bugviewer.js b/tools/report-converter/codechecker_report_converter/report/output/html/static/js/bugviewer.js similarity index 92% rename from tools/plist_to_html/plist_to_html/static/js/bugviewer.js rename to tools/report-converter/codechecker_report_converter/report/output/html/static/js/bugviewer.js index 369b8dc299..4417fa2643 100644 --- a/tools/plist_to_html/plist_to_html/static/js/bugviewer.js +++ b/tools/report-converter/codechecker_report_converter/report/output/html/static/js/bugviewer.js @@ -111,7 +111,7 @@ var BugViewer = { var nav = document.getElementById('report-nav'); var list = document.createElement('ul'); this._reports.forEach(function (report) { - var events = report['events']; + var events = report.events; var lastBugEvent = events[events.length - 1]; var item = document.createElement('li'); @@ -138,7 +138,7 @@ var BugViewer = { setReport : function (report) { this._currentReport = report; - var events = report['events']; + var events = report.events; var lastBugEvent = events[events.length - 1]; this.setCurrentBugEvent(lastBugEvent, events.length - 1); this.setCheckerName(report.checkerName); @@ -149,14 +149,14 @@ var BugViewer = { setCurrentBugEvent : function (event, idx) { this._currentBugEvent = event; - this.setSourceFileData(this._files[event.location.file]); + this.setSourceFileData(this._files[event.fileId]); this.drawBugPath(); - this.jumpTo(event.location.line, 0); 
- this.highlightBugEvent(event, idx); + this.jumpTo(event.line, 0); + this.highlightBugEvent(idx); }, - highlightBugEvent : function (event, idx) { + highlightBugEvent : function (idx) { this._lineWidgets.forEach(function (widget) { var lineIdx = widget.node.getAttribute('idx'); if (parseInt(lineIdx) === idx) { @@ -188,7 +188,7 @@ var BugViewer = { } this._sourceFileData = file; - this._filepath.innerHTML = file.path; + this._filepath.innerHTML = file.filePath; this._codeMirror.doc.setValue(file.content); this._refresh(); }, @@ -214,7 +214,7 @@ var BugViewer = { var name = 'macro expansion' + (event.name ? ': ' + event.name : ''); return '' + name + '' - + this.escapeHTML(event.expansion).replace(/(?:\r\n|\r|\n)/g, '
'); + + this.escapeHTML(event.message).replace(/(?:\r\n|\r|\n)/g, '
'); } else if (kind === 'note') { return 'note' + this.escapeHTML(event.message).replace(/(?:\r\n|\r|\n)/g, '
'); @@ -229,12 +229,11 @@ var BugViewer = { } events.forEach(function (event) { - if (event.location.file !== that._currentBugEvent.location.file) { + if (event.fileId !== that._currentBugEvent.fileId) { return; } - var left = - that._codeMirror.defaultCharWidth() * event.location.col + 'px'; + var left = that._codeMirror.defaultCharWidth() * event.column + 'px'; var element = document.createElement('div'); element.setAttribute('style', 'margin-left: ' + left); @@ -245,7 +244,7 @@ var BugViewer = { element.appendChild(msg); that._lineWidgets.push(that._codeMirror.addLineWidget( - event.location.line - 1, element)); + event.line - 1, element)); }); }, @@ -260,11 +259,10 @@ var BugViewer = { // Processing bug path events. var currentEvents = this._currentReport.events; currentEvents.forEach(function (event, step) { - if (event.location.file !== that._currentBugEvent.location.file) + if (event.fileId !== that._currentBugEvent.fileId) return; - var left = - that._codeMirror.defaultCharWidth() * event.location.col + 'px'; + var left = that._codeMirror.defaultCharWidth() * event.column + 'px'; var type = step === currentEvents.length - 1 ? 
'error' : 'info'; var element = document.createElement('div'); @@ -309,7 +307,7 @@ var BugViewer = { that._lineWidgets.push(that._codeMirror.addLineWidget( - event.location.line - 1, element)); + event.line - 1, element)); }); }, diff --git a/tools/plist_to_html/plist_to_html/static/layout.html b/tools/report-converter/codechecker_report_converter/report/output/html/static/layout.html similarity index 100% rename from tools/plist_to_html/plist_to_html/static/layout.html rename to tools/report-converter/codechecker_report_converter/report/output/html/static/layout.html diff --git a/tools/plist_to_html/plist_to_html/static/statistics.html b/tools/report-converter/codechecker_report_converter/report/output/html/static/statistics.html similarity index 91% rename from tools/plist_to_html/plist_to_html/static/statistics.html rename to tools/report-converter/codechecker_report_converter/report/output/html/static/statistics.html index eafe93c705..51eb0be706 100644 --- a/tools/plist_to_html/plist_to_html/static/statistics.html +++ b/tools/report-converter/codechecker_report_converter/report/output/html/static/statistics.html @@ -29,8 +29,8 @@

Statistics

def convert(reports: List["Report"]) -> Dict:
    """ Convert the given reports to the JSON report format (version 1). """
    version = 1
    json_reports = [report.to_json() for report in reports]
    return {"version": version, "reports": json_reports}
""" + +import logging +import math +import os +import sys + +from collections import defaultdict +from typing import Dict, List, Optional, Set + +from codechecker_report_converter.report import BugPathEvent, \ + InvalidFileContentMsg, MacroExpansion, Report + + +LOG = logging.getLogger('report-converter') + + +def __get_source_file_for_analyzer_result_file( + analyzer_result_file_path: str, + metadata: Optional[Dict] +) -> Optional[str]: + """ Get source file for the given analyzer result file. """ + if not metadata: + return None + + result_source_files = {} + if 'result_source_files' in metadata: + result_source_files = metadata['result_source_files'] + else: + for tool in metadata.get('tools', {}): + result_src_files = tool.get('result_source_files', {}) + result_source_files.update(result_src_files.items()) + + if analyzer_result_file_path in result_source_files: + return result_source_files[analyzer_result_file_path] + + return None + + +def format_source_line(event: BugPathEvent) -> str: + """ Format bug path event. """ + line = event.file.get_line(event.line) + if line == '': + return '' + + marker_line = line[0:(event.column - 1)] + marker_line = ' ' * (len(marker_line) + marker_line.count('\t')) + + line = line.replace('\t', ' ') + + return f"{line}{marker_line}^" + + +def format_report(report: Report, content_is_not_changed: bool) -> str: + """ Format main report. """ + file_path = report.bug_path_events[-1].file.path + out = f"[{report.severity}] {file_path}:{report.line}:{report.column}: " \ + f"{report.message} [{report.checker_name}]" + + if content_is_not_changed and report.source_code_comments: + out += f" [{report.review_status.capitalize()}]" + + return out + + +def format_note(note: BugPathEvent) -> str: + """ Format bug path note. """ + file_name = os.path.basename(note.file.path) + return f"{file_name}:{note.line}:{note.column}: {note.message}" + + +def format_macro_expansion(macro: MacroExpansion) -> str: + """ Format macro expansions. 
""" + file_name = os.path.basename(macro.file.path) + return f"{file_name}:{macro.line}:{macro.column}: Macro '{macro.name}' " \ + f"expanded to '{macro.message}'" + + +def format_event(event: BugPathEvent) -> str: + """ Format bug path event. """ + file_name = os.path.basename(event.file.path) + return f"{file_name}:{event.line}:{event.column}: {event.message}" + + +def get_index_format(lst: List) -> str: + """ Get index format. """ + return f' %{int(math.floor(math.log10(len(lst))) + 1)}d, ' + + +def print_details(report: Report, output=sys.stdout): + """ Print detail information from the given report. """ + output.write(f' Report hash: {report.report_hash}\n') + + # Print out macros. + if report.macro_expansions: + output.write(' Macro expansions:\n') + index_format = get_index_format(report.macro_expansions) + for index, macro in enumerate(report.macro_expansions): + output.write(index_format % (index + 1)) + output.write(f"{format_macro_expansion(macro)}\n") + + # Print out notes. + if report.notes: + output.write(' Notes:\n') + index_format = get_index_format(report.notes) + for index, note in enumerate(report.notes): + output.write(index_format % (index + 1)) + output.write(f"{format_note(note)}\n") + + # Print out bug path events. + output.write(' Steps:\n') + index_format = get_index_format(report.bug_path_events) + for index, event in enumerate(report.bug_path_events): + output.write(index_format % (index + 1)) + output.write(f"{format_event(event)}\n") + + +def get_file_report_map( + reports: List[Report], + input_file_path: Optional[str] = None, + metadata: Optional[Dict] = None +) -> Dict[str, List[Report]]: + """ Get file report map. """ + file_report_map = defaultdict(list) + for report in reports: + file_report_map[report.file.path].append(report) + + if input_file_path: + source_file = __get_source_file_for_analyzer_result_file( + input_file_path, metadata) + + # Add source file to the map if it doesn't exists. 
def convert(
    source_file_report_map: Dict[str, List["Report"]],
    processed_file_paths: Optional[Set[str]] = None,
    print_steps: bool = False,
    output=sys.stdout
):
    """ Dump the reports grouped by source file in plain text format. """
    if processed_file_paths is None:
        processed_file_paths = set()

    for file_path in sorted(source_file_report_map, key=str.casefold):
        active_count = 0
        for report in sorted(source_file_report_map[file_path],
                             key=lambda r: r.line):
            last_event = report.bug_path_events[-1]

            # When the source file changed since the analysis we can trust
            # neither the source lines nor the source code comments, so a
            # warning message is printed instead.
            content_is_not_changed = \
                last_event.file.original_path not in report.changed_files

            if content_is_not_changed:
                report.dump_source_code_comment_warnings()

            output.write(f"{format_report(report, content_is_not_changed)}\n")

            if content_is_not_changed:
                # Print source code comments.
                for source_code_comment in report.source_code_comments:
                    if source_code_comment.line:
                        output.write(
                            f"{source_code_comment.line.rstrip()}\n")

                output.write(f"{format_source_line(last_event)}")
            else:
                output.write(InvalidFileContentMsg)

            output.write("\n")

            if print_steps:
                print_details(report)

            output.write("\n")
            output.flush()

            active_count += 1

        file_name = os.path.basename(file_path)
        if active_count:
            output.write(f"Found {active_count} defect(s) in "
                         f"{file_name}\n\n")
        elif file_path not in processed_file_paths:
            output.write(f"Found no defects in {file_name}\n")

        processed_file_paths.add(file_path)
+""" + +import logging + +from abc import ABCMeta, abstractmethod +from typing import Any, Dict, List, Optional + +from codechecker_report_converter.report import File, Report +from codechecker_report_converter.report.checker_labels import CheckerLabels +from codechecker_report_converter.report.hash import HashType + + +LOG = logging.getLogger('report-converter') + + +class AnalyzerInfo: + """ Hold information about the analyzer. """ + def __init__(self, name: str): + self.name = name + + +class BaseParser(metaclass=ABCMeta): + """ Base class to manage analyzer result file. """ + def __init__( + self, + checker_labels: Optional[CheckerLabels] = None, + file_cache: Optional[Dict[str, File]] = None + ): + self._checker_labels = checker_labels + self._file_cache = file_cache if file_cache is not None else {} + + def get_severity(self, checker_name: str) -> Optional[str]: + """ Get severity levels for the given checker name. """ + if self._checker_labels: + return self._checker_labels.severity(checker_name) + return None + + @abstractmethod + def get_reports( + self, + analyzer_result_file_path: str + ) -> List[Report]: + """ Get reports from the given analyzer result file. """ + raise NotImplementedError("Subclasses should implement this!") + + @abstractmethod + def convert( + self, + reports: List[Report], + analyzer_info: Optional[AnalyzerInfo] = None + ): + """ Converts the given reports. """ + raise NotImplementedError("Subclasses should implement this!") + + @abstractmethod + def write(self, data: Any, output_file_path: str): + """ Creates an analyzer output file from the given data. """ + raise NotImplementedError("Subclasses should implement this!") + + @abstractmethod + def replace_report_hash( + self, + analyzer_result_file_path: str, + hash_type=HashType.CONTEXT_FREE + ): + """ + Override hash in the given file by using the given version hash. 
+ """ + raise NotImplementedError("Subclasses should implement this!") diff --git a/tools/report-converter/codechecker_report_converter/report/parser/plist.py b/tools/report-converter/codechecker_report_converter/report/parser/plist.py new file mode 100644 index 0000000000..f81e0bff48 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/report/parser/plist.py @@ -0,0 +1,685 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- +""" +Parse the plist output of an analyzer +""" + +import importlib +import logging +import os +import plistlib +import traceback +import sys + +from plistlib import _PlistParser # type: ignore +from typing import Any, BinaryIO, Dict, List, Optional, Tuple + +from xml.parsers.expat import ExpatError + +if sys.version_info >= (3, 8): + from typing import TypedDict # pylint: disable=no-name-in-module +else: + from mypy_extensions import TypedDict + +from codechecker_report_converter import __title__, __version__ +from codechecker_report_converter.report import BugPathEvent, \ + BugPathPosition, File, get_or_create_file, MacroExpansion, Range, Report +from codechecker_report_converter.report.hash import get_report_hash, HashType +from codechecker_report_converter.report.parser.base import AnalyzerInfo, \ + BaseParser +from codechecker_report_converter.util import load_json_or_empty + + +LOG = logging.getLogger('report-converter') + + +EXTENSION = 'plist' + +PlistItem = Any + + +class _LXMLPlistEventHandler: + """ + Basic lxml event handler. + """ + def start(self, tag, attrib): + pass + + def end(self, tag): + pass + + def data(self, data): + pass + + def comment(self, text): + pass + + def close(self): + return "closed!" 
LOG = logging.getLogger('report-converter')

EXTENSION = 'plist'

PlistItem = Any


class _LXMLPlistEventHandler:
    """ Minimal lxml target object; the callbacks are rebound by
    _LXMLPlistParser to plistlib's element handlers. """
    def start(self, tag, attrib):
        pass

    def end(self, tag):
        pass

    def data(self, data):
        pass

    def comment(self, text):
        pass

    def close(self):
        return "closed!"


class _LXMLPlistParser(_PlistParser):
    """ Plist parser driving plistlib's XML handlers from lxml.

    lxml is faster than the expat-based stdlib parser, which improves the
    performance of plist parsing.
    """
    def __init__(self, dict_type=dict):
        # plistlib._PlistParser.__init__ changed its signature in
        # Python 3.9 (https://github.com/python/cpython/commit/ce81a925ef)
        # so it has to be called according to the running interpreter.
        ctor_params = _PlistParser.__init__.__code__.co_varnames
        if len(ctor_params) == 3 and "use_builtin_types" in ctor_params:
            # Before 3.9 interpreter.
            _PlistParser.__init__(self, True, dict_type)
        else:
            _PlistParser.__init__(self, dict_type)  # pylint: disable=E1120

        self.event_handler = _LXMLPlistEventHandler()
        self.event_handler.start = self.handle_begin_element
        self.event_handler.end = self.handle_end_element
        self.event_handler.data = self.handle_data

        from lxml.etree import XMLParser  # pylint: disable=no-name-in-module
        self.parser = XMLParser(target=self.event_handler)

    def parse(self, fileobj):
        # pylint: disable=no-name-in-module
        from lxml.etree import parse, XMLSyntaxError

        try:
            parse(fileobj, self.parser)
        except XMLSyntaxError as ex:
            LOG.error("Invalid plist file '%s': %s", fileobj.name, ex)
            return

        return self.root


class DiagLoc(TypedDict):
    line: int
    col: int


class DiagEdge(TypedDict):
    start: Tuple[DiagLoc, DiagLoc]
    end: Tuple[DiagLoc, DiagLoc]


class DiagPath(TypedDict):
    kind: str
    message: str
    location: DiagLoc
    edges: List[DiagEdge]


def is_same_control_item(
    curr: DiagPath,
    prev: DiagPath
) -> bool:
    """ True when curr's first edge starts exactly where prev's first
    edge ended. """
    curr_start = curr['edges'][0]['start']
    prev_end = prev['edges'][0]['end']

    return curr_start[0] == prev_end[0] and curr_start[1] == prev_end[1]


def parse(fp: BinaryIO):
    """ Read a .plist file and return the unpacked root object (which
    usually is a dictionary).

    Use the 'lxml' library if it is available, otherwise fall back to the
    'plistlib' standard library module.
    """
    try:
        importlib.import_module('lxml')
        return _LXMLPlistParser().parse(fp)
    except (ExpatError, TypeError, AttributeError) as err:
        LOG.warning('Invalid plist file')
        LOG.warning(err)
        return
    except ImportError:
        LOG.debug("lxml library is not available. Use plistlib to parse "
                  "plist files.")

    try:
        return plistlib.load(fp)
    except (ExpatError, TypeError, AttributeError, ValueError,
            plistlib.InvalidFileException) as err:
        LOG.warning('Invalid plist file')
        LOG.warning(err)
        return


def get_file_index_map(
    plist: Any,
    analyzer_result_dir_path: str,
    file_cache: Dict[str, "File"]
) -> Dict[int, "File"]:
    """ Map the plist 'files' indices to File objects; relative paths are
    resolved against the analyzer result directory. """
    index_map: Dict[int, "File"] = {}

    for idx, raw_path in enumerate(plist.get('files', [])):
        abs_path = os.path.normpath(
            os.path.join(analyzer_result_dir_path, raw_path))
        index_map[idx] = get_or_create_file(abs_path, file_cache)

    return index_map
""" + reports: List[Report] = [] + + try: + with open(analyzer_result_file_path, 'rb') as fp: + plist = parse(fp) + + if not plist: + return reports + + metadata = plist.get('metadata') + analyzer_result_dir_path = os.path.dirname( + analyzer_result_file_path) + files = get_file_index_map( + plist, analyzer_result_dir_path, self._file_cache) + + for diag in plist.get('diagnostics', []): + report = self.__create_report( + analyzer_result_file_path, diag, files, metadata) + + if report.report_hash is None: + report.report_hash = get_report_hash( + report, HashType.PATH_SENSITIVE) + + reports.append(report) + except KeyError as ex: + LOG.warning("Failed to get file path id! Found files: %s. " + "KeyError: %s", files, ex) + except IndexError as iex: + LOG.warning("Indexing error during processing plist file %s", + analyzer_result_file_path) + LOG.warning(type(iex)) + LOG.warning(repr(iex)) + _, _, exc_traceback = sys.exc_info() + traceback.print_tb(exc_traceback, limit=1, file=sys.stdout) + except Exception as ex: + LOG.warning("Error during processing reports from the plist " + "file: %s", analyzer_result_file_path) + traceback.print_exc() + LOG.warning(type(ex)) + LOG.warning(ex) + finally: + return reports + + def __create_report( + self, + analyzer_result_file_path: str, + diag: Dict, + files: Dict[int, File], + metadata: Dict[str, Any] + ) -> Report: + location = diag.get('location', {}) + checker_name = diag.get('check_name', "unknown") + analyzer_name = self.__get_analyzer_name(checker_name, metadata) + severity = self.get_severity(checker_name) + + return Report( + analyzer_result_file_path=analyzer_result_file_path, + file=files[location['file']], + line=location.get('line', -1), + column=location.get('col', -1), + message=diag.get('description', ''), + checker_name=checker_name, + severity=severity, + report_hash=diag.get('issue_hash_content_of_line_in_context'), + analyzer_name=analyzer_name, + category=diag.get('category'), + source_line=None, + 
bug_path_events=self.__get_bug_path_events(diag, files), + bug_path_positions=self.__get_bug_path_positions(diag, files), + notes=self.__get_notes(diag, files), + macro_expansions=self.__get_macro_expansions(diag, files)) + + def __get_analyzer_name( + self, + checker_name: str, + metadata: Dict[str, Any] + ) -> Optional[str]: + """ Get analyzer name for the given checker name. """ + if metadata: + name = metadata.get("analyzer", {}).get("name") + if name: + return name + + if checker_name.startswith('clang-diagnostic-'): + return 'clang-tidy' + + return None + + def __get_bug_event_locations(self, item: PlistItem): + """ Get bug path position for the given plist item. """ + location = item['location'] + ranges = item.get("ranges") + + # Range can provide more precise location information. + # Use that if available. + if ranges: + return location, ranges[0][0], ranges[0][1] + + return location, location, location + + def __get_bug_path_events( + self, + diag, + files: Dict[int, File] + ) -> List[BugPathEvent]: + """ Get bug path events. """ + events = [] + + for item in diag.get('path', []): + if item.get('kind') != 'event': + continue + + location, start_loc, end_loc = self.__get_bug_event_locations(item) + events.append(BugPathEvent( + message=item['message'], + file=files[location['file']], + line=location['line'], + column=location['col'], + range=Range( + start_loc['line'], start_loc['col'], + end_loc['line'], end_loc['col']))) + + return events + + def __get_bug_path_positions( + self, + diag, + files: Dict[int, File] + ) -> List[BugPathPosition]: + """ Get bug path positions. + + In plist file the source and target of the arrows are provided as + starting and ending ranges of the arrow. The path A->B->C is given as + A->B and B->C, thus range B is provided twice if multiple control event + kinds are followed each other. So in the loop we will not store the + start point if the previous path event was a control event. 
+ """ + bug_path_positions = [] + + prev_control_item = None + for item in diag.get('path', []): + if item.get('kind') != 'control': + continue + + try: + edges = item['edges'][0] + + edge = None + if prev_control_item: + if not is_same_control_item(item, prev_control_item): + edge = edges['start'] + else: + edge = edges['start'] + + if edge: + bug_path_positions.append(BugPathPosition( + file=files[edge[1]['file']], + range=Range( + edge[0]['line'], edge[0]['col'], + edge[1]['line'], edge[1]['col']))) + + bug_path_positions.append(BugPathPosition( + file=files[edges['end'][1]['file']], + range=Range( + edges['end'][0]['line'], edges['end'][0]['col'], + edges['end'][1]['line'], edges['end'][1]['col']))) + + prev_control_item = item + except IndexError: + # Edges might be empty nothing can be stored. + continue + + return bug_path_positions + + def __get_notes( + self, + diag, + files: Dict[int, File] + ) -> List[BugPathEvent]: + """ Get notes. """ + notes = [] + + for note in diag.get('notes', []): + if not note['message']: + continue + + location, start_loc, end_loc = self.__get_bug_event_locations(note) + notes.append(BugPathEvent( + message=note['message'], + file=files[location['file']], + line=location['line'], + column=location['col'], + range=Range( + start_loc['line'], start_loc['col'], + end_loc['line'], end_loc['col']))) + + return notes + + def __get_macro_expansions( + self, + diag, + files: Dict[int, File] + ) -> List[MacroExpansion]: + """ Get macro expansion. 
""" + macro_expansions = [] + + for macro in diag.get('macro_expansions', []): + if not macro['expansion']: + continue + + location, start_loc, end_loc = self.__get_bug_event_locations( + macro) + macro_expansions.append(MacroExpansion( + message=macro['expansion'], + name=macro['name'], + file=files[location['file']], + line=location['line'], + column=location['col'], + range=Range( + start_loc['line'], start_loc['col'], + end_loc['line'], end_loc['col']))) + + return macro_expansions + + def __get_tool_info(self) -> Tuple[str, str]: + """ Get tool info. + + If this was called through CodeChecker, this function will return + CodeChecker information, otherwise this tool (report-converter) + information. + """ + data_files_dir_path = os.environ.get('CC_DATA_FILES_DIR') + if data_files_dir_path: + analyzer_version_file_path = os.path.join( + data_files_dir_path, 'config', 'analyzer_version.json') + if os.path.exists(analyzer_version_file_path): + data = load_json_or_empty(analyzer_version_file_path, {}) + version = data.get('version') + if version: + return 'CodeChecker', f"{version['major']}." \ + f"{version['minor']}.{version['revision']}" + + return __title__, __version__ + + def convert( + self, + reports: List[Report], + analyzer_info: Optional[AnalyzerInfo] = None + ): + """ Converts the given reports. 
""" + tool_name, tool_version = self.__get_tool_info() + + data: Dict[str, Any] = { + 'files': [], + 'diagnostics': [], + 'metadata': { + 'generated_by': { + 'name': tool_name, + 'version': tool_version + } + } + } + + if analyzer_info: + data['metadata']['analyzer'] = {'name': analyzer_info.name} + + files = set() + for report in reports: + files.update(report.files) + + file_index_map: Dict[str, int] = {} + for idx, file_path in enumerate(sorted(files)): + data['files'].append(file_path) + file_index_map[file_path] = idx + + for report in reports: + diagnostic = { + 'location': self._create_location( + report.line, report.column, + file_index_map[report.file.original_path]), + 'issue_hash_content_of_line_in_context': report.report_hash, + 'check_name': report.checker_name, + 'description': report.message, + 'category': report.category or 'unknown' + } + + if report.analyzer_name: + diagnostic['type'] = report.analyzer_name + + control_edges = [] + if report.bug_path_positions: + for i in range(len(report.bug_path_positions) - 1): + start = report.bug_path_positions[i] + end = report.bug_path_positions[i + 1] + if start.range and end.range: + control_edges.append(self._create_control_edge( + start.range, start.file, + end.range, end.file, + file_index_map)) + elif len(report.bug_path_events) > 1: + # Create bug path positions from bug path events. + for i in range(len(report.bug_path_events) - 1): + start = report.bug_path_events[i] + start_range = self._get_bug_path_event_range(start) + + end = report.bug_path_events[i + 1] + end_range = self._get_bug_path_event_range(end) + + if start_range == end_range: + continue + + control_edges.append(self._create_control_edge( + start_range, start.file, + end_range, end.file, + file_index_map)) + + path = [] + if control_edges: + path.append(self._create_control_edges(control_edges)) + + # Add bug path events after control points. 
+ if report.bug_path_events: + for event in report.bug_path_events: + path.append(self._create_event(event, file_index_map)) + + diagnostic['path'] = path + + if report.notes: + diagnostic['notes'] = [] + for note in report.notes: + diagnostic['notes'].append( + self._create_note(note, file_index_map)) + + if report.macro_expansions: + diagnostic['macro_expansions'] = [] + for macro_expansion in report.macro_expansions: + diagnostic['macro_expansions'].append( + self._create_macro_expansion( + macro_expansion, file_index_map)) + + data['diagnostics'].append(diagnostic) + + return data + + def write(self, data: Any, output_file_path: str): + """ Creates an analyzer output file from the given data. """ + try: + with open(output_file_path, 'wb') as f: + plistlib.dump(data, f) + except TypeError as err: + LOG.error('Failed to write plist file: %s', output_file_path) + LOG.error(err) + import traceback + traceback.print_exc() + + def _get_bug_path_event_range(self, event: BugPathEvent) -> Range: + """ Get range for bug path event. """ + if event.range: + return event.range + + return Range(event.line, event.column, event.line, event.column) + + def _create_location( + self, + line: int, + column: int, + file_index: int + ): + """ Create a location section from the message. """ + return {'line': line, 'col': column, 'file': file_index} + + def _create_event( + self, + event: BugPathEvent, + file_index_map: Dict[str, int] + ): + """ Create an event. 
""" + data = { + 'kind': 'event', + 'location': self._create_location( + event.line, event.column, + file_index_map[event.file.original_path]), + 'depth': 0, + 'message': event.message} + + if event.range: + data['range'] = self._create_range( + event.range, file_index_map[event.file.original_path]) + + return data + + def _create_control_edges(self, edges: List[Dict]) -> Dict: + """ """ + return {'kind': 'control', 'edges': edges} + + def _create_control_edge( + self, + start_range: Range, + start_file: File, + end_range: Range, + end_file: File, + file_index_map: Dict[str, int] + ) -> Dict: + """ Creates a control point. """ + return { + 'start': self._create_range( + start_range, file_index_map[start_file.original_path]), + 'end': self._create_range( + end_range, file_index_map[end_file.original_path])} + + def _create_note( + self, + note: BugPathEvent, + file_index_map: Dict[str, int] + ): + """ Creates a note. """ + data = { + 'location': self._create_location( + note.line, note.column, + file_index_map[note.file.original_path]), + 'message': note.message} + + if note.range: + data['range'] = self._create_range( + note.range, file_index_map[note.file.original_path]) + + return data + + def _create_range( + self, + range: Range, + file_idx: int + ) -> List: + """ Creates a range. """ + return [ + self._create_location(range.start_line, range.start_col, file_idx), + self._create_location(range.end_line, range.end_col, file_idx)] + + def _create_macro_expansion( + self, + macro_expansion: MacroExpansion, + file_index_map: Dict[str, int] + ): + """ Creates a macro expansion. 
""" + return { + 'name': macro_expansion.name, + 'expansion': macro_expansion.message, + 'location': self._create_location( + macro_expansion.line, macro_expansion.column, + file_index_map[macro_expansion.file.original_path])} + + def replace_report_hash( + self, + plist_file_path: str, + hash_type=HashType.CONTEXT_FREE + ): + """ + Override hash in the given file by using the given version hash. + """ + try: + with open(plist_file_path, 'rb+') as f: + plist = plistlib.load(f) + f.seek(0) + f.truncate() + + metadata = plist.get('metadata') + analyzer_result_dir_path = os.path.dirname(plist_file_path) + + file_cache: Dict[str, File] = {} + files = get_file_index_map( + plist, analyzer_result_dir_path, file_cache) + + for diag in plist['diagnostics']: + report = self.__create_report( + plist_file_path, diag, files, metadata) + diag['issue_hash_content_of_line_in_context'] = \ + get_report_hash(report, hash_type) + + plistlib.dump(plist, f) + except (TypeError, AttributeError, + plistlib.InvalidFileException) as err: + LOG.warning('Failed to process plist file: %s wrong file format?', + plist_file_path) + LOG.warning(err) + except IndexError as iex: + LOG.warning('Indexing error during processing plist file %s', + plist_file_path) + LOG.warning(type(iex)) + LOG.warning(repr(iex)) + _, _, exc_traceback = sys.exc_info() + traceback.print_tb(exc_traceback, limit=1, file=sys.stdout) + except Exception as ex: + LOG.warning('Error during processing reports from the plist ' + 'file: %s', plist_file_path) + traceback.print_exc() + LOG.warning(type(ex)) + LOG.warning(ex) diff --git a/tools/report-converter/codechecker_report_converter/report/report_file.py b/tools/report-converter/codechecker_report_converter/report/report_file.py new file mode 100644 index 0000000000..afffecfe0f --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/report/report_file.py @@ -0,0 +1,110 @@ +# ------------------------------------------------------------------------- +# +# Part 
of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import logging +import os + +from typing import Dict, Iterator, List, Optional, Tuple + +from codechecker_report_converter.report import File, Report +from codechecker_report_converter.report.checker_labels import CheckerLabels +from codechecker_report_converter.report.hash import HashType +from codechecker_report_converter.report.parser import plist +from codechecker_report_converter.report.parser.base import AnalyzerInfo + + +LOG = logging.getLogger('report-converter') + + +SUPPORTED_ANALYZER_EXTENSIONS = [plist.EXTENSION] + + +__SUPPORTED_ANALYZER_EXTENSIONS = tuple([ + f".{extension}" for extension in SUPPORTED_ANALYZER_EXTENSIONS]) + + +def is_supported(analyzer_result_file_path: str) -> bool: + """ True if the given report file can be parsed. """ + return analyzer_result_file_path.endswith(__SUPPORTED_ANALYZER_EXTENSIONS) + + +def get_parser( + analyzer_result_file_path: str, + checker_labels: Optional[CheckerLabels] = None, + file_cache: Optional[Dict[str, File]] = None +): + """ Returns a parser object for the given analyzer result file. """ + if analyzer_result_file_path.endswith(f".{plist.EXTENSION}"): + return plist.Parser(checker_labels, file_cache) + + +def get_reports( + analyzer_result_file_path: str, + checker_labels: Optional[CheckerLabels] = None, + file_cache: Optional[Dict[str, File]] = None +) -> List[Report]: + """ Get reports from the given report file. 
""" + parser = get_parser(analyzer_result_file_path, checker_labels, file_cache) + + if parser: + return parser.get_reports(analyzer_result_file_path) + + return [] + + +def create( + output_file_path: str, + reports: List[Report], + checker_labels: Optional[CheckerLabels] = None, + analyzer_info: Optional[AnalyzerInfo] = None +): + """ Creates an analyzer output file from the given reports. """ + parser = get_parser(output_file_path, checker_labels) + + if parser: + data = parser.convert(reports, analyzer_info) + parser.write(data, output_file_path) + + +def replace_report_hash( + analyzer_result_file_path: str, + hash_type=HashType.CONTEXT_FREE +): + """ Override hash in the given file by using the given version hash. """ + parser = get_parser(analyzer_result_file_path) + + if parser: + parser.replace_report_hash( + analyzer_result_file_path, hash_type) + + +def analyzer_result_files( + input_paths: List[str] +) -> Iterator[Tuple[str, List[str]]]: + """ + Iterate over the input paths and returns an iterator of the supported + analyzer result file paths and metadata information if available. 
+ """ + for input_path in input_paths: + input_path = os.path.abspath(input_path) + LOG.debug("Parsing input argument: '%s'", input_path) + + if os.path.isfile(input_path): + input_dir_path = os.path.dirname(input_path) + if is_supported(input_path): + yield input_dir_path, [input_path] + elif os.path.isdir(input_path): + input_dir_path = input_path + for root_dir, _, file_names in os.walk(input_path): + analyzer_result_file_paths: List[str] = [] + for file_name in file_names: + input_file_path = os.path.join(root_dir, file_name) + if is_supported(input_file_path): + analyzer_result_file_paths.append(input_file_path) + + yield root_dir, analyzer_result_file_paths diff --git a/tools/report-converter/codechecker_report_converter/report/reports.py b/tools/report-converter/codechecker_report_converter/report/reports.py new file mode 100644 index 0000000000..fd06fae719 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/report/reports.py @@ -0,0 +1,128 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import logging +import sys + +from typing import Any, Callable, Iterable, List, Optional, Set + +from codechecker_report_converter.report import Report, SkipListHandler +from codechecker_report_converter.report.hash import get_report_path_hash + +LOG = logging.getLogger('report-converter') + + +class GenericSuppressHandler: + get_suppressed: Callable[[Any, Report], bool] + store_suppress_bug_id: Callable[[Any, str, str, str, str], bool] + + +def get_mentioned_files(reports: List[Report]) -> Set[str]: + """ Get all mentioned files from the given reports. 
""" + files = set() + + for report in reports: + files.update(report.files) + + return files + + +def get_changed_files(reports: List[Report]): + """ Get all changed files from the given reports. """ + changed_files = set() + + for report in reports: + changed_files.update(report.changed_files) + + return changed_files + + +def dump_changed_files(changed_files: Set[str]): + """ Dump changed files. """ + if not changed_files: + return None + + file_paths = '\n'.join([' - ' + f for f in changed_files]) + LOG.warning("The following source file contents changed or missing since " + "the latest analysis:\n%s\nPlease re-analyze your " + "project to update the reports!", file_paths) + + +def skip( + reports: List[Report], + processed_path_hashes: Optional[Set[str]] = None, + skip_handler: Optional[SkipListHandler] = None, + suppr_handler: Optional[GenericSuppressHandler] = None, + src_comment_status_filter: Optional[Iterable[str]] = None +) -> List[Report]: + """ Skip reports. """ + kept_reports = [] + for report in reports: + if skip_handler and report.skip(skip_handler): + LOG.debug("Skip report because file path (%s) is on the skip " + "list.", report.file.path) + continue + + if suppr_handler and suppr_handler.get_suppressed(report): + LOG.debug("Suppressed by suppress file: %s:%s [%s] %s", + report.file.original_path, report.line, + report.checker_name, report.report_hash) + continue + + if report.source_code_comments: + if len(report.source_code_comments) > 1: + LOG.error("Multiple source code comment can be found for '%s' " + "checker in '%s' at line %d.", report.checker_name, + report.file.original_path, report.line) + sys.exit(1) + + if suppr_handler: + if not report.report_hash: + LOG.warning("Can't store suppress information for report " + "because no report hash is set: %s", report) + continue + + source_code_comment = report.source_code_comments[0] + suppr_handler.store_suppress_bug_id( + report.report_hash, + report.file.name, + source_code_comment.message, 
+ source_code_comment.status) + + if src_comment_status_filter and \ + not report.check_source_code_comments( + src_comment_status_filter): + LOG.debug("Filtered out by --review-status filter option: " + "%s:%s [%s] %s [%s]", + report.file.original_path, report.line, + report.checker_name, report.report_hash, + report.review_status) + continue + else: + if src_comment_status_filter and \ + 'unreviewed' not in src_comment_status_filter: + LOG.debug("Filtered out by --review-status filter option: " + "%s:%s [%s] %s [unreviewed]", + report.file.original_path, report.line, + report.checker_name, report.report_hash) + continue + + if processed_path_hashes is not None: + report_path_hash = get_report_path_hash(report) + if report_path_hash in processed_path_hashes: + LOG.debug("Not showing report because it is a deduplication " + "of an already processed report!") + LOG.debug("Path hash: %s", report_path_hash) + LOG.debug(report) + continue + + processed_path_hashes.add(report_path_hash) + + kept_reports.append(report) + + return kept_reports diff --git a/tools/report-converter/codechecker_report_converter/report/statistics.py b/tools/report-converter/codechecker_report_converter/report/statistics.py new file mode 100644 index 0000000000..0f51eee019 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/report/statistics.py @@ -0,0 +1,73 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import logging +import os +import sys + +from collections import defaultdict, namedtuple + +from codechecker_report_converter import twodim +from codechecker_report_converter.report import Report + + +LOG = logging.getLogger('report-converter') + + +Checker = namedtuple('Checker', ['name', 'severity']) + + +class Statistics: + def __init__(self): + self.num_of_analyzer_result_files = 0 + self.num_of_reports = 0 + + self.severity_statistics = defaultdict(int) + self.checker_statistics = defaultdict(int) + self.file_statistics = defaultdict(int) + + def write(self, out=sys.stdout): + """ Print statistics. """ + if self.severity_statistics: + out.write("\n----==== Severity Statistics ====----\n") + header = ["Severity", "Number of reports"] + out.write(twodim.to_table( + [header] + list(self.severity_statistics.items()))) + out.write("\n----=================----\n") + + if self.checker_statistics: + out.write("\n----==== Checker Statistics ====----\n") + header = ["Checker name", "Severity", "Number of reports"] + out.write(twodim.to_table([header] + [ + (c.name, c.severity, n) + for (c, n) in self.checker_statistics.items()])) + out.write("\n----=================----\n") + + if self.file_statistics: + out.write("\n----==== File Statistics ====----\n") + header = ["File name", "Number of reports"] + out.write(twodim.to_table([header] + [ + (os.path.basename(file_path), n) + for (file_path, n) in self.file_statistics.items()])) + out.write("\n----=================----\n") + + out.write("\n----======== Summary ========----\n") + statistics_rows = [ + ["Number of processed analyzer result files", + str(self.num_of_analyzer_result_files)], + ["Number of analyzer reports", str(self.num_of_reports)]] + out.write(twodim.to_table(statistics_rows, False)) + out.write("\n----=================----\n") + + def add_report(self, report: 
Report): + """ Collect statistics from the given report. """ + self.num_of_reports += 1 + self.severity_statistics[report.severity] += 1 + self.checker_statistics[ + Checker(report.checker_name, report.severity)] += 1 + self.file_statistics[report.file.original_path] += 1 diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/address/analyzer_result.py b/tools/report-converter/codechecker_report_converter/sanitizers/address/analyzer_result.py deleted file mode 100644 index d161fd35bf..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/address/analyzer_result.py +++ /dev/null @@ -1,37 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from codechecker_report_converter.analyzer_result import AnalyzerResult -from codechecker_report_converter.plist_converter import PlistConverter - -from .output_parser import ASANParser - - -class ASANAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Clang AddressSanitizer. """ - - TOOL_NAME = 'asan' - NAME = 'AddressSanitizer' - URL = 'https://clang.llvm.org/docs/AddressSanitizer.html' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = ASANParser() - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/address/output_parser.py b/tools/report-converter/codechecker_report_converter/sanitizers/address/output_parser.py deleted file mode 100644 index 87b0380331..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/address/output_parser.py +++ /dev/null @@ -1,53 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -import logging -import re - -from ...output_parser import get_next, Message, Event -from ..output_parser import SANParser - -LOG = logging.getLogger('ReportConverter') - - -class ASANParser(SANParser): - """ Parser for Clang AddressSanitizer console outputs. """ - - def __init__(self): - super(ASANParser, self).__init__() - - # Regex for parsing AddressSanitizer output message. - self.address_line_re = re.compile( - # Error code - r'==(?P\d+)==(ERROR|WARNING): AddressSanitizer: ' - # Checker message. - r'(?P[\S \t]+)') - - def parse_sanitizer_message(self, it, line): - """ Parses AddressSanitizer output message. - - The first event will be the main location of the bug. 
- """ - match = self.address_line_re.match(line) - if not match: - return None, line - - line = get_next(it) - stack_traces, events, line = self.parse_stack_trace(it, line) - - if not events: - return None, line - - notes = [Event(events[0].path, events[0].line, events[0].column, - ''.join(stack_traces))] - - return Message(events[0].path, events[0].line, events[0].column, - match.group('message').strip(), - "AddressSanitizer", - events, notes), line diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/leak/analyzer_result.py b/tools/report-converter/codechecker_report_converter/sanitizers/leak/analyzer_result.py deleted file mode 100644 index 1ad46bbd41..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/leak/analyzer_result.py +++ /dev/null @@ -1,37 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from codechecker_report_converter.analyzer_result import AnalyzerResult -from codechecker_report_converter.plist_converter import PlistConverter - -from .output_parser import LSANParser - - -class LSANAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Clang LeakSanitizer. """ - - TOOL_NAME = 'lsan' - NAME = 'LeakSanitizer' - URL = 'https://clang.llvm.org/docs/LeakSanitizer.html' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = LSANParser() - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/leak/output_parser.py b/tools/report-converter/codechecker_report_converter/sanitizers/leak/output_parser.py deleted file mode 100644 index 6e7cebda74..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/leak/output_parser.py +++ /dev/null @@ -1,55 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -import logging -import re - -from ...output_parser import get_next, Message, Event -from ..output_parser import SANParser - -LOG = logging.getLogger('ReportConverter') - - -class LSANParser(SANParser): - """ Parser for Clang LeakSanitizer console outputs. """ - - def __init__(self): - super(LSANParser, self).__init__() - - # Regex for parsing MemorySanitizer output message. - self.leak_line_re = re.compile( - # Error code - r'==(?P\d+)==(ERROR|WARNING): LeakSanitizer: ' - # Checker message. - r'(?P[\S \t]+)') - - def parse_sanitizer_message(self, it, line): - """ Parses LeakSanitizer output message. - - The first event will be the main location of the bug. 
- """ - match = self.leak_line_re.match(line) - if not match: - return None, line - - line = get_next(it) - stack_traces, events, line = self.parse_stack_trace(it, line) - - if not events: - return None, line - - main_event = events[-1] - - notes = [Event(main_event.path, main_event.line, main_event.column, - ''.join(stack_traces))] - - return Message(main_event.path, main_event.line, main_event.column, - match.group('message').strip(), - "LeakSanitizer", - events, notes), line diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/memory/analyzer_result.py b/tools/report-converter/codechecker_report_converter/sanitizers/memory/analyzer_result.py deleted file mode 100644 index 773306155b..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/memory/analyzer_result.py +++ /dev/null @@ -1,37 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from codechecker_report_converter.analyzer_result import AnalyzerResult -from codechecker_report_converter.plist_converter import PlistConverter - -from .output_parser import MSANParser - - -class MSANAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Clang MemorySanitizer. """ - - TOOL_NAME = 'msan' - NAME = 'MemorySanitizer' - URL = 'https://clang.llvm.org/docs/MemorySanitizer.html' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = MSANParser() - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/memory/output_parser.py b/tools/report-converter/codechecker_report_converter/sanitizers/memory/output_parser.py deleted file mode 100644 index fc44519af2..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/memory/output_parser.py +++ /dev/null @@ -1,55 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -import logging -import re - -from ...output_parser import get_next, Message, Event -from ..output_parser import SANParser - -LOG = logging.getLogger('ReportConverter') - - -class MSANParser(SANParser): - """ Parser for Clang MemorySanitizer console outputs. """ - - def __init__(self): - super(MSANParser, self).__init__() - - # Regex for parsing MemorySanitizer output message. - self.memory_line_re = re.compile( - # Error code - r'==(?P\d+)==(ERROR|WARNING): MemorySanitizer: ' - # Checker message. - r'(?P[\S \t]+)') - - def parse_sanitizer_message(self, it, line): - """ Parses MemorySanitizer output message. - - The first event will be the main location of the bug. 
- """ - match = self.memory_line_re.match(line) - if not match: - return None, line - - line = get_next(it) - stack_traces, events, line = self.parse_stack_trace(it, line) - - if not events: - return None, line - - main_event = events[-1] - - notes = [Event(main_event.path, main_event.line, main_event.column, - ''.join(stack_traces))] - - return Message(main_event.path, main_event.line, main_event.column, - match.group('message').strip(), - "MemorySanitizer", - events, notes), line diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/output_parser.py b/tools/report-converter/codechecker_report_converter/sanitizers/output_parser.py deleted file mode 100644 index 4790f5370f..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/output_parser.py +++ /dev/null @@ -1,91 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from abc import abstractmethod - -import logging -import os -import re - -from ..output_parser import get_next, BaseParser, Event - -LOG = logging.getLogger('ReportConverter') - - -class SANParser(BaseParser): - """ Parser for Clang UndefinedBehaviourSanitizer console outputs. - - Example output - /a/b/main.cpp:13:10: runtime error: load of value 7... - """ - - def __init__(self): - super(SANParser, self).__init__() - - # Regex for parsing stack trace line. - # It has the following format: - # #1 0x42a51d in main /dummy/main.cpp:24:2 - self.stack_trace_re = re.compile(r'^\s+#\d+') - - self.file_re = re.compile( - r'(?P[\S]+?):(?P\d+)(:(?P\d+))?') - - @abstractmethod - def parse_sanitizer_message(self, it, line): - """ Parse the given line. 
""" - raise NotImplementedError("Subclasses should implement this!") - - def parse_message(self, it, line): - """Parse the given line. - - Returns a (message, next_line) pair or throws a StopIteration. - The message could be None. - """ - message, next_line = self.parse_sanitizer_message(it, line) - if message: - return message, next_line - - return None, next(it) - - def parse_stack_trace_line(self, line): - """ Parse the given stack trace line. - - Return an event if the file in the stack trace line exists otherwise - it returns None. - """ - file_match = self.file_re.search(line) - if not file_match: - return None - - file_path = file_match.group('path') - if file_path and os.path.exists(file_path): - col = file_match.group('column') - return Event(os.path.abspath(file_path), - int(file_match.group('line')), - int(col) if col else 0, - line.rstrip()) - - return None - - def parse_stack_trace(self, it, line): - """ Iterate over lines and parse stack traces. """ - events = [] - stack_traces = [] - - while line.strip(): - event = self.parse_stack_trace_line(line) - if event: - events.append(event) - - stack_traces.append(line) - line = get_next(it) - - events.reverse() - - return stack_traces, events, line diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/thread/analyzer_result.py b/tools/report-converter/codechecker_report_converter/sanitizers/thread/analyzer_result.py deleted file mode 100644 index dec2b43f44..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/thread/analyzer_result.py +++ /dev/null @@ -1,37 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. 
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from codechecker_report_converter.analyzer_result import AnalyzerResult -from codechecker_report_converter.plist_converter import PlistConverter - -from .output_parser import TSANParser - - -class TSANAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Clang ThreadSanitizer. """ - - TOOL_NAME = 'tsan' - NAME = 'ThreadSanitizer' - URL = 'https://clang.llvm.org/docs/ThreadSanitizer.html' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. - """ - parser = TSANParser() - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/thread/output_parser.py b/tools/report-converter/codechecker_report_converter/sanitizers/thread/output_parser.py deleted file mode 100644 index 2512180534..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/thread/output_parser.py +++ /dev/null @@ -1,53 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -import logging -import re - -from ...output_parser import get_next, Message, Event -from ..output_parser import SANParser - -LOG = logging.getLogger('ReportConverter') - - -class TSANParser(SANParser): - """ Parser for Clang AddressSanitizer console outputs. 
""" - - def __init__(self): - super(TSANParser, self).__init__() - - # Regex for parsing ThreadSanitizer output message. - self.tsan_line_re = re.compile( - # Error code - r'==(?P\d+)==(ERROR|WARNING): ThreadSanitizer: ' - # Checker message. - r'(?P[\S \t]+)') - - def parse_sanitizer_message(self, it, line): - """ Parses ThreadSanitizer output message. - - The first event will be the main location of the bug. - """ - match = self.tsan_line_re.match(line) - if not match: - return None, line - - line = get_next(it) - stack_traces, events, line = self.parse_stack_trace(it, line) - - if not events: - return None, line - - notes = [Event(events[0].path, events[0].line, events[0].column, - ''.join(stack_traces))] - - return Message(events[0].path, events[0].line, events[0].column, - match.group('message').strip(), - "ThreadSanitizer", - events, notes), line diff --git a/tools/report-converter/codechecker_report_converter/sanitizers/ub/analyzer_result.py b/tools/report-converter/codechecker_report_converter/sanitizers/ub/analyzer_result.py deleted file mode 100644 index 7b0cd57205..0000000000 --- a/tools/report-converter/codechecker_report_converter/sanitizers/ub/analyzer_result.py +++ /dev/null @@ -1,37 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - - -from codechecker_report_converter.analyzer_result import AnalyzerResult -from codechecker_report_converter.plist_converter import PlistConverter - -from .output_parser import UBSANParser - - -class UBSANAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Clang UndefinedBehaviourSanitizer. 
""" - - TOOL_NAME = 'ubsan' - NAME = 'UndefinedBehaviorSanitizer' - URL = 'https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. - """ - parser = UBSANParser() - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/smatch/analyzer_result.py b/tools/report-converter/codechecker_report_converter/smatch/analyzer_result.py deleted file mode 100644 index feb9990687..0000000000 --- a/tools/report-converter/codechecker_report_converter/smatch/analyzer_result.py +++ /dev/null @@ -1,36 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import SmatchParser -from ..plist_converter import PlistConverter - - -class SmatchAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Smatch. """ - - TOOL_NAME = 'smatch' - NAME = 'Smatch' - URL = 'https://repo.or.cz/w/smatch.git' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = SmatchParser(analyzer_result) - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/codechecker_common/source_code_comment_handler.py b/tools/report-converter/codechecker_report_converter/source_code_comment_handler.py similarity index 79% rename from codechecker_common/source_code_comment_handler.py rename to tools/report-converter/codechecker_report_converter/source_code_comment_handler.py index 102c8cf696..340908cdc9 100644 --- a/codechecker_common/source_code_comment_handler.py +++ b/tools/report-converter/codechecker_report_converter/source_code_comment_handler.py @@ -9,13 +9,16 @@ Source code comment handling. """ - +import json +import logging import re -from codechecker_common import util -from codechecker_common.logger import get_logger +from typing import Dict, Iterable, List, Optional, Set, TextIO, Tuple + +from . import util -LOG = get_logger('system') + +LOG = logging.getLogger('report-converter') REVIEW_STATUS_VALUES = ["confirmed", "false_positive", "intentional", @@ -43,6 +46,46 @@ class SpellException(Exception): pass +class SourceCodeComment: + def __init__( + self, + checkers: Set[str], + message: str, + status: str, + line: Optional[str] = None + ): + self.checkers = checkers + self.message = message + self.status = status + self.line = line + + def to_json(self) -> Dict: + """ Creates a JSON dictionary. 
""" + return { + "checkers": list(self.checkers), + "message": self.message, + "status": self.status, + "line": self.line + } + + def __eq__(self, other) -> bool: + if isinstance(other, SourceCodeComment): + return self.checkers == other.checkers and \ + self.message == other.message and \ + self.status == other.status and \ + self.line == other.line + + raise NotImplementedError( + "Comparison SourceCodeComment object with '%s' is not supported", + type(other)) + + def __repr__(self): + return json.dumps(self.to_json()) + + +SourceCodeComments = List[SourceCodeComment] + + class SourceCodeCommentHandler: """ Handle source code comments. @@ -54,7 +97,7 @@ class SourceCodeCommentHandler: 'codechecker_confirmed'] @staticmethod - def __check_if_comment(source_line): + def __check_if_comment(source_line: str) -> bool: """ Check if the line is a comment. Accepted comment format is only if line starts with '//'. @@ -62,7 +105,7 @@ def __check_if_comment(source_line): return source_line.strip().startswith('//') @staticmethod - def __check_if_cstyle_comment(source_line): + def __check_if_cstyle_comment(source_line) -> Tuple[bool, bool]: """ Check if the line contains the start '/*' or the the end '*/' of a C style comment. @@ -72,7 +115,10 @@ def __check_if_cstyle_comment(source_line): cstyle_end = '*/' in src_line return cstyle_start, cstyle_end - def __process_source_line_comment(self, source_line_comment): + def __process_source_line_comment( + self, + source_line_comment: str + ) -> Optional[SourceCodeComment]: """ Process CodeChecker source code comment. @@ -96,7 +142,6 @@ def __process_source_line_comment(self, source_line_comment): line comment */ """ - # Remove extra spaces if any. 
formatted = ' '.join(source_line_comment.split()) @@ -109,7 +154,7 @@ def __process_source_line_comment(self, source_line_comment): res = re.match(ptn, formatted) if not res: - return + return None checkers_names = set() review_status = 'false_positive' @@ -136,11 +181,9 @@ def __process_source_line_comment(self, source_line_comment): elif status == 'codechecker_confirmed': review_status = 'confirmed' - return {'checkers': checkers_names, - 'message': message, - 'status': review_status} + return SourceCodeComment(checkers_names, message, review_status) - def has_source_line_comments(self, fp, line): + def has_source_line_comments(self, fp: TextIO, line: int) -> bool: """ Return True if there is any source code comment or False if not, for a given line. @@ -151,17 +194,21 @@ def has_source_line_comments(self, fp, line): # Misspell in the review status comment. LOG.warning(ex) return False - return len(comments) + return bool(comments) - def scan_source_line_comments(self, fp, line_numbers): + def scan_source_line_comments( + self, + fp: TextIO, + line_numbers: Iterable[int] + ) -> Tuple[List[Tuple[int, SourceCodeComments]], List[str]]: """collect all the source line review comments if exists in a source file at the given line numbers. returns a list of (line_num, comments) tuples where comments were found. """ - comments = [] - misspelled_comments = [] + comments: List[Tuple[int, SourceCodeComments]] = [] + misspelled_comments: List[str] = [] if not contains_codechecker_comment(fp): return comments, misspelled_comments @@ -173,7 +220,11 @@ def scan_source_line_comments(self, fp, line_numbers): misspelled_comments.append(str(ex)) return comments, misspelled_comments - def get_source_line_comments(self, fp, bug_line): + def get_source_line_comments( + self, + fp: TextIO, + bug_line: int + ) -> SourceCodeComments: """ Returns the preprocessed source code comments for a bug line. 
raise: SpellException in case there is a spell error in the @@ -193,7 +244,6 @@ def get_source_line_comments(self, fp, bug_line): cstyle_end_found = False while True: - source_line = util.get_linef(fp, previous_line_num) # cpp style comment @@ -236,12 +286,12 @@ def get_source_line_comments(self, fp, bug_line): review_comment = ' '.join(r_comment).strip() - comment = \ - self.__process_source_line_comment(review_comment) + source_line_comment = self.__process_source_line_comment( + review_comment) - if comment: - comment['line'] = orig_review_comment - source_line_comments.append(comment) + if source_line_comment: + source_line_comment.line = orig_review_comment + source_line_comments.append(source_line_comment) else: orig_review_comment = orig_review_comment.strip() raise SpellException( @@ -261,7 +311,12 @@ def get_source_line_comments(self, fp, bug_line): return source_line_comments - def filter_source_line_comments(self, fp, bug_line, checker_name): + def filter_source_line_comments( + self, + fp: TextIO, + bug_line: int, + checker_name: str + ) -> SourceCodeComments: """ This function filters the available source code comments for bug line by the checker name and returns a list of source code comments. 
@@ -295,7 +350,7 @@ def filter_source_line_comments(self, fp, bug_line, checker_name): checker_name_comments = [] for line_comment in source_line_comments: - for bug_name in line_comment['checkers']: + for bug_name in line_comment.checkers: if (bug_name in checker_name) or (bug_name == 'all'): checker_name_comments.append(line_comment) diff --git a/tools/report-converter/codechecker_report_converter/sparse/analyzer_result.py b/tools/report-converter/codechecker_report_converter/sparse/analyzer_result.py deleted file mode 100644 index a56a63848b..0000000000 --- a/tools/report-converter/codechecker_report_converter/sparse/analyzer_result.py +++ /dev/null @@ -1,36 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import SparseParser -from ..plist_converter import PlistConverter - - -class SparseAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Sparse. """ - - TOOL_NAME = 'sparse' - NAME = 'Sparse' - URL = 'https://git.kernel.org/pub/scm/devel/sparse/sparse.git' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = SparseParser(analyzer_result) - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/sphinx/analyzer_result.py b/tools/report-converter/codechecker_report_converter/sphinx/analyzer_result.py deleted file mode 100644 index 0e63bce852..0000000000 --- a/tools/report-converter/codechecker_report_converter/sphinx/analyzer_result.py +++ /dev/null @@ -1,36 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import SphinxParser -from ..plist_converter import PlistConverter - - -class SphinxAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of Sphinx. """ - - TOOL_NAME = 'sphinx' - NAME = 'Sphinx' - URL = 'https://github.com/sphinx-doc/sphinx' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = SphinxParser(analyzer_result) - - content = self._get_analyzer_result_file_content(analyzer_result) - if not content: - return - - messages = parser.parse_messages(content) - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/spotbugs/analyzer_result.py b/tools/report-converter/codechecker_report_converter/spotbugs/analyzer_result.py deleted file mode 100644 index 30627067be..0000000000 --- a/tools/report-converter/codechecker_report_converter/spotbugs/analyzer_result.py +++ /dev/null @@ -1,33 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import SpotBugsParser -from .plist_converter import SpotBugsPlistConverter - - -class SpotBugsAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of SpotBugs. """ - - TOOL_NAME = 'spotbugs' - NAME = 'spotbugs' - URL = 'https://spotbugs.github.io' - - def parse(self, analyzer_result): - """ Creates plist files from the given analyzer result to the given - output directory. 
- """ - parser = SpotBugsParser() - messages = parser.parse_messages(analyzer_result) - if not messages: - return None - - plist_converter = SpotBugsPlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/spotbugs/plist_converter.py b/tools/report-converter/codechecker_report_converter/spotbugs/plist_converter.py deleted file mode 100644 index 554d271bfa..0000000000 --- a/tools/report-converter/codechecker_report_converter/spotbugs/plist_converter.py +++ /dev/null @@ -1,22 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -from ..plist_converter import PlistConverter - - -class SpotBugsPlistConverter(PlistConverter): - """ SpotBugs plist converter. """ - - def _create_diag(self, message, files): - """ Creates a new plist diagnostic from the given message. """ - diag = super(SpotBugsPlistConverter, self) \ - ._create_diag(message, files) - diag['orig_issue_hash_content_of_line_in_context'] = \ - message.report_hash - - return diag diff --git a/tools/report-converter/codechecker_report_converter/tslint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/tslint/analyzer_result.py deleted file mode 100644 index d87762c36a..0000000000 --- a/tools/report-converter/codechecker_report_converter/tslint/analyzer_result.py +++ /dev/null @@ -1,39 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. 
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -import logging - -from codechecker_report_converter.analyzer_result import AnalyzerResult - -from .output_parser import TSLintParser -from ..plist_converter import PlistConverter - - -LOG = logging.getLogger('ReportConverter') - - -class TSLintAnalyzerResult(AnalyzerResult): - """ Transform analyzer result of the TSLint analyzer. """ - - TOOL_NAME = 'tslint' - NAME = 'TSLint' - URL = 'https://palantir.github.io/tslint' - - def parse(self, analyzer_result): - """ Creates plist objects from the given analyzer result. - - Returns a list of plist objects. - """ - parser = TSLintParser() - messages = parser.parse_messages(analyzer_result) - if not messages: - return - - plist_converter = PlistConverter(self.TOOL_NAME) - plist_converter.add_messages(messages) - return plist_converter.get_plist_results() diff --git a/tools/report-converter/codechecker_report_converter/tslint/output_parser.py b/tools/report-converter/codechecker_report_converter/tslint/output_parser.py deleted file mode 100644 index e38d4524dd..0000000000 --- a/tools/report-converter/codechecker_report_converter/tslint/output_parser.py +++ /dev/null @@ -1,60 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -import logging -import os -import json - -from ..output_parser import Message, BaseParser - -LOG = logging.getLogger('ReportConverter') - - -class TSLintParser(BaseParser): - """ Parser for TSLint output. """ - - def parse_messages(self, analyzer_result): - """ Parse the given analyzer result. 
""" - if not os.path.exists(analyzer_result): - LOG.error("Report file does not exist: %s", analyzer_result) - return - - try: - with open(analyzer_result, 'r', - encoding="utf-8", errors="ignore") as report_f: - reports = json.load(report_f) - except (IOError, json.decoder.JSONDecodeError): - LOG.error("Failed to parse the given analyzer result '%s'. Please " - "give a valid json file generated by TSLint.", - analyzer_result) - return - - for report in reports: - file_path = os.path.join(os.path.dirname(analyzer_result), - report.get('name')) - if not os.path.exists(file_path): - LOG.warning("Source file does not exists: %s", file_path) - continue - - message = self.__parse_report(report, file_path) - if message: - self.messages.append(message) - - return self.messages - - def __parse_report(self, bug, file_path): - """ Parse the given report and create a message from them. """ - checker_name = bug['ruleName'] - - message = bug['failure'] - - end_pos = bug['startPosition'] - line = int(end_pos['line'] + 1) - col = int(end_pos['character'] + 1) - - return Message(file_path, line, col, message, checker_name) diff --git a/codechecker_common/output/twodim.py b/tools/report-converter/codechecker_report_converter/twodim.py similarity index 79% rename from codechecker_common/output/twodim.py rename to tools/report-converter/codechecker_report_converter/twodim.py index fa7e59aabd..22ed74fbac 100644 --- a/codechecker_common/output/twodim.py +++ b/tools/report-converter/codechecker_report_converter/twodim.py @@ -11,12 +11,19 @@ import json + from operator import itemgetter +from typing import Iterable, List, Optional -def to_str(format_name, keys, rows, - sort_by_column_number=None, rev=False, - separate_footer=False): +def to_str( + format_name: str, + keys, + rows, + sort_by_column_number: Optional[int] = None, + rev=False, + separate_footer=False +) -> str: """ Converts the given two-dimensional array (with the specified keys) to the given format. 
@@ -29,32 +36,33 @@ def to_str(format_name, keys, rows, all_rows = [keys] + list(rows) if format_name == 'rows': - return __to_rows(rows) + return to_rows(rows) elif format_name == 'table' or format_name == 'plaintext': # TODO: 'plaintext' for now to support the 'CodeChecker cmd' interface. - return __to_table(all_rows, True, separate_footer) + return to_table(all_rows, True, separate_footer) elif format_name == 'csv': - return __to_csv(all_rows) + return to_csv(all_rows) elif format_name == 'dictlist': - return __to_dictlist(keys, rows) + return to_dictlist(keys, rows) elif format_name == 'json': - return json.dumps(__to_dictlist(keys, rows)) + return json.dumps(to_dictlist(keys, rows)) else: raise ValueError("Unsupported format") -def __to_rows(lines): +def to_rows(lines: Iterable[str]) -> str: """ Prints the given rows with minimal formatting. """ str_parts = [] - lines = [['' if e is None else e for e in line] for line in lines] + lns: List[List[str]] = [ + ['' if e is None else e for e in line] for line in lines] # Count the column width. - widths = [] - for line in lines: + widths: List[int] = [] + for line in lns: for i, size in enumerate([len(str(x)) for x in line]): while i >= len(widths): widths.append(0) @@ -68,12 +76,14 @@ def __to_rows(lines): print_string += "{" + str(i) + "} " else: print_string += "{" + str(i) + ":" + str(width) + "} " + if not print_string: - return + return '' + print_string = print_string[:-1] # Print the actual data. - for i, line in enumerate(lines): + for i, line in enumerate(lns): try: str_parts.append(print_string.format(*line)) except IndexError: @@ -83,7 +93,11 @@ def __to_rows(lines): return '\n'.join(str_parts) -def __to_table(lines, separate_head=True, separate_footer=False): +def to_table( + lines: Iterable[str], + separate_head=True, + separate_footer=False +) -> str: """ Pretty-prints the given two-dimensional array's lines. 
""" @@ -93,11 +107,12 @@ def __to_table(lines, separate_head=True, separate_footer=False): # It is possible that one of the item in the line is None which will # raise an exception when passed to the format function below. So this is # the reason why we need to convert None values to valid strings here. - lines = [['' if e is None else e for e in line] for line in lines] + lns: List[List[str]] = [ + ['' if e is None else e for e in line] for line in lines] # Count the column width. - widths = [] - for line in lines: + widths: List[int] = [] + for line in lns: for i, size in enumerate([len(str(x)) for x in line]): while i >= len(widths): widths.append(0) @@ -108,13 +123,15 @@ def __to_table(lines, separate_head=True, separate_footer=False): print_string = "" for i, width in enumerate(widths): print_string += "{" + str(i) + ":" + str(width) + "} | " + if not print_string: - return + return '' + print_string = print_string[:-3] # Print the actual data. str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1))) - for i, line in enumerate(lines): + for i, line in enumerate(lns): try: str_parts.append(print_string.format(*line)) except IndexError: @@ -122,7 +139,7 @@ def __to_table(lines, separate_head=True, separate_footer=False): "columns than the others") if i == 0 and separate_head: str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1))) - if separate_footer and i == len(lines) - 2: + if separate_footer and i == len(lns) - 2: str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1))) str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1))) @@ -130,18 +147,19 @@ def __to_table(lines, separate_head=True, separate_footer=False): return '\n'.join(str_parts) -def __to_csv(lines): +def to_csv(lines: Iterable[str]) -> str: """ Pretty-print the given two-dimensional array's lines in CSV format. 
""" str_parts = [] - lines = [['' if e is None else e for e in line] for line in lines] + lns: List[List[str]] = [ + ['' if e is None else e for e in line] for line in lines] # Count the columns. columns = 0 - for line in lines: + for line in lns: if len(line) > columns: columns = len(line) @@ -150,11 +168,12 @@ def __to_csv(lines): print_string += "{" + str(i) + "}," if not print_string: - return + return '' + print_string = print_string[:-1] # Print the actual data. - for line in lines: + for line in lns: try: str_parts.append(print_string.format(*line)) except IndexError: @@ -164,7 +183,7 @@ def __to_csv(lines): return '\n'.join(str_parts) -def __to_dictlist(key_list, lines): +def to_dictlist(key_list, lines): """ Pretty-print the given two-dimensional array's lines into a JSON object list. The key_list acts as the "header" of the table, specifying the diff --git a/tools/report-converter/codechecker_report_converter/util.py b/tools/report-converter/codechecker_report_converter/util.py new file mode 100644 index 0000000000..55a42f8006 --- /dev/null +++ b/tools/report-converter/codechecker_report_converter/util.py @@ -0,0 +1,151 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +import json +import logging +import os +import portalocker +import sys + +from typing import Dict, List, Optional, TextIO + + +LOG = logging.getLogger('report-converter') + + +def get_last_mod_time(file_path: str) -> Optional[float]: + """ Return the last modification time of a file. 
""" + try: + return os.stat(file_path).st_mtime + except OSError as err: + LOG.debug("File is missing: %s", err) + return None + + +def get_linef(fp: TextIO, line_no: int) -> str: + """'fp' should be (readable) file object. + Return the line content at line_no or an empty line + if there is less lines than line_no. + """ + fp.seek(0) + for line in fp: + line_no -= 1 + if line_no == 0: + return line + return '' + + +def get_line(file_path: str, line_no: int, errors: str = 'ignore') -> str: + """ + Return the given line from the file. If line_no is larger than the number + of lines in the file then empty string returns. + If the file can't be opened for read, the function also returns empty + string. + + Try to encode every file as utf-8 to read the line content do not depend + on the platform settings. By default locale.getpreferredencoding() is used + which depends on the platform. + + Changing the encoding error handling can influence the hash content! + """ + try: + with open(file_path, mode='r', encoding='utf-8', errors=errors) as f: + return get_linef(f, line_no) + except IOError: + LOG.error("Failed to open file %s", file_path) + return '' + + +def trim_path_prefixes(path: str, prefixes: Optional[List[str]]) -> str: + """ + Removes the longest matching leading path from the file path. + """ + + # If no prefixes are specified. + if not prefixes: + return path + + # Find the longest matching prefix in the path. + longest_matching_prefix = None + for prefix in prefixes: + if not prefix.endswith('/'): + prefix += '/' + + if path.startswith(prefix) and (not longest_matching_prefix or + longest_matching_prefix < prefix): + longest_matching_prefix = prefix + + # If no prefix found or the longest prefix is the root do not trim the + # path. 
+ if not longest_matching_prefix or longest_matching_prefix == '/': + return path + + return path[len(longest_matching_prefix):] + + +def load_json_or_empty(path: str, default=None, kind=None, lock=False): + """ + Load the contents of the given file as a JSON and return it's value, + or default if the file can't be loaded. + """ + + ret = default + try: + with open(path, 'r', encoding='utf-8', errors='ignore') as handle: + if lock: + portalocker.lock(handle, portalocker.LOCK_SH) + + ret = json.loads(handle.read()) + + if lock: + portalocker.unlock(handle) + except IOError as ex: + LOG.warning("Failed to open %s file: %s", + kind if kind else 'json', + path) + LOG.warning(ex) + except OSError as ex: + LOG.warning("Failed to open %s file: %s", + kind if kind else 'json', + path) + LOG.warning(ex) + except ValueError as ex: + LOG.warning("'%s' is not a valid %s file.", + kind if kind else 'json', + path) + LOG.warning(ex) + except TypeError as ex: + LOG.warning('Failed to process %s file: %s', + kind if kind else 'json', + path) + LOG.warning(ex) + + return ret + + +def dump_json_output( + data: Dict, + output_file_path: Optional[str] = None, + out=sys.stdout +) -> str: + """ + Write JSON data to the given output file and returns the written output. + """ + data_str = json.dumps(data) + + # Write output data to the file if given. 
+ if output_file_path: + with open(output_file_path, mode='w', + encoding='utf-8', errors="ignore") as f: + f.write(data_str) + + LOG.info('JSON report file was created: %s', output_file_path) + elif out: + out.write(f"{data_str}\n") + + return data_str diff --git a/tools/report-converter/requirements_py/dev/requirements.txt b/tools/report-converter/requirements_py/dev/requirements.txt index 020e4a30e5..6d9bb6b474 100644 --- a/tools/report-converter/requirements_py/dev/requirements.txt +++ b/tools/report-converter/requirements_py/dev/requirements.txt @@ -1,3 +1,6 @@ nose==1.3.7 pycodestyle==2.7.0 pylint==2.8.2 +portalocker==2.2.1 +mypy==0.812 +mypy_extensions==0.4.3 diff --git a/tools/report-converter/setup.py b/tools/report-converter/setup.py index 5bd36802a6..b4fde1ba11 100644 --- a/tools/report-converter/setup.py +++ b/tools/report-converter/setup.py @@ -23,9 +23,6 @@ license='LICENSE.txt', packages=setuptools.find_packages(), include_package_data=True, - install_requires=[ - "codechecker_report_hash" - ], classifiers=[ "Environment :: Console", "Intended Audience :: Developers", @@ -36,7 +33,8 @@ python_requires='>=3.6', entry_points={ 'console_scripts': [ - 'report-converter = codechecker_report_converter.cli:main' + 'report-converter = codechecker_report_converter.cli:main', + 'plist-to-html = codechecker_report_converter.report.output.html.cli:main' ] }, ) diff --git a/tools/report-converter/tests/Makefile b/tools/report-converter/tests/Makefile index d948572115..5099dc2ee3 100644 --- a/tools/report-converter/tests/Makefile +++ b/tools/report-converter/tests/Makefile @@ -2,15 +2,28 @@ REPO_ROOT ?= REPO_ROOT=$(ROOT) +# Test project configuration, tests are run on these files. +TEST_PROJECT ?= TEST_PROJ=$(CURRENT_DIR)/tests/projects + +LAYOUT_DIR ?= LAYOUT_DIR=$(STATIC_DIR) + # Nose test runner configuration options. 
NOSECFG = --config .noserc -test: pycodestyle pylint test_unit test_functional +test: mypy pycodestyle pylint test_unit test_functional + +test_in_env: mypy_in_env pycodestyle_in_env pylint_in_env test_unit_in_env + +MYPY_TEST_CMD = mypy --ignore-missing-imports codechecker_report_converter + +mypy: + $(MYPY_TEST_CMD) -test_in_env: pycodestyle_in_env pylint_in_env test_unit_in_env +mypy_in_env: venv_dev + $(ACTIVATE_DEV_VENV) && $(MYPY_TEST_CMD) PYCODESTYLE_TEST_CMD = pycodestyle codechecker_report_converter tests \ - --exclude tests/unit/pyflakes_output_test_files/files + --exclude tests/unit/analyzers/pyflakes_output_test_files/files pycodestyle: $(PYCODESTYLE_TEST_CMD) @@ -27,7 +40,7 @@ pylint: pylint_in_env: venv $(ACTIVATE_DEV_VENV) && $(PYLINT_TEST_CMD) -UNIT_TEST_CMD = $(REPO_ROOT) \ +UNIT_TEST_CMD = $(REPO_ROOT) $(TEST_PROJECT) $(LAYOUT_DIR) \ nosetests $(NOSECFG) tests/unit test_unit: @@ -36,7 +49,7 @@ test_unit: test_unit_in_env: venv_dev $(ACTIVATE_DEV_VENV) && $(UNIT_TEST_CMD) -FUNCTIONAL_TEST_CMD = $(REPO_ROOT) \ +FUNCTIONAL_TEST_CMD = $(REPO_ROOT) $(TEST_PROJECT) \ nosetests $(NOSECFG) tests/functional test_functional: diff --git a/tools/report-converter/codechecker_report_converter/spotbugs/__init__.py b/tools/report-converter/tests/libtest/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/spotbugs/__init__.py rename to tools/report-converter/tests/libtest/__init__.py diff --git a/tools/plist_to_html/tests/libtest/env.py b/tools/report-converter/tests/libtest/env.py similarity index 100% rename from tools/plist_to_html/tests/libtest/env.py rename to tools/report-converter/tests/libtest/env.py diff --git a/tools/codechecker_report_hash/tests/projects/cpp/multi_error.cpp b/tools/report-converter/tests/projects/cpp/multi_error.cpp similarity index 100% rename from tools/codechecker_report_hash/tests/projects/cpp/multi_error.cpp rename to tools/report-converter/tests/projects/cpp/multi_error.cpp diff --git 
a/tools/codechecker_report_hash/tests/projects/cpp/multi_error.plist b/tools/report-converter/tests/projects/cpp/multi_error.plist similarity index 100% rename from tools/codechecker_report_hash/tests/projects/cpp/multi_error.plist rename to tools/report-converter/tests/projects/cpp/multi_error.plist diff --git a/tools/plist_to_html/tests/projects/macros/macros.cpp b/tools/report-converter/tests/projects/macros/macros.cpp similarity index 100% rename from tools/plist_to_html/tests/projects/macros/macros.cpp rename to tools/report-converter/tests/projects/macros/macros.cpp diff --git a/tools/plist_to_html/tests/projects/macros/macros.plist b/tools/report-converter/tests/projects/macros/macros.plist similarity index 100% rename from tools/plist_to_html/tests/projects/macros/macros.plist rename to tools/report-converter/tests/projects/macros/macros.plist diff --git a/tools/plist_to_html/tests/projects/notes/notes.cpp b/tools/report-converter/tests/projects/notes/notes.cpp similarity index 100% rename from tools/plist_to_html/tests/projects/notes/notes.cpp rename to tools/report-converter/tests/projects/notes/notes.cpp diff --git a/tools/plist_to_html/tests/projects/notes/notes.plist b/tools/report-converter/tests/projects/notes/notes.plist similarity index 100% rename from tools/plist_to_html/tests/projects/notes/notes.plist rename to tools/report-converter/tests/projects/notes/notes.plist diff --git a/tools/plist_to_html/tests/projects/simple/simple.cpp b/tools/report-converter/tests/projects/simple/simple.cpp similarity index 100% rename from tools/plist_to_html/tests/projects/simple/simple.cpp rename to tools/report-converter/tests/projects/simple/simple.cpp diff --git a/tools/plist_to_html/tests/projects/simple/simple.plist b/tools/report-converter/tests/projects/simple/simple.plist similarity index 100% rename from tools/plist_to_html/tests/projects/simple/simple.plist rename to tools/report-converter/tests/projects/simple/simple.plist diff --git 
a/tools/report-converter/codechecker_report_converter/tslint/__init__.py b/tools/report-converter/tests/unit/analyzers/__init__.py similarity index 100% rename from tools/report-converter/codechecker_report_converter/tslint/__init__.py rename to tools/report-converter/tests/unit/analyzers/__init__.py diff --git a/tools/report-converter/tests/unit/asan_output_test_files/asan.out b/tools/report-converter/tests/unit/analyzers/asan_output_test_files/asan.out similarity index 100% rename from tools/report-converter/tests/unit/asan_output_test_files/asan.out rename to tools/report-converter/tests/unit/analyzers/asan_output_test_files/asan.out diff --git a/tools/report-converter/tests/unit/asan_output_test_files/asan.plist b/tools/report-converter/tests/unit/analyzers/asan_output_test_files/asan.plist similarity index 96% rename from tools/report-converter/tests/unit/asan_output_test_files/asan.plist rename to tools/report-converter/tests/unit/analyzers/asan_output_test_files/asan.plist index 0a3509a04e..2284e9b03b 100644 --- a/tools/report-converter/tests/unit/asan_output_test_files/asan.plist +++ b/tools/report-converter/tests/unit/analyzers/asan_output_test_files/asan.plist @@ -25,10 +25,6 @@ notes - depth - 0 - kind - note location col diff --git a/tools/report-converter/tests/unit/asan_output_test_files/files/asan.cpp b/tools/report-converter/tests/unit/analyzers/asan_output_test_files/files/asan.cpp similarity index 100% rename from tools/report-converter/tests/unit/asan_output_test_files/files/asan.cpp rename to tools/report-converter/tests/unit/analyzers/asan_output_test_files/files/asan.cpp diff --git a/tools/report-converter/tests/unit/coccinelle_output_test_files/files/sample.c b/tools/report-converter/tests/unit/analyzers/coccinelle_output_test_files/files/sample.c similarity index 100% rename from tools/report-converter/tests/unit/coccinelle_output_test_files/files/sample.c rename to 
tools/report-converter/tests/unit/analyzers/coccinelle_output_test_files/files/sample.c diff --git a/tools/report-converter/tests/unit/coccinelle_output_test_files/sample.expected.plist b/tools/report-converter/tests/unit/analyzers/coccinelle_output_test_files/sample.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/coccinelle_output_test_files/sample.expected.plist rename to tools/report-converter/tests/unit/analyzers/coccinelle_output_test_files/sample.expected.plist diff --git a/tools/report-converter/tests/unit/coccinelle_output_test_files/sample.out b/tools/report-converter/tests/unit/analyzers/coccinelle_output_test_files/sample.out similarity index 100% rename from tools/report-converter/tests/unit/coccinelle_output_test_files/sample.out rename to tools/report-converter/tests/unit/analyzers/coccinelle_output_test_files/sample.out diff --git a/tools/report-converter/tests/unit/analyzers/cppcheck_output_test_files/divide_zero.expected.plist b/tools/report-converter/tests/unit/analyzers/cppcheck_output_test_files/divide_zero.expected.plist new file mode 100644 index 0000000000..f3e453c520 --- /dev/null +++ b/tools/report-converter/tests/unit/analyzers/cppcheck_output_test_files/divide_zero.expected.plist @@ -0,0 +1,88 @@ + + + + + diagnostics + + + category + error + check_name + zerodiv + description + Division by zero. 
+ issue_hash_content_of_line_in_context + 78ac45a6e5e219c543320c774a76b776 + location + + col + 13 + file + 0 + line + 17 + + path + + + depth + 0 + kind + event + location + + col + 13 + file + 0 + line + 17 + + message + Division by zero + range + + + col + 13 + file + 0 + line + 17 + + + col + 13 + file + 0 + line + 17 + + + + + type + cppcheck + + + files + + files/divide_zero.cpp + + metadata + + analyzer + + name + cppcheck + + generated_by + + name + report-converter + version + x.y.z + + + + diff --git a/tools/report-converter/tests/unit/cppcheck_output_test_files/files/divide_zero.cpp b/tools/report-converter/tests/unit/analyzers/cppcheck_output_test_files/files/divide_zero.cpp similarity index 100% rename from tools/report-converter/tests/unit/cppcheck_output_test_files/files/divide_zero.cpp rename to tools/report-converter/tests/unit/analyzers/cppcheck_output_test_files/files/divide_zero.cpp diff --git a/tools/report-converter/tests/unit/cppcheck_output_test_files/divide_zero.plist b/tools/report-converter/tests/unit/analyzers/cppcheck_output_test_files/out/divide_zero.plist similarity index 97% rename from tools/report-converter/tests/unit/cppcheck_output_test_files/divide_zero.plist rename to tools/report-converter/tests/unit/analyzers/cppcheck_output_test_files/out/divide_zero.plist index 006360e9b0..4006dfaec6 100644 --- a/tools/report-converter/tests/unit/cppcheck_output_test_files/divide_zero.plist +++ b/tools/report-converter/tests/unit/analyzers/cppcheck_output_test_files/out/divide_zero.plist @@ -6,7 +6,7 @@ cppcheck version 1.87 files - files/divide_zero.cpp + ../files/divide_zero.cpp diagnostics diff --git a/tools/report-converter/tests/unit/cpplint_output_test_files/Makefile b/tools/report-converter/tests/unit/analyzers/cpplint_output_test_files/Makefile similarity index 100% rename from tools/report-converter/tests/unit/cpplint_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/cpplint_output_test_files/Makefile 
diff --git a/tools/report-converter/tests/unit/cpplint_output_test_files/files/sample.cpp b/tools/report-converter/tests/unit/analyzers/cpplint_output_test_files/files/sample.cpp similarity index 100% rename from tools/report-converter/tests/unit/cpplint_output_test_files/files/sample.cpp rename to tools/report-converter/tests/unit/analyzers/cpplint_output_test_files/files/sample.cpp diff --git a/tools/report-converter/tests/unit/cpplint_output_test_files/sample.expected.plist b/tools/report-converter/tests/unit/analyzers/cpplint_output_test_files/sample.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/cpplint_output_test_files/sample.expected.plist rename to tools/report-converter/tests/unit/analyzers/cpplint_output_test_files/sample.expected.plist diff --git a/tools/report-converter/tests/unit/cpplint_output_test_files/sample.out b/tools/report-converter/tests/unit/analyzers/cpplint_output_test_files/sample.out similarity index 100% rename from tools/report-converter/tests/unit/cpplint_output_test_files/sample.out rename to tools/report-converter/tests/unit/analyzers/cpplint_output_test_files/sample.out diff --git a/tools/report-converter/tests/unit/eslint_output_test_files/Makefile b/tools/report-converter/tests/unit/analyzers/eslint_output_test_files/Makefile similarity index 100% rename from tools/report-converter/tests/unit/eslint_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/eslint_output_test_files/Makefile diff --git a/tools/report-converter/tests/unit/eslint_output_test_files/files/.eslintrc b/tools/report-converter/tests/unit/analyzers/eslint_output_test_files/files/.eslintrc similarity index 100% rename from tools/report-converter/tests/unit/eslint_output_test_files/files/.eslintrc rename to tools/report-converter/tests/unit/analyzers/eslint_output_test_files/files/.eslintrc diff --git a/tools/report-converter/tests/unit/eslint_output_test_files/files/index.js 
b/tools/report-converter/tests/unit/analyzers/eslint_output_test_files/files/index.js similarity index 100% rename from tools/report-converter/tests/unit/eslint_output_test_files/files/index.js rename to tools/report-converter/tests/unit/analyzers/eslint_output_test_files/files/index.js diff --git a/tools/report-converter/tests/unit/eslint_output_test_files/reports.expected.plist b/tools/report-converter/tests/unit/analyzers/eslint_output_test_files/reports.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/eslint_output_test_files/reports.expected.plist rename to tools/report-converter/tests/unit/analyzers/eslint_output_test_files/reports.expected.plist diff --git a/tools/report-converter/tests/unit/eslint_output_test_files/reports.json b/tools/report-converter/tests/unit/analyzers/eslint_output_test_files/reports.json similarity index 100% rename from tools/report-converter/tests/unit/eslint_output_test_files/reports.json rename to tools/report-converter/tests/unit/analyzers/eslint_output_test_files/reports.json diff --git a/tools/report-converter/tests/unit/golint_output_test_files/Makefile b/tools/report-converter/tests/unit/analyzers/golint_output_test_files/Makefile similarity index 100% rename from tools/report-converter/tests/unit/golint_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/golint_output_test_files/Makefile diff --git a/tools/report-converter/tests/unit/golint_output_test_files/files/simple.go b/tools/report-converter/tests/unit/analyzers/golint_output_test_files/files/simple.go similarity index 100% rename from tools/report-converter/tests/unit/golint_output_test_files/files/simple.go rename to tools/report-converter/tests/unit/analyzers/golint_output_test_files/files/simple.go diff --git a/tools/report-converter/tests/unit/golint_output_test_files/simple.expected.plist b/tools/report-converter/tests/unit/analyzers/golint_output_test_files/simple.expected.plist similarity index 
100% rename from tools/report-converter/tests/unit/golint_output_test_files/simple.expected.plist rename to tools/report-converter/tests/unit/analyzers/golint_output_test_files/simple.expected.plist diff --git a/tools/report-converter/tests/unit/golint_output_test_files/simple.out b/tools/report-converter/tests/unit/analyzers/golint_output_test_files/simple.out similarity index 100% rename from tools/report-converter/tests/unit/golint_output_test_files/simple.out rename to tools/report-converter/tests/unit/analyzers/golint_output_test_files/simple.out diff --git a/tools/report-converter/tests/unit/infer_output_test_files/NullDereference.java.plist b/tools/report-converter/tests/unit/analyzers/infer_output_test_files/NullDereference.java.plist similarity index 98% rename from tools/report-converter/tests/unit/infer_output_test_files/NullDereference.java.plist rename to tools/report-converter/tests/unit/analyzers/infer_output_test_files/NullDereference.java.plist index 63c745a718..32ba723b0e 100644 --- a/tools/report-converter/tests/unit/infer_output_test_files/NullDereference.java.plist +++ b/tools/report-converter/tests/unit/analyzers/infer_output_test_files/NullDereference.java.plist @@ -13,8 +13,6 @@ object `i` last assigned on line 7 could be null and is dereferenced at line 8. issue_hash_content_of_line_in_context eb365fd5c186d128877bdf77e61ad5bc - orig_issue_hash_content_of_line_in_context - 5bf747535b56f2690e3aa7c68198134d location col @@ -26,91 +24,6 @@ path - - depth - 0 - kind - event - location - - col - 0 - file - 0 - line - 6 - - message - start of procedure main(...) 
- - - depth - 0 - kind - event - location - - col - 0 - file - 0 - line - 7 - - message - - - - depth - 0 - kind - event - location - - col - 0 - file - 0 - line - 2 - - message - start of procedure foo() - - - depth - 0 - kind - event - location - - col - 0 - file - 0 - line - 3 - - message - return from a call to Integer NullDereference.foo() - - - depth - 0 - kind - event - location - - col - 0 - file - 0 - line - 8 - - message - - edges @@ -278,6 +191,91 @@ kind control + + depth + 0 + kind + event + location + + col + 0 + file + 0 + line + 6 + + message + start of procedure main(...) + + + depth + 0 + kind + event + location + + col + 0 + file + 0 + line + 7 + + message + + + + depth + 0 + kind + event + location + + col + 0 + file + 0 + line + 2 + + message + start of procedure foo() + + + depth + 0 + kind + event + location + + col + 0 + file + 0 + line + 3 + + message + return from a call to Integer NullDereference.foo() + + + depth + 0 + kind + event + location + + col + 0 + file + 0 + line + 8 + + message + + depth 0 diff --git a/tools/report-converter/tests/unit/infer_output_test_files/dead_store.cpp.plist b/tools/report-converter/tests/unit/analyzers/infer_output_test_files/dead_store.cpp.plist similarity index 94% rename from tools/report-converter/tests/unit/infer_output_test_files/dead_store.cpp.plist rename to tools/report-converter/tests/unit/analyzers/infer_output_test_files/dead_store.cpp.plist index 5213f522f1..42eafedfd9 100644 --- a/tools/report-converter/tests/unit/infer_output_test_files/dead_store.cpp.plist +++ b/tools/report-converter/tests/unit/analyzers/infer_output_test_files/dead_store.cpp.plist @@ -13,8 +13,6 @@ The value written to &i (type int) is never used. 
issue_hash_content_of_line_in_context 39f63b113fbd8c772fd3cd06bb2afcfa - orig_issue_hash_content_of_line_in_context - bb934aa5f0317f087aa2d897a1bee337 location col diff --git a/tools/report-converter/tests/unit/infer_output_test_files/files/NullDereference.java b/tools/report-converter/tests/unit/analyzers/infer_output_test_files/files/NullDereference.java similarity index 100% rename from tools/report-converter/tests/unit/infer_output_test_files/files/NullDereference.java rename to tools/report-converter/tests/unit/analyzers/infer_output_test_files/files/NullDereference.java diff --git a/tools/report-converter/tests/unit/infer_output_test_files/files/dead_store.cpp b/tools/report-converter/tests/unit/analyzers/infer_output_test_files/files/dead_store.cpp similarity index 100% rename from tools/report-converter/tests/unit/infer_output_test_files/files/dead_store.cpp rename to tools/report-converter/tests/unit/analyzers/infer_output_test_files/files/dead_store.cpp diff --git a/tools/report-converter/tests/unit/infer_output_test_files/infer-out-dead_store/report.json b/tools/report-converter/tests/unit/analyzers/infer_output_test_files/infer-out-dead_store/report.json similarity index 100% rename from tools/report-converter/tests/unit/infer_output_test_files/infer-out-dead_store/report.json rename to tools/report-converter/tests/unit/analyzers/infer_output_test_files/infer-out-dead_store/report.json diff --git a/tools/report-converter/tests/unit/infer_output_test_files/infer-out-null_dereference/report.json b/tools/report-converter/tests/unit/analyzers/infer_output_test_files/infer-out-null_dereference/report.json similarity index 100% rename from tools/report-converter/tests/unit/infer_output_test_files/infer-out-null_dereference/report.json rename to tools/report-converter/tests/unit/analyzers/infer_output_test_files/infer-out-null_dereference/report.json diff --git a/tools/report-converter/tests/unit/kerneldoc_output_test_files/files/sample.c 
b/tools/report-converter/tests/unit/analyzers/kerneldoc_output_test_files/files/sample.c similarity index 100% rename from tools/report-converter/tests/unit/kerneldoc_output_test_files/files/sample.c rename to tools/report-converter/tests/unit/analyzers/kerneldoc_output_test_files/files/sample.c diff --git a/tools/report-converter/tests/unit/kerneldoc_output_test_files/sample.expected.plist b/tools/report-converter/tests/unit/analyzers/kerneldoc_output_test_files/sample.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/kerneldoc_output_test_files/sample.expected.plist rename to tools/report-converter/tests/unit/analyzers/kerneldoc_output_test_files/sample.expected.plist diff --git a/tools/report-converter/tests/unit/kerneldoc_output_test_files/sample.out b/tools/report-converter/tests/unit/analyzers/kerneldoc_output_test_files/sample.out similarity index 100% rename from tools/report-converter/tests/unit/kerneldoc_output_test_files/sample.out rename to tools/report-converter/tests/unit/analyzers/kerneldoc_output_test_files/sample.out diff --git a/tools/report-converter/tests/unit/lsan_output_test_files/files/lsan.c b/tools/report-converter/tests/unit/analyzers/lsan_output_test_files/files/lsan.c similarity index 100% rename from tools/report-converter/tests/unit/lsan_output_test_files/files/lsan.c rename to tools/report-converter/tests/unit/analyzers/lsan_output_test_files/files/lsan.c diff --git a/tools/report-converter/tests/unit/lsan_output_test_files/lsan.out b/tools/report-converter/tests/unit/analyzers/lsan_output_test_files/lsan.out similarity index 100% rename from tools/report-converter/tests/unit/lsan_output_test_files/lsan.out rename to tools/report-converter/tests/unit/analyzers/lsan_output_test_files/lsan.out diff --git a/tools/report-converter/tests/unit/lsan_output_test_files/lsan.plist b/tools/report-converter/tests/unit/analyzers/lsan_output_test_files/lsan.plist similarity index 96% rename from 
tools/report-converter/tests/unit/lsan_output_test_files/lsan.plist rename to tools/report-converter/tests/unit/analyzers/lsan_output_test_files/lsan.plist index 90ff679049..3ec3a6ba09 100644 --- a/tools/report-converter/tests/unit/lsan_output_test_files/lsan.plist +++ b/tools/report-converter/tests/unit/analyzers/lsan_output_test_files/lsan.plist @@ -25,10 +25,6 @@ notes - depth - 0 - kind - note location col diff --git a/tools/report-converter/tests/unit/mdl_output_test_files/Makefile b/tools/report-converter/tests/unit/analyzers/mdl_output_test_files/Makefile similarity index 100% rename from tools/report-converter/tests/unit/mdl_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/mdl_output_test_files/Makefile diff --git a/tools/report-converter/tests/unit/mdl_output_test_files/files/readme.md b/tools/report-converter/tests/unit/analyzers/mdl_output_test_files/files/readme.md similarity index 100% rename from tools/report-converter/tests/unit/mdl_output_test_files/files/readme.md rename to tools/report-converter/tests/unit/analyzers/mdl_output_test_files/files/readme.md diff --git a/tools/report-converter/tests/unit/mdl_output_test_files/readme.expected.plist b/tools/report-converter/tests/unit/analyzers/mdl_output_test_files/readme.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/mdl_output_test_files/readme.expected.plist rename to tools/report-converter/tests/unit/analyzers/mdl_output_test_files/readme.expected.plist diff --git a/tools/report-converter/tests/unit/mdl_output_test_files/readme.out b/tools/report-converter/tests/unit/analyzers/mdl_output_test_files/readme.out similarity index 100% rename from tools/report-converter/tests/unit/mdl_output_test_files/readme.out rename to tools/report-converter/tests/unit/analyzers/mdl_output_test_files/readme.out diff --git a/tools/report-converter/tests/unit/msan_output_test_files/files/msan.cpp 
b/tools/report-converter/tests/unit/analyzers/msan_output_test_files/files/msan.cpp similarity index 100% rename from tools/report-converter/tests/unit/msan_output_test_files/files/msan.cpp rename to tools/report-converter/tests/unit/analyzers/msan_output_test_files/files/msan.cpp diff --git a/tools/report-converter/tests/unit/msan_output_test_files/msan.out b/tools/report-converter/tests/unit/analyzers/msan_output_test_files/msan.out similarity index 100% rename from tools/report-converter/tests/unit/msan_output_test_files/msan.out rename to tools/report-converter/tests/unit/analyzers/msan_output_test_files/msan.out diff --git a/tools/report-converter/tests/unit/msan_output_test_files/msan.plist b/tools/report-converter/tests/unit/analyzers/msan_output_test_files/msan.plist similarity index 96% rename from tools/report-converter/tests/unit/msan_output_test_files/msan.plist rename to tools/report-converter/tests/unit/analyzers/msan_output_test_files/msan.plist index 2f1d334dc2..a3ed2f451c 100644 --- a/tools/report-converter/tests/unit/msan_output_test_files/msan.plist +++ b/tools/report-converter/tests/unit/analyzers/msan_output_test_files/msan.plist @@ -25,10 +25,6 @@ notes - depth - 0 - kind - note location col diff --git a/tools/report-converter/tests/unit/pyflakes_output_test_files/Makefile b/tools/report-converter/tests/unit/analyzers/pyflakes_output_test_files/Makefile similarity index 100% rename from tools/report-converter/tests/unit/pyflakes_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/pyflakes_output_test_files/Makefile diff --git a/tools/report-converter/tests/unit/pyflakes_output_test_files/files/simple.py b/tools/report-converter/tests/unit/analyzers/pyflakes_output_test_files/files/simple.py similarity index 100% rename from tools/report-converter/tests/unit/pyflakes_output_test_files/files/simple.py rename to tools/report-converter/tests/unit/analyzers/pyflakes_output_test_files/files/simple.py diff --git 
a/tools/report-converter/tests/unit/pyflakes_output_test_files/simple.expected.plist b/tools/report-converter/tests/unit/analyzers/pyflakes_output_test_files/simple.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/pyflakes_output_test_files/simple.expected.plist rename to tools/report-converter/tests/unit/analyzers/pyflakes_output_test_files/simple.expected.plist diff --git a/tools/report-converter/tests/unit/pyflakes_output_test_files/simple.out b/tools/report-converter/tests/unit/analyzers/pyflakes_output_test_files/simple.out similarity index 100% rename from tools/report-converter/tests/unit/pyflakes_output_test_files/simple.out rename to tools/report-converter/tests/unit/analyzers/pyflakes_output_test_files/simple.out diff --git a/tools/report-converter/tests/unit/pylint_output_test_files/Makefile b/tools/report-converter/tests/unit/analyzers/pylint_output_test_files/Makefile similarity index 100% rename from tools/report-converter/tests/unit/pylint_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/pylint_output_test_files/Makefile diff --git a/tools/report-converter/tests/unit/pylint_output_test_files/files/simple.py b/tools/report-converter/tests/unit/analyzers/pylint_output_test_files/files/simple.py similarity index 100% rename from tools/report-converter/tests/unit/pylint_output_test_files/files/simple.py rename to tools/report-converter/tests/unit/analyzers/pylint_output_test_files/files/simple.py diff --git a/tools/report-converter/tests/unit/pylint_output_test_files/simple.expected.plist b/tools/report-converter/tests/unit/analyzers/pylint_output_test_files/simple.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/pylint_output_test_files/simple.expected.plist rename to tools/report-converter/tests/unit/analyzers/pylint_output_test_files/simple.expected.plist diff --git a/tools/report-converter/tests/unit/pylint_output_test_files/simple.json 
b/tools/report-converter/tests/unit/analyzers/pylint_output_test_files/simple.json similarity index 100% rename from tools/report-converter/tests/unit/pylint_output_test_files/simple.json rename to tools/report-converter/tests/unit/analyzers/pylint_output_test_files/simple.json diff --git a/tools/report-converter/tests/unit/smatch_output_test_files/files/sample.c b/tools/report-converter/tests/unit/analyzers/smatch_output_test_files/files/sample.c similarity index 100% rename from tools/report-converter/tests/unit/smatch_output_test_files/files/sample.c rename to tools/report-converter/tests/unit/analyzers/smatch_output_test_files/files/sample.c diff --git a/tools/report-converter/tests/unit/smatch_output_test_files/sample.expected.plist b/tools/report-converter/tests/unit/analyzers/smatch_output_test_files/sample.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/smatch_output_test_files/sample.expected.plist rename to tools/report-converter/tests/unit/analyzers/smatch_output_test_files/sample.expected.plist diff --git a/tools/report-converter/tests/unit/smatch_output_test_files/sample.out b/tools/report-converter/tests/unit/analyzers/smatch_output_test_files/sample.out similarity index 100% rename from tools/report-converter/tests/unit/smatch_output_test_files/sample.out rename to tools/report-converter/tests/unit/analyzers/smatch_output_test_files/sample.out diff --git a/tools/report-converter/tests/unit/sparse_output_test_files/Makefile b/tools/report-converter/tests/unit/analyzers/sparse_output_test_files/Makefile similarity index 100% rename from tools/report-converter/tests/unit/sparse_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/sparse_output_test_files/Makefile diff --git a/tools/report-converter/tests/unit/sparse_output_test_files/files/sample.c b/tools/report-converter/tests/unit/analyzers/sparse_output_test_files/files/sample.c similarity index 100% rename from 
tools/report-converter/tests/unit/sparse_output_test_files/files/sample.c rename to tools/report-converter/tests/unit/analyzers/sparse_output_test_files/files/sample.c diff --git a/tools/report-converter/tests/unit/sparse_output_test_files/files/sample.h b/tools/report-converter/tests/unit/analyzers/sparse_output_test_files/files/sample.h similarity index 100% rename from tools/report-converter/tests/unit/sparse_output_test_files/files/sample.h rename to tools/report-converter/tests/unit/analyzers/sparse_output_test_files/files/sample.h diff --git a/tools/report-converter/tests/unit/sparse_output_test_files/sample.c.expected.plist b/tools/report-converter/tests/unit/analyzers/sparse_output_test_files/sample.c.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/sparse_output_test_files/sample.c.expected.plist rename to tools/report-converter/tests/unit/analyzers/sparse_output_test_files/sample.c.expected.plist diff --git a/tools/report-converter/tests/unit/sparse_output_test_files/sample.h.expected.plist b/tools/report-converter/tests/unit/analyzers/sparse_output_test_files/sample.h.expected.plist similarity index 69% rename from tools/report-converter/tests/unit/sparse_output_test_files/sample.h.expected.plist rename to tools/report-converter/tests/unit/analyzers/sparse_output_test_files/sample.h.expected.plist index 2418c7e794..5933d086d2 100644 --- a/tools/report-converter/tests/unit/sparse_output_test_files/sample.h.expected.plist +++ b/tools/report-converter/tests/unit/analyzers/sparse_output_test_files/sample.h.expected.plist @@ -58,53 +58,6 @@ message got struct spinlock [noderef] __rcu * - - edges - - - end - - - col - 40 - file - 0 - line - 3 - - - col - 40 - file - 0 - line - 3 - - - start - - - col - 40 - file - 0 - line - 3 - - - col - 40 - file - 0 - line - 3 - - - - - kind - control - depth 0 diff --git a/tools/report-converter/tests/unit/sparse_output_test_files/sample.out 
b/tools/report-converter/tests/unit/analyzers/sparse_output_test_files/sample.out similarity index 100% rename from tools/report-converter/tests/unit/sparse_output_test_files/sample.out rename to tools/report-converter/tests/unit/analyzers/sparse_output_test_files/sample.out diff --git a/tools/report-converter/tests/unit/sphinx_output_test_files/files/sample.rst b/tools/report-converter/tests/unit/analyzers/sphinx_output_test_files/files/sample.rst similarity index 100% rename from tools/report-converter/tests/unit/sphinx_output_test_files/files/sample.rst rename to tools/report-converter/tests/unit/analyzers/sphinx_output_test_files/files/sample.rst diff --git a/tools/report-converter/tests/unit/sphinx_output_test_files/sample.expected.plist b/tools/report-converter/tests/unit/analyzers/sphinx_output_test_files/sample.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/sphinx_output_test_files/sample.expected.plist rename to tools/report-converter/tests/unit/analyzers/sphinx_output_test_files/sample.expected.plist diff --git a/tools/report-converter/tests/unit/sphinx_output_test_files/sample.out b/tools/report-converter/tests/unit/analyzers/sphinx_output_test_files/sample.out similarity index 100% rename from tools/report-converter/tests/unit/sphinx_output_test_files/sample.out rename to tools/report-converter/tests/unit/analyzers/sphinx_output_test_files/sample.out diff --git a/tools/report-converter/tests/unit/spotbugs_output_test_files/Makefile b/tools/report-converter/tests/unit/analyzers/spotbugs_output_test_files/Makefile similarity index 100% rename from tools/report-converter/tests/unit/spotbugs_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/spotbugs_output_test_files/Makefile diff --git a/tools/report-converter/tests/unit/spotbugs_output_test_files/assign.plist b/tools/report-converter/tests/unit/analyzers/spotbugs_output_test_files/assign.plist similarity index 94% rename from 
tools/report-converter/tests/unit/spotbugs_output_test_files/assign.plist rename to tools/report-converter/tests/unit/analyzers/spotbugs_output_test_files/assign.plist index d18b9b9a51..77f03d6a64 100644 --- a/tools/report-converter/tests/unit/spotbugs_output_test_files/assign.plist +++ b/tools/report-converter/tests/unit/analyzers/spotbugs_output_test_files/assign.plist @@ -22,44 +22,8 @@ line 6 - orig_issue_hash_content_of_line_in_context - 31d25c93ca824a832958c47cb869f2dd path - - depth - 0 - kind - event - location - - col - 0 - file - 0 - line - 1 - - message - In class Assign - - - depth - 0 - kind - event - location - - col - 0 - file - 0 - line - 6 - - message - In method Assign.main(String[]) - edges @@ -119,37 +83,11 @@ file 0 line - 6 + 1 message - Dead store to $L1 in Assign.main(String[]) + In class Assign - - type - spotbugs - - - category - unknown - check_name - DLS_DEAD_LOCAL_STORE - description - Dead store to $L1 in Assign.main(String[]) - issue_hash_content_of_line_in_context - 85a884ed377cf0e258abe7cf9acbbd81 - location - - col - 0 - file - 0 - line - 6 - - orig_issue_hash_content_of_line_in_context - 31d25c93ca824a832958c47cb869f2dd - path - depth 0 @@ -162,10 +100,10 @@ file 0 line - 1 + 6 message - In class Assign + In method Assign.main(String[]) depth @@ -182,8 +120,32 @@ 6 message - In method Assign.main(String[]) + Dead store to $L1 in Assign.main(String[]) + + type + spotbugs + + + category + unknown + check_name + DLS_DEAD_LOCAL_STORE + description + Dead store to $L1 in Assign.main(String[]) + issue_hash_content_of_line_in_context + 85a884ed377cf0e258abe7cf9acbbd81 + location + + col + 0 + file + 0 + line + 6 + + path + edges @@ -231,6 +193,40 @@ kind control + + depth + 0 + kind + event + location + + col + 0 + file + 0 + line + 1 + + message + In class Assign + + + depth + 0 + kind + event + location + + col + 0 + file + 0 + line + 6 + + message + In method Assign.main(String[]) + depth 0 @@ -270,8 +266,6 @@ line 1 - 
orig_issue_hash_content_of_line_in_context - b83078cc9ee341727b6080508d3ea647 path @@ -330,44 +324,8 @@ line 6 - orig_issue_hash_content_of_line_in_context - e1cf1dc4a9b1c3d79dd9c35d9c7ce70f path - - depth - 0 - kind - event - location - - col - 0 - file - 0 - line - 1 - - message - In class Assign - - - depth - 0 - kind - event - location - - col - 0 - file - 0 - line - 6 - - message - In method Assign.main(String[]) - edges @@ -427,37 +385,11 @@ file 0 line - 6 + 1 message - Double assignment of $L1 in Assign.main(String[]) + In class Assign - - type - spotbugs - - - category - unknown - check_name - UC_USELESS_VOID_METHOD - description - Method Assign.main(String[]) seems to be useless - issue_hash_content_of_line_in_context - 59a6fcef60c5ebd9d0fcfbca8983f932 - location - - col - 0 - file - 0 - line - 7 - - orig_issue_hash_content_of_line_in_context - 70d8a57a28b1c7b40c2e2f8d20e2524 - path - depth 0 @@ -470,10 +402,10 @@ file 0 line - 1 + 6 message - In class Assign + In method Assign.main(String[]) depth @@ -490,8 +422,32 @@ 6 message - In method Assign.main(String[]) + Double assignment of $L1 in Assign.main(String[]) + + type + spotbugs + + + category + unknown + check_name + UC_USELESS_VOID_METHOD + description + Method Assign.main(String[]) seems to be useless + issue_hash_content_of_line_in_context + 59a6fcef60c5ebd9d0fcfbca8983f932 + location + + col + 0 + file + 0 + line + 7 + + path + edges @@ -535,10 +491,84 @@ + + end + + + col + 0 + file + 0 + line + 7 + + + col + 0 + file + 0 + line + 7 + + + start + + + col + 0 + file + 0 + line + 6 + + + col + 0 + file + 0 + line + 6 + + + kind control + + depth + 0 + kind + event + location + + col + 0 + file + 0 + line + 1 + + message + In class Assign + + + depth + 0 + kind + event + location + + col + 0 + file + 0 + line + 6 + + message + In method Assign.main(String[]) + depth 0 @@ -578,8 +608,6 @@ line 1 - orig_issue_hash_content_of_line_in_context - ee93958ba842675fc1c80739679e2031 path diff --git 
a/tools/report-converter/tests/unit/spotbugs_output_test_files/assign.xml b/tools/report-converter/tests/unit/analyzers/spotbugs_output_test_files/assign.xml similarity index 100% rename from tools/report-converter/tests/unit/spotbugs_output_test_files/assign.xml rename to tools/report-converter/tests/unit/analyzers/spotbugs_output_test_files/assign.xml diff --git a/tools/report-converter/tests/unit/spotbugs_output_test_files/files/Assign.java b/tools/report-converter/tests/unit/analyzers/spotbugs_output_test_files/files/Assign.java similarity index 100% rename from tools/report-converter/tests/unit/spotbugs_output_test_files/files/Assign.java rename to tools/report-converter/tests/unit/analyzers/spotbugs_output_test_files/files/Assign.java diff --git a/tools/report-converter/tests/unit/analyzers/test_asan_parser.py b/tools/report-converter/tests/unit/analyzers/test_asan_parser.py new file mode 100644 index 0000000000..d1450f3531 --- /dev/null +++ b/tools/report-converter/tests/unit/analyzers/test_asan_parser.py @@ -0,0 +1,70 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +""" +This module tests the correctness of the OutputParser and PListConverter, which +used in sequence transform AddressSanitizer output to a plist file. 
+""" + +import os +import plistlib +import shutil +import tempfile +import unittest + +from codechecker_report_converter.analyzers.sanitizers.address import \ + analyzer_result +from codechecker_report_converter.report.parser import plist + +OLD_PWD = None + + +def setup_module(): + """Setup the test tidy reprs for the test classes in the module.""" + global OLD_PWD + OLD_PWD = os.getcwd() + os.chdir(os.path.join(os.path.dirname(__file__), 'asan_output_test_files')) + + +def teardown_module(): + """Restore environment after tests have ran.""" + global OLD_PWD + os.chdir(OLD_PWD) + + +class ASANAnalyzerResultTestCase(unittest.TestCase): + """ Test the output of the ASANAnalyzerResult. """ + + def setUp(self): + """ Setup the test. """ + self.analyzer_result = analyzer_result.AnalyzerResult() + self.cc_result_dir = tempfile.mkdtemp() + + def tearDown(self): + """ Clean temporary directory. """ + shutil.rmtree(self.cc_result_dir) + + def test_asan(self): + """ Test for the asan.plist file. """ + self.analyzer_result.transform( + 'asan.out', self.cc_result_dir, plist.EXTENSION) + + with open('asan.plist', mode='rb') as pfile: + exp = plistlib.load(pfile) + + plist_file = os.path.join(self.cc_result_dir, 'asan.cpp_asan.plist') + with open(plist_file, mode='rb') as pfile: + res = plistlib.load(pfile) + + # Use relative path for this test. 
+ res['files'][0] = 'files/asan.cpp' + + self.assertTrue(res['metadata']['generated_by']['version']) + res['metadata']['generated_by']['version'] = "x.y.z" + + self.assertEqual(res, exp) diff --git a/tools/report-converter/tests/unit/analyzers/test_clang_tidy_parser.py b/tools/report-converter/tests/unit/analyzers/test_clang_tidy_parser.py new file mode 100644 index 0000000000..87c1bb6cde --- /dev/null +++ b/tools/report-converter/tests/unit/analyzers/test_clang_tidy_parser.py @@ -0,0 +1,104 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +""" +This module tests the correctness of the OutputParser and PListConverter, which +used in sequence transform a Clang Tidy output file to a plist file. +""" + + +import os +import plistlib +import shutil +import tempfile +import unittest + +from codechecker_report_converter.analyzers.clang_tidy import analyzer_result +from codechecker_report_converter.report.parser import plist + + +OLD_PWD = None + + +def setup_module(): + """Setup the test tidy reprs for the test classes in the module.""" + global OLD_PWD + OLD_PWD = os.getcwd() + os.chdir(os.path.join(os.path.dirname(__file__), 'tidy_output_test_files')) + + +def teardown_module(): + """Restore environment after tests have ran.""" + global OLD_PWD + os.chdir(OLD_PWD) + + +class ClangTidyAnalyzerResultTestCase(unittest.TestCase): + """ Test the output of the ClangTidyAnalyzerResult. """ + + def setUp(self): + """ Setup the test. """ + self.analyzer_result = analyzer_result.AnalyzerResult() + self.cc_result_dir = tempfile.mkdtemp() + + def tearDown(self): + """ Clean temporary directory. 
""" + shutil.rmtree(self.cc_result_dir) + + def __check_analyzer_result(self, analyzer_result, analyzer_result_plist, + source_files, expected_plist): + """ Check the result of the analyzer transformation. """ + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) + + plist_file = os.path.join(self.cc_result_dir, analyzer_result_plist) + with open(plist_file, mode='rb') as pfile: + res = plistlib.load(pfile) + + # Use relative path for this test. + res['files'] = source_files + + with open(expected_plist, mode='rb') as pfile: + exp = plistlib.load(pfile) + + self.assertTrue(res['metadata']['generated_by']['version']) + res['metadata']['generated_by']['version'] = "x.y.z" + + self.assertEqual(res, exp) + + def test_empty1(self): + """ Test for empty Messages. """ + ret = self.analyzer_result.transform( + 'empty1.out', self.cc_result_dir, plist.EXTENSION) + self.assertFalse(ret) + + def test_empty2(self): + """ Test for empty Messages with multiple line. """ + ret = self.analyzer_result.transform( + 'empty2.out', self.cc_result_dir, plist.EXTENSION) + self.assertFalse(ret) + + def test_tidy1(self): + """ Test for the tidy1.plist file. """ + self.__check_analyzer_result('tidy1.out', 'test.cpp_clang-tidy.plist', + ['files/test.cpp'], 'tidy1.plist') + + def test_tidy2(self): + """ Test for the tidy2.plist file. """ + self.__check_analyzer_result('tidy2.out', 'test2.cpp_clang-tidy.plist', + ['files/test2.cpp'], 'tidy2.plist') + + def test_tidy3(self): + """ Test for the tidy3.plist file. 
""" + self.__check_analyzer_result('tidy3.out', 'test3.cpp_clang-tidy.plist', + ['files/test3.cpp'], + 'tidy3_cpp.plist') + + self.__check_analyzer_result('tidy3.out', 'test3.hh_clang-tidy.plist', + ['files/test3.cpp', 'files/test3.hh'], + 'tidy3_hh.plist') diff --git a/tools/report-converter/tests/unit/test_coccinelle_parser.py b/tools/report-converter/tests/unit/analyzers/test_coccinelle_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_coccinelle_parser.py rename to tools/report-converter/tests/unit/analyzers/test_coccinelle_parser.py index 280735deae..f873985d64 100644 --- a/tools/report-converter/tests/unit/test_coccinelle_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_coccinelle_parser.py @@ -19,8 +19,8 @@ import unittest -from codechecker_report_converter.coccinelle.analyzer_result import \ - CoccinelleAnalyzerResult +from codechecker_report_converter.analyzers.coccinelle import analyzer_result +from codechecker_report_converter.report.parser import plist class CoccinelleAnalyzerResultTestCase(unittest.TestCase): @@ -28,7 +28,7 @@ class CoccinelleAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = CoccinelleAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'coccinelle_output_test_files') @@ -42,22 +42,23 @@ def test_no_cocci_output_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'sample.c') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming a directory. 
""" analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single output file. """ analyzer_result = os.path.join(self.test_files, 'sample.out') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'sample.c_coccinelle.plist') diff --git a/tools/report-converter/tests/unit/test_cppcheck_parser.py b/tools/report-converter/tests/unit/analyzers/test_cppcheck_parser.py similarity index 64% rename from tools/report-converter/tests/unit/test_cppcheck_parser.py rename to tools/report-converter/tests/unit/analyzers/test_cppcheck_parser.py index 859e0981f5..aec356654e 100644 --- a/tools/report-converter/tests/unit/test_cppcheck_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_cppcheck_parser.py @@ -18,8 +18,25 @@ import tempfile import unittest -from codechecker_report_converter.cppcheck.analyzer_result import \ - CppcheckAnalyzerResult +from codechecker_report_converter.analyzers.cppcheck import analyzer_result +from codechecker_report_converter.report.parser import plist + + +OLD_PWD = None + + +def setup_module(): + """ Setup the test. """ + global OLD_PWD + OLD_PWD = os.getcwd() + os.chdir(os.path.join(os.path.dirname(__file__), + 'cppcheck_output_test_files')) + + +def teardown_module(): + """ Restore environment after tests have ran. """ + global OLD_PWD + os.chdir(OLD_PWD) class CppcheckAnalyzerResultTestCase(unittest.TestCase): @@ -27,7 +44,7 @@ class CppcheckAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. 
""" - self.analyzer_result = CppcheckAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'cppcheck_output_test_files') @@ -41,28 +58,33 @@ def test_no_plist_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'divide_zero.cpp') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_no_plist_dir(self): """ Test transforming single plist file. """ analyzer_result = os.path.join(self.test_files, 'non_existing') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single plist file. """ - analyzer_result = os.path.join(self.test_files, 'divide_zero.plist') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + analyzer_result = os.path.join( + self.test_files, 'out', 'divide_zero.plist') + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, - 'divide_zero_cppcheck.plist') + 'divide_zero.cpp_cppcheck.plist') with open(plist_file, mode='rb') as pfile: res = plistlib.load(pfile) + # Use relative path for this test. + res['files'][0] = 'files/divide_zero.cpp' + self.assertTrue(res['metadata']['generated_by']['version']) res['metadata']['generated_by']['version'] = "x.y.z" @@ -75,14 +97,18 @@ def test_transform_single_file(self): def test_transform_directory(self): """ Test transforming a directory of plist files. 
""" - analyzer_result = os.path.join(self.test_files) - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + analyzer_result = os.path.join(self.test_files, 'out') + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, - 'divide_zero_cppcheck.plist') + 'divide_zero.cpp_cppcheck.plist') with open(plist_file, mode='rb') as pfile: res = plistlib.load(pfile) + # Use relative path for this test. + res['files'][0] = 'files/divide_zero.cpp' + self.assertTrue(res['metadata']['generated_by']['version']) res['metadata']['generated_by']['version'] = "x.y.z" diff --git a/tools/report-converter/tests/unit/test_cpplint_parser.py b/tools/report-converter/tests/unit/analyzers/test_cpplint_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_cpplint_parser.py rename to tools/report-converter/tests/unit/analyzers/test_cpplint_parser.py index 12cdb17c46..653b645ea9 100644 --- a/tools/report-converter/tests/unit/test_cpplint_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_cpplint_parser.py @@ -18,8 +18,8 @@ import tempfile import unittest -from codechecker_report_converter.cpplint.analyzer_result import \ - CpplintAnalyzerResult +from codechecker_report_converter.analyzers.cpplint import analyzer_result +from codechecker_report_converter.report.parser import plist class CpplintAnalyzerResultTestCase(unittest.TestCase): @@ -27,7 +27,7 @@ class CpplintAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. 
""" - self.analyzer_result = CpplintAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'cpplint_output_test_files') @@ -41,22 +41,23 @@ def test_no_cpplint_output_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'sample.cpp') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming a directory. """ analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single output file. """ analyzer_result = os.path.join(self.test_files, 'sample.out') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'sample.cpp_cpplint.plist') diff --git a/tools/report-converter/tests/unit/test_eslint_parser.py b/tools/report-converter/tests/unit/analyzers/test_eslint_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_eslint_parser.py rename to tools/report-converter/tests/unit/analyzers/test_eslint_parser.py index b41732b78d..e7ee8ad917 100644 --- a/tools/report-converter/tests/unit/test_eslint_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_eslint_parser.py @@ -18,8 +18,8 @@ import tempfile import unittest -from codechecker_report_converter.eslint.analyzer_result import \ - ESLintAnalyzerResult +from codechecker_report_converter.analyzers.eslint import analyzer_result +from codechecker_report_converter.report.parser import 
plist class ESLintAnalyzerResultTestCase(unittest.TestCase): @@ -27,7 +27,7 @@ class ESLintAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = ESLintAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'eslint_output_test_files') @@ -41,22 +41,23 @@ def test_no_json_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'index.js') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming single plist file. """ analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single plist file. 
""" analyzer_result = os.path.join(self.test_files, 'reports.json') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'index.js_eslint.plist') diff --git a/tools/report-converter/tests/unit/test_golint_parser.py b/tools/report-converter/tests/unit/analyzers/test_golint_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_golint_parser.py rename to tools/report-converter/tests/unit/analyzers/test_golint_parser.py index 480a5ef658..d2ce163531 100644 --- a/tools/report-converter/tests/unit/test_golint_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_golint_parser.py @@ -18,8 +18,8 @@ import tempfile import unittest -from codechecker_report_converter.golint.analyzer_result import \ - GolintAnalyzerResult +from codechecker_report_converter.analyzers.golint import analyzer_result +from codechecker_report_converter.report.parser import plist class GolintAnalyzerResultTestCase(unittest.TestCase): @@ -27,7 +27,7 @@ class GolintAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = GolintAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'golint_output_test_files') @@ -41,22 +41,23 @@ def test_no_go_output_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'simple.go') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming single plist file. 
""" analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single plist file. """ analyzer_result = os.path.join(self.test_files, 'simple.out') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'simple.go_golint.plist') diff --git a/tools/report-converter/tests/unit/test_infer_parser.py b/tools/report-converter/tests/unit/analyzers/test_infer_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_infer_parser.py rename to tools/report-converter/tests/unit/analyzers/test_infer_parser.py index 899c1417bb..d0d7c70d4a 100644 --- a/tools/report-converter/tests/unit/test_infer_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_infer_parser.py @@ -17,8 +17,8 @@ import tempfile import unittest -from codechecker_report_converter.infer.analyzer_result import \ - InferAnalyzerResult +from codechecker_report_converter.analyzers.infer import analyzer_result +from codechecker_report_converter.report.parser import plist OLD_PWD = None @@ -43,7 +43,7 @@ class InferAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = InferAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'infer_output_test_files') @@ -56,8 +56,8 @@ def test_parsing_cpp_res_dir(self): """ Test transforming infer output directory (C++). 
""" analyzer_result = os.path.join(self.test_files, 'infer-out-dead_store') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertTrue(ret) plist_file = os.path.join(self.cc_result_dir, @@ -68,6 +68,9 @@ def test_parsing_cpp_res_dir(self): self.assertTrue(res['metadata']['generated_by']['version']) res['metadata']['generated_by']['version'] = "x.y.z" + # Use relative path for this test. + res['files'][0] = os.path.join('files', 'dead_store.cpp') + plist_file = os.path.join(self.test_files, 'dead_store.cpp.plist') with open(plist_file, mode='rb') as pfile: @@ -81,8 +84,8 @@ def test_transform_single_cpp_res_file(self): 'infer-out-dead_store', 'report.json') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertTrue(ret) plist_file = os.path.join(self.cc_result_dir, @@ -93,6 +96,9 @@ def test_transform_single_cpp_res_file(self): self.assertTrue(res['metadata']['generated_by']['version']) res['metadata']['generated_by']['version'] = "x.y.z" + # Use relative path for this test. 
+ res['files'][0] = os.path.join('files', 'dead_store.cpp') + plist_file = os.path.join(self.test_files, 'dead_store.cpp.plist') with open(plist_file, mode='rb') as pfile: @@ -105,8 +111,8 @@ def test_parsing_java_res_dir(self): analyzer_result = os.path.join(self.test_files, 'infer-out-null_dereference') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertTrue(ret) plist_file = os.path.join(self.cc_result_dir, @@ -118,6 +124,9 @@ def test_parsing_java_res_dir(self): self.assertTrue(res['metadata']['generated_by']['version']) res['metadata']['generated_by']['version'] = "x.y.z" + # Use relative path for this test. + res['files'][0] = os.path.join('files', 'NullDereference.java') + plist_file = os.path.join(self.test_files, 'NullDereference.java.plist') with open(plist_file, mode='rb') as pfile: @@ -131,8 +140,8 @@ def test_transform_single_java_res_file(self): 'infer-out-null_dereference', 'report.json') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertTrue(ret) plist_file = os.path.join(self.cc_result_dir, @@ -144,6 +153,9 @@ def test_transform_single_java_res_file(self): self.assertTrue(res['metadata']['generated_by']['version']) res['metadata']['generated_by']['version'] = "x.y.z" + # Use relative path for this test. 
+ res['files'][0] = os.path.join('files', 'NullDereference.java') + plist_file = os.path.join(self.test_files, 'NullDereference.java.plist') with open(plist_file, mode='rb') as pfile: diff --git a/tools/report-converter/tests/unit/test_kerneldoc_parser.py b/tools/report-converter/tests/unit/analyzers/test_kerneldoc_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_kerneldoc_parser.py rename to tools/report-converter/tests/unit/analyzers/test_kerneldoc_parser.py index 4c67a36096..09014ca411 100644 --- a/tools/report-converter/tests/unit/test_kerneldoc_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_kerneldoc_parser.py @@ -19,8 +19,8 @@ import unittest -from codechecker_report_converter.kerneldoc.analyzer_result import \ - KernelDocAnalyzerResult +from codechecker_report_converter.analyzers.kerneldoc import analyzer_result +from codechecker_report_converter.report.parser import plist class KernelDocAnalyzerResultTestCase(unittest.TestCase): @@ -28,7 +28,7 @@ class KernelDocAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = KernelDocAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'kerneldoc_output_test_files') @@ -42,22 +42,23 @@ def test_no_kerneldoc_output_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'sample.c') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming a directory. 
""" analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single output file. """ analyzer_result = os.path.join(self.test_files, 'sample.out') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'sample.c_kernel-doc.plist') diff --git a/tools/report-converter/tests/unit/analyzers/test_lsan_parser.py b/tools/report-converter/tests/unit/analyzers/test_lsan_parser.py new file mode 100644 index 0000000000..fe5e8e214a --- /dev/null +++ b/tools/report-converter/tests/unit/analyzers/test_lsan_parser.py @@ -0,0 +1,73 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +""" +This module tests the correctness of the OutputParser and PListConverter, which +used in sequence transform LeakSanitizer output to a plist file. 
+""" + + +import os +import plistlib +import shutil +import tempfile +import unittest + +from codechecker_report_converter.analyzers.sanitizers.leak import \ + analyzer_result +from codechecker_report_converter.report.parser import plist + + +OLD_PWD = None + + +def setup_module(): + """Setup the test tidy reprs for the test classes in the module.""" + global OLD_PWD + OLD_PWD = os.getcwd() + os.chdir(os.path.join(os.path.dirname(__file__), + 'lsan_output_test_files')) + + +def teardown_module(): + """Restore environment after tests have ran.""" + global OLD_PWD + os.chdir(OLD_PWD) + + +class LSANPListConverterTestCase(unittest.TestCase): + """ Test the output of the LSANAnalyzerResult. """ + + def setUp(self): + """ Setup the test. """ + self.analyzer_result = analyzer_result.AnalyzerResult() + self.cc_result_dir = tempfile.mkdtemp() + + def tearDown(self): + """ Clean temporary directory. """ + shutil.rmtree(self.cc_result_dir) + + def test_san(self): + """ Test for the lsan.plist file. """ + self.analyzer_result.transform( + 'lsan.out', self.cc_result_dir, plist.EXTENSION) + + with open('lsan.plist', mode='rb') as pfile: + exp = plistlib.load(pfile) + + plist_file = os.path.join(self.cc_result_dir, 'lsan.c_lsan.plist') + with open(plist_file, mode='rb') as pfile: + res = plistlib.load(pfile) + + # Use relative path for this test. 
+ res['files'][0] = 'files/lsan.c' + + self.assertTrue(res['metadata']['generated_by']['version']) + res['metadata']['generated_by']['version'] = "x.y.z" + + self.assertEqual(res, exp) diff --git a/tools/report-converter/tests/unit/test_mdl_parser.py b/tools/report-converter/tests/unit/analyzers/test_mdl_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_mdl_parser.py rename to tools/report-converter/tests/unit/analyzers/test_mdl_parser.py index ae4ce4188f..ab44d984e1 100644 --- a/tools/report-converter/tests/unit/test_mdl_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_mdl_parser.py @@ -18,8 +18,8 @@ import tempfile import unittest -from codechecker_report_converter.markdownlint.analyzer_result import \ - MarkdownlintAnalyzerResult +from codechecker_report_converter.analyzers.markdownlint import analyzer_result +from codechecker_report_converter.report.parser import plist class MarkdownlintAnalyzerResultTestCase(unittest.TestCase): @@ -27,7 +27,7 @@ class MarkdownlintAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = MarkdownlintAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'mdl_output_test_files') @@ -41,22 +41,23 @@ def test_no_md_output_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'readme.md') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming single plist file. 
""" analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single plist file. """ analyzer_result = os.path.join(self.test_files, 'readme.out') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'readme.md_mdl.plist') diff --git a/tools/report-converter/tests/unit/test_msan_parser.py b/tools/report-converter/tests/unit/analyzers/test_msan_parser.py similarity index 52% rename from tools/report-converter/tests/unit/test_msan_parser.py rename to tools/report-converter/tests/unit/analyzers/test_msan_parser.py index 3dcd2325a5..a5fe0bbecd 100644 --- a/tools/report-converter/tests/unit/test_msan_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_msan_parser.py @@ -18,11 +18,9 @@ import tempfile import unittest -from codechecker_report_converter.output_parser import Event, Message -from codechecker_report_converter.sanitizers.memory.output_parser import \ - MSANParser -from codechecker_report_converter.sanitizers.memory.analyzer_result import \ - MSANAnalyzerResult +from codechecker_report_converter.analyzers.sanitizers.memory import \ + analyzer_result +from codechecker_report_converter.report.parser import plist OLD_PWD = None @@ -46,7 +44,7 @@ class MSANPListConverterTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = MSANAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() def tearDown(self): @@ -55,7 +53,8 @@ def tearDown(self): def test_msan(self): """ Test for the msan.plist file. 
""" - self.analyzer_result.transform('msan.out', self.cc_result_dir) + self.analyzer_result.transform( + 'msan.out', self.cc_result_dir, plist.EXTENSION) with open('msan.plist', mode='rb') as pfile: exp = plistlib.load(pfile) @@ -71,40 +70,3 @@ def test_msan(self): res['metadata']['generated_by']['version'] = "x.y.z" self.assertEqual(res, exp) - - -class MSANOutputParserTestCase(unittest.TestCase): - """ - Tests the output of the OutputParser, which converts an Memory Sanitizer - output file to zero or more Message object. - """ - - def setUp(self): - """ Setup the OutputParser. """ - self.parser = MSANParser() - self.msan_repr = [ - Message( - os.path.abspath('files/msan.cpp'), - 7, 7, - "use-of-uninitialized-value", - "MemorySanitizer", - [Event( - os.path.abspath('files/msan.cpp'), - 7, 7, - " #0 0x4940da in main files/msan.cpp:7:7" - )], - [Event( - os.path.abspath('files/msan.cpp'), - 7, 7, - " #0 0x4940da in main files/msan.cpp:7:7\n" - " #1 0x7fed9df58b96 in __libc_start_main (??)\n" - " #2 0x41b2d9 in _start (??)\n" - )]), - ] - - def test_msan(self): - """ Test the generated Messages of msan.out. 
""" - messages = self.parser.parse_messages_from_file('msan.out') - self.assertEqual(len(messages), len(self.msan_repr)) - for message in messages: - self.assertIn(message, self.msan_repr) diff --git a/tools/report-converter/tests/unit/test_pyflakes_parser.py b/tools/report-converter/tests/unit/analyzers/test_pyflakes_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_pyflakes_parser.py rename to tools/report-converter/tests/unit/analyzers/test_pyflakes_parser.py index d4f34ca0b7..aa84d8d511 100644 --- a/tools/report-converter/tests/unit/test_pyflakes_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_pyflakes_parser.py @@ -18,8 +18,8 @@ import tempfile import unittest -from codechecker_report_converter.pyflakes.analyzer_result import \ - PyflakesAnalyzerResult +from codechecker_report_converter.analyzers.pyflakes import analyzer_result +from codechecker_report_converter.report.parser import plist class PyflakesAnalyzerResultTestCase(unittest.TestCase): @@ -27,7 +27,7 @@ class PyflakesAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = PyflakesAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'pyflakes_output_test_files') @@ -41,22 +41,23 @@ def test_no_go_output_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'simple.py') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming a directory. 
""" analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single output file. """ analyzer_result = os.path.join(self.test_files, 'simple.out') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'simple.py_pyflakes.plist') diff --git a/tools/report-converter/tests/unit/test_pylint_parser.py b/tools/report-converter/tests/unit/analyzers/test_pylint_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_pylint_parser.py rename to tools/report-converter/tests/unit/analyzers/test_pylint_parser.py index 7cc99aba5d..8ed9c34fe3 100644 --- a/tools/report-converter/tests/unit/test_pylint_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_pylint_parser.py @@ -18,8 +18,8 @@ import tempfile import unittest -from codechecker_report_converter.pylint.analyzer_result import \ - PylintAnalyzerResult +from codechecker_report_converter.analyzers.pylint import analyzer_result +from codechecker_report_converter.report.parser import plist class PylintAnalyzerResultTestCase(unittest.TestCase): @@ -27,7 +27,7 @@ class PylintAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. 
""" - self.analyzer_result = PylintAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'pylint_output_test_files') @@ -41,22 +41,23 @@ def test_no_json_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'simple.py') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming single plist file. """ analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single json file. """ analyzer_result = os.path.join(self.test_files, 'simple.json') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'simple.py_pylint.plist') diff --git a/tools/report-converter/tests/unit/test_smatch_parser.py b/tools/report-converter/tests/unit/analyzers/test_smatch_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_smatch_parser.py rename to tools/report-converter/tests/unit/analyzers/test_smatch_parser.py index f2936b41bc..36393ed532 100644 --- a/tools/report-converter/tests/unit/test_smatch_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_smatch_parser.py @@ -11,16 +11,14 @@ used in sequence transform Smatch output to a plist file. 
""" - import os import plistlib import shutil import tempfile import unittest - -from codechecker_report_converter.smatch.analyzer_result import \ - SmatchAnalyzerResult +from codechecker_report_converter.analyzers.smatch import analyzer_result +from codechecker_report_converter.report.parser import plist class SmatchAnalyzerResultTestCase(unittest.TestCase): @@ -28,7 +26,7 @@ class SmatchAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = SmatchAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'smatch_output_test_files') @@ -42,22 +40,23 @@ def test_no_smatch_output_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'sample.c') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming a directory. """ analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single output file. 
""" analyzer_result = os.path.join(self.test_files, 'sample.out') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'sample.c_smatch.plist') diff --git a/tools/report-converter/tests/unit/analyzers/test_sparse_parser.py b/tools/report-converter/tests/unit/analyzers/test_sparse_parser.py new file mode 100644 index 0000000000..12d120afe0 --- /dev/null +++ b/tools/report-converter/tests/unit/analyzers/test_sparse_parser.py @@ -0,0 +1,80 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +""" +This module tests the correctness of the SparseAnalyzerResult, which +used in sequence transform Smatch output to a plist file. +""" + +import os +import plistlib +import shutil +import tempfile +import unittest + +from codechecker_report_converter.analyzers.sparse import analyzer_result +from codechecker_report_converter.report.parser import plist + + +class SparseAnalyzerResultTestCase(unittest.TestCase): + """ Test the output of the SparseAnalyzerResult. """ + + def setUp(self): + """ Setup the test. """ + self.analyzer_result = analyzer_result.AnalyzerResult() + self.cc_result_dir = tempfile.mkdtemp() + self.test_files = os.path.join(os.path.dirname(__file__), + 'sparse_output_test_files') + + def tearDown(self): + """ Clean temporary directory. """ + shutil.rmtree(self.cc_result_dir) + + def test_no_smatch_output_file(self): + """ Test transforming single C file. 
""" + analyzer_result = os.path.join(self.test_files, 'files', + 'sample.c') + + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) + self.assertFalse(ret) + + def test_transform_dir(self): + """ Test transforming a directory. """ + analyzer_result = os.path.join(self.test_files) + + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) + self.assertFalse(ret) + + def test_transform_single_file(self): + """ Test transforming single output file. """ + analyzer_result = os.path.join(self.test_files, 'sample.out') + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) + + for test_file in ['sample.c', 'sample.h']: + # Test sample.c plist file + plist_file = os.path.join( + self.cc_result_dir, f'{test_file}_sparse.plist') + + with open(plist_file, mode='rb') as pfile: + res = plistlib.load(pfile) + + # Use relative path for this test. + res['files'][0] = os.path.join('files', test_file) + + self.assertTrue(res['metadata']['generated_by']['version']) + res['metadata']['generated_by']['version'] = "x.y.z" + + plist_file = os.path.join( + self.test_files, f'{test_file}.expected.plist') + with open(plist_file, mode='rb') as pfile: + exp = plistlib.load(pfile) + + self.assertEqual(res, exp) diff --git a/tools/report-converter/tests/unit/test_sphinx_parser.py b/tools/report-converter/tests/unit/analyzers/test_sphinx_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_sphinx_parser.py rename to tools/report-converter/tests/unit/analyzers/test_sphinx_parser.py index 1c9ebce2bd..1e25a221ff 100644 --- a/tools/report-converter/tests/unit/test_sphinx_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_sphinx_parser.py @@ -11,7 +11,6 @@ which used in sequence transform Sphinx output to a plist file. 
""" - import os import plistlib import shutil @@ -19,8 +18,8 @@ import unittest -from codechecker_report_converter.sphinx.analyzer_result import \ - SphinxAnalyzerResult +from codechecker_report_converter.analyzers.sphinx import analyzer_result +from codechecker_report_converter.report.parser import plist class SphinxResultTestCase(unittest.TestCase): @@ -28,7 +27,7 @@ class SphinxResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = SphinxAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'sphinx_output_test_files') @@ -42,22 +41,23 @@ def test_no_sphinx_output_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'sample.rst') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming a directory. """ analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single output file. 
""" analyzer_result = os.path.join(self.test_files, 'sample.out') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'sample.rst_sphinx.plist') diff --git a/tools/report-converter/tests/unit/test_spotbugs_parser.py b/tools/report-converter/tests/unit/analyzers/test_spotbugs_parser.py similarity index 82% rename from tools/report-converter/tests/unit/test_spotbugs_parser.py rename to tools/report-converter/tests/unit/analyzers/test_spotbugs_parser.py index e30d999675..3d787ddd60 100644 --- a/tools/report-converter/tests/unit/test_spotbugs_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_spotbugs_parser.py @@ -17,8 +17,8 @@ import tempfile import unittest -from codechecker_report_converter.spotbugs.analyzer_result import \ - SpotBugsAnalyzerResult +from codechecker_report_converter.analyzers.spotbugs import analyzer_result +from codechecker_report_converter.report.parser import plist OLD_PWD = None @@ -43,7 +43,7 @@ class SpotBugsAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = SpotBugsAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'spotbugs_output_test_files') @@ -57,22 +57,23 @@ def test_no_xml_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'Assign.java') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_parsing_dir(self): """ Test transforming single plist file. 
""" analyzer_result = os.path.join(self.test_files, 'files') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single plist file. """ analyzer_result = os.path.join(self.test_files, 'assign.xml') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'Assign.java_spotbugs.plist') diff --git a/tools/report-converter/tests/unit/analyzers/test_tsan_parser.py b/tools/report-converter/tests/unit/analyzers/test_tsan_parser.py new file mode 100644 index 0000000000..06a8b81807 --- /dev/null +++ b/tools/report-converter/tests/unit/analyzers/test_tsan_parser.py @@ -0,0 +1,72 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +""" +This module tests the correctness of the OutputParser and PListConverter, which +used in sequence transform ThreadSanitizer output to a plist file. 
+""" + + +import os +import plistlib +import shutil +import tempfile +import unittest + +from codechecker_report_converter.analyzers.sanitizers.thread import \ + analyzer_result +from codechecker_report_converter.report.parser import plist + +OLD_PWD = None + + +def setup_module(): + """Setup the test tidy reprs for the test classes in the module.""" + global OLD_PWD + OLD_PWD = os.getcwd() + os.chdir(os.path.join(os.path.dirname(__file__), + 'tsan_output_test_files')) + + +def teardown_module(): + """Restore environment after tests have ran.""" + global OLD_PWD + os.chdir(OLD_PWD) + + +class TSANAnalyzerResultTestCase(unittest.TestCase): + """ Test the output of the TSANAnalyzerResult. """ + + def setUp(self): + """ Setup the test. """ + self.analyzer_result = analyzer_result.AnalyzerResult() + self.cc_result_dir = tempfile.mkdtemp() + + def tearDown(self): + """ Clean temporary directory. """ + shutil.rmtree(self.cc_result_dir) + + def test_tsan(self): + """ Test for the tsan.plist file. """ + self.analyzer_result.transform( + 'tsan.out', self.cc_result_dir, plist.EXTENSION) + + with open('tsan.plist', mode='rb') as pfile: + exp = plistlib.load(pfile) + + plist_file = os.path.join(self.cc_result_dir, 'tsan.cpp_tsan.plist') + with open(plist_file, mode='rb') as pfile: + res = plistlib.load(pfile) + + # Use relative path for this test. 
+ res['files'][0] = 'files/tsan.cpp' + + self.assertTrue(res['metadata']['generated_by']['version']) + res['metadata']['generated_by']['version'] = "x.y.z" + + self.assertEqual(res, exp) diff --git a/tools/report-converter/tests/unit/test_tslint_parser.py b/tools/report-converter/tests/unit/analyzers/test_tslint_parser.py similarity index 81% rename from tools/report-converter/tests/unit/test_tslint_parser.py rename to tools/report-converter/tests/unit/analyzers/test_tslint_parser.py index 78db47737b..c5ee7405c9 100644 --- a/tools/report-converter/tests/unit/test_tslint_parser.py +++ b/tools/report-converter/tests/unit/analyzers/test_tslint_parser.py @@ -18,8 +18,8 @@ import tempfile import unittest -from codechecker_report_converter.tslint.analyzer_result import \ - TSLintAnalyzerResult +from codechecker_report_converter.analyzers.tslint import analyzer_result +from codechecker_report_converter.report.parser import plist class TSLintAnalyzerResultTestCase(unittest.TestCase): @@ -27,7 +27,7 @@ class TSLintAnalyzerResultTestCase(unittest.TestCase): def setUp(self): """ Setup the test. """ - self.analyzer_result = TSLintAnalyzerResult() + self.analyzer_result = analyzer_result.AnalyzerResult() self.cc_result_dir = tempfile.mkdtemp() self.test_files = os.path.join(os.path.dirname(__file__), 'tslint_output_test_files') @@ -41,22 +41,23 @@ def test_no_json_file(self): analyzer_result = os.path.join(self.test_files, 'files', 'index.ts') - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_dir(self): """ Test transforming single plist file. 
""" analyzer_result = os.path.join(self.test_files) - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) + ret = self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) self.assertFalse(ret) def test_transform_single_file(self): """ Test transforming single json file. """ analyzer_result = os.path.join(self.test_files, 'reports.json') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) plist_file = os.path.join(self.cc_result_dir, 'index.ts_tslint.plist') diff --git a/tools/report-converter/tests/unit/analyzers/test_ubsan_parser.py b/tools/report-converter/tests/unit/analyzers/test_ubsan_parser.py new file mode 100644 index 0000000000..1c6d19786f --- /dev/null +++ b/tools/report-converter/tests/unit/analyzers/test_ubsan_parser.py @@ -0,0 +1,95 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +""" +This module tests the correctness of the OutputParser and PListConverter, which +used in sequence transform UndefinedBehaviorSanitizer output to a plist file. +""" + + +import os +import plistlib +import shutil +import tempfile +import unittest + +from codechecker_report_converter.analyzers.sanitizers.ub import \ + analyzer_result +from codechecker_report_converter.report.parser import plist + +OLD_PWD = None + + +def setup_module(): + """ Setup the test tidy reprs for the test classes in the module. """ + global OLD_PWD + OLD_PWD = os.getcwd() + os.chdir(os.path.join(os.path.dirname(__file__), + 'ubsan_output_test_files')) + + +def teardown_module(): + """ Restore environment after tests have ran. 
""" + global OLD_PWD + os.chdir(OLD_PWD) + + +class UBSANPListConverterTestCase(unittest.TestCase): + """ Test the output of the UBSANAnalyzerResult. """ + + def setUp(self): + """ Setup the test. """ + self.analyzer_result = analyzer_result.AnalyzerResult() + self.cc_result_dir = tempfile.mkdtemp() + + def tearDown(self): + """ Clean temporary directory. """ + shutil.rmtree(self.cc_result_dir) + + def __check_analyzer_result(self, analyzer_result, analyzer_result_plist, + source_files, expected_plist): + """ Check the result of the analyzer transformation. """ + self.analyzer_result.transform( + analyzer_result, self.cc_result_dir, plist.EXTENSION) + + plist_file = os.path.join(self.cc_result_dir, analyzer_result_plist) + with open(plist_file, mode='rb') as pfile: + res = plistlib.load(pfile) + + # Use relative path for this test. + res['files'] = source_files + + self.assertTrue(res['metadata']['generated_by']['version']) + res['metadata']['generated_by']['version'] = "x.y.z" + + with open(expected_plist, mode='rb') as pfile: + exp = plistlib.load(pfile) + + self.assertEqual(res, exp) + + def test_empty1(self): + """ Test for empty Messages. """ + ret = self.analyzer_result.transform( + 'empty1.out', self.cc_result_dir, plist.EXTENSION) + self.assertFalse(ret) + + def test_empty2(self): + """ Test for empty Messages with multiple line. """ + ret = self.analyzer_result.transform( + 'empty2.out', self.cc_result_dir, plist.EXTENSION) + self.assertFalse(ret) + + def test_ubsan1(self): + """ Test for the ubsan1.plist file. """ + self.__check_analyzer_result('ubsan1.out', 'ubsan1.cpp_ubsan.plist', + ['files/ubsan1.cpp'], 'ubsan1.plist') + + def test_ubsan2(self): + """ Test for the ubsan2.plist file. 
""" + self.__check_analyzer_result('ubsan2.out', 'ubsan2.cpp_ubsan.plist', + ['files/ubsan2.cpp'], 'ubsan2.plist') diff --git a/analyzer/tests/unit/tidy_output_test_files/abs.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/abs.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/abs.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/abs.out diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/empty.plist b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/empty.plist similarity index 100% rename from tools/report-converter/tests/unit/tidy_output_test_files/empty.plist rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/empty.plist diff --git a/analyzer/tests/unit/tidy_output_test_files/empty1.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/empty1.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/empty1.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/empty1.out diff --git a/analyzer/tests/unit/tidy_output_test_files/empty2.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/empty2.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/empty2.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/empty2.out diff --git a/analyzer/tests/unit/tidy_output_test_files/files/Makefile b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/Makefile similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/files/Makefile rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/Makefile diff --git a/analyzer/tests/unit/tidy_output_test_files/files/test.cpp b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test.cpp similarity index 100% rename from 
analyzer/tests/unit/tidy_output_test_files/files/test.cpp rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test.cpp diff --git a/analyzer/tests/unit/tidy_output_test_files/files/test2.cpp b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test2.cpp similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/files/test2.cpp rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test2.cpp diff --git a/analyzer/tests/unit/tidy_output_test_files/files/test3.cpp b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test3.cpp similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/files/test3.cpp rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test3.cpp diff --git a/analyzer/tests/unit/tidy_output_test_files/files/test3.hh b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test3.hh similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/files/test3.hh rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test3.hh diff --git a/analyzer/tests/unit/tidy_output_test_files/files/test4.cpp b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test4.cpp similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/files/test4.cpp rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/files/test4.cpp diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy1.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy1.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/tidy1.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy1.out diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy1.plist b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy1.plist 
similarity index 100% rename from tools/report-converter/tests/unit/tidy_output_test_files/tidy1.plist rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy1.plist diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy1_v6.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy1_v6.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/tidy1_v6.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy1_v6.out diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy2.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy2.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/tidy2.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy2.out diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy2.plist b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy2.plist similarity index 100% rename from tools/report-converter/tests/unit/tidy_output_test_files/tidy2.plist rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy2.plist index 015bca22c9..ccf2477dfe 100644 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy2.plist +++ b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy2.plist @@ -65,57 +65,6 @@ path - - depth - 0 - kind - event - location - - col - 7 - file - 0 - line - 9 - - message - Left side of '||' is false - - - depth - 0 - kind - event - location - - col - 3 - file - 0 - line - 9 - - message - Taking false branch - - - depth - 0 - kind - event - location - - col - 12 - file - 0 - line - 13 - - message - Division by zero - edges @@ -203,6 +152,57 @@ kind control + + depth + 0 + kind + event + location + + col + 7 + file + 0 + line + 9 + + message + Left side of '||' is false + + + depth + 0 + kind + event + location + + col + 3 + file + 0 + line + 9 + + message + 
Taking false branch + + + depth + 0 + kind + event + location + + col + 12 + file + 0 + line + 13 + + message + Division by zero + depth 0 diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy2_v6.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy2_v6.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/tidy2_v6.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy2_v6.out diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy3.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy3.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/tidy3.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy3.out diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy3_cpp.plist b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy3_cpp.plist similarity index 100% rename from tools/report-converter/tests/unit/tidy_output_test_files/tidy3_cpp.plist rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy3_cpp.plist diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy3_hh.plist b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy3_hh.plist similarity index 93% rename from tools/report-converter/tests/unit/tidy_output_test_files/tidy3_hh.plist rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy3_hh.plist index 23dc08da5a..4f95bdadd1 100644 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy3_hh.plist +++ b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy3_hh.plist @@ -18,114 +18,12 @@ col 6 file - 0 + 1 line 6 path - - depth - 0 - kind - event - location - - col - 3 - file - 1 - line - 4 - - message - 'x' initialized to a null pointer value - - - depth - 0 - kind - event - location - - col - 11 - file - 1 - line - 6 - - message - 
Assuming 'argc' is > 3 - - - depth - 0 - kind - event - location - - col - 3 - file - 1 - line - 6 - - message - Taking true branch - - - depth - 0 - kind - event - location - - col - 9 - file - 1 - line - 7 - - message - Passing null pointer value via 1st parameter 'x' - - - depth - 0 - kind - event - location - - col - 5 - file - 1 - line - 7 - - message - Calling 'bar' - - - depth - 0 - kind - event - location - - col - 6 - file - 0 - line - 6 - - message - Dereference of null pointer (loaded from variable 'x') - edges @@ -136,7 +34,7 @@ col 11 file - 1 + 0 line 6 @@ -144,7 +42,7 @@ col 11 file - 1 + 0 line 6 @@ -155,7 +53,7 @@ col 3 file - 1 + 0 line 4 @@ -163,7 +61,7 @@ col 3 file - 1 + 0 line 4 @@ -176,7 +74,7 @@ col 3 file - 1 + 0 line 6 @@ -184,7 +82,7 @@ col 3 file - 1 + 0 line 6 @@ -195,7 +93,7 @@ col 11 file - 1 + 0 line 6 @@ -203,7 +101,7 @@ col 11 file - 1 + 0 line 6 @@ -216,7 +114,7 @@ col 9 file - 1 + 0 line 7 @@ -224,7 +122,7 @@ col 9 file - 1 + 0 line 7 @@ -235,7 +133,7 @@ col 3 file - 1 + 0 line 6 @@ -243,7 +141,7 @@ col 3 file - 1 + 0 line 6 @@ -256,7 +154,7 @@ col 5 file - 1 + 0 line 7 @@ -264,7 +162,7 @@ col 5 file - 1 + 0 line 7 @@ -275,7 +173,7 @@ col 9 file - 1 + 0 line 7 @@ -283,7 +181,7 @@ col 9 file - 1 + 0 line 7 @@ -296,7 +194,7 @@ col 6 file - 0 + 1 line 6 @@ -304,7 +202,7 @@ col 6 file - 0 + 1 line 6 @@ -315,7 +213,7 @@ col 5 file - 1 + 0 line 7 @@ -323,7 +221,7 @@ col 5 file - 1 + 0 line 7 @@ -341,13 +239,115 @@ location col + 3 + file + 0 + line + 4 + + message + 'x' initialized to a null pointer value + + + depth + 0 + kind + event + location + + col + 11 + file + 0 + line 6 + + message + Assuming 'argc' is > 3 + + + depth + 0 + kind + event + location + + col + 3 file 0 line 6 message + Taking true branch + + + depth + 0 + kind + event + location + + col + 9 + file + 0 + line + 7 + + message + Passing null pointer value via 1st parameter 'x' + + + depth + 0 + kind + event + location + + col + 5 + file + 0 + line + 7 + + message + 
Calling 'bar' + + + depth + 0 + kind + event + location + + col + 6 + file + 1 + line + 6 + + message + Dereference of null pointer (loaded from variable 'x') + + + depth + 0 + kind + event + location + + col + 6 + file + 1 + line + 6 + + message Dereference of null pointer (loaded from variable 'x') @@ -357,8 +357,8 @@ files - files/test3.hh files/test3.cpp + files/test3.hh metadata diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy4.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy4.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/tidy4.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy4.out diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy5.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy5.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/tidy5.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy5.out diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy5_v6.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy5_v6.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/tidy5_v6.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy5_v6.out diff --git a/analyzer/tests/unit/tidy_output_test_files/tidy6.out b/tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy6.out similarity index 100% rename from analyzer/tests/unit/tidy_output_test_files/tidy6.out rename to tools/report-converter/tests/unit/analyzers/tidy_output_test_files/tidy6.out diff --git a/tools/report-converter/tests/unit/tsan_output_test_files/files/tsan.cpp b/tools/report-converter/tests/unit/analyzers/tsan_output_test_files/files/tsan.cpp similarity index 100% rename from tools/report-converter/tests/unit/tsan_output_test_files/files/tsan.cpp rename to 
tools/report-converter/tests/unit/analyzers/tsan_output_test_files/files/tsan.cpp diff --git a/tools/report-converter/tests/unit/tsan_output_test_files/tsan.out b/tools/report-converter/tests/unit/analyzers/tsan_output_test_files/tsan.out similarity index 100% rename from tools/report-converter/tests/unit/tsan_output_test_files/tsan.out rename to tools/report-converter/tests/unit/analyzers/tsan_output_test_files/tsan.out diff --git a/tools/report-converter/tests/unit/tsan_output_test_files/tsan.plist b/tools/report-converter/tests/unit/analyzers/tsan_output_test_files/tsan.plist similarity index 82% rename from tools/report-converter/tests/unit/tsan_output_test_files/tsan.plist rename to tools/report-converter/tests/unit/analyzers/tsan_output_test_files/tsan.plist index 79efc05f97..1a42fcc44b 100644 --- a/tools/report-converter/tests/unit/tsan_output_test_files/tsan.plist +++ b/tools/report-converter/tests/unit/analyzers/tsan_output_test_files/tsan.plist @@ -25,10 +25,6 @@ notes - depth - 0 - kind - note location col @@ -50,40 +46,6 @@ path - - depth - 0 - kind - event - location - - col - 2 - file - 0 - line - 24 - - message - #1 main files/tsan.cpp:24:2 (a.out+0x4b529e) - - - depth - 0 - kind - event - location - - col - 14 - file - 0 - line - 18 - - message - #0 insert_in_table(unsigned long, int) files/tsan.cpp:18:14 (a.out+0x4b525b) - edges @@ -127,10 +89,84 @@ + + end + + + col + 2 + file + 0 + line + 24 + + + col + 2 + file + 0 + line + 24 + + + start + + + col + 14 + file + 0 + line + 18 + + + col + 14 + file + 0 + line + 18 + + + kind control + + depth + 0 + kind + event + location + + col + 2 + file + 0 + line + 24 + + message + #1 main files/tsan.cpp:24:2 (a.out+0x4b529e) + + + depth + 0 + kind + event + location + + col + 14 + file + 0 + line + 18 + + message + #0 insert_in_table(unsigned long, int) files/tsan.cpp:18:14 (a.out+0x4b525b) + depth 0 diff --git a/tools/report-converter/tests/unit/tslint_output_test_files/Makefile 
b/tools/report-converter/tests/unit/analyzers/tslint_output_test_files/Makefile similarity index 100% rename from tools/report-converter/tests/unit/tslint_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/tslint_output_test_files/Makefile diff --git a/tools/report-converter/tests/unit/tslint_output_test_files/files/index.ts b/tools/report-converter/tests/unit/analyzers/tslint_output_test_files/files/index.ts similarity index 100% rename from tools/report-converter/tests/unit/tslint_output_test_files/files/index.ts rename to tools/report-converter/tests/unit/analyzers/tslint_output_test_files/files/index.ts diff --git a/tools/report-converter/tests/unit/tslint_output_test_files/files/tslint.json b/tools/report-converter/tests/unit/analyzers/tslint_output_test_files/files/tslint.json similarity index 100% rename from tools/report-converter/tests/unit/tslint_output_test_files/files/tslint.json rename to tools/report-converter/tests/unit/analyzers/tslint_output_test_files/files/tslint.json diff --git a/tools/report-converter/tests/unit/tslint_output_test_files/reports.expected.plist b/tools/report-converter/tests/unit/analyzers/tslint_output_test_files/reports.expected.plist similarity index 100% rename from tools/report-converter/tests/unit/tslint_output_test_files/reports.expected.plist rename to tools/report-converter/tests/unit/analyzers/tslint_output_test_files/reports.expected.plist diff --git a/tools/report-converter/tests/unit/tslint_output_test_files/reports.json b/tools/report-converter/tests/unit/analyzers/tslint_output_test_files/reports.json similarity index 100% rename from tools/report-converter/tests/unit/tslint_output_test_files/reports.json rename to tools/report-converter/tests/unit/analyzers/tslint_output_test_files/reports.json diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/Makefile b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/Makefile similarity index 100% rename from 
tools/report-converter/tests/unit/ubsan_output_test_files/Makefile rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/Makefile diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/abs.out b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/abs.out similarity index 100% rename from tools/report-converter/tests/unit/ubsan_output_test_files/abs.out rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/abs.out diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/empty.plist b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/empty.plist similarity index 100% rename from tools/report-converter/tests/unit/ubsan_output_test_files/empty.plist rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/empty.plist diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/empty1.out b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/empty1.out similarity index 100% rename from tools/report-converter/tests/unit/tidy_output_test_files/empty1.out rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/empty1.out diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/empty2.out b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/empty2.out similarity index 100% rename from tools/report-converter/tests/unit/tidy_output_test_files/empty2.out rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/empty2.out diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/ubsan1.cpp b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan1.cpp similarity index 100% rename from tools/report-converter/tests/unit/ubsan_output_test_files/ubsan1.cpp rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan1.cpp diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/ubsan1.out 
b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan1.out similarity index 100% rename from tools/report-converter/tests/unit/ubsan_output_test_files/ubsan1.out rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan1.out diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/ubsan1.plist b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan1.plist similarity index 100% rename from tools/report-converter/tests/unit/ubsan_output_test_files/ubsan1.plist rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan1.plist diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/ubsan2.cpp b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan2.cpp similarity index 100% rename from tools/report-converter/tests/unit/ubsan_output_test_files/ubsan2.cpp rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan2.cpp diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/ubsan2.out b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan2.out similarity index 100% rename from tools/report-converter/tests/unit/ubsan_output_test_files/ubsan2.out rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan2.out diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/ubsan2.plist b/tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan2.plist similarity index 100% rename from tools/report-converter/tests/unit/ubsan_output_test_files/ubsan2.plist rename to tools/report-converter/tests/unit/analyzers/ubsan_output_test_files/ubsan2.plist diff --git a/tools/report-converter/tests/unit/cppcheck_output_test_files/divide_zero.expected.plist b/tools/report-converter/tests/unit/cppcheck_output_test_files/divide_zero.expected.plist deleted file mode 100644 index 4522c0ff0e..0000000000 --- 
a/tools/report-converter/tests/unit/cppcheck_output_test_files/divide_zero.expected.plist +++ /dev/null @@ -1,79 +0,0 @@ - - - - - clang_version -cppcheck version 1.87 - files - - files/divide_zero.cpp - - diagnostics - - - path - - - kindevent - location - - line17 - col13 - file0 - - ranges - - - - line17 - col13 - file0 - - - line17 - col13 - file0 - - - - depth0 - extended_message - Division by zero - message - Division by zero - - - descriptionDivision by zero. - categoryerror - typeDivision by zero. - check_namezerodiv - - issue_hash_content_of_line_in_contexta3cd5e774bb367cad6f9031b7eb4d0a3 - issue_context_kind - issue_context - issue_hash_function_offset - location - - line17 - col13 - file0 - - - - metadata - - analyzer - - name - cppcheck - - generated_by - - name - report-converter - version - x.y.z - - - - diff --git a/tools/report-converter/codechecker_report_converter/clang_tidy/plist_converter.py b/tools/report-converter/tests/unit/output/__init__.py similarity index 51% rename from tools/report-converter/codechecker_report_converter/clang_tidy/plist_converter.py rename to tools/report-converter/tests/unit/output/__init__.py index 6bad618bd2..4259749345 100644 --- a/tools/report-converter/codechecker_report_converter/clang_tidy/plist_converter.py +++ b/tools/report-converter/tests/unit/output/__init__.py @@ -5,15 +5,3 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- - - -from ..plist_converter import PlistConverter - - -class ClangTidyPlistConverter(PlistConverter): - """ Warning messages to plist converter. 
""" - - def _get_checker_category(self, checker): - """ Returns the check's category.""" - parts = checker.split('-') - return parts[0] if parts else 'unknown' diff --git a/tools/plist_to_html/tests/unit/plist_to_html/__init__.py b/tools/report-converter/tests/unit/output/gerrit/__init__.py similarity index 100% rename from tools/plist_to_html/tests/unit/plist_to_html/__init__.py rename to tools/report-converter/tests/unit/output/gerrit/__init__.py diff --git a/codechecker_common/tests/unit/test_files/lib.cpp b/tools/report-converter/tests/unit/output/gerrit/test_files/lib.cpp similarity index 100% rename from codechecker_common/tests/unit/test_files/lib.cpp rename to tools/report-converter/tests/unit/output/gerrit/test_files/lib.cpp diff --git a/codechecker_common/tests/unit/test_files/main.cpp b/tools/report-converter/tests/unit/output/gerrit/test_files/main.cpp similarity index 100% rename from codechecker_common/tests/unit/test_files/main.cpp rename to tools/report-converter/tests/unit/output/gerrit/test_files/main.cpp diff --git a/tools/report-converter/tests/unit/output/gerrit/test_gerrit_converter.py b/tools/report-converter/tests/unit/output/gerrit/test_gerrit_converter.py new file mode 100644 index 0000000000..a7a81961e9 --- /dev/null +++ b/tools/report-converter/tests/unit/output/gerrit/test_gerrit_converter.py @@ -0,0 +1,186 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +""" Tests for gerrit output conversion. 
""" + +import json +import os +import tempfile +import unittest + +from codechecker_report_converter.report import File, Report +from codechecker_report_converter.report.output import gerrit + + +class TestReportToGerrit(unittest.TestCase): + @classmethod + def setup_class(cls): + cls._test_files_dir = os.path.join( + os.path.dirname(__file__), 'test_files') + + cls._src_files = [ + File(os.path.join(cls._test_files_dir, 'main.cpp')), + File(os.path.join(cls._test_files_dir, 'lib.cpp'))] + + def tearDown(self): + if "CC_REPO_DIR" in os.environ: + os.environ.pop("CC_REPO_DIR") + + if "CC_REPORT_URL" in os.environ: + os.environ.pop("CC_REPORT_URL") + + def test_report_to_gerrit_conversion(self): + """ Conversion without directory path just the source filename. """ + report = Report( + self._src_files[0], 3, 3, 'some description', 'my_checker', + report_hash='dummy_hash', + severity='LOW') + + os.environ["CC_REPO_DIR"] = self._test_files_dir + res = gerrit.convert([report]) + + expected = { + "tag": "jenkins", + "message": "CodeChecker found 1 issue(s) in the code.", + "labels": {"Code-Review": -1, "Verified": -1}, + "comments": { + "main.cpp": [ + { + "range": { + "start_line": 3, + "start_character": 3, + "end_line": 3, + "end_character": 3, + }, + "message": "[LOW] main.cpp:3:3: " + "some description [my_checker]\n sizeof(42);\n", + } + ] + }, + } + self.assertEquals(res, expected) + + def test_report_to_gerrit_conversion_abs_filepath(self): + """ Conversion report with absolute filepath. 
""" + report = Report( + self._src_files[0], 3, 3, 'some description', 'my_checker', + report_hash='dummy_hash', + severity='LOW') + + res = gerrit.convert([report]) + + expected = { + "tag": "jenkins", + "message": "CodeChecker found 1 issue(s) in the code.", + "labels": {"Code-Review": -1, "Verified": -1}, + "comments": { + self._src_files[0].path: [ + { + "range": { + "start_line": 3, + "start_character": 3, + "end_line": 3, + "end_character": 3, + }, + "message": "[LOW] {0}:3:3: some description " + "[my_checker]\n sizeof(42);\n".format( + self._src_files[0].path), + } + ] + }, + } + self.assertEquals(res, expected) + + def test_report_to_gerrit_conversion_report_url(self): + """ Conversion report with absolute filepath and CC_REPORT_URL env. """ + report = Report( + self._src_files[0], 3, 3, 'some description', 'my_checker', + report_hash='dummy_hash', + severity='LOW') + + os.environ["CC_REPO_DIR"] = self._test_files_dir + os.environ["CC_REPORT_URL"] = "localhost:8080/index.html" + res = gerrit.convert([report]) + + expected = { + "tag": "jenkins", + "message": "CodeChecker found 1 issue(s) in the code. " + "See: 'localhost:8080/index.html'", + "labels": {"Code-Review": -1, "Verified": -1}, + "comments": { + "main.cpp": [ + { + "range": { + "start_line": 3, + "start_character": 3, + "end_line": 3, + "end_character": 3, + }, + "message": "[LOW] main.cpp:3:3: " + "some description [my_checker]\n sizeof(42);\n", + } + ] + }, + } + self.assertEquals(res, expected) + + def test_report_to_gerrit_conversion_filter_changed_files(self): + """Conversion report with changed files filter. + + Reports from the lib.cpp file should be not in the converted list. 
+ """ + report = Report( + self._src_files[0], 3, 3, 'some description', 'my_checker', + report_hash='dummy_hash', + severity='LOW') + + lib_report = Report( + self._src_files[1], 3, 3, 'some description', 'my_checker', + report_hash='dummy_hash', + severity='LOW') + + dummy_changed_files_content = { + "/COMMIT_MSG": { + "status": "A", + "lines_inserted": 1, + "size_delta": 1, + "size": 100, + }, + "main.cpp": { + "lines_inserted": 1, + "lines_deleted": 1, + "size_delta": 1, + "size": 100, + } + } + fd, changed_files_file = tempfile.mkstemp() + os.write(fd, json.dumps(dummy_changed_files_content).encode("utf-8")) + os.close(fd) + + os.environ["CC_CHANGED_FILES"] = changed_files_file + + res = gerrit.convert([report, report, lib_report]) + os.remove(os.environ["CC_CHANGED_FILES"]) + + # Remove environment variable not to influence the other tests. + os.environ.pop("CC_CHANGED_FILES") + + review_comments = res["comments"] + + # Reports were found in two source files. + self.assertEquals(len(review_comments), 1) + + # Two reports in the main.cpp file. + self.assertEquals(len(review_comments[report.file.path]), 2) + + self.assertIn( + "CodeChecker found 3 issue(s) in the code.", res["message"]) + self.assertIn( + "following reports are introduced in files which are not changed", + res["message"]) + self.assertIn(lib_report.file.path, res["message"]) diff --git a/tools/report-converter/tests/unit/output/html/__init__.py b/tools/report-converter/tests/unit/output/html/__init__.py new file mode 100644 index 0000000000..cea05ef6e6 --- /dev/null +++ b/tools/report-converter/tests/unit/output/html/__init__.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- +""" Setup for the test package analyze. """ + + +import os +import shutil + +from libtest import env + + +# Test workspace should be initialized in this module. +TEST_WORKSPACE = None + + +def setup_package(): + """ Setup the environment for the tests. """ + + global TEST_WORKSPACE + TEST_WORKSPACE = env.get_workspace('plist_to_html') + + os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE + + +def teardown_package(): + """ Delete the workspace associated with this test. """ + + # TODO: If environment variable is set keep the workspace + # and print out the path. + global TEST_WORKSPACE + + print("Removing: " + TEST_WORKSPACE) + shutil.rmtree(TEST_WORKSPACE) diff --git a/tools/plist_to_html/tests/unit/plist_to_html/plist_to_html_test.py b/tools/report-converter/tests/unit/output/html/plist_to_html_test.py similarity index 73% rename from tools/plist_to_html/tests/unit/plist_to_html/plist_to_html_test.py rename to tools/report-converter/tests/unit/output/html/plist_to_html_test.py index bed8ccae41..5519dd578c 100644 --- a/tools/plist_to_html/tests/unit/plist_to_html/plist_to_html_test.py +++ b/tools/report-converter/tests/unit/output/html/plist_to_html_test.py @@ -8,7 +8,6 @@ import os -import plistlib import shutil import unittest @@ -16,7 +15,9 @@ from libtest import env -from plist_to_html import PlistToHtml +from codechecker_report_converter.report.output.html import \ + html as report_to_html +from codechecker_report_converter.report import report_file def get_project_path(test_project) -> str: @@ -24,11 +25,6 @@ def get_project_path(test_project) -> str: return os.path.join(env.test_proj_root(), test_project) -def load_plist_data(plist_filepath) -> dict: - with open(plist_filepath, 'rb') as plist_file: - return plistlib.load(plist_file) - - class PlistToHtmlTest(unittest.TestCase): test_workspace: ClassVar[str] layout_dir: 
ClassVar[str] @@ -63,19 +59,19 @@ def __test_html_builder(self, proj: str): Test building html file from the given proj's plist file. """ proj_dir = os.path.join(self.test_workspace, 'test_files', proj) - plist_file = os.path.join(proj_dir, proj + '.plist') + plist_file = os.path.join(proj_dir, f"{proj}.plist") - plist = load_plist_data(plist_file) - report_data = PlistToHtml.get_report_data_from_plist(plist) + reports = report_file.get_reports(plist_file) output_dir = os.path.join(proj_dir, 'html') if not os.path.exists(output_dir): os.mkdir(output_dir) - output_path = os.path.join(output_dir, proj + '.html') + output_path = os.path.join(output_dir, f"{proj}.plist.html") + + html_builder = report_to_html.HtmlBuilder(self.layout_dir) + report_to_html.convert(plist_file, reports, output_dir, html_builder) - html_builder = PlistToHtml.HtmlBuilder(self.layout_dir) - html_builder.create(output_path, report_data) self.assertTrue(os.path.exists(output_path)) html_builder.create_index_html(output_dir) @@ -89,15 +85,17 @@ def test_get_report_data_notes(self): proj_notes = os.path.join(self.test_workspace, 'test_files', 'notes') plist_file = os.path.join(proj_notes, 'notes.plist') - plist = load_plist_data(plist_file) - res = PlistToHtml.get_report_data_from_plist(plist) + reports = report_file.get_reports(plist_file) - self.assertEqual(len(res['files']), 1) + html_builder = report_to_html.HtmlBuilder(self.layout_dir) + html_builder._add_html_reports(reports) - reports = res['reports'] - self.assertEqual(len(reports), 1) + self.assertEqual(len(html_builder.files), 1) - report = reports[0] + html_reports = html_builder.html_reports + self.assertEqual(len(html_reports), 1) + + report = html_reports[0] self.assertEqual(len(report['notes']), 1) self.assertEqual(len(report['macros']), 0) self.assertGreaterEqual(len(report['events']), 1) @@ -108,15 +106,20 @@ def test_get_report_data_macros(self): proj_macros = os.path.join(self.test_workspace, 'test_files', 'macros') plist_file 
= os.path.join(proj_macros, 'macros.plist') - plist = load_plist_data(plist_file) - res = PlistToHtml.get_report_data_from_plist(plist) + reports = report_file.get_reports(plist_file) + + html_builder = report_to_html.HtmlBuilder(self.layout_dir) + html_builder._add_html_reports(reports) - self.assertEqual(len(res['files']), 1) + self.assertEqual(len(html_builder.files), 1) - reports = res['reports'] + html_reports = html_builder.html_reports + self.assertEqual(len(html_reports), 1) + + report = html_reports[0] self.assertEqual(len(reports), 1) - report = reports[0] + report = html_reports[0] self.assertEqual(len(report['notes']), 0) self.assertEqual(len(report['macros']), 1) self.assertGreaterEqual(len(report['events']), 1) @@ -127,21 +130,23 @@ def test_get_report_data_simple(self): proj_simple = os.path.join(self.test_workspace, 'test_files', 'simple') plist_file = os.path.join(proj_simple, 'simple.plist') - plist = load_plist_data(plist_file) - res = PlistToHtml.get_report_data_from_plist(plist) + reports = report_file.get_reports(plist_file) + + html_builder = report_to_html.HtmlBuilder(self.layout_dir) + html_builder._add_html_reports(reports) - self.assertEqual(len(res['files']), 1) + self.assertEqual(len(html_builder.files), 1) - reports = res['reports'] - self.assertEqual(len(reports), 2) + html_reports = html_builder.html_reports + self.assertEqual(len(html_reports), 2) - dead_stores = [r for r in reports if + dead_stores = [r for r in html_reports if r['checkerName'] == 'deadcode.DeadStores'][0] self.assertEqual(len(dead_stores['notes']), 0) self.assertEqual(len(dead_stores['macros']), 0) self.assertGreaterEqual(len(dead_stores['events']), 1) - divide_zero = [r for r in reports if + divide_zero = [r for r in html_reports if r['checkerName'] == 'core.DivideZero'][0] self.assertEqual(len(divide_zero['notes']), 0) self.assertEqual(len(divide_zero['macros']), 0) diff --git a/tools/report-converter/tests/unit/parser/__init__.py 
b/tools/report-converter/tests/unit/parser/__init__.py new file mode 100644 index 0000000000..4259749345 --- /dev/null +++ b/tools/report-converter/tests/unit/parser/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- diff --git a/tools/report-converter/tests/unit/parser/plist/__init__.py b/tools/report-converter/tests/unit/parser/plist/__init__.py new file mode 100644 index 0000000000..4259749345 --- /dev/null +++ b/tools/report-converter/tests/unit/parser/plist/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- diff --git a/web/server/tests/unit/plist_test_files/clang-3.7-noerror.plist b/tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-3.7-noerror.plist similarity index 100% rename from web/server/tests/unit/plist_test_files/clang-3.7-noerror.plist rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-3.7-noerror.plist diff --git a/web/server/tests/unit/plist_test_files/clang-3.7.plist b/tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-3.7.plist similarity index 100% rename from web/server/tests/unit/plist_test_files/clang-3.7.plist rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-3.7.plist diff --git a/web/server/tests/unit/plist_test_files/clang-3.8-trunk.plist b/tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-3.8-trunk.plist similarity index 100% rename from web/server/tests/unit/plist_test_files/clang-3.8-trunk.plist rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-3.8-trunk.plist diff --git a/web/server/tests/unit/plist_test_files/clang-4.0.plist b/tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-4.0.plist similarity index 100% rename from web/server/tests/unit/plist_test_files/clang-4.0.plist rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-4.0.plist diff --git a/web/server/tests/unit/plist_test_files/clang-5.0-trunk.plist b/tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-5.0-trunk.plist similarity index 100% rename from web/server/tests/unit/plist_test_files/clang-5.0-trunk.plist rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/clang-5.0-trunk.plist diff --git a/web/server/tests/unit/plist_test_files/empty_file 
b/tools/report-converter/tests/unit/parser/plist/plist_test_files/empty_file similarity index 100% rename from web/server/tests/unit/plist_test_files/empty_file rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/empty_file diff --git a/web/server/tests/unit/plist_test_files/gen_plist/gen_noerror_plist b/tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/gen_noerror_plist similarity index 100% rename from web/server/tests/unit/plist_test_files/gen_plist/gen_noerror_plist rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/gen_noerror_plist diff --git a/web/server/tests/unit/plist_test_files/gen_plist/gen_plist b/tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/gen_plist similarity index 100% rename from web/server/tests/unit/plist_test_files/gen_plist/gen_plist rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/gen_plist diff --git a/web/server/tests/unit/plist_test_files/gen_plist/gen_plist.md b/tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/gen_plist.md similarity index 100% rename from web/server/tests/unit/plist_test_files/gen_plist/gen_plist.md rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/gen_plist.md diff --git a/web/server/tests/unit/plist_test_files/gen_plist/noerror.cpp b/tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/noerror.cpp similarity index 100% rename from web/server/tests/unit/plist_test_files/gen_plist/noerror.cpp rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/noerror.cpp diff --git a/web/server/tests/unit/plist_test_files/gen_plist/test.cpp b/tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/test.cpp similarity index 100% rename from web/server/tests/unit/plist_test_files/gen_plist/test.cpp rename to 
tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/test.cpp diff --git a/web/server/tests/unit/plist_test_files/gen_plist/test.h b/tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/test.h similarity index 100% rename from web/server/tests/unit/plist_test_files/gen_plist/test.h rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/gen_plist/test.h diff --git a/tools/report-converter/tests/unit/parser/plist/test_plist_parser.py b/tools/report-converter/tests/unit/parser/plist/test_plist_parser.py new file mode 100644 index 0000000000..f41e868294 --- /dev/null +++ b/tools/report-converter/tests/unit/parser/plist/test_plist_parser.py @@ -0,0 +1,277 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +""" +Test the parsing of the plist generated by multiple clang versions. + +With the newer clang releases more information is available in the plist files. 
+ +* Before Clang v3.7: + - Not supported + +* Clang v3.7: + - Checker name is available in the plist + - Report hash is not avilable (generated based on the report path elements + see report handling and plist parsing modules for more details + +* After Clang v3.8: + - Checker name is available + - Report hash is available + +""" + + +import os +import unittest + +from copy import deepcopy + +from codechecker_report_converter.report import BugPathEvent, \ + BugPathPosition, File, Range, Report, report_file +from codechecker_report_converter.report.reports import get_mentioned_files + + +gen_plist_dir_path = os.path.join( + os.path.dirname(__file__), 'plist_test_files', 'gen_plist') + +SRC_FILES = [ + File(os.path.join(gen_plist_dir_path, 'test.cpp')), + File(os.path.join(gen_plist_dir_path, 'test.h'))] + + +# Base skeletons for reports where the checker name is already available. +div_zero_skel = Report( + SRC_FILES[1], 7, 14, 'Division by zero', 'core.DivideZero', + report_hash='79e31a6ba028f0b7d9779faf4a6cb9cf', + category='Logic error', + type='Division by zero', + bug_path_events=[ + BugPathEvent( + "'base' initialized to 0", + SRC_FILES[0], 20, 5, + Range(20, 5, 20, 12)), + BugPathEvent( + "Passing the value 0 via 1st parameter 'base'", + SRC_FILES[0], 21, 15, + Range(21, 15, 21, 18)), + BugPathEvent( + "Calling 'test_func'", + SRC_FILES[0], 21, 5, + Range(21, 5, 21, 19)), + BugPathEvent( + "Entered call from 'main'", + SRC_FILES[0], 6, 1, + Range(6, 1, 6, 1)), + BugPathEvent( + "Passing the value 0 via 1st parameter 'num'", + SRC_FILES[0], 8, 22, + Range(8, 22, 8, 25)), + BugPathEvent( + "Calling 'generate_id'", + SRC_FILES[0], 8, 10, + Range(8, 10, 8, 26)), + BugPathEvent( + "Entered call from 'test_func'", + SRC_FILES[1], 6, 1, + Range(6, 1, 6, 1)), + BugPathEvent( + "Division by zero", + SRC_FILES[1], 7, 14, + Range(7, 12, 7, 17)) + ], + bug_path_positions=[ + BugPathPosition(SRC_FILES[0], Range(19, 5, 19, 7)), + BugPathPosition(SRC_FILES[0], Range(20, 5, 
20, 7)), + BugPathPosition(SRC_FILES[0], Range(21, 5, 21, 13)), + BugPathPosition(SRC_FILES[0], Range(6, 1, 6, 4)), + BugPathPosition(SRC_FILES[0], Range(7, 5, 7, 7)), + BugPathPosition(SRC_FILES[0], Range(8, 5, 8, 6)), + BugPathPosition(SRC_FILES[0], Range(8, 22, 8, 25)), + BugPathPosition(SRC_FILES[0], Range(8, 10, 8, 20)), + BugPathPosition(SRC_FILES[1], Range(6, 1, 6, 3)), + BugPathPosition(SRC_FILES[1], Range(7, 14, 7, 14)) + ], + notes=[], + macro_expansions=[]) + + +stack_addr_skel_msg = \ + "Address of stack memory associated with local variable " \ + "'str' is still referred to by the global variable 'p' " \ + "upon returning to the caller. " \ + "This will be a dangling reference" + +stack_addr_skel = Report( + SRC_FILES[0], 16, 1, + stack_addr_skel_msg, + 'core.StackAddressEscape', + report_hash='f7b5072d428e890f2d309217f3ead16f', + category='Logic error', + type='Stack address stored into global variable', + bug_path_events=[ + BugPathEvent( + stack_addr_skel_msg, SRC_FILES[0], 16, 1, Range(14, 3, 14, 29)) + ], + bug_path_positions=[ + BugPathPosition(SRC_FILES[0], Range(14, 3, 14, 6)), + BugPathPosition(SRC_FILES[0], Range(15, 3, 15, 3)), + BugPathPosition(SRC_FILES[0], Range(16, 1, 16, 1)) + ], + notes=[], + macro_expansions=[]) + + +class PlistParserTestCaseNose(unittest.TestCase): + """Test the parsing of the plist generated by multiple clang versions.""" + + @classmethod + def setup_class(cls): + """Initialize test source file.""" + # Bugs found by these checkers in the test source files. + cls.__found_checker_names = [ + 'core.DivideZero', + 'core.StackAddressEscape', + 'deadcode.DeadStores'] + + # Already generated plist files for the tests. 
+ cls.__this_dir = os.path.dirname(__file__) + cls.__plist_test_files = os.path.join( + cls.__this_dir, 'plist_test_files') + + def test_empty_file(self): + """Plist file is empty.""" + empty_plist = os.path.join(self.__plist_test_files, 'empty_file') + reports = report_file.get_reports(empty_plist) + self.assertEqual(reports, []) + + def test_no_bug_file(self): + """There was no bug in the checked file.""" + no_bug_plist = os.path.join( + self.__plist_test_files, 'clang-3.7-noerror.plist') + reports = report_file.get_reports(no_bug_plist) + self.assertEqual(reports, []) + + def test_clang37_plist(self): + """ + Check plist generated by clang 3.7 checker name should be in the plist + file generating a report hash is still needed. + """ + clang37_plist = os.path.join( + self.__plist_test_files, 'clang-3.7.plist') + reports = report_file.get_reports(clang37_plist) + self.assertEqual(len(reports), 3) + + files = get_mentioned_files(reports) + self.assertEqual(files, set(SRC_FILES)) + + for report in reports: + # Checker name should be available for all the reports. + self.assertNotEqual(report.checker_name, 'NOT FOUND') + + if report.checker_name == 'core.DivideZero': + skel = deepcopy(div_zero_skel) + skel.report_hash = '51bd152830c2599e98c89cfc78890d0b' + + self.assertEqual(report, skel) + + if report.checker_name == 'core.StackAddressEscape': + # core.StackAddressEscape hash is changed because the checker + # name is available and it is included in the hash. + skel = deepcopy(stack_addr_skel) + skel.report_hash = '3439d5e09aeb5b69a835a6f0a307dfb6' + + self.assertEqual(report, skel) + + def test_clang38_trunk_plist(self): + """ + Check plist generated by clang 3.8 trunk checker name and report hash + should be in the plist file. 
+ """ + clang38_plist = os.path.join( + self.__plist_test_files, 'clang-3.8-trunk.plist') + reports = report_file.get_reports(clang38_plist) + self.assertEqual(len(reports), 3) + + files = get_mentioned_files(reports) + self.assertEqual(files, set(SRC_FILES)) + + for report in reports: + self.assertIn(report.checker_name, self.__found_checker_names) + + if report.checker_name == 'core.DivideZero': + # Test data is still valid for this version. + self.assertEqual(report, div_zero_skel) + + if report.checker_name == 'core.StackAddressEscape': + self.assertEqual(report, stack_addr_skel) + + def test_clang40_plist(self): + """ + Check plist generated by clang 4.0 checker name and report hash + should be in the plist file. + """ + clang40_plist = os.path.join( + self.__plist_test_files, 'clang-4.0.plist') + reports = report_file.get_reports(clang40_plist) + self.assertEqual(len(reports), 3) + + files = get_mentioned_files(reports) + self.assertEqual(files, set(SRC_FILES)) + + for report in reports: + # Checker name should be in the plist file. + self.assertNotEqual(report.checker_name, 'NOT FOUND') + self.assertIn(report.checker_name, self.__found_checker_names) + + if report.checker_name == 'core.DivideZero': + # Test data is still valid for this version. + self.assertEqual(report, div_zero_skel) + + if report.checker_name == 'core.StackAddressEscape': + skel = deepcopy(stack_addr_skel) + skel.report_hash = 'a6d3464f8aab9eb31a8ea7e167e84322' + + self.assertEqual(report, skel) + + def test_clang50_trunk_plist(self): + """ + Check plist generated by clang 5.0 trunk checker name and report hash + should be in the plist file. + """ + clang50_trunk_plist = os.path.join( + self.__plist_test_files, 'clang-5.0-trunk.plist') + reports = report_file.get_reports(clang50_trunk_plist) + self.assertEqual(len(reports), 3) + + files = get_mentioned_files(reports) + self.assertEqual(files, set(SRC_FILES)) + + for report in reports: + # Checker name should be in the plist file. 
+ self.assertNotEqual(report.checker_name, 'NOT FOUND') + self.assertIn(report.checker_name, self.__found_checker_names) + + if report.checker_name == 'core.DivideZero': + # Test data is still valid for this version. + self.assertEqual(report, div_zero_skel) + + self.assertEqual( + report.bug_path_events, div_zero_skel.bug_path_events) + self.assertEqual( + report.bug_path_positions, + div_zero_skel.bug_path_positions) + + if report.checker_name == 'core.StackAddressEscape': + skel = deepcopy(stack_addr_skel) + skel.report_hash = 'a6d3464f8aab9eb31a8ea7e167e84322' + + self.assertEqual(report, skel) + self.assertEqual( + report.bug_path_events, skel.bug_path_events) + self.assertEqual( + report.bug_path_positions, skel.bug_path_positions) diff --git a/web/server/tests/unit/test_report_path_hash.py b/tools/report-converter/tests/unit/parser/plist/test_report_path_hash.py similarity index 68% rename from web/server/tests/unit/test_report_path_hash.py rename to tools/report-converter/tests/unit/parser/plist/test_report_path_hash.py index d518ff5368..6243d0bdd5 100644 --- a/web/server/tests/unit/test_report_path_hash.py +++ b/tools/report-converter/tests/unit/parser/plist/test_report_path_hash.py @@ -11,8 +11,8 @@ import os import unittest -from codechecker_common import plist_parser -from codechecker_report_hash.hash import get_report_path_hash +from codechecker_report_converter.report import report_file +from codechecker_report_converter.report.hash import get_report_path_hash class ReportPathHashHandler(unittest.TestCase): @@ -33,18 +33,9 @@ def test_report_path_hash_generation(self): """ clang50_trunk_plist = os.path.join( self.__plist_test_files, 'clang-5.0-trunk.plist') - files, reports = plist_parser.parse_plist_file(clang50_trunk_plist, - False) + reports = report_file.get_reports(clang50_trunk_plist) self.assertEqual(len(reports), 3) - # Generate dummy file_ids which should come from the database. 
- file_ids = {} - for i, file_name in enumerate(files, 1): - file_ids[file_name] = i - - msg = "This test is prepared to handle 3 reports." - self.assertEqual(len(reports), 3, msg) - report_hash_to_path_hash = { '79e31a6ba028f0b7d9779faf4a6cb9cf': 'acb1d3dc1459f681bd3c743e6c015b37', @@ -56,5 +47,5 @@ def test_report_path_hash_generation(self): for report in reports: path_hash = get_report_path_hash(report) - bug_hash = report.main['issue_hash_content_of_line_in_context'] - self.assertEqual(path_hash, report_hash_to_path_hash[bug_hash]) + self.assertEqual( + path_hash, report_hash_to_path_hash[report.report_hash]) diff --git a/tools/codechecker_report_hash/tests/unit/codechecker_report_hash/__init__.py b/tools/report-converter/tests/unit/report_hash/__init__.py similarity index 96% rename from tools/codechecker_report_hash/tests/unit/codechecker_report_hash/__init__.py rename to tools/report-converter/tests/unit/report_hash/__init__.py index fac9787e1b..03039caa4b 100644 --- a/tools/codechecker_report_hash/tests/unit/codechecker_report_hash/__init__.py +++ b/tools/report-converter/tests/unit/report_hash/__init__.py @@ -42,7 +42,7 @@ def setup_package(): """ Setup the environment for the tests. 
""" global TEST_WORKSPACE - TEST_WORKSPACE = get_workspace('codechecker_report_hash') + TEST_WORKSPACE = get_workspace('report_hash') print(TEST_WORKSPACE) os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE diff --git a/tools/codechecker_report_hash/tests/unit/codechecker_report_hash/codechecker_report_hash_test.py b/tools/report-converter/tests/unit/report_hash/codechecker_report_hash_test.py similarity index 63% rename from tools/codechecker_report_hash/tests/unit/codechecker_report_hash/codechecker_report_hash_test.py rename to tools/report-converter/tests/unit/report_hash/codechecker_report_hash_test.py index de8c5283f9..e74412ad60 100644 --- a/tools/codechecker_report_hash/tests/unit/codechecker_report_hash/codechecker_report_hash_test.py +++ b/tools/report-converter/tests/unit/report_hash/codechecker_report_hash_test.py @@ -9,14 +9,15 @@ """ Unit tests for the CodeChecker report hash. """ import os -import plistlib import unittest import shutil +import plistlib import tempfile -from collections import namedtuple -from codechecker_report_hash.hash import get_report_hash, \ - get_report_path_hash, HashType, replace_report_hash +from codechecker_report_converter.report.hash import get_report_hash, \ + get_report_path_hash, HashType +from codechecker_report_converter.report.report_file import get_reports, \ + replace_report_hash class CodeCheckerReportHashTest(unittest.TestCase): @@ -47,9 +48,8 @@ def setUpClass(self): def test_gen_report_hash_path_sensitive(self): """ Test path sensitive report hash generation for multiple errors. 
""" - test_plist = os.path.join(self.test_file_dir, 'cpp', - 'multi_error.plist') - plist = plistlib.readPlist(test_plist) + test_plist = os.path.join( + self.test_file_dir, 'cpp', 'multi_error.plist') expected_report_hash = { 'f48840093ef89e291fb68a95a5181612': @@ -57,20 +57,16 @@ def test_gen_report_hash_path_sensitive(self): 'e4907182b363faf2ec905fc32cc5a4ab': '774799eb31f5fb8514988a7f6736b33e'} - files = plist['files'] - for diag in plist['diagnostics']: - file_path = files[diag['location']['file']] - report_hash = get_report_hash(diag, file_path, - HashType.PATH_SENSITIVE) - actual_report_hash = diag['issue_hash_content_of_line_in_context'] + reports = get_reports(test_plist) + for report in reports: + report_hash = get_report_hash(report, HashType.PATH_SENSITIVE) self.assertEqual(report_hash, - expected_report_hash[actual_report_hash]) + expected_report_hash[report.report_hash]) def test_gen_report_hash_context_free(self): """ Test context free hash generation for multi errors. """ - test_plist = os.path.join(self.test_file_dir, 'cpp', - 'multi_error.plist') - plist = plistlib.readPlist(test_plist) + test_plist = os.path.join( + self.test_file_dir, 'cpp', 'multi_error.plist') expected_report_hash = { 'f48840093ef89e291fb68a95a5181612': @@ -78,20 +74,16 @@ def test_gen_report_hash_context_free(self): 'e4907182b363faf2ec905fc32cc5a4ab': '5a92e13f07c81c6d3197e7d910827e6e'} - files = plist['files'] - for diag in plist['diagnostics']: - file_path = files[diag['location']['file']] - report_hash = get_report_hash(diag, file_path, - HashType.CONTEXT_FREE) - actual_report_hash = diag['issue_hash_content_of_line_in_context'] + reports = get_reports(test_plist) + for report in reports: + report_hash = get_report_hash(report, HashType.CONTEXT_FREE) self.assertEqual(report_hash, - expected_report_hash[actual_report_hash]) + expected_report_hash[report.report_hash]) def test_gen_report_path_hash(self): """ Test path hash generation for multiple errors. 
""" - test_plist = os.path.join(self.test_file_dir, 'cpp', - 'multi_error.plist') - plist = plistlib.readPlist(test_plist) + test_plist = os.path.join( + self.test_file_dir, 'cpp', 'multi_error.plist') expected_path_hash = { 'f48840093ef89e291fb68a95a5181612': @@ -99,15 +91,11 @@ def test_gen_report_path_hash(self): 'e4907182b363faf2ec905fc32cc5a4ab': '71a4dc24bf88af2b13be83d8d15bd6f0'} - for diag in plist['diagnostics']: - diag['bug_path'] = diag['path'] - diag['files'] = \ - {i: filepath for i, filepath in enumerate(plist['files'])} - path_hash = get_report_path_hash( - namedtuple('Report', diag.keys())(*diag.values())) - actual_report_hash = diag['issue_hash_content_of_line_in_context'] - self.assertEqual(path_hash, - expected_path_hash[actual_report_hash]) + reports = get_reports(test_plist) + for report in reports: + report_hash = get_report_path_hash(report) + self.assertEqual(report_hash, + expected_path_hash[report.report_hash]) def test_replace_report_hash_in_empty_plist(self): """ Test replacing hash in an empty plist file. """ @@ -129,7 +117,6 @@ def test_gen_report_hash_diag_messages(self): """ Test diagnostic message hash generation for multi errors. 
""" test_plist = os.path.join( self.test_file_dir, 'cpp', 'multi_error.plist') - plist = plistlib.readPlist(test_plist) expected_report_hash = { 'f48840093ef89e291fb68a95a5181612': @@ -137,11 +124,8 @@ def test_gen_report_hash_diag_messages(self): 'e4907182b363faf2ec905fc32cc5a4ab': 'd08c2f8c5c4d8533e1de3fa88241f813'} - files = plist['files'] - for diag in plist['diagnostics']: - file_path = files[diag['location']['file']] - report_hash = get_report_hash( - diag, file_path, HashType.DIAGNOSTIC_MESSAGE) - actual_report_hash = diag['issue_hash_content_of_line_in_context'] + reports = get_reports(test_plist) + for report in reports: + report_hash = get_report_hash(report, HashType.DIAGNOSTIC_MESSAGE) self.assertEqual(report_hash, - expected_report_hash[actual_report_hash]) + expected_report_hash[report.report_hash]) diff --git a/tools/codechecker_report_hash/tests/unit/codechecker_report_hash/remove_whitespace_test.py b/tools/report-converter/tests/unit/report_hash/remove_whitespace_test.py similarity index 97% rename from tools/codechecker_report_hash/tests/unit/codechecker_report_hash/remove_whitespace_test.py rename to tools/report-converter/tests/unit/report_hash/remove_whitespace_test.py index 7cea255655..bb0e8a5ab4 100644 --- a/tools/codechecker_report_hash/tests/unit/codechecker_report_hash/remove_whitespace_test.py +++ b/tools/report-converter/tests/unit/report_hash/remove_whitespace_test.py @@ -10,7 +10,7 @@ import unittest -from codechecker_report_hash.hash import _remove_whitespace +from codechecker_report_converter.report.hash import _remove_whitespace class RemoveWhitespaceTest(unittest.TestCase): diff --git a/tools/report-converter/tests/unit/source_code_comment/__init__.py b/tools/report-converter/tests/unit/source_code_comment/__init__.py new file mode 100644 index 0000000000..4259749345 --- /dev/null +++ b/tools/report-converter/tests/unit/source_code_comment/__init__.py @@ -0,0 +1,7 @@ +# 
------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- diff --git a/web/server/tests/unit/source_code_comment_test_files/test_file_1 b/tools/report-converter/tests/unit/source_code_comment/source_code_comment_test_files/test_file_1 similarity index 100% rename from web/server/tests/unit/source_code_comment_test_files/test_file_1 rename to tools/report-converter/tests/unit/source_code_comment/source_code_comment_test_files/test_file_1 diff --git a/web/server/tests/unit/source_code_comment_test_files/test_file_2 b/tools/report-converter/tests/unit/source_code_comment/source_code_comment_test_files/test_file_2 similarity index 100% rename from web/server/tests/unit/source_code_comment_test_files/test_file_2 rename to tools/report-converter/tests/unit/source_code_comment/source_code_comment_test_files/test_file_2 diff --git a/web/server/tests/unit/source_code_comment_test_files/test_file_3 b/tools/report-converter/tests/unit/source_code_comment/source_code_comment_test_files/test_file_3 similarity index 100% rename from web/server/tests/unit/source_code_comment_test_files/test_file_3 rename to tools/report-converter/tests/unit/source_code_comment/source_code_comment_test_files/test_file_3 diff --git a/tools/report-converter/tests/unit/source_code_comment/test_source_code_comment.py b/tools/report-converter/tests/unit/source_code_comment/test_source_code_comment.py new file mode 100644 index 0000000000..7609e810a8 --- /dev/null +++ b/tools/report-converter/tests/unit/source_code_comment/test_source_code_comment.py @@ -0,0 +1,772 @@ +# -*- coding: utf-8 -*- +# +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the 
Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- + +"""Tests for source code comments in source file.""" + + +import os +import unittest + +from codechecker_report_converter.source_code_comment_handler import \ + SourceCodeComment, SourceCodeCommentHandler + + +class SourceCodeCommentTestCase(unittest.TestCase): + """Tests for source code comments in source file.""" + + @classmethod + def setup_class(cls): + """Initialize test source file references.""" + cls.__test_src_dir = os.path.join( + os.path.dirname(__file__), 'source_code_comment_test_files') + + cls.__tmp_srcfile_1 = open(os.path.join(cls.__test_src_dir, + 'test_file_1'), + encoding='utf-8', errors="ignore") + cls.__tmp_srcfile_2 = open(os.path.join(cls.__test_src_dir, + 'test_file_2'), + encoding='utf-8', errors="ignore") + cls.__tmp_srcfile_3 = open(os.path.join(cls.__test_src_dir, + 'test_file_3'), + encoding='utf-8', errors="ignore") + + @classmethod + def teardown_class(cls): + cls.__tmp_srcfile_1.close() + cls.__tmp_srcfile_2.close() + cls.__tmp_srcfile_3.close() + + def test_src_comment_first_line(self): + """Bug is reported for the first line.""" + bug_line = 3 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertFalse(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 0) + + def test_no_comment(self): + """There is no comment above the bug line.""" + bug_line = 9 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertFalse(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 0) + 
+ def test_no_src_comment_comment(self): + """There is no source comment above the bug line.""" + bug_line = 16 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'all'}, + message='some comment', + status='false_positive', + line='// codechecker_suppress [all] some comment\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_multi_liner_all(self): + """There is source code comment above the bug line.""" + bug_line = 23 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'all'}, + message='some long comment', + status='false_positive', + line='// codechecker_suppress [all] some long\n ' + '// comment\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_one_liner_all(self): + """There is source code comment above the bug line.""" + bug_line = 29 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my_checker_1', 'my_checker_2'}, + message='some comment', + status='false_positive', + line='// codechecker_suppress [my_checker_1, ' + 'my_checker_2] some comment\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_multi_liner_all_2(self): + """There is source code comment above 
the bug line.""" + bug_line = 36 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my.checker_1', 'my.checker_2'}, + message='some really long comment', + status='false_positive', + line='// codechecker_suppress [my.checker_1 ' + 'my.checker_2] some really\n // long comment\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_one_liner_some_checkers(self): + """There is source code comment above the bug line.""" + bug_line = 43 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my.Checker_1', 'my.Checker_2'}, + message='some really really long comment', + status='false_positive', + line='// codechecker_suppress [my.Checker_1, my.Checker_2] ' + 'some really\n // really\n' + ' // long comment\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_multi_liner_some_checkers(self): + """There is source code comment above the bug line.""" + bug_line = 50 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertFalse(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 0) + + def test_comment_characters(self): + """Check for different special comment characters.""" + bug_line = 57 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + 
self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my.checker_1', 'my.checker_2'}, + message="i/';0 (*&^%$#@!)", + status='false_positive', + line="// codechecker_suppress [my.checker_1, " + "my.checker_2]\n // i/';0 (*&^%$#@!)\n") + self.assertEqual(expected, source_line_comments[0]) + + def test_fancy_comment_characters(self): + """Check fancy comment.""" + bug_line = 64 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my_checker_1'}, + message="áúőóüöáé [▬▬▬▬▬▬▬▬▬▬ஜ۩۞۩ஜ▬▬▬▬▬▬▬▬▬▬]", + status='false_positive', + line='// codechecker_suppress [ my_checker_1 ]\n // ' + 'áúőóüöáé [▬▬▬▬▬▬▬▬▬▬ஜ۩۞۩ஜ▬▬▬▬▬▬▬▬▬▬]\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_no_fancy_comment(self): + """Check no fancy comment.""" + bug_line = 70 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my_checker_1'}, + message='WARNING! 
source code comment is missing', + status='false_positive', + line='// codechecker_suppress [ my_checker_1 ]\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_malformed_commment_format(self): + """Check malformed comment.""" + bug_line = 1 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_2, + bug_line) + self.assertFalse(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_2, bug_line) + self.assertEqual(len(source_line_comments), 0) + + def test_source_code_comment(self): + """Check source code comment.""" + bug_line = 2 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'all'}, + message='some comment', + status='false_positive', + line='// codechecker_suppress [ all ] some comment\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_false_positive_comment(self): + """Check False positive comment.""" + bug_line = 7 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'all'}, + message='some comment', + status='false_positive', + line='// codechecker_false_positive [ all ] some comment\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_intentional_comment(self): + """Check Intentional comment.""" + bug_line = 12 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + 
+ source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'all'}, + message='some comment', + status='intentional', + line='// codechecker_intentional [ all ] some comment\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_confirmed_comment(self): + """Check Confirmed comment.""" + bug_line = 17 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'all'}, + message='some comment', + status='confirmed', + line='// codechecker_confirmed [ all ] some comment\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_multiple_comments(self): + """Check multiple comment.""" + bug_line = 23 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) + self.assertEqual(len(source_line_comments), 2) + + expected = [ + SourceCodeComment( + checkers={'my.checker_1'}, + message='intentional comment', + status='intentional', + line='// codechecker_intentional [ my.checker_1 ] ' + 'intentional comment\n'), + SourceCodeComment( + checkers={'my.checker_2'}, + message='confirmed bug', + status='confirmed', + line='// codechecker_confirmed [ my.checker_2 ] ' + 'confirmed bug\n')] + + self.assertEqual(expected[0], source_line_comments[0]) + self.assertEqual(expected[1], source_line_comments[1]) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.checker_1') + self.assertEqual(len(current_line_comments), 1) + + 
self.assertEqual(current_line_comments[0].message, expected[0].message) + self.assertEqual(current_line_comments[0].status, expected[0].status) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.checker_2') + self.assertEqual(len(current_line_comments), 1) + + self.assertEqual(current_line_comments[0].message, expected[1].message) + self.assertEqual(current_line_comments[0].status, expected[1].status) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.dummy') + self.assertEqual(len(current_line_comments), 0) + + def test_multiple_multi_line_comments(self): + """Check multi line long line comments.""" + bug_line = 31 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) + self.assertEqual(len(source_line_comments), 2) + + expected = [ + SourceCodeComment( + checkers={'my.checker_1'}, + message='long intentional bug comment', + status='intentional', + line='// codechecker_intentional [ my.checker_1 ] ' + 'long intentional\n // bug comment\n'), + SourceCodeComment( + checkers={'my.checker_2'}, + message='long confirmed bug comment', + status='confirmed', + line='// codechecker_confirmed [ my.checker_2 ] ' + 'long confirmed\n // bug comment\n')] + + self.assertEqual(expected[0], source_line_comments[0]) + self.assertEqual(expected[1], source_line_comments[1]) + + def test_multiple_all_comments(self): + """Check multiple comment.""" + bug_line = 37 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + + source_line_comments = \ + sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) + self.assertEqual(len(source_line_comments), 2) + + expected = [ + SourceCodeComment( + 
checkers={'my.checker_1'}, + message='intentional comment', + status='intentional', + line='// codechecker_intentional [ my.checker_1 ] ' + 'intentional comment\n'), + SourceCodeComment( + checkers={'all'}, + message='some comment', + status='false_positive', + line='// codechecker_false_positive [ all ] ' + 'some comment\n')] + + self.assertEqual(expected[0], source_line_comments[0]) + self.assertEqual(expected[1], source_line_comments[1]) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.checker_1') + self.assertEqual(len(current_line_comments), 2) + + self.assertEqual(current_line_comments[0].message, + expected[0].message) + self.assertEqual(current_line_comments[0].status, + expected[0].status) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, '') + self.assertEqual(len(current_line_comments), 1) + self.assertEqual(current_line_comments[0].message, + expected[1].message) + self.assertEqual(current_line_comments[0].status, + expected[1].status) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.dummy') + self.assertEqual(len(current_line_comments), 1) + + self.assertEqual(len(current_line_comments), 1) + self.assertEqual(current_line_comments[0].message, + expected[1].message) + self.assertEqual(current_line_comments[0].status, + expected[1].status) + + def test_multiple_checker_name_comments(self): + """ + Check multiple comment where same checker name are given for multiple + source code comment. 
+ """ + + bug_line = 43 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + + source_line_comments = sc_handler.get_source_line_comments( + self.__tmp_srcfile_3, + bug_line) + self.assertEqual(len(source_line_comments), 2) + + expected = [ + SourceCodeComment( + checkers={'my.checker_1'}, + message='intentional comment', + status='intentional', + line='// codechecker_intentional [ my.checker_1 ] ' + 'intentional comment\n'), + SourceCodeComment( + checkers={'my.checker_2', 'my.checker_1'}, + message='some comment', + status='false_positive', + line='// codechecker_false_positive [ ' + 'my.checker_2, my.checker_1 ] some comment\n')] + + self.assertEqual(expected[0], source_line_comments[0]) + self.assertEqual(expected[1], source_line_comments[1]) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.checker_1') + self.assertEqual(len(current_line_comments), 2) + + def test_cstyle_comment(self): + """ + C style comment in one line. + /* codechecker_suppress [ my_checker_1 ] suppress comment */ + """ + + bug_line = 76 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = sc_handler.get_source_line_comments( + self.__tmp_srcfile_1, + bug_line) + + for line in source_line_comments: + print(line) + + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my_checker_1'}, + message='suppress comment', + status='false_positive', + line='/* codechecker_suppress [ my_checker_1 ] ' + 'suppress comment */\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_cstyle_comment_multi_line(self): + """ + Multi line C style comment. 
+ /* codechecker_suppress [ my_checker_1 ] + some longer + comment */ + """ + + bug_line = 83 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = sc_handler.get_source_line_comments( + self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my_checker_1'}, + message='some longer comment', + status='false_positive', + line='/* codechecker_suppress [ my_checker_1 ]\n ' + 'some longer\n comment */\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_cstyle_comment_multi_nomsg(self): + """ + Multi line C style comment. + /* codechecker_suppress [ my_checker_1 ] + */ + """ + + bug_line = 89 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = sc_handler.get_source_line_comments( + self.__tmp_srcfile_1, + bug_line) + + for line in source_line_comments: + print(line) + + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my_checker_1'}, + message='WARNING! source code comment is missing', + status='false_positive', + line='/* codechecker_suppress [ my_checker_1 ]\n */\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_cstyle_comment_multi_star(self): + """ + Multi line C style comment. 
+ + /* codechecker_suppress [ my_checker_1 ] + * multi line + * comment + * again + */ + """ + + bug_line = 98 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = sc_handler.get_source_line_comments( + self.__tmp_srcfile_1, bug_line) + + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my_checker_1'}, + message='multi line comment again', + status='false_positive', + line="/* codechecker_suppress [ my_checker_1 ]\n * " + "multi line\n * comment\n * again\n */\n") + self.assertEqual(expected, source_line_comments[0]) + + def test_cstyle_comment_multi_line_mismatch(self): + """ + Multi line C style comment start '/*' is in a different line + from the codechecker review status comment. + + /* + codechecker_suppress [ my_checker_1 ] + multi line + comment + again + */ + """ + + bug_line = 108 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, + bug_line) + self.assertTrue(res) + + source_line_comments = sc_handler.get_source_line_comments( + self.__tmp_srcfile_1, bug_line) + self.assertEqual(len(source_line_comments), 1) + + expected = SourceCodeComment( + checkers={'my_checker_1'}, + message='multi line comment again', + status='false_positive', + line=' codechecker_suppress [ my_checker_1 ]\n ' + 'multi line\n comment\n again\n */\n') + self.assertEqual(expected, source_line_comments[0]) + + def test_cstyle_multi_comment_multi_line(self): + """ + Multi line C style comment with multiple review status comment. 
+ + /* codechecker_false_positive [ my.checker_2, my.checker_1 ] comment + codechecker_intentional [ my.checker_1 ] intentional comment */ + + """ + + bug_line = 49 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + + source_line_comments = sc_handler.get_source_line_comments( + self.__tmp_srcfile_3, bug_line) + self.assertEqual(len(source_line_comments), 2) + + expected = [ + SourceCodeComment( + checkers={'my.checker_1'}, + message='intentional comment', + status='intentional', + line='codechecker_intentional [ my.checker_1 ] ' + 'intentional comment */\n'), + SourceCodeComment( + checkers={'my.checker_1', 'my.checker_2'}, + message='some comment', + status='false_positive', + line='/* codechecker_false_positive [ ' + 'my.checker_2, my.checker_1 ] some comment\n')] + + self.assertEqual(expected[0], source_line_comments[0]) + self.assertEqual(expected[1], source_line_comments[1]) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.checker_1') + + self.assertEqual(len(current_line_comments), 2) + self.assertEqual(current_line_comments[0].message, + expected[0].message) + self.assertEqual(current_line_comments[0].status, + expected[0].status) + self.assertEqual(current_line_comments[1].message, + expected[1].message) + self.assertEqual(current_line_comments[1].status, + expected[1].status) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.checker_2') + + self.assertEqual(len(current_line_comments), 1) + + self.assertEqual(current_line_comments[0].message, + expected[1].message) + self.assertEqual(current_line_comments[0].status, + expected[1].status) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.dummy') + self.assertEqual(len(current_line_comments), 0) + + def 
test_cstyle_multi_comment_multi_line_long(self): + """ + Multi line C style comment with multiple review status comment. + + /* codechecker_false_positive [ my.checker_2, my.checker_1 ] comment + which + is + long + codechecker_intentional [ my.checker_1 ] intentional comment + long + again */ + + """ + + bug_line = 60 + sc_handler = SourceCodeCommentHandler() + res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, + bug_line) + self.assertTrue(res) + + source_line_comments = sc_handler.get_source_line_comments( + self.__tmp_srcfile_3, bug_line) + self.assertEqual(len(source_line_comments), 2) + + expected = [ + SourceCodeComment( + checkers={'my.checker_1'}, + message='intentional comment long again', + status='intentional', + line='codechecker_intentional [ my.checker_1 ] ' + 'intentional comment\n long\n again */\n'), + SourceCodeComment( + checkers={'my.checker_1', 'my.checker_2'}, + message='comment which is long', + status='false_positive', + line='/* codechecker_false_positive [ ' + 'my.checker_2, my.checker_1 ] comment\n ' + 'which\n is\n long\n')] + + self.assertEqual(expected[0], source_line_comments[0]) + self.assertEqual(expected[1], source_line_comments[1]) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.checker_1') + self.assertEqual(len(current_line_comments), 2) + self.assertEqual(current_line_comments[0].message, + expected[0].message) + self.assertEqual(current_line_comments[0].status, + expected[0].status) + self.assertEqual(current_line_comments[1].message, + expected[1].message) + self.assertEqual(current_line_comments[1].status, + expected[1].status) + + current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.checker_2') + self.assertEqual(len(current_line_comments), 1) + + self.assertEqual(current_line_comments[0].message, + expected[1].message) + self.assertEqual(current_line_comments[0].status, + expected[1].status) + + 
current_line_comments = sc_handler.filter_source_line_comments( + self.__tmp_srcfile_3, bug_line, 'my.dummy') + self.assertEqual(len(current_line_comments), 0) diff --git a/tools/report-converter/tests/unit/test_asan_parser.py b/tools/report-converter/tests/unit/test_asan_parser.py deleted file mode 100644 index 160937fa80..0000000000 --- a/tools/report-converter/tests/unit/test_asan_parser.py +++ /dev/null @@ -1,113 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -""" -This module tests the correctness of the OutputParser and PListConverter, which -used in sequence transform AddressSanitizer output to a plist file. -""" - - -import os -import plistlib -import shutil -import tempfile -import unittest - -from codechecker_report_converter.output_parser import Event, Message -from codechecker_report_converter.sanitizers.address.output_parser import \ - ASANParser -from codechecker_report_converter.sanitizers.address.analyzer_result import \ - ASANAnalyzerResult - -OLD_PWD = None - - -def setup_module(): - """Setup the test tidy reprs for the test classes in the module.""" - global OLD_PWD - OLD_PWD = os.getcwd() - os.chdir(os.path.join(os.path.dirname(__file__), 'asan_output_test_files')) - - -def teardown_module(): - """Restore environment after tests have ran.""" - global OLD_PWD - os.chdir(OLD_PWD) - - -class ASANAnalyzerResultTestCase(unittest.TestCase): - """ Test the output of the ASANAnalyzerResult. """ - - def setUp(self): - """ Setup the test. """ - self.analyzer_result = ASANAnalyzerResult() - self.cc_result_dir = tempfile.mkdtemp() - - def tearDown(self): - """ Clean temporary directory. 
""" - shutil.rmtree(self.cc_result_dir) - - def test_asan(self): - """ Test for the asan.plist file. """ - self.analyzer_result.transform('asan.out', self.cc_result_dir) - - with open('asan.plist', mode='rb') as pfile: - exp = plistlib.load(pfile) - - plist_file = os.path.join(self.cc_result_dir, 'asan.cpp_asan.plist') - with open(plist_file, mode='rb') as pfile: - res = plistlib.load(pfile) - - # Use relative path for this test. - res['files'][0] = 'files/asan.cpp' - - self.assertTrue(res['metadata']['generated_by']['version']) - res['metadata']['generated_by']['version'] = "x.y.z" - - self.assertEqual(res, exp) - - -class ASANOutputParserTestCase(unittest.TestCase): - """ - Tests the output of the OutputParser, which converts an Address Sanitizer - output file to zero or more Message object. - """ - - def setUp(self): - """Setup the OutputParser.""" - self.parser = ASANParser() - self.asan_repr = [ - Message( - os.path.abspath('files/asan.cpp'), - 5, 10, - "heap-use-after-free on address 0x614000000044 at pc " - "0x0000004f4b45 bp 0x7ffd40559120 sp 0x7ffd40559118", - "AddressSanitizer", - [Event( - os.path.abspath('files/asan.cpp'), - 5, 10, - " #0 0x4f4b44 in main files/asan.cpp:5:10" - )], - [Event( - os.path.abspath('files/asan.cpp'), - 5, 10, - "READ of size 4 at 0x614000000044 thread T0\n" - " #0 0x4f4b44 in main files/asan.cpp:5:10\n" - " #1 0x7f334b52eb96 in __libc_start_main (??)\n" - " #2 0x41aaf9 in _start (??)\n" - )] - ), - ] - - def test_asan(self): - """ Test the generated Messages of msan.out. 
""" - messages = self.parser.parse_messages_from_file('asan.out') - self.assertEqual(len(messages), len(self.asan_repr)) - - for message in messages: - self.assertIn(message, self.asan_repr) diff --git a/tools/report-converter/tests/unit/test_clang_tidy_parser.py b/tools/report-converter/tests/unit/test_clang_tidy_parser.py deleted file mode 100644 index 565d9aa400..0000000000 --- a/tools/report-converter/tests/unit/test_clang_tidy_parser.py +++ /dev/null @@ -1,400 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -""" -This module tests the correctness of the OutputParser and PListConverter, which -used in sequence transform a Clang Tidy output file to a plist file. -""" - - -import os -import plistlib -import shutil -import tempfile -import unittest - -from codechecker_report_converter.output_parser import Message, Event -from codechecker_report_converter.clang_tidy.output_parser import \ - ClangTidyParser -from codechecker_report_converter.clang_tidy.analyzer_result import \ - ClangTidyAnalyzerResult - -OLD_PWD = None - - -def setup_module(): - """Setup the test tidy reprs for the test classes in the module.""" - global OLD_PWD - OLD_PWD = os.getcwd() - os.chdir(os.path.join(os.path.dirname(__file__), 'tidy_output_test_files')) - - -def teardown_module(): - """Restore environment after tests have ran.""" - global OLD_PWD - os.chdir(OLD_PWD) - - -class ClangTidyOutputParserTestCase(unittest.TestCase): - """ - Tests the output of the OutputParser, which converts a Clang Tidy output - file to zero or more tidy_output_converter.Message. 
- """ - - def setUp(self): - """Setup the OutputParser.""" - self.parser = ClangTidyParser() - - # tidy1.out Message/Note representation - self.tidy1_repr = [ - Message( - os.path.abspath('files/test.cpp'), - 8, 12, - 'Division by zero', - 'clang-analyzer-core.DivideZero', - [Event( - os.path.abspath('files/test.cpp'), - 8, 12, - 'Division by zero')]), - Message( - os.path.abspath('files/test.cpp'), - 8, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero') - ] - - # tidy2.out Message/Note representation - self.tidy2_repr = [ - Message( - os.path.abspath('files/test2.cpp'), - 5, 7, - "unused variable 'y'", - 'clang-diagnostic-unused-variable'), - Message( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'Division by zero', - 'clang-analyzer-core.DivideZero', - [ - Event( - os.path.abspath('files/test2.cpp'), - 9, 7, - "Left side of '||' is false"), - Event( - os.path.abspath('files/test2.cpp'), - 9, 3, - 'Taking false branch'), - Event( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'Division by zero') - ]), - Message( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero'), - ] - - # tidy2_v6.out Message/Note representation - self.tidy2_v6_repr = [ - Message( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'Division by zero', - 'clang-analyzer-core.DivideZero', - [ - Event( - os.path.abspath('files/test2.cpp'), - 9, 7, - "Left side of '||' is false"), - Event( - os.path.abspath('files/test2.cpp'), - 9, 16, - "Assuming 'x' is 0"), - Event( - os.path.abspath('files/test2.cpp'), - 9, 3, - 'Taking false branch'), - Event( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'Division by zero') - ]), - Message( - os.path.abspath('files/test2.cpp'), - 13, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero'), - ] - - # tidy3.out Message/Note representation - self.tidy3_repr = [ - Message( - os.path.abspath('files/test3.cpp'), - 4, 12, - 'use nullptr', 
- 'modernize-use-nullptr', - None, - None, - [Event( - os.path.abspath('files/test3.cpp'), - 4, 12, - 'nullptr')]), - Message( - os.path.abspath('files/test3.hh'), - 6, 6, - "Dereference of null pointer (loaded from variable 'x')", - 'clang-analyzer-core.NullDereference', - [ - Event( - os.path.abspath('files/test3.cpp'), - 4, 3, - "'x' initialized to a null pointer value"), - Event( - os.path.abspath('files/test3.cpp'), - 6, 11, - "Assuming 'argc' is > 3"), - Event( - os.path.abspath('files/test3.cpp'), - 6, 3, - 'Taking true branch'), - Event( - os.path.abspath('files/test3.cpp'), - 7, 9, - "Passing null pointer value via 1st parameter 'x'"), - Event( - os.path.abspath('files/test3.cpp'), - 7, 5, - "Calling 'bar'"), - Event( - os.path.abspath('files/test3.hh'), - 6, 6, - "Dereference of null pointer (loaded from variable " - "'x')") - ]) - ] - - # tidy5.out Message/Note representation - self.tidy5_repr = [ - Message( - os.path.abspath('files/test4.cpp'), - 3, 26, - 'identifier after literal will be treated ' - 'as a reserved user-defined literal suffix in C++11', - 'clang-diagnostic-c++11-compat-reserved-user-defined-literal'), - Message( - os.path.abspath('files/test4.cpp'), - 10, 12, - 'Division by zero', - 'clang-analyzer-core.DivideZero', - [Event( - os.path.abspath('files/test4.cpp'), - 10, 12, - 'Division by zero')]), - Message( - os.path.abspath('files/test4.cpp'), - 10, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero') - ] - - # tidy5_v6.out Message/Note representation - self.tidy5_v6_repr = [ - Message( - os.path.abspath('files/test4.cpp'), - 3, 26, - 'invalid suffix on literal; C++11 requires a space ' - 'between literal and identifier', - 'clang-diagnostic-reserved-user-defined-literal'), - Message( - os.path.abspath('files/test4.cpp'), - 10, 12, - 'remainder by zero is undefined', - 'clang-diagnostic-division-by-zero') - ] - - # tidy6.out Message/Note representation - self.tidy6_repr = [ - Message( - 
os.path.abspath('files/test5.cpp'), - 10, 9, - 'no matching function for call to \'get_type\'', - 'clang-diagnostic-error', - [ - Event( - os.path.abspath('files/test5.cpp'), - 2, 18, - 'candidate template ignored: substitution failure ' - '[with T = int *]: type \'int *\' cannot be used ' - 'prior to \'::\' because it has no members'), - Event( - os.path.abspath('files/test5.cpp'), - 5, 6, - 'candidate template ignored: substitution failure ' - '[with T = int]: array size is negative'), - ] - )] - - def test_absolute_path(self): - """Test for absolute paths in Messages.""" - for tfile in ['abs.out', 'tidy1.out']: - messages = self.parser.parse_messages_from_file(tfile) - self.assertNotEqual(len(messages), 0) - for message in messages: - self.assertTrue(os.path.isabs(message.path)) - - def test_empty1(self): - """Test an empty ClangTidy output file.""" - messages = self.parser.parse_messages_from_file('empty1.out') - self.assertEqual(messages, []) - - def test_empty2(self): - """Test a ClangTidy output file that only contains empty lines.""" - messages = self.parser.parse_messages_from_file('empty2.out') - self.assertEqual(messages, []) - - def test_tidy1(self): - """Test the generated Messages of tidy1.out ClangTidy output file.""" - messages = self.parser.parse_messages_from_file('tidy1.out') - self.assertEqual(len(messages), len(self.tidy1_repr)) - for message in messages: - self.assertIn(message, self.tidy1_repr) - - def test_tidy1_v6(self): - """Test the generated Messages of tidy1.out ClangTidy v6 output - file.""" - messages = self.parser.parse_messages_from_file('tidy1_v6.out') - self.assertEqual(len(messages), len(self.tidy1_repr)) - for message in messages: - self.assertIn(message, self.tidy1_repr) - - def test_tidy2(self): - """Test the generated Messages of tidy2.out ClangTidy output file.""" - messages = self.parser.parse_messages_from_file('tidy2.out') - self.assertEqual(len(messages), len(self.tidy2_repr)) - for message in messages: - 
self.assertIn(message, self.tidy2_repr) - - def test_tidy2_v6(self): - """Test the generated Messages of tidy2.out ClangTidy v6 output - file.""" - messages = self.parser.parse_messages_from_file('tidy2_v6.out') - self.assertEqual(len(messages), len(self.tidy2_v6_repr)) - for message in messages: - self.assertIn(message, self.tidy2_v6_repr) - - def test_tidy3(self): - """Test the generated Messages of tidy3.out ClangTidy output file.""" - messages = self.parser.parse_messages_from_file('tidy3.out') - self.assertEqual(len(messages), len(self.tidy3_repr)) - for message in messages: - self.assertIn(message, self.tidy3_repr) - - def test_tidy4(self): - """ - Test the generated Messages of tidy4.out ClangTidy output file. - This is an uncomplete file which is equal with tidy1.out except it's - missing the last two lines. - """ - messages = self.parser.parse_messages_from_file('tidy4.out') - self.assertEqual(len(messages), len(self.tidy1_repr)) - for message in messages: - self.assertIn(message, self.tidy1_repr) - - def test_tidy5(self): - """ - Test the grenerated Messages of tidy5.out ClangTidy output file. - This is an uncomplete file which is equal with tidy1.out except it's - missing the last two lines. - """ - messages = self.parser.parse_messages_from_file('tidy5.out') - for message in messages: - self.assertIn(message, self.tidy5_repr) - - def test_tidy5_v6(self): - """ - Test the grenerated Messages of tidy5_v6.out ClangTidy output file. - This is an uncomplete file which is equal with tidy1.out except it's - missing the last two lines. - """ - messages = self.parser.parse_messages_from_file('tidy5_v6.out') - for message in messages: - self.assertIn(message, self.tidy5_v6_repr) - - def test_tidy6(self): - """ - Test the generated Messages of tidy6.out ClangTidy output file. 
- """ - messages = self.parser.parse_messages_from_file('tidy6.out') - for message in messages: - self.assertIn(message, self.tidy6_repr) - - -class ClangTidyAnalyzerResultTestCase(unittest.TestCase): - """ Test the output of the ClangTidyAnalyzerResult. """ - - def setUp(self): - """ Setup the test. """ - self.analyzer_result = ClangTidyAnalyzerResult() - self.cc_result_dir = tempfile.mkdtemp() - - def tearDown(self): - """ Clean temporary directory. """ - shutil.rmtree(self.cc_result_dir) - - def __check_analyzer_result(self, analyzer_result, analyzer_result_plist, - source_files, expected_plist): - """ Check the result of the analyzer transformation. """ - - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) - - plist_file = os.path.join(self.cc_result_dir, analyzer_result_plist) - with open(plist_file, mode='rb') as pfile: - res = plistlib.load(pfile) - - # Use relative path for this test. - res['files'] = source_files - - with open(expected_plist, mode='rb') as pfile: - exp = plistlib.load(pfile) - - self.assertTrue(res['metadata']['generated_by']['version']) - res['metadata']['generated_by']['version'] = "x.y.z" - - self.assertEqual(res, exp) - - def test_empty1(self): - """ Test for empty Messages. """ - ret = self.analyzer_result.transform('empty1.out', self.cc_result_dir) - self.assertFalse(ret) - - def test_empty2(self): - """ Test for empty Messages with multiple line. """ - ret = self.analyzer_result.transform('empty2.out', self.cc_result_dir) - self.assertFalse(ret) - - def test_tidy1(self): - """ Test for the tidy1.plist file. """ - self.__check_analyzer_result('tidy1.out', 'test.cpp_clang-tidy.plist', - ['files/test.cpp'], 'tidy1.plist') - - def test_tidy2(self): - """ Test for the tidy2.plist file. """ - self.__check_analyzer_result('tidy2.out', 'test2.cpp_clang-tidy.plist', - ['files/test2.cpp'], 'tidy2.plist') - - def test_tidy3(self): - """ Test for the tidy3.plist file. 
""" - self.__check_analyzer_result('tidy3.out', 'test3.cpp_clang-tidy.plist', - ['files/test3.cpp'], - 'tidy3_cpp.plist') - - self.__check_analyzer_result('tidy3.out', 'test3.hh_clang-tidy.plist', - ['files/test3.hh', 'files/test3.cpp'], - 'tidy3_hh.plist') diff --git a/tools/report-converter/tests/unit/test_lsan_parser.py b/tools/report-converter/tests/unit/test_lsan_parser.py deleted file mode 100644 index b0696100ce..0000000000 --- a/tools/report-converter/tests/unit/test_lsan_parser.py +++ /dev/null @@ -1,115 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -""" -This module tests the correctness of the OutputParser and PListConverter, which -used in sequence transform LeakSanitizer output to a plist file. -""" - - -import os -import plistlib -import shutil -import tempfile -import unittest - -from codechecker_report_converter.output_parser import Event, Message -from codechecker_report_converter.sanitizers.leak.output_parser import \ - LSANParser -from codechecker_report_converter.sanitizers.leak.analyzer_result import \ - LSANAnalyzerResult - -OLD_PWD = None - - -def setup_module(): - """Setup the test tidy reprs for the test classes in the module.""" - global OLD_PWD - OLD_PWD = os.getcwd() - os.chdir(os.path.join(os.path.dirname(__file__), - 'lsan_output_test_files')) - - -def teardown_module(): - """Restore environment after tests have ran.""" - global OLD_PWD - os.chdir(OLD_PWD) - - -class LSANPListConverterTestCase(unittest.TestCase): - """ Test the output of the LSANAnalyzerResult. """ - - def setUp(self): - """ Setup the test. 
""" - self.analyzer_result = LSANAnalyzerResult() - self.cc_result_dir = tempfile.mkdtemp() - - def tearDown(self): - """ Clean temporary directory. """ - shutil.rmtree(self.cc_result_dir) - - def test_san(self): - """ Test for the lsan.plist file. """ - self.analyzer_result.transform('lsan.out', self.cc_result_dir) - - with open('lsan.plist', mode='rb') as pfile: - exp = plistlib.load(pfile) - - plist_file = os.path.join(self.cc_result_dir, 'lsan.c_lsan.plist') - with open(plist_file, mode='rb') as pfile: - res = plistlib.load(pfile) - - # Use relative path for this test. - res['files'][0] = 'files/lsan.c' - - self.assertTrue(res['metadata']['generated_by']['version']) - res['metadata']['generated_by']['version'] = "x.y.z" - - self.assertEqual(res, exp) - - -class LSANOutputParserTestCase(unittest.TestCase): - """ - Tests the output of the OutputParser, which converts an Leak Sanitizer - output file to zero or more Message object. - """ - - def setUp(self): - """ Setup the OutputParser. """ - self.parser = LSANParser() - self.lsan_repr = [ - Message( - os.path.abspath('files/lsan.c'), - 4, 7, - "detected memory leaks", - "LeakSanitizer", - [Event( - os.path.abspath('files/lsan.c'), - 4, 7, - " #1 0x4da26a in main files/lsan.c:4:7" - )], - [Event( - os.path.abspath('files/lsan.c'), - 4, 7, - "Direct leak of 7 byte(s) in 1 object(s) allocated from:\n" - " #0 0x4af01b in __interceptor_malloc /projects/" - "compiler-rt/lib/asan/asan_malloc_linux.cc:52:3\n" - " #1 0x4da26a in main files/lsan.c:4:7\n" - " #2 0x7f076fd9cec4 in __libc_start_main " - "libc-start.c:287\n" - "SUMMARY: AddressSanitizer: 7 byte(s) " - "leaked in 1 allocation(s)\n" - )]), - ] - - def test_lsan(self): - """ Test the generated Messages of lsan.out. 
""" - messages = self.parser.parse_messages_from_file('lsan.out') - self.assertEqual(len(messages), len(self.lsan_repr)) - for message in messages: - self.assertIn(message, self.lsan_repr) diff --git a/tools/report-converter/tests/unit/test_sparse_parser.py b/tools/report-converter/tests/unit/test_sparse_parser.py deleted file mode 100644 index 9cf60d9ca5..0000000000 --- a/tools/report-converter/tests/unit/test_sparse_parser.py +++ /dev/null @@ -1,100 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -""" -This module tests the correctness of the SparseAnalyzerResult, which -used in sequence transform Smatch output to a plist file. -""" - - -import os -import plistlib -import shutil -import tempfile -import unittest - - -from codechecker_report_converter.sparse.analyzer_result import \ - SparseAnalyzerResult - - -class SparseAnalyzerResultTestCase(unittest.TestCase): - """ Test the output of the SparseAnalyzerResult. """ - - def setUp(self): - """ Setup the test. """ - self.analyzer_result = SparseAnalyzerResult() - self.cc_result_dir = tempfile.mkdtemp() - self.test_files = os.path.join(os.path.dirname(__file__), - 'sparse_output_test_files') - - def tearDown(self): - """ Clean temporary directory. """ - shutil.rmtree(self.cc_result_dir) - - def test_no_smatch_output_file(self): - """ Test transforming single C file. """ - analyzer_result = os.path.join(self.test_files, 'files', - 'sample.c') - - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) - self.assertFalse(ret) - - def test_transform_dir(self): - """ Test transforming a directory. 
""" - analyzer_result = os.path.join(self.test_files) - - ret = self.analyzer_result.transform(analyzer_result, - self.cc_result_dir) - self.assertFalse(ret) - - def test_transform_single_file(self): - """ Test transforming single output file. """ - analyzer_result = os.path.join(self.test_files, 'sample.out') - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) - - # Test sample.c plist file - plist_file = os.path.join(self.cc_result_dir, - 'sample.c_sparse.plist') - - with open(plist_file, mode='rb') as pfile: - res = plistlib.load(pfile) - - # Use relative path for this test. - res['files'][0] = os.path.join('files', 'sample.c') - - self.assertTrue(res['metadata']['generated_by']['version']) - res['metadata']['generated_by']['version'] = "x.y.z" - - plist_file = os.path.join(self.test_files, - 'sample.c.expected.plist') - with open(plist_file, mode='rb') as pfile: - exp = plistlib.load(pfile) - - self.assertEqual(res, exp) - - # Test sample.h plist file - plist_file = os.path.join(self.cc_result_dir, - 'sample.h_sparse.plist') - - with open(plist_file, mode='rb') as pfile: - res = plistlib.load(pfile) - - # Use relative path for this test. - res['files'][0] = os.path.join('files', 'sample.h') - - self.assertTrue(res['metadata']['generated_by']['version']) - res['metadata']['generated_by']['version'] = "x.y.z" - - plist_file = os.path.join(self.test_files, - 'sample.h.expected.plist') - with open(plist_file, mode='rb') as pfile: - exp = plistlib.load(pfile) - - self.assertEqual(res, exp) diff --git a/tools/report-converter/tests/unit/test_tsan_parser.py b/tools/report-converter/tests/unit/test_tsan_parser.py deleted file mode 100644 index a089db4b42..0000000000 --- a/tools/report-converter/tests/unit/test_tsan_parser.py +++ /dev/null @@ -1,121 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. 
See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -""" -This module tests the correctness of the OutputParser and PListConverter, which -used in sequence transform ThreadSanitizer output to a plist file. -""" - - -import os -import plistlib -import shutil -import tempfile -import unittest - -from codechecker_report_converter.output_parser import Event, Message -from codechecker_report_converter.sanitizers.thread.output_parser import \ - TSANParser -from codechecker_report_converter.sanitizers.thread.analyzer_result import \ - TSANAnalyzerResult - -OLD_PWD = None - - -def setup_module(): - """Setup the test tidy reprs for the test classes in the module.""" - global OLD_PWD - OLD_PWD = os.getcwd() - os.chdir(os.path.join(os.path.dirname(__file__), - 'tsan_output_test_files')) - - -def teardown_module(): - """Restore environment after tests have ran.""" - global OLD_PWD - os.chdir(OLD_PWD) - - -class TSANAnalyzerResultTestCase(unittest.TestCase): - """ Test the output of the TSANAnalyzerResult. """ - - def setUp(self): - """ Setup the test. """ - self.analyzer_result = TSANAnalyzerResult() - self.cc_result_dir = tempfile.mkdtemp() - - def tearDown(self): - """ Clean temporary directory. """ - shutil.rmtree(self.cc_result_dir) - - def test_tsan(self): - """ Test for the tsan.plist file. """ - self.analyzer_result.transform('tsan.out', self.cc_result_dir) - - with open('tsan.plist', mode='rb') as pfile: - exp = plistlib.load(pfile) - - plist_file = os.path.join(self.cc_result_dir, 'tsan.cpp_tsan.plist') - with open(plist_file, mode='rb') as pfile: - res = plistlib.load(pfile) - - # Use relative path for this test. 
- res['files'][0] = 'files/tsan.cpp' - - self.assertTrue(res['metadata']['generated_by']['version']) - res['metadata']['generated_by']['version'] = "x.y.z" - - self.assertEqual(res, exp) - - -class TSANOutputParserTestCase(unittest.TestCase): - """ - Tests the output of the OutputParser, which converts an Thread Sanitizer - output file to zero or more Message object. - """ - - def setUp(self): - """Setup the OutputParser.""" - self.parser = TSANParser() - self.tsan_repr = [ - Message( - os.path.abspath('files/tsan.cpp'), - 24, 2, - "SEGV on unknown address 0x000000000000 (pc 0x0000004b525c bp " - "0x7fff93b54920 sp 0x7fff93b548b0 T23755)", - "ThreadSanitizer", - [Event( - os.path.abspath('files/tsan.cpp'), - 24, 2, - " #1 main files/tsan.cpp:24:2 (a.out+0x4b529e)"), - Event( - os.path.abspath('files/tsan.cpp'), - 18, 14, - " #0 insert_in_table(unsigned long, int) " - "files/tsan.cpp:18:14 (a.out+0x4b525b)" - )], - [Event( - os.path.abspath('files/tsan.cpp'), - 24, 2, - "==23755==The signal is caused by a WRITE memory access.\n" - "==23755==Hint: address points to the zero page.\n" - " #0 insert_in_table(unsigned long, int) " - "files/tsan.cpp:18:14 (a.out+0x4b525b)\n" - " #1 main files/tsan.cpp:24:2 (a.out+0x4b529e)\n" - " #2 __libc_start_main /build/glibc-OTsEL5/glibc-2.27/" - "csu/../csu/libc-start.c:310 (libc.so.6+0x21b96)\n" - " #3 _start (a.out+0x41c8d9)\n" - )]), - ] - - def test_tsan(self): - """ Test the generated Messages of tsan.out. 
""" - messages = self.parser.parse_messages_from_file('tsan.out') - self.assertEqual(len(messages), len(self.tsan_repr)) - for message in messages: - self.assertIn(message, self.tsan_repr) diff --git a/tools/report-converter/tests/unit/test_ubsan_parser.py b/tools/report-converter/tests/unit/test_ubsan_parser.py deleted file mode 100644 index 7156d0f6a8..0000000000 --- a/tools/report-converter/tests/unit/test_ubsan_parser.py +++ /dev/null @@ -1,160 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -""" -This module tests the correctness of the OutputParser and PListConverter, which -used in sequence transform UndefinedBehaviorSanitizer output to a plist file. -""" - - -import os -import plistlib -import shutil -import tempfile -import unittest - -from codechecker_report_converter.output_parser import Message -from codechecker_report_converter.sanitizers.ub.output_parser import \ - UBSANParser -from codechecker_report_converter.sanitizers.ub.analyzer_result import \ - UBSANAnalyzerResult - -OLD_PWD = None - - -def setup_module(): - """ Setup the test tidy reprs for the test classes in the module. """ - global OLD_PWD - OLD_PWD = os.getcwd() - os.chdir(os.path.join(os.path.dirname(__file__), - 'ubsan_output_test_files')) - - -def teardown_module(): - """ Restore environment after tests have ran. """ - global OLD_PWD - os.chdir(OLD_PWD) - - -class UBSANPListConverterTestCase(unittest.TestCase): - """ Test the output of the UBSANAnalyzerResult. """ - - def setUp(self): - """ Setup the test. """ - self.analyzer_result = UBSANAnalyzerResult() - self.cc_result_dir = tempfile.mkdtemp() - - def tearDown(self): - """ Clean temporary directory. 
""" - shutil.rmtree(self.cc_result_dir) - - def __check_analyzer_result(self, analyzer_result, analyzer_result_plist, - source_files, expected_plist): - """ Check the result of the analyzer transformation. """ - - self.analyzer_result.transform(analyzer_result, self.cc_result_dir) - - plist_file = os.path.join(self.cc_result_dir, analyzer_result_plist) - with open(plist_file, mode='rb') as pfile: - res = plistlib.load(pfile) - - # Use relative path for this test. - res['files'] = source_files - - self.assertTrue(res['metadata']['generated_by']['version']) - res['metadata']['generated_by']['version'] = "x.y.z" - - with open(expected_plist, mode='rb') as pfile: - exp = plistlib.load(pfile) - - self.assertEqual(res, exp) - - def test_empty1(self): - """ Test for empty Messages. """ - ret = self.analyzer_result.transform('empty1.out', self.cc_result_dir) - self.assertFalse(ret) - - def test_empty2(self): - """ Test for empty Messages with multiple line. """ - ret = self.analyzer_result.transform('empty2.out', self.cc_result_dir) - self.assertFalse(ret) - - def test_ubsan1(self): - """ Test for the ubsan1.plist file. """ - self.__check_analyzer_result('ubsan1.out', 'ubsan1.cpp_ubsan.plist', - ['files/ubsan1.cpp'], 'ubsan1.plist') - - def test_ubsan2(self): - """ Test for the ubsan2.plist file. """ - self.__check_analyzer_result('ubsan2.out', 'ubsan2.cpp_ubsan.plist', - ['files/ubsan2.cpp'], 'ubsan2.plist') - - -class UBSANOutputParserTestCase(unittest.TestCase): - """ - Tests the output of the OutputParser, which converts an Undefined Behaviour - Sanitizer output file to zero or more Message object. 
- """ - - def setUp(self): - """Setup the OutputParser.""" - self.parser = UBSANParser() - self.ubsan1_repr = [ - Message( - os.path.abspath('files/ubsan1.cpp'), - 4, 5, - "signed integer overflow: 2147483647 + 1 cannot be " - "represented in type 'int'", - "UndefinedBehaviorSanitizer"), - ] - - self.ubsan2_repr = [ - Message( - os.path.abspath('files/ubsan2.cpp'), - 13, 10, - "load of value 7, which is not a valid value for type " - "'enum E'", - "UndefinedBehaviorSanitizer"), - Message( - os.path.abspath('files/ubsan2.cpp'), - 21, 7, - "load of value 2, which is not a valid value for type 'bool'", - "UndefinedBehaviorSanitizer"), - ] - - def test_empty1(self): - """Test an empty output file.""" - messages = self.parser.parse_messages_from_file('empty1.out') - self.assertEqual(messages, []) - - def test_empty2(self): - """Test an output file that only contains empty lines.""" - messages = self.parser.parse_messages_from_file('empty2.out') - self.assertEqual(messages, []) - - def test_absolute_path(self): - """Test for absolute paths in Messages.""" - for tfile in ['abs.out', 'ubsan1.out']: - messages = self.parser.parse_messages_from_file(tfile) - self.assertNotEqual(len(messages), 0) - for message in messages: - self.assertTrue(os.path.isabs(message.path)) - - def test_ubsan1(self): - """ Test the generated Messages of ubsan1.out. """ - messages = self.parser.parse_messages_from_file('ubsan1.out') - self.assertEqual(len(messages), len(self.ubsan1_repr)) - for message in messages: - self.assertIn(message, self.ubsan1_repr) - - def test_ubsan2(self): - """ Test the generated Messages of ubsan1.out. 
""" - messages = self.parser.parse_messages_from_file('ubsan2.out') - self.assertEqual(len(messages), len(self.ubsan2_repr)) - for message in messages: - self.assertIn(message, self.ubsan2_repr) diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/abs.out b/tools/report-converter/tests/unit/tidy_output_test_files/abs.out deleted file mode 100644 index a754246a1c..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/abs.out +++ /dev/null @@ -1,9 +0,0 @@ -/something/fake/test.cpp:8:12: warning: Division by zero [clang-analyzer-core.DivideZero] - return x % 0; - ^ -/something/fake/test.cpp:8:12: note: Division by zero - return x % 0; - ^ -/something/fake/test.cpp:8:12: warning: remainder by zero is undefined [clang-diagnostic-division-by-zero] - return x % 0; - ^ diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/files/Makefile b/tools/report-converter/tests/unit/tidy_output_test_files/files/Makefile deleted file mode 100644 index 5660b7be6b..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/files/Makefile +++ /dev/null @@ -1,12 +0,0 @@ - -tidy1: - clang-tidy -checks='-*,clang-diagnostic-*,clang-analyzer-*' test.cpp - -tidy2: - clang-tidy -checks='-*,clang-diagnostic-*,clang-analyzer-*' test2.cpp - -tidy3: - clang-tidy -checks='-*,clang-diagnostic-*,clang-analyzer-*,modernize-use-nullptr' test3.cpp -- -I. 
- -tidy4: - clang-tidy -checks='-*,clang-diagnostic-*,clang-analyzer-*' test4.cpp -- -Wc++11-compat-reserved-user-defined-literal diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/files/test.cpp b/tools/report-converter/tests/unit/tidy_output_test_files/files/test.cpp deleted file mode 100644 index 029b87fed0..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/files/test.cpp +++ /dev/null @@ -1,9 +0,0 @@ -#include - -int main() { - int x; - - std::cin >> x; - - return x % 0; -} diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/files/test2.cpp b/tools/report-converter/tests/unit/tidy_output_test_files/files/test2.cpp deleted file mode 100644 index e1133349eb..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/files/test2.cpp +++ /dev/null @@ -1,14 +0,0 @@ -#include - -int main() { - int x; - int y; - - std::cin >> x; - - if (false || x) { - return 42; - } - - return x % 0; -} diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/files/test3.cpp b/tools/report-converter/tests/unit/tidy_output_test_files/files/test3.cpp deleted file mode 100644 index ec816bdbc6..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/files/test3.cpp +++ /dev/null @@ -1,11 +0,0 @@ -#include "test3.hh" - -int main(int argc, const char** /*argv*/) { - int* x = 0; - - if (foo(argc > 3)) { - bar(x); - } - - return 0; -} diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/files/test3.hh b/tools/report-converter/tests/unit/tidy_output_test_files/files/test3.hh deleted file mode 100644 index 00b6991ba6..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/files/test3.hh +++ /dev/null @@ -1,7 +0,0 @@ -inline bool foo(bool arg) { - return false || arg; -} - -inline void bar(int* x) { - *x = 42; -} diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/files/test4.cpp 
b/tools/report-converter/tests/unit/tidy_output_test_files/files/test4.cpp deleted file mode 100644 index b3a85de6ce..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/files/test4.cpp +++ /dev/null @@ -1,11 +0,0 @@ -#include - -#define TAR_FILE PATH "/"TAR_FILENAME - -int main() { - int x; - - std::cin >> x; - - return x % 0; -} diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy1.out b/tools/report-converter/tests/unit/tidy_output_test_files/tidy1.out deleted file mode 100644 index a7107f979a..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy1.out +++ /dev/null @@ -1,9 +0,0 @@ -files/test.cpp:8:12: warning: Division by zero [clang-analyzer-core.DivideZero] - return x % 0; - ^ -files/test.cpp:8:12: note: Division by zero - return x % 0; - ^ -files/test.cpp:8:12: warning: remainder by zero is undefined [clang-diagnostic-division-by-zero] - return x % 0; - ^ diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy1_v6.out b/tools/report-converter/tests/unit/tidy_output_test_files/tidy1_v6.out deleted file mode 100644 index 13c561e556..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy1_v6.out +++ /dev/null @@ -1,7 +0,0 @@ -files/test.cpp:8:12: warning: Division by zero [clang-analyzer-core.DivideZero] - return x % 0; - ^ -files/test.cpp:8:12: note: Division by zero -files/test.cpp:8:12: warning: remainder by zero is undefined [clang-diagnostic-division-by-zero] - return x % 0; - ^ diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy2.out b/tools/report-converter/tests/unit/tidy_output_test_files/tidy2.out deleted file mode 100644 index 714c22cb27..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy2.out +++ /dev/null @@ -1,18 +0,0 @@ -files/test2.cpp:5:7: warning: unused variable 'y' [clang-diagnostic-unused-variable] - int y; - ^ -files/test2.cpp:13:12: warning: Division by zero 
[clang-analyzer-core.DivideZero] - return x % 0; - ^ -files/test2.cpp:9:7: note: Left side of '||' is false - if (false || x) { - ^ -files/test2.cpp:9:3: note: Taking false branch - if (false || x) { - ^ -files/test2.cpp:13:12: note: Division by zero - return x % 0; - ^ -files/test2.cpp:13:12: warning: remainder by zero is undefined [clang-diagnostic-division-by-zero] - return x % 0; - ^ diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy2_v6.out b/tools/report-converter/tests/unit/tidy_output_test_files/tidy2_v6.out deleted file mode 100644 index b5d5c2e239..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy2_v6.out +++ /dev/null @@ -1,18 +0,0 @@ -files/test2.cpp:13:12: warning: Division by zero [clang-analyzer-core.DivideZero] - return x % 0; - ^ -files/test2.cpp:9:7: note: Left side of '||' is false - if (false || x) { - ^ -files/test2.cpp:9:16: note: Assuming 'x' is 0 - if (false || x) { - ^ -files/test2.cpp:9:3: note: Taking false branch - if (false || x) { - ^ -files/test2.cpp:13:12: note: Division by zero - return x % 0; - ^ -files/test2.cpp:13:12: warning: remainder by zero is undefined [clang-diagnostic-division-by-zero] - return x % 0; - ^ diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy3.out b/tools/report-converter/tests/unit/tidy_output_test_files/tidy3.out deleted file mode 100644 index ff332c7d3a..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy3.out +++ /dev/null @@ -1,25 +0,0 @@ -files/test3.hh:6:6: warning: Dereference of null pointer (loaded from variable 'x') [clang-analyzer-core.NullDereference] - *x = 42; - ^ -files/test3.cpp:4:3: note: 'x' initialized to a null pointer value - int* x = 0; - ^ -files/test3.cpp:6:11: note: Assuming 'argc' is > 3 - if (foo(argc > 3)) { - ^ -files/test3.cpp:6:3: note: Taking true branch - if (foo(argc > 3)) { - ^ -files/test3.cpp:7:9: note: Passing null pointer value via 1st parameter 'x' - bar(x); - ^ 
-files/test3.cpp:7:5: note: Calling 'bar' - bar(x); - ^ -files/test3.hh:6:6: note: Dereference of null pointer (loaded from variable 'x') - *x = 42; - ^ -files/test3.cpp:4:12: warning: use nullptr [modernize-use-nullptr] - int* x = 0; - ^~ - nullptr diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy4.out b/tools/report-converter/tests/unit/tidy_output_test_files/tidy4.out deleted file mode 100644 index 61cd7aa767..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy4.out +++ /dev/null @@ -1,7 +0,0 @@ -files/test.cpp:8:12: warning: Division by zero [clang-analyzer-core.DivideZero] - return x % 0; - ^ -files/test.cpp:8:12: note: Division by zero - return x % 0; - ^ -files/test.cpp:8:12: warning: remainder by zero is undefined [clang-diagnostic-division-by-zero] diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy5.out b/tools/report-converter/tests/unit/tidy_output_test_files/tidy5.out deleted file mode 100644 index 48446add0e..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy5.out +++ /dev/null @@ -1,13 +0,0 @@ -files/test4.cpp:3:26: warning: identifier after literal will be treated as a reserved user-defined literal suffix in C++11 [clang-diagnostic-c++11-compat-reserved-user-defined-literal] -#define TAR_FILE PATH "/"TAR_FILENAME - ^ - -files/test4.cpp:10:12: warning: Division by zero [clang-analyzer-core.DivideZero] - return x % 0; - ^ -files/test4.cpp:10:12: note: Division by zero - return x % 0; - ^ -files/test4.cpp:10:12: warning: remainder by zero is undefined [clang-diagnostic-division-by-zero] - return x % 0; - ^ diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy5_v6.out b/tools/report-converter/tests/unit/tidy_output_test_files/tidy5_v6.out deleted file mode 100644 index 7ead09b757..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy5_v6.out +++ /dev/null @@ -1,7 +0,0 @@ -files/test4.cpp:3:26: error: 
invalid suffix on literal; C++11 requires a space between literal and identifier [clang-diagnostic-reserved-user-defined-literal] -#define TAR_FILE PATH "/"TAR_FILENAME - ^~~~~~~~~~~~ - -files/test4.cpp:10:12: warning: remainder by zero is undefined [clang-diagnostic-division-by-zero] - return x % 0; - ^ diff --git a/tools/report-converter/tests/unit/tidy_output_test_files/tidy6.out b/tools/report-converter/tests/unit/tidy_output_test_files/tidy6.out deleted file mode 100644 index f04ea9cb88..0000000000 --- a/tools/report-converter/tests/unit/tidy_output_test_files/tidy6.out +++ /dev/null @@ -1,9 +0,0 @@ -files/test5.cpp:10:9: error: no matching function for call to 'get_type' [clang-diagnostic-error] - (void)get_type(ptr); - ^ -files/test5.cpp:2:18: note: candidate template ignored: substitution failure [with T = int *]: type 'int *' cannot be used prior to '::' because it has no members -typename T::type get_type(const T&); - ^ -files/test5.cpp:5:6: note: candidate template ignored: substitution failure [with T = int]: array size is negative -void get_type(T *, int[(int)sizeof(T) - 9] = 0); - ^ diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/empty1.out b/tools/report-converter/tests/unit/ubsan_output_test_files/empty1.out deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tools/report-converter/tests/unit/ubsan_output_test_files/empty2.out b/tools/report-converter/tests/unit/ubsan_output_test_files/empty2.out deleted file mode 100644 index aaab188f62..0000000000 --- a/tools/report-converter/tests/unit/ubsan_output_test_files/empty2.out +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/tools/report-converter/tests/unit/util/__init__.py b/tools/report-converter/tests/unit/util/__init__.py new file mode 100644 index 0000000000..4259749345 --- /dev/null +++ b/tools/report-converter/tests/unit/util/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# +# 
Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- diff --git a/web/server/tests/unit/newline b/tools/report-converter/tests/unit/util/newline similarity index 100% rename from web/server/tests/unit/newline rename to tools/report-converter/tests/unit/util/newline diff --git a/web/server/tests/unit/test_util_fileread.py b/tools/report-converter/tests/unit/util/test_fileread.py similarity index 95% rename from web/server/tests/unit/test_util_fileread.py rename to tools/report-converter/tests/unit/util/test_fileread.py index 0a7005b533..05803d0e24 100644 --- a/web/server/tests/unit/test_util_fileread.py +++ b/tools/report-converter/tests/unit/util/test_fileread.py @@ -13,7 +13,7 @@ import os import unittest -from codechecker_common.util import get_line +from codechecker_report_converter.util import get_line class GetLineTest(unittest.TestCase): diff --git a/web/server/tests/unit/test_trim_path_prefix.py b/tools/report-converter/tests/unit/util/test_trim_path_prefix.py similarity index 97% rename from web/server/tests/unit/test_trim_path_prefix.py rename to tools/report-converter/tests/unit/util/test_trim_path_prefix.py index 55d22816a3..8779bb94dc 100644 --- a/web/server/tests/unit/test_trim_path_prefix.py +++ b/tools/report-converter/tests/unit/util/test_trim_path_prefix.py @@ -11,7 +11,7 @@ import unittest -from codechecker_common.util import trim_path_prefixes +from codechecker_report_converter.util import trim_path_prefixes class TrimPathPrefixTestCase(unittest.TestCase): diff --git a/web/Makefile b/web/Makefile index 3e4b181e8d..39f3ac74ad 100644 --- a/web/Makefile +++ b/web/Makefile @@ -82,29 +82,25 @@ ifeq ($(BUILD_UI_DIST),YES) echo $(LATEST_COMMIT) > $(LATEST_COMMIT_FILE) endif +build_report_converter: + $(MAKE) -C $(CC_TOOLS)/report-converter build 
+ +package_report_converter: build_report_converter package_dir_structure + cp -rp $(CC_TOOLS)/report-converter/build/report_converter/codechecker_report_converter $(CC_BUILD_LIB_DIR) && \ + chmod u+x $(CC_BUILD_LIB_DIR)/codechecker_report_converter/cli.py && \ + cd $(CC_BUILD_BIN_DIR) && \ + ln -sf ../lib/python3/codechecker_report_converter/cli.py report-converter + # This target should be used from the top level Makefile to build the package -# together with the analyzer part. This way we will not build plist-to-html +# together with the analyzer part. This way we will not build tools # multiple times. -package_web: check_codechecker_api_version check_dist package_dir_structure $(DIST_DIR) +package_web: check_codechecker_api_version check_dist package_dir_structure package_report_converter $(DIST_DIR) ifeq ($(BUILD_UI_DIST),YES) mkdir -p $(CC_BUILD_WEB_DIR) cp -r $(DIST_DIR)/* $(CC_BUILD_WEB_DIR) endif -build_plist_to_html: - $(MAKE) -C $(ROOT)/tools/plist_to_html build - -package_plist_to_html: build_plist_to_html package_dir_structure - # Copy plist-to-html files. - cp -r $(CC_TOOLS)/plist_to_html/build/plist_to_html/plist_to_html $(CC_BUILD_LIB_DIR) - -build_report_hash: - $(MAKE) -C $(ROOT)/tools/codechecker_report_hash build - -package_report_hash: build_report_hash package_dir_structure - cp -r $(CC_TOOLS)/codechecker_report_hash/build/codechecker_report_hash/codechecker_report_hash $(CC_BUILD_LIB_DIR) - -package: package_dir_structure package_plist_to_html package_report_hash package_web +package: package_dir_structure package_web # Copy libraries. cp -r $(ROOT)/codechecker_common $(CC_BUILD_LIB_DIR) && \ cp -r $(CURRENT_DIR)/codechecker_web $(CC_BUILD_LIB_DIR) && \ @@ -160,14 +156,11 @@ check_dist: cd server/vue-cli && npm install && rm -rf $(DIST_DIR); \ fi -clean_package: clean_plist_to_html +clean_package: rm -rf $(BUILD_DIR) rm -rf gen-docs find . 
-name "*.pyc" -delete -clean_plist_to_html: - rm -rf $(ROOT)/tools/plist_to_html/build - clean: rm -rf $(CC_SERVER)/vue-cli/dist rm -rf $(CC_SERVER)/vue-cli/node_modules diff --git a/web/client/codechecker_client/cmd/store.py b/web/client/codechecker_client/cmd/store.py index 332c1b157a..b148572152 100644 --- a/web/client/codechecker_client/cmd/store.py +++ b/web/client/codechecker_client/cmd/store.py @@ -18,24 +18,26 @@ import os import sys import tempfile -from typing import Dict, List, Set, Tuple import zipfile import zlib +from collections import defaultdict, namedtuple from concurrent.futures import ProcessPoolExecutor - -from collections import namedtuple +from typing import Dict, Iterable, List, Set, Tuple from codechecker_api.codeCheckerDBAccess_v6.ttypes import StoreLimitKind from codechecker_api_shared.ttypes import RequestFailed, ErrorCode -from codechecker_client import client as libclient -from codechecker_common import arg, logger, plist_parser, util, cmd_config -from codechecker_common.report import Report -from codechecker_common.output import twodim -from codechecker_common.source_code_comment_handler import \ +from codechecker_report_converter import twodim +from codechecker_report_converter.report import Report, report_file, \ + reports as reports_helper +from codechecker_report_converter.report.hash import HashType +from codechecker_report_converter.source_code_comment_handler import \ SourceCodeCommentHandler -from codechecker_report_hash.hash import HashType, replace_report_hash +from codechecker_report_converter.util import load_json_or_empty + +from codechecker_client import client as libclient +from codechecker_common import arg, logger, cmd_config from codechecker_web.shared import webserver_context, host_check from codechecker_web.shared.env import get_default_workspace @@ -50,6 +52,12 @@ MAX_UPLOAD_SIZE = 1 * 1024 * 1024 * 1024 # 1GiB +AnalyzerResultFileReports = Dict[str, List[Report]] + + +FileReportPositions = Dict[str, Set[int]] + + 
"""Minimal required information for a report position in a source file. line: line number where the report was generated @@ -274,7 +282,7 @@ def __get_run_name(input_list): for input_path in input_list: metafile = os.path.join(input_path, "metadata.json") if os.path.isdir(input_path) and os.path.exists(metafile): - metajson = util.load_json_or_empty(metafile) + metajson = load_json_or_empty(metafile) if 'version' in metajson and metajson['version'] >= 2: for tool in metajson.get('tools', {}): @@ -297,139 +305,16 @@ def __get_run_name(input_list): return False -def res_handler(results): - """ - Summary about the parsing and storage results. - """ - LOG.info("Finished processing and storing reports.") - LOG.info("Failed: %d/%d", results.count(1), len(results)) - LOG.info("Successful %d/%d", results.count(0), len(results)) - - -def collect_report_files(inputs: List[str]) -> Set[str]: - """ - Collect all the plist report files in the inputs directories recursively. - """ - report_files: Set[str] = set() - - def is_report_file(file_path): - """ True if the given file is a report file. """ - return file_path.endswith(".plist") - - for input_path in inputs: - if os.path.isfile(input_path): - if is_report_file(input_path): - report_files.add(input_path) - else: - for root_dir_path, _, files in os.walk(input_path): - for f in files: - file_path = os.path.join(root_dir_path, f) - if is_report_file(file_path): - report_files.add(file_path) - - return report_files - - -def parse_report_file(plist_file: str) \ - -> Tuple[Dict[int, str], List[Report]]: - """Parse a plist report file and return the list of reports and the - list of source files mentioned in the report file. 
- """ - files = {} - reports = [] - - try: - files, reports = plist_parser.parse_plist_file(plist_file) - except Exception as ex: - import traceback - traceback.print_stack() - LOG.error('Parsing the plist failed: %s', str(ex)) - finally: - return files, reports - - -def collect_file_info(files: Dict[int, str]) -> Dict: - """Collect file information about given list of files like: - - last modification time - - content hash - If the file is missing the corresponding data will - be empty. - """ - res = {} - for sf in files.values(): - res[sf] = {} - if os.path.isfile(sf): - res[sf]["hash"] = get_file_content_hash(sf) - res[sf]["mtime"] = util.get_last_mod_time(sf) - - return res - - -def find_files(directory, file_name): - """Return the list of files with the exact name match under - the given directory. - """ - res = set() - for input_path in directory: - input_path = os.path.abspath(input_path) - - if not os.path.exists(input_path): - return res - - _, _, files = next(os.walk(input_path), ([], [], [])) - - for f in files: - if f == file_name: - res.add(os.path.join(input_path, f)) - return res - - -def check_missing_files(source_file_info): - """Return a set of the missing files from the source_file_info dict. - """ - return {k for k, v in source_file_info.items() if not bool(v)} - - -def overwrite_cppcheck_report_hash(reports, plist_file): - """CppCheck generates a '0' value for the bug hash. - In case all of the reports in a plist file contain only - a hash with '0' value overwrite the hash values in the - plist report files with a context free hash value. - """ - rep_hash = [rep.report_hash == '0' for rep in reports] - if all(rep_hash): - replace_report_hash(plist_file, HashType.CONTEXT_FREE) - return True - return False - - -def get_report_data(reports): - """Return the minimal required report information to be able - to collect review comments from the source code. 
- """ - report_main = [] - for report in reports: - last_report_event = report.bug_path[-1] - file_path_index = last_report_event['location']['file'] - report_line = last_report_event['location']['line'] - report_main.append(ReportLineInfo(report_line, - file_path_index, - "")) - return report_main - - -def scan_for_review_comment(job): +def scan_for_review_comment(job: Tuple[str, Iterable[int]]): """Scan a file for review comments returns all the found review comments. """ file_path, lines = job sc_handler = SourceCodeCommentHandler() comments = [] - with open(file_path, mode='r', - encoding='utf-8', - errors='ignore') as sf: - comments, misspelled_comments = \ - sc_handler.scan_source_line_comments(sf, lines) + with open(file_path, mode='r', encoding='utf-8', errors='ignore') as f: + comments, misspelled_comments = sc_handler.scan_source_line_comments( + f, lines) if misspelled_comments: LOG.warning("There are misspelled review status comments in %s", @@ -440,15 +325,14 @@ def scan_for_review_comment(job): return comments -def get_source_file_with_comments(jobs, zip_iter=map): +def get_source_file_with_comments(jobs, zip_iter=map) -> Set[str]: """ Get source files where there is any codechecker review comment at the main report positions. """ files_with_comment = set() - for job, comments in zip(jobs, - zip_iter(scan_for_review_comment, jobs)): + for job, comments in zip(jobs, zip_iter(scan_for_review_comment, jobs)): file_path, _ = job if comments: files_with_comment.add(file_path) @@ -456,19 +340,13 @@ def get_source_file_with_comments(jobs, zip_iter=map): return files_with_comment -def filter_source_files_with_comments(source_file_info, main_report_positions): - """Collect the source files where there is any codechecker review +def filter_source_files_with_comments( + file_report_positions: FileReportPositions +) -> Set[str]: + """ Collect the source files where there is any codechecker review comment at the main report positions. 
""" - jobs = [] - for file_path, v in source_file_info.items(): - if not bool(v): - # missing file - continue - lines = [rep.line for rep in main_report_positions - if rep.filepath == file_path] - - jobs.append((file_path, lines)) + jobs = file_report_positions.items() # Currently ProcessPoolExecutor fails completely in windows. # Reason is most likely combination of venv and fork() not @@ -482,105 +360,35 @@ def filter_source_files_with_comments(source_file_info, main_report_positions): return get_source_file_with_comments(jobs, executor.map) -def parse_collect_plist_info(plist_file): - """Parse one plist report file and collect information - about the source files mentioned in the report file. - """ +def get_reports(analyzer_result_file_path: str) -> List[Report]: + """ Get reports from the given analyzer result file. """ + reports = report_file.get_reports(analyzer_result_file_path) - source_files, reports = parse_report_file(plist_file) - - if len(source_files) == 0: - # If there is no source in the plist we will not upload - # it to the server. - LOG.debug("Skip empty plist file: %s", plist_file) - rli = ReportFileInfo(store_it=False, main_report_positions=[]) - sfir = SourceFilesInReport(source_info={}, - missing=set(), - changed_since_report_gen=set()) - return rli, sfir - - source_info = collect_file_info(source_files) - - missing_files = set() - missing_files = check_missing_files(source_info) - if missing_files: - LOG.warning("Skipping '%s' because it refers " - "the following missing source files: %s", - plist_file, missing_files) - for mf in missing_files: - missing_files.add(mf) - - rli = ReportFileInfo(store_it=False, main_report_positions=[]) - sfir = SourceFilesInReport(source_info=source_info, - missing=missing_files, - changed_since_report_gen=set()) - return rli, sfir - - if overwrite_cppcheck_report_hash(reports, plist_file): - # If overwrite was needed parse it back again to update the hashes. 
- source_files, reports = parse_report_file(plist_file) - - main_report_positions = [] - rdata = get_report_data(reports) - # Replace the file index values to source file path. - for rda in rdata: - rda = rda._replace(filepath=source_files[rda.fileidx]) - main_report_positions.append(rda) - - plist_mtime = util.get_last_mod_time(plist_file) + # CppCheck generates a '0' value for the report hash. In case all of the + # reports in a result file contain only a hash with '0' value, overwrite + # the hash values in the report files with a context free hash value. + if all(r.report_hash == '0' for r in reports): + report_file.replace_report_hash( + analyzer_result_file_path, HashType.CONTEXT_FREE) - changed_files = set() - # Check if any source file corresponding to a plist - # file changed since the plist file was generated. - for k, v in source_info.items(): - if bool(v): - if v['mtime'] > plist_mtime: - changed_files.add(k) - rli = ReportFileInfo(store_it=True, - main_report_positions=main_report_positions) - sfir = SourceFilesInReport(source_info=source_info, - missing=missing_files, - changed_since_report_gen=changed_files) - - return rli, sfir - - -def parse_report_files(report_files: Set[str], zip_iter=map): - """Parse and collect source code information mentioned in a report file. - - Collect any mentioned source files wich are missing or changed - since the report generation. If there are missing or changed files - the report will not be stored. 
- """ + reports = report_file.get_reports(analyzer_result_file_path) - files_to_compress = set() - source_file_info = {} - main_report_positions = [] - changed_files = set() - missing_source_files = set() + return reports - for report_f, v in zip(report_files, - zip_iter(parse_collect_plist_info, - report_files)): - report_file_info, source_in_reports = v +def parse_analyzer_result_files( + analyzer_result_files: Iterable[str], + zip_iter=map +) -> AnalyzerResultFileReports: + """ Get reports from the given analyzer result files. """ + analyzer_result_file_reports: AnalyzerResultFileReports = defaultdict(list) - if report_file_info.store_it: - files_to_compress.add(report_f) + for file_path, reports in zip( + analyzer_result_files, zip_iter( + get_reports, analyzer_result_files)): + analyzer_result_file_reports[file_path] = reports - source_file_info.update(source_in_reports.source_info) - changed_files = \ - changed_files | source_in_reports.changed_since_report_gen - main_report_positions.extend( - report_file_info.main_report_positions) - missing_source_files = \ - missing_source_files | source_in_reports.missing - - return (source_file_info, - main_report_positions, - files_to_compress, - changed_files, - missing_source_files) + return analyzer_result_file_reports def assemble_zip(inputs, zip_file, client): @@ -588,7 +396,19 @@ def assemble_zip(inputs, zip_file, client): contanining analysis related information into a zip file which will be sent to the server. 
""" - report_files = collect_report_files(inputs) + files_to_compress = set() + analyzer_result_file_paths = [] + + for dir_path, file_paths in report_file.analyzer_result_files(inputs): + analyzer_result_file_paths.extend(file_paths) + + metadata_file_path = os.path.join(dir_path, 'metadata.json') + if os.path.exists(metadata_file_path): + files_to_compress.add(metadata_file_path) + + skip_file_path = os.path.join(dir_path, 'skip_file') + if os.path.exists(skip_file_path): + files_to_compress.add(skip_file_path) LOG.debug("Processing report files ...") @@ -598,60 +418,47 @@ def assemble_zip(inputs, zip_file, client): # PYTHONPATH in parent CodeChecker before store is executed # are lost. if sys.platform == "win32": - (source_file_info, - main_report_positions, - files_to_compress, - changed_files, - missing_source_files) = parse_report_files(report_files) + analyzer_result_file_reports = parse_analyzer_result_files( + analyzer_result_file_paths) else: with ProcessPoolExecutor() as executor: - (source_file_info, - main_report_positions, - files_to_compress, - changed_files, - missing_source_files) = parse_report_files(report_files, - executor.map) + analyzer_result_file_reports = parse_analyzer_result_files( + analyzer_result_file_paths, executor.map) LOG.info("Processing report files done.") + changed_files = set() + file_paths = set() + file_report_positions: FileReportPositions = defaultdict(set) + for file_path, reports in analyzer_result_file_reports.items(): + files_to_compress.add(file_path) + + for report in reports: + if report.changed_files: + changed_files.update(report.changed_files) + continue + + file_paths.update(report.files) + file_report_positions[report.file.original_path].add(report.line) + if changed_files: - changed_files = '\n'.join([' - ' + f for f in changed_files]) - LOG.warning("The following source file contents changed since the " - "latest analysis:\n%s\nPlease analyze your project " - "again to update the reports!", changed_files) + 
reports_helper.dump_changed_files(changed_files) sys.exit(1) - hash_to_file = {} - # There can be files with same hash, - # but different path. - file_to_hash = {} - - for source_file, info in source_file_info.items(): - if bool(info): - file_to_hash[source_file] = info['hash'] - hash_to_file[info['hash']] = source_file + if not file_paths: + LOG.warning("There is no report to store. After uploading these " + "results the previous reports become resolved.") - LOG.info("Collecting review comments ...") - files_with_comment = \ - filter_source_files_with_comments(source_file_info, - main_report_positions) + hash_to_file: Dict[str, str] = {} - LOG.info("Collecting review comments done.") - file_hash_with_review_status = set() - for file_path in files_with_comment: - file_hash = file_to_hash.get(file_path) - if file_hash: - file_hash_with_review_status.add(file_hash) + # There can be files with same hash, but different path. + file_to_hash: Dict[str, str] = {} - for input_dir_path in inputs: - for root_dir_path, _, _ in os.walk(input_dir_path): - metadata_file_path = os.path.join(root_dir_path, 'metadata.json') - if os.path.exists(metadata_file_path): - files_to_compress.add(metadata_file_path) + for file_path in file_paths: + h = get_file_content_hash(file_path) - skip_file_path = os.path.join(root_dir_path, 'skip_file') - if os.path.exists(skip_file_path): - files_to_compress.add(skip_file_path) + file_to_hash[file_path] = h + hash_to_file[h] = file_path file_hashes = list(hash_to_file.keys()) @@ -660,29 +467,38 @@ def assemble_zip(inputs, zip_file, client): if file_hashes else [] LOG.info("Get missing file content hashes done.") - if not hash_to_file: - LOG.warning("There is no report to store. After uploading these " - "results the previous reports become resolved.") + LOG.info("Collecting review comments ...") + + # Get files which can be found on the server but contains source code + # comments and send these files to the server. 
+ unnecessary_file_report_positions = { + k: v for (k, v) in file_report_positions.items() + if file_to_hash[k] not in necessary_hashes} + + files_with_comment = filter_source_files_with_comments( + unnecessary_file_report_positions) + + for file_path in files_with_comment: + necessary_hashes.append(file_to_hash[file_path]) + + LOG.info("Collecting review comments done.") LOG.debug("Building report zip file.") - with zipfile.ZipFile(zip_file, 'a', - allowZip64=True) as zipf: + with zipfile.ZipFile(zip_file, 'a', allowZip64=True) as zipf: # Add the files to the zip which will be sent to the server. - for ftc in files_to_compress: - _, filename = os.path.split(ftc) + for file_path in files_to_compress: + _, file_name = os.path.split(file_path) # Create a unique report directory name. - report_dir_name = \ - hashlib.md5(os.path.dirname(ftc).encode('utf-8')).hexdigest() - - zip_target = \ - os.path.join('reports', report_dir_name, filename) + report_dir_name = hashlib.md5(os.path.dirname( + file_path).encode('utf-8')).hexdigest() - zipf.write(ftc, zip_target) + zip_target = os.path.join('reports', report_dir_name, file_name) + zipf.write(file_path, zip_target) collected_file_paths = set() for f, h in file_to_hash.items(): - if h in necessary_hashes or h in file_hash_with_review_status: + if h in necessary_hashes: LOG.debug("File contents for '%s' needed by the server", f) file_path = os.path.join('root', f.lstrip('/')) @@ -710,22 +526,15 @@ def assemble_zip(inputs, zip_file, client): # Compressing .zip file with open(zip_file, 'rb') as source: - compressed = zlib.compress(source.read(), - zlib.Z_BEST_COMPRESSION) + compressed = zlib.compress(source.read(), zlib.Z_BEST_COMPRESSION) with open(zip_file, 'wb') as target: target.write(compressed) LOG.debug("[ZIP] Mass store zip written at '%s'", zip_file) - if missing_source_files: - LOG.warning("Missing source files: \n%s", '\n'.join( - [" - " + f_ for f_ in missing_source_files])) - - LOG.debug("Building report zip 
done.") - -def should_be_zipped(input_file, input_files): +def should_be_zipped(input_file: str, input_files: Iterable[str]) -> bool: """ Determine whether a given input file should be included in the zip. Compiler includes and target files should only be included if there is diff --git a/web/client/codechecker_client/cmd_line_client.py b/web/client/codechecker_client/cmd_line_client.py index b0ec01b60c..8253b781ac 100644 --- a/web/client/codechecker_client/cmd_line_client.py +++ b/web/client/codechecker_client/cmd_line_client.py @@ -13,28 +13,31 @@ from collections import defaultdict, namedtuple from datetime import datetime, timedelta import hashlib -import json import os -from operator import itemgetter import re import sys import shutil import time -from typing import Dict, Iterable, List, Tuple, Union - -from plist_to_html import PlistToHtml +from typing import Dict, Iterable, List, Optional, Set, Tuple, Union from codechecker_api.codeCheckerDBAccess_v6 import constants, ttypes from codechecker_api_shared.ttypes import RequestFailed -from codechecker_common import logger, plist_parser, util +from codechecker_report_converter import twodim +from codechecker_report_converter.report import File, Report, report_file, \ + reports as reports_helper +from codechecker_report_converter.report.output import baseline, codeclimate, \ + gerrit, json as report_to_json, plaintext +from codechecker_report_converter.report.output.html import \ + html as report_to_html +from codechecker_report_converter.report.statistics import Statistics +from codechecker_report_converter.util import dump_json_output, \ + load_json_or_empty + +from codechecker_common import logger from codechecker_common.checker_labels import CheckerLabels -from codechecker_common.report import Report -from codechecker_common.output import twodim, gerrit, codeclimate, baseline -from codechecker_report_hash.hash import get_report_path_hash -from codechecker_web.shared import webserver_context -from 
codechecker_web.shared import convert +from codechecker_web.shared import convert, webserver_context from codechecker_client import report_type_converter from .client import login_user, setup_client @@ -78,7 +81,7 @@ def filter_local_file_remote_run( return local_dirs, baseline_files, run_names -def run_sort_type_str(value): +def run_sort_type_str(value: ttypes.RunSortType) -> Optional[str]: """ Converts the given run sort type to string. """ if value == ttypes.RunSortType.NAME: return 'name' @@ -92,7 +95,7 @@ def run_sort_type_str(value): return 'codechecker_version' -def run_sort_type_enum(value): +def run_sort_type_enum(value: str) -> Optional[ttypes.RunSortType]: """ Returns the given run sort type Thrift enum value. """ if value == 'name': return ttypes.RunSortType.NAME @@ -122,62 +125,7 @@ def get_diff_type(args) -> Union[ttypes.DiffType, None]: return None -def reports_to_html_report_data(reports: List[Report]) -> Dict: - """ - Converts reports from Report class from one plist file - to report data events for the HTML plist parser. - """ - file_sources = {} - report_data = [] - - for report in reports: - # Not all report in this list may refer to the same files - # thus we need to create a single file list with - # all files from all reports. - for file_index, file_path in report.files.items(): - if file_index not in file_sources: - try: - with open(file_path, 'r', encoding='utf-8', - errors='ignore') as source_data: - content = source_data.read() - except (OSError, IOError): - content = file_path + " NOT FOUND." 
- file_sources[file_index] = {'id': file_index, - 'path': file_path, - 'content': content} - - events = [] - for element in report.bug_path: - kind = element['kind'] - if kind == 'event': - events.append({'location': element['location'], - 'message': element['message']}) - - macros = [] - for macro in report.macro_expansions: - macros.append({'location': macro['location'], - 'expansion': macro['expansion'], - 'name': macro['name']}) - - notes = [] - for note in report.notes: - notes.append({'location': note['location'], - 'message': note['message']}) - - report_hash = report.main['issue_hash_content_of_line_in_context'] - report_data.append({ - 'events': events, - 'macros': macros, - 'notes': notes, - 'path': report.file_path, - 'reportHash': report_hash, - 'checkerName': report.main['check_name']}) - - return {'files': file_sources, - 'reports': report_data} - - -def get_run_tag(client, run_ids, tag_name): +def get_run_tag(client, run_ids: List[int], tag_name: str): """Return run tag information for the given tag name in the given runs.""" run_history_filter = ttypes.RunHistoryFilter() run_history_filter.tagNames = [tag_name] @@ -187,7 +135,7 @@ def get_run_tag(client, run_ids, tag_name): return run_histories[0] if run_histories else None -def process_run_args(client, run_args_with_tag): +def process_run_args(client, run_args_with_tag: Iterable[str]): """Process the argument and returns run ids and run tag ids. 
The elemnts inside the given run_args_with_tag list has the following @@ -227,72 +175,45 @@ def process_run_args(client, run_args_with_tag): def get_suppressed_reports(reports: List[Report], args: List[str]) -> List[str]: """Returns a list of suppressed report hashes.""" - return [rep.report_hash for rep in reports - if not rep.check_source_code_comments(args.review_status)] + return [report.report_hash for report in reports + if not report.check_source_code_comments(args.review_status)] -def get_report_dir_results(report_dirs: List[str], - args: List[str], - checker_labels: CheckerLabels) -> List[Report]: +def get_report_dir_results( + report_dirs: List[str], + args: List[str], + checker_labels: CheckerLabels +) -> List[Report]: """Get reports from the given report directories. Absolute paths are expected to the given report directories. """ all_reports = [] - processed_path_hashes = set() - for report_dir in report_dirs: - for filename in os.listdir(report_dir): - if not filename.endswith(".plist"): - continue - - file_path = os.path.join(report_dir, filename) - LOG.debug("Parsing: %s", file_path) - _, reports = plist_parser.parse_plist_file(file_path) - LOG.debug("Parsing: %s done %s", file_path, len(reports)) - for report in reports: - LOG.debug("get report hash") - path_hash = get_report_path_hash(report) - if path_hash in processed_path_hashes: - LOG.debug("Not showing report because it is a " - "deduplication of an already processed " - "report!") - LOG.debug("Path hash: %s", path_hash) - LOG.debug(report) - continue - - if skip_report_dir_result(report, args, checker_labels): - continue - - processed_path_hashes.add(path_hash) - all_reports.append(report) - return all_reports + processed_path_hashes = set() + for _, file_paths in report_file.analyzer_result_files(report_dirs): + for file_path in file_paths: + # Get reports. + reports = report_file.get_reports(file_path, checker_labels) + # Skip duplicated reports. 
+ reports = reports_helper.skip(reports, processed_path_hashes) -def print_stats(report_count, file_stats, severity_stats): - """Print summary of the report statistics.""" - print("\n----==== Summary ====----") - if file_stats: - vals = [[os.path.basename(k), v] for k, v in - list(dict(file_stats).items())] - vals.sort(key=itemgetter(0)) - keys = ['Filename', 'Report count'] - table = twodim.to_str('table', keys, vals, 1, True) - print(table) + # Skip reports based on filter arguments. + reports = [ + report for report in reports + if not skip_report_dir_result(report, args, checker_labels)] - if severity_stats: - vals = [[k, v] for k, v in list(dict(severity_stats).items())] - vals.sort(key=itemgetter(0)) - keys = ['Severity', 'Report count'] - table = twodim.to_str('table', keys, vals, 1, True) - print(table) + all_reports.extend(reports) - print("----=================----") - print("Total number of reports: {}".format(report_count)) - print("----=================----\n") + return all_reports -def skip_report_dir_result(report, args, checker_labels): +def skip_report_dir_result( + report: Report, + args: List[str], + checker_labels: CheckerLabels +) -> bool: """Returns True if the report should be skipped from the results. Skipping is done based on the given filter set. 
@@ -300,12 +221,12 @@ def skip_report_dir_result(report, args, checker_labels): f_severities, f_checkers, f_file_path, _, _, _ = check_filter_values(args) if f_severities: - severity_name = checker_labels.severity(report.main['check_name']) + severity_name = checker_labels.severity(report.checker_name) if severity_name.lower() not in list(map(str.lower, f_severities)): return True if f_checkers: - checker_name = report.main['check_name'] + checker_name = report.checker_name if not any([re.match(r'^' + c.replace("*", ".*") + '$', checker_name, re.IGNORECASE) for c in f_checkers]): @@ -313,12 +234,12 @@ def skip_report_dir_result(report, args, checker_labels): if f_file_path: if not any([re.match(r'^' + f.replace("*", ".*") + '$', - report.file_path, re.IGNORECASE) + report.file.path, re.IGNORECASE) for f in f_file_path]): return True if 'checker_msg' in args: - checker_msg = report.main['description'] + checker_msg = report.message if not any([re.match(r'^' + c.replace("*", ".*") + '$', checker_msg, re.IGNORECASE) for c in args.checker_msg]): @@ -817,29 +738,104 @@ def handle_diff_results(args): init_logger(args.verbose if 'verbose' in args else None, stream) + output_dir = args.export_dir if 'export_dir' in args else None if len(args.output_format) > 1 and ('export_dir' not in args): LOG.error("Export directory is required if multiple output formats " "are selected!") sys.exit(1) + if 'html' in args.output_format and not output_dir: + LOG.error("Argument --export not allowed without argument --output " + "when exporting to HTML.") + sys.exit(1) + if 'gerrit' in args.output_format and \ not gerrit.mandatory_env_var_is_set(): sys.exit(1) check_deprecated_arg_usage(args) + + if 'clean' in args and os.path.isdir(output_dir): + print("Previous analysis results in '{0}' have been removed, " + "overwriting with current results.".format(output_dir)) + shutil.rmtree(output_dir) + + if output_dir and not os.path.exists(output_dir): + os.makedirs(output_dir) + context = 
webserver_context.get_context() - source_line_contents = {} + + file_cache: Dict[int, File] = {} + + def cached_report_file_lookup(file_id): + """ + Get source file data for the given file and caches it in a file cache + if file data is not found in the cache. Finally, it returns the source + file data from the cache. + """ + nonlocal file_cache + + if file_id not in file_cache: + source = client.getSourceFileData( + file_id, True, ttypes.Encoding.BASE64) + content = convert.from_b64(source.fileContent) + + file_cache[file_id] = File(source.filePath, file_id, content) + + return file_cache[file_id] + + def convert_report_data_to_report( + client, + reports_data: List[ttypes.ReportData] + ) -> List[Report]: + """ Convert the given report data list to local reports. """ + reports = [] + + if not reports_data: + return reports + + # Get source line contents from the server. + source_lines = defaultdict(set) + for report_data in reports_data: + source_lines[report_data.fileId].add(report_data.line) + + lines_in_files_requested = [] + for file_id in source_lines: + lines_in_files_requested.append( + ttypes.LinesInFilesRequested(fileId=file_id, + lines=source_lines[file_id])) + + source_line_contents = client.getLinesInSourceFileContents( + lines_in_files_requested, ttypes.Encoding.BASE64) + + # Convert reports data to reports. + for report_data in reports_data: + report = report_type_converter.to_report(report_data) + + # For HTML output we need to override the file and get content + # from the server. 
+ if 'html' in args.output_format: + report.file = cached_report_file_lookup(report_data.fileId) + + report.changed_files = [] + report.source_code_comments = [] + report.source_line = \ + source_line_contents[report_data.fileId][report_data.line] + + # TODO: get details + reports.append(report) + + return reports def get_diff_local_dir_remote_run( client, report_dirs: List[str], baseline_files: List[str], remote_run_names: List[str] - ): + ) -> Tuple[List[Report], List[str], List[str]]: """ Compare a local report directory with a remote run. """ filtered_reports = [] - - filtered_report_hashes = set() + filtered_report_hashes: Set[str] = set() report_dir_results = get_report_dir_results( report_dirs, args, context.checker_labels) @@ -860,8 +856,8 @@ def get_diff_local_dir_remote_run( results = get_diff_base_results( client, args, run_ids, remote_hashes, suppressed_in_code) - for result in results: - filtered_reports.append(result) + filtered_reports.extend( + convert_report_data_to_report(client, results)) elif diff_type == ttypes.DiffType.UNRESOLVED: # Get remote hashes which can be found in the remote run and in the # local report directory. @@ -886,7 +882,9 @@ def get_diff_local_dir_remote_run( for result in results: filtered_report_hashes.discard(result.bugHash) - filtered_reports.append(result) + + filtered_reports.extend( + convert_report_data_to_report(client, results)) elif diff_type == ttypes.DiffType.RESOLVED: # Get remote hashes which can be found in the remote run and in the # local report directory. @@ -908,7 +906,7 @@ def get_diff_remote_run_local_dir( remote_run_names: List[str], report_dirs: List[str], baseline_files: List[str] - ): + ) -> Tuple[List[Report], List[str], List[str]]: """ Compares a remote run with a local report directory. 
""" filtered_reports = [] filtered_report_hashes = [] @@ -948,13 +946,16 @@ def get_diff_remote_run_local_dir( results = get_diff_base_results( client, args, run_ids, remote_hashes, suppressed_in_code) - for result in results: - filtered_reports.append(result) + filtered_reports.extend( + convert_report_data_to_report(client, results)) return filtered_reports, filtered_report_hashes, run_names - def get_diff_remote_runs(client, remote_base_run_names, - remote_new_run_names): + def get_diff_remote_runs( + client, + remote_base_run_names: Iterable[str], + remote_new_run_names: Iterable[str] + ) -> Tuple[List[Report], List[str], List[str]]: """ Compares two remote runs and returns the filtered results. """ @@ -984,16 +985,12 @@ def get_diff_remote_runs(client, remote_base_run_names, ttypes.SortType.FILENAME, ttypes.Order.ASC))] - all_results = get_run_results(client, - base_ids, - constants.MAX_QUERY_SIZE, - 0, - sort_mode, - report_filter, - cmp_data, - False) + all_results = get_run_results( + client, base_ids, constants.MAX_QUERY_SIZE, 0, sort_mode, + report_filter, cmp_data, False) - return all_results, base_run_names, new_run_names + reports = convert_report_data_to_report(client, all_results) + return reports, base_run_names, new_run_names def get_diff_local_dirs( report_dirs: List[str], @@ -1047,126 +1044,7 @@ def get_diff_local_dirs( return filtered_reports, filtered_report_hashes - def cached_report_file_lookup(file_cache, file_id): - """ - Get source file data for the given file and caches it in a file cache - if file data is not found in the cache. Finally, it returns the source - file data from the cache. 
- """ - if file_id not in file_cache: - source = client.getSourceFileData(file_id, True, - ttypes.Encoding.BASE64) - file_content = convert.from_b64(source.fileContent) - file_cache[file_id] = {'id': file_id, - 'path': source.filePath, - 'content': file_content} - - return file_cache[file_id] - - def get_report_data(client, reports, file_cache): - """ - Returns necessary report files and report data events for the HTML - plist parser. - """ - file_sources = {} - report_data = [] - - for report in reports: - file_sources[report.fileId] = cached_report_file_lookup( - file_cache, report.fileId) - - details = client.getReportDetails(report.reportId) - events = [] - for event in details.pathEvents: - file_sources[event.fileId] = cached_report_file_lookup( - file_cache, event.fileId) - - location = {'line': event.startLine, - 'col': event.startCol, - 'file': event.fileId} - - events.append({'location': location, - 'message': event.msg}) - - # Get extended data. - macros = [] - notes = [] - for extended_data in details.extendedData: - file_sources[extended_data.fileId] = cached_report_file_lookup( - file_cache, extended_data.fileId) - - location = {'line': extended_data.startLine, - 'col': extended_data.startCol, - 'file': extended_data.fileId} - - if extended_data.type == ttypes.ExtendedReportDataType.MACRO: - macros.append({'location': location, - 'expansion': event.msg}) - elif extended_data.type == ttypes.ExtendedReportDataType.NOTE: - notes.append({'location': location, - 'message': event.msg}) - - report_data.append({ - 'events': events, - 'macros': macros, - 'notes': notes, - 'path': report.checkedFile, - 'reportHash': report.bugHash, - 'checkerName': report.checkerId}) - - return {'files': file_sources, - 'reports': report_data} - - def report_to_html(client, reports, output_dir): - """ - Generate HTML output files for the given reports in the given output - directory by using the Plist To HTML builder. 
- """ - html_builder = PlistToHtml.HtmlBuilder( - context.path_plist_to_html_dist, - context.checker_labels) - file_stats = defaultdict(int) - severity_stats = defaultdict(int) - file_report_map = defaultdict(list) - for report in reports: - if isinstance(report, Report): - file_path = report.file_path - - check_name = report.main['check_name'] - sev = context.checker_labels.severity(check_name) - else: - file_path = report.checkedFile - sev = ttypes.Severity._VALUES_TO_NAMES[report.severity] - - file_report_map[file_path].append(report) - file_stats[file_path] += 1 - severity_stats[sev] += 1 - - file_cache = {} - for file_path, file_reports in file_report_map.items(): - checked_file = file_path - filename = os.path.basename(checked_file) - h = int( - hashlib.md5( - file_path.encode('utf-8')).hexdigest(), - 16) % (10 ** 8) - - if isinstance(file_reports[0], Report): - report_data = reports_to_html_report_data(file_reports) - else: - report_data = get_report_data(client, file_reports, file_cache) - - output_path = os.path.join(output_dir, - filename + '_' + str(h) + '.html') - html_builder.create(output_path, report_data) - print('Html file was generated for file://{0}: file://{1}'.format( - checked_file, output_path)) - - html_builder.create_index_html(output_dir) - print_stats(len(reports), file_stats, severity_stats) - def print_reports( - client, reports: List[Report], report_hashes: Iterable[str], output_formats: List[str] @@ -1175,173 +1053,93 @@ def print_reports( LOG.info("Couldn't get local reports for the following baseline " "report hashes: %s", ', '.join(sorted(report_hashes))) - selected_output_format_num = len(output_formats) - - if 'json' in output_formats: - out = [] - for report in reports: - if isinstance(report, Report): - report = \ - report_type_converter.report_to_reportData( - report, context.checker_labels) - out.append(report) - else: - out.append(report) - - encoded_reports = CmdLineOutputEncoder().encode(out) - if output_dir: - report_json 
= os.path.join(output_dir, 'reports.json') - - with open(report_json, mode="w", encoding="utf-8", - errors="ignore") as reports_file: - reports_file.write(encoded_reports) - LOG.info('JSON report file was created: %s', - os.path.join(output_dir, 'report.json')) - - else: - print(encoded_reports) - - # Json was the only format specified. - if selected_output_format_num == 1: - return - - output_formats.remove('json') - - if 'html' in output_formats: - print("Generating HTML output files to file://{0} directory:\n" - .format(output_dir)) - - report_to_html(client, reports, output_dir) - - print('\nTo view the results in a browser run:\n' - ' $ firefox {0}\n'.format(os.path.join(output_dir, - 'index.html'))) - - # HTML was the only format specified. - if selected_output_format_num == 1: - return - - output_formats.remove('html') - - # Collect source line contents for the report type got from the server. - source_lines = defaultdict(set) - for report in reports: - if not isinstance(report, Report) and report.line is not None: - source_lines[report.fileId].add(report.line) - if client: - lines_in_files_requested = [] - for key in source_lines: - lines_in_files_requested.append( - ttypes.LinesInFilesRequested(fileId=key, - lines=source_lines[key])) - - source_line_contents.update(client.getLinesInSourceFileContents( - lines_in_files_requested, ttypes.Encoding.BASE64)) - - # Convert all the reports to the common Report - # type for printing and formatting to various formats. 
- converted_reports = [] - changed_files = set() + statistics = Statistics() + changed_files: Set[str] = set() + html_builder: Optional[report_to_html.HtmlBuilder] = None for report in reports: - if not isinstance(report, Report): - r = report_type_converter.reportData_to_report(report) - if source_line_contents: - r.source_line = convert.from_b64( - source_line_contents[report.fileId][report.line]) - converted_reports.append(r) - else: - if not os.path.exists(report.file_path): - changed_files.add(report.file_path) - continue - - report.source_line = util.get_line(report.file_path, - report.line) - - converted_reports.append(report) - - reports = converted_reports - - repo_dir = os.environ.get('CC_REPO_DIR') - if repo_dir: - for report in reports: - report.trim_path_prefixes([repo_dir]) - - if 'gerrit' in output_formats: - gerrit_reports = gerrit.convert(reports, context.checker_labels) - - # Gerrit was the only format specified. - if selected_output_format_num == 1 and not output_dir: - print(json.dumps(gerrit_reports)) - return - - gerrit_review_json = os.path.join(output_dir, - 'gerrit_review.json') - with open(gerrit_review_json, 'w') as review_file: - json.dump(gerrit_reports, review_file) - LOG.info("Gerrit review file was created: %s\n", - gerrit_review_json) + statistics.add_report(report) - output_formats.remove('gerrit') + if report.changed_files: + changed_files.update(report.changed_files) - if 'codeclimate' in output_formats: - cc_reports = codeclimate.convert(reports, context.checker_labels) - # Codelimate was the only format specified. 
- if selected_output_format_num == 1 and not output_dir: - print(json.dumps(cc_reports)) - return - - codeclimate_issues_json = os.path.join(output_dir, - 'codeclimate_issues.json') - with open(codeclimate_issues_json, 'w') as issues_f: - json.dump(cc_reports, issues_f) + repo_dir = os.environ.get('CC_REPO_DIR') + if repo_dir: + report.trim_path_prefixes([repo_dir]) - LOG.info("Code Climate file was created: %s\n", - codeclimate_issues_json) + for output_format in output_formats: + if output_format == 'plaintext': + file_report_map = plaintext.get_file_report_map(reports) + plaintext.convert(file_report_map) + + if output_format == 'html': + if not html_builder: + html_builder = report_to_html.HtmlBuilder( + context.path_plist_to_html_dist, + context.checker_labels) + + file_report_map = plaintext.get_file_report_map(reports) + + LOG.info("Generating HTML output files to file://%s " + "directory:", output_dir) + + for file_path in file_report_map: + file_name = os.path.basename(file_path) + h = int( + hashlib.md5( + file_path.encode('utf-8')).hexdigest(), + 16) % (10 ** 8) + + output_file_path = os.path.join( + output_dir, f"{file_name}_ {str(h)}.html") + html_builder.create(output_file_path, reports) + + if output_format in ['csv', 'rows', 'table']: + header = ['File', 'Checker', 'Severity', 'Msg', 'Source'] + rows = [] + for report in reports: + if report.source_line is None: + continue + + checked_file = f"{report.file.path}:{str(report.line)}:" \ + f"{str(report.column)}" + rows.append(( + report.severity, + checked_file, + report.message, + report.checker_name, + report.source_line.rstrip())) - output_formats.remove('codeclimate') + print(twodim.to_str(output_format, header, rows)) - header = ['File', 'Checker', 'Severity', 'Msg', 'Source'] - rows = [] + if output_format == 'json': + data = report_to_json.convert(reports) - file_stats = defaultdict(int) - severity_stats = defaultdict(int) + report_json = os.path.join(output_dir, 'reports.json') \ + if 
output_dir else None + dump_json_output(data, report_json) - for report in reports: - if report.source_line is None: - continue + if output_format == 'gerrit': + data = gerrit.convert(reports) - severity = context.checker_labels.severity(report.check_name) - file_name = report.file_path - checked_file = file_name \ - + ':' + str(report.line) + ":" + str(report.col) - check_msg = report.description + report_json = os.path.join( + output_dir, 'gerrit_review.json') if output_dir else None + dump_json_output(data, report_json) - rows.append((severity, - checked_file, - check_msg, - report.check_name, - report.source_line)) + if output_format == 'codeclimate': + data = codeclimate.convert(reports) - severity_stats[severity] += 1 - file_stats[file_name] += 1 + report_json = os.path.join( + output_dir, 'codeclimate_issues.json') \ + if output_dir else None + dump_json_output(data, report_json) - for output_format in output_formats: - if output_format == 'plaintext': - for row in rows: - print("[{0}] {1}: {2} [{3}]\n{4}\n".format( - row[0], row[1], row[2], row[3], row[4])) - else: - print(twodim.to_str(output_format, header, rows)) + if 'html' in output_formats: + html_builder.finish(output_dir, statistics) - print_stats(len(reports), file_stats, severity_stats) + if 'plaintext' in output_formats: + statistics.write() - if changed_files: - changed_f = '\n'.join([' - ' + f for f in changed_files]) - LOG.warning("The following source file contents changed since the " - "latest analysis:\n%s\nMultiple reports were not " - "shown and skipped from the statistics. 
Please " - "analyze your project again to update the " - "reports!", changed_f) + reports_helper.dump_changed_files(changed_files) basename_local_dirs, basename_baseline_files, basename_run_names = \ filter_local_file_remote_run(args.base_run_names) @@ -1369,15 +1167,6 @@ def print_reports( if has_different_run_args: sys.exit(1) - output_dir = args.export_dir if 'export_dir' in args else None - if 'clean' in args and os.path.isdir(output_dir): - print("Previous analysis results in '{0}' have been removed, " - "overwriting with current results.".format(output_dir)) - shutil.rmtree(output_dir) - - if output_dir and not os.path.exists(output_dir): - os.makedirs(output_dir) - if basename_local_dirs: LOG.info("Matching local report directories (--baseline): %s", ', '.join(basename_local_dirs)) @@ -1419,7 +1208,7 @@ def print_reports( basename_local_dirs, basename_baseline_files, newname_local_dirs, newname_baseline_files) - print_reports(client, reports, report_hashes, args.output_format) + print_reports(reports, report_hashes, args.output_format) LOG.info("Compared the following local files / directories: %s and %s", ', '.join([*basename_local_dirs, *basename_baseline_files]), ', '.join([*newname_local_dirs, *newname_baseline_files])) @@ -1429,7 +1218,7 @@ def print_reports( client, basename_run_names, newname_local_dirs, newname_baseline_files) - print_reports(client, reports, report_hashes, args.output_format) + print_reports(reports, report_hashes, args.output_format) LOG.info("Compared remote run(s) %s (matching: %s) and local files / " "report directory(s) %s", ', '.join(basename_run_names), @@ -1441,7 +1230,7 @@ def print_reports( client, basename_local_dirs, basename_baseline_files, newname_run_names) - print_reports(client, reports, report_hashes, args.output_format) + print_reports(reports, report_hashes, args.output_format) LOG.info("Compared local files / report directory(s) %s and remote " "run(s) %s (matching: %s).", ', '.join([*basename_local_dirs, 
*basename_baseline_files]), @@ -1450,7 +1239,7 @@ def print_reports( else: reports, matching_base_run_names, matching_new_run_names = \ get_diff_remote_runs(client, basename_run_names, newname_run_names) - print_reports(client, reports, None, args.output_format) + print_reports(reports, None, args.output_format) LOG.info("Compared multiple remote runs %s (matching: %s) and %s " "(matching: %s)", ', '.join(basename_run_names), @@ -1755,7 +1544,7 @@ def handle_import(args): client = setup_client(args.product_url) - data = util.load_json_or_empty(args.input, default=None) + data = load_json_or_empty(args.input, default=None) if not data: LOG.error("Failed to import data!") sys.exit(1) diff --git a/web/client/codechecker_client/credential_manager.py b/web/client/codechecker_client/credential_manager.py index 692d427248..37b7da904c 100644 --- a/web/client/codechecker_client/credential_manager.py +++ b/web/client/codechecker_client/credential_manager.py @@ -17,8 +17,9 @@ import portalocker +from codechecker_report_converter.util import load_json_or_empty + from codechecker_common.logger import get_logger -from codechecker_common.util import load_json_or_empty from codechecker_web.shared.env import check_file_owner_rw, get_password_file,\ get_session_file diff --git a/web/client/codechecker_client/metadata.py b/web/client/codechecker_client/metadata.py index 46f2125c21..d2d15d6318 100644 --- a/web/client/codechecker_client/metadata.py +++ b/web/client/codechecker_client/metadata.py @@ -9,8 +9,9 @@ Helpers to manage metadata.json file. 
""" +from codechecker_report_converter.util import load_json_or_empty + from codechecker_common.logger import get_logger -from codechecker_common.util import load_json_or_empty LOG = get_logger('system') diff --git a/web/client/codechecker_client/product_client.py b/web/client/codechecker_client/product_client.py index 57d279d6cb..725cd6c372 100644 --- a/web/client/codechecker_client/product_client.py +++ b/web/client/codechecker_client/product_client.py @@ -15,8 +15,9 @@ from codechecker_api.ProductManagement_v6.ttypes import DatabaseConnection, \ ProductConfiguration +from codechecker_report_converter import twodim + from codechecker_common import logger -from codechecker_common.output import twodim from codechecker_web.shared import database_status, convert diff --git a/web/client/codechecker_client/report_type_converter.py b/web/client/codechecker_client/report_type_converter.py index 85036e84a5..694c5ccb71 100644 --- a/web/client/codechecker_client/report_type_converter.py +++ b/web/client/codechecker_client/report_type_converter.py @@ -6,53 +6,41 @@ # # ------------------------------------------------------------------------- -"""Convert between the codechecker_common.Report type and -the thrift ReportData type.""" +""" Convert between Report type and thrift ReportData type. 
""" -from codechecker_common.checker_labels import CheckerLabels -from codechecker_common.report import Report from codechecker_api.codeCheckerDBAccess_v6.ttypes import ReportData, Severity +from codechecker_report_converter.report import File, Report -def reportData_to_report(report_data: ReportData) -> Report: - """Create a report object from the given thrift report data.""" - main = { - "check_name": report_data.checkerId, - "description": report_data.checkerMsg, - "issue_hash_content_of_line_in_context": report_data.bugHash, - "location": { - "line": report_data.line, - "col": report_data.column, - "file": 0, - }, - } - bug_path = None - files = {0: report_data.checkedFile} - # TODO Can not reconstruct because only the analyzer name was stored - # it should be a analyzer_name analyzer_version - return Report(main, bug_path, files, metadata=None) - - -def report_to_reportData(report: Report, - checker_labels: CheckerLabels) -> ReportData: - """Convert a Report object to a Thrift ReportData type.""" - events = [i for i in report.bug_path if i.get("kind") == "event"] - - report_hash = report.main["issue_hash_content_of_line_in_context"] - checker_name = report.main["check_name"] - - severity = None - if checker_labels: - severity_name = checker_labels.severity(checker_name) - severity = Severity._NAMES_TO_VALUES[severity_name] +def to_report(report: ReportData) -> Report: + """ Create a Report object from the given thrift report data. """ + severity = Severity._VALUES_TO_NAMES[report.severity] \ + if report.severity else 'UNSPECIFIED' + + return Report( + File(report.checkedFile), + report.line, + report.column, + report.checkerMsg, + report.checkerId, + severity, + report.bugHash, + report.analyzerName) + + +def to_report_data( + report: Report +) -> ReportData: + """ Convert a Report object to a Thrift ReportData type. 
""" + severity = Severity._NAMES_TO_VALUES[report.severity or 'UNSPECIFIED'] return ReportData( - checkerId=checker_name, - bugHash=report_hash, - checkedFile=report.file_path, - checkerMsg=report.main["description"], - line=report.main["location"]["line"], - column=report.main["location"]["col"], + checkerId=report.checker_name, + bugHash=report.report_hash, + checkedFile=report.file.path, + checkerMsg=report.message, + line=report.line, + column=report.column, severity=severity, - bugPathLength=len(events), - ) + analyzerName=report.analyzer_name, + bugPathLength=len(report.bug_path_events)) diff --git a/web/client/codechecker_client/source_component_client.py b/web/client/codechecker_client/source_component_client.py index a7c37efbbb..71f360da1e 100644 --- a/web/client/codechecker_client/source_component_client.py +++ b/web/client/codechecker_client/source_component_client.py @@ -12,8 +12,9 @@ import sys +from codechecker_report_converter import twodim + from codechecker_common import logger -from codechecker_common.output import twodim from codechecker_web.shared.env import get_user_input from .client import setup_client diff --git a/web/client/codechecker_client/suppress_file_handler.py b/web/client/codechecker_client/suppress_file_handler.py index 89d1fcca5d..9133fc039d 100644 --- a/web/client/codechecker_client/suppress_file_handler.py +++ b/web/client/codechecker_client/suppress_file_handler.py @@ -25,7 +25,7 @@ import re from codechecker_common.logger import get_logger -from codechecker_common.source_code_comment_handler import \ +from codechecker_report_converter.source_code_comment_handler import \ SourceCodeCommentHandler LOG = get_logger('system') diff --git a/web/client/codechecker_client/token_client.py b/web/client/codechecker_client/token_client.py index cc64e327af..dcc8f8cfb3 100644 --- a/web/client/codechecker_client/token_client.py +++ b/web/client/codechecker_client/token_client.py @@ -9,8 +9,9 @@ Argument handlers for the 'CodeChecker cmd 
token' subcommands. """ +from codechecker_report_converter import twodim + from codechecker_common import logger -from codechecker_common.output import twodim from .client import init_auth_client from .cmd_line import CmdLineOutputEncoder diff --git a/web/client/tests/unit/__init__.py b/web/client/tests/unit/__init__.py index 6d34ff8f01..5ded218171 100644 --- a/web/client/tests/unit/__init__.py +++ b/web/client/tests/unit/__init__.py @@ -9,7 +9,6 @@ Setup python modules for the unit tests. """ -import json import os import sys @@ -21,3 +20,4 @@ sys.path.append(REPO_ROOT) sys.path.append(os.path.join(REPO_ROOT, 'web')) +sys.path.append(os.path.join(REPO_ROOT, 'tools', 'report-converter')) diff --git a/web/client/tests/unit/test_report_converter.py b/web/client/tests/unit/test_report_converter.py index 61f2589fb6..8ca9bfc207 100644 --- a/web/client/tests/unit/test_report_converter.py +++ b/web/client/tests/unit/test_report_converter.py @@ -12,7 +12,8 @@ import unittest -from codechecker_common import report +from codechecker_report_converter.report import File, Report + from codechecker_client import report_type_converter from codechecker_api.codeCheckerDBAccess_v6 import ttypes @@ -21,75 +22,48 @@ class ReportTypeConverterTest(unittest.TestCase): """Type conversion tests.""" def test_Report_to_ReportData(self): - """Report to reportData conversion.""" - check_name = "checker.name" - report_hash = "2343we23" - source_file = "main.cpp" - description = "some checker message" - line = 10 - column = 8 - - main = { - "description": description, - "check_name": check_name, - "issue_hash_content_of_line_in_context": report_hash, - "location": {"line": line, "col": column, "file": 0}, - } - - rep = report.Report(main=main, - bugpath=[], - files={0: source_file}, - metadata=None) - - class CheckerLabels: - def severity(self, checker): - if checker == check_name: - return 'LOW' - - # This assertion warns when a new test-case in the future - # intends to query the severity of 
another checker. The - # original behavior of this function is to return the - # defult 'UNSPECIFIED' value by defult when the severity is - # not provided in the config file. - assert False, \ - 'Currently no test-case quieries other labels for ' \ - 'other checkers.' + """ Report to reportData conversion. """ + checker_name = "checker.name" + report = Report( + file=File("main.cpp"), + line=10, + column=8, + message="some checker message", + checker_name=checker_name, + report_hash="2343we23", + analyzer_name="dummy.analyzer", + severity="LOW" + ) - checker_labels = CheckerLabels() - rep_data = report_type_converter.report_to_reportData( - rep, checker_labels) + rep_data = report_type_converter.to_report_data(report) - self.assertEqual(rep_data.checkerId, rep.check_name) - self.assertEqual(rep_data.bugHash, rep.report_hash) - self.assertEqual(rep_data.checkedFile, rep.file_path) - self.assertEqual(rep_data.line, rep.line) - self.assertEqual(rep_data.column, rep.col) + self.assertEqual(rep_data.checkerId, report.checker_name) + self.assertEqual(rep_data.bugHash, report.report_hash) + self.assertEqual(rep_data.checkedFile, report.file.path) + self.assertEqual(rep_data.line, report.line) + self.assertEqual(rep_data.column, report.column) + self.assertEqual(rep_data.analyzerName, report.analyzer_name) self.assertEqual(rep_data.severity, ttypes.Severity.LOW) def test_ReportData_to_Report(self): - """ReportData to Report conversion.""" - check_name = "checker.name" - report_hash = "2343we23" - source_file = "main.cpp" - description = "some checker message" - line = 10 - column = 8 - + """ ReportData to Report conversion. 
""" rep_data = ttypes.ReportData( - checkerId=check_name, - bugHash=report_hash, - checkedFile=source_file, - checkerMsg=description, - line=line, - column=column, - severity="LOW", + checkerId="checker.name", + bugHash="2343we23", + checkedFile="main.cpp", + checkerMsg="some checker message", + line=10, + column=8, + severity=ttypes.Severity.LOW, + analyzerName="dummy.analyzer", bugPathLength=5, ) - rep = report_type_converter.reportData_to_report(rep_data) - self.assertEqual(rep.check_name, rep_data.checkerId) - self.assertEqual(rep.report_hash, rep_data.bugHash) - self.assertEqual(rep.file_path, rep_data.checkedFile) - self.assertEqual(rep.description, rep_data.checkerMsg) - self.assertEqual(rep.line, rep_data.line) - self.assertEqual(rep.col, rep_data.column) + report = report_type_converter.to_report(rep_data) + self.assertEqual(report.checker_name, rep_data.checkerId) + self.assertEqual(report.report_hash, rep_data.bugHash) + self.assertEqual(report.file.path, rep_data.checkedFile) + self.assertEqual(report.message, rep_data.checkerMsg) + self.assertEqual(report.line, rep_data.line) + self.assertEqual(report.column, rep_data.column) + self.assertEqual(report.analyzer_name, rep_data.analyzerName) diff --git a/web/codechecker_web/cmd/web_version.py b/web/codechecker_web/cmd/web_version.py index e81b6bde94..18651e95dc 100644 --- a/web/codechecker_web/cmd/web_version.py +++ b/web/codechecker_web/cmd/web_version.py @@ -13,8 +13,10 @@ import argparse import json +from codechecker_report_converter import twodim + from codechecker_common import logger -from codechecker_common.output import USER_FORMATS, twodim +from codechecker_common.output import USER_FORMATS from codechecker_web.shared import webserver_context, version diff --git a/web/codechecker_web/shared/webserver_context.py b/web/codechecker_web/shared/webserver_context.py index 893e574dee..f9a09edf87 100644 --- a/web/codechecker_web/shared/webserver_context.py +++ 
b/web/codechecker_web/shared/webserver_context.py @@ -15,10 +15,11 @@ import re import sys +from codechecker_report_converter.util import load_json_or_empty + from codechecker_common import logger from codechecker_common.checker_labels import CheckerLabels from codechecker_common.singleton import Singleton -from codechecker_common.util import load_json_or_empty LOG = logger.get_logger('system') @@ -176,7 +177,8 @@ def git_commit_urls(self): @property def path_plist_to_html_dist(self): - return os.path.join(self._lib_dir_path, 'plist_to_html', 'static') + return os.path.join(self._lib_dir_path, 'codechecker_report_converter', + 'report', 'output', 'html', 'static') @property def path_env_extra(self): diff --git a/web/server/codechecker_server/api/mass_store_run.py b/web/server/codechecker_server/api/mass_store_run.py index a943877180..13d56bea0a 100644 --- a/web/server/codechecker_server/api/mass_store_run.py +++ b/web/server/codechecker_server/api/mass_store_run.py @@ -19,26 +19,27 @@ from datetime import datetime from hashlib import sha256 from tempfile import TemporaryDirectory -from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple +from typing import Any, Dict, List, Optional, Set, Tuple import codechecker_api_shared from codechecker_api.codeCheckerDBAccess_v6 import ttypes -from codechecker_common import plist_parser, skiplist_handler, util +from codechecker_common import skiplist_handler, util from codechecker_common.logger import get_logger -from codechecker_common.source_code_comment_handler import \ - SourceCodeCommentHandler, SpellException, contains_codechecker_comment -from codechecker_report_hash.hash import get_report_path_hash +from codechecker_report_converter import util +from codechecker_report_converter.report import report_file, Report +from codechecker_report_converter.report.hash import get_report_path_hash +from codechecker_report_converter.source_code_comment_handler import \ + SourceCodeCommentHandler, SpellException, 
contains_codechecker_comment from ..database import db_cleanup from ..database.config_db_model import Product from ..database.database import DBSession from ..database.run_db_model import AnalysisInfo, AnalyzerStatistic, \ BugPathEvent, BugReportPoint, ExtendedReportData, File, FileContent, \ - Report, Run, RunHistory, RunLock -from ..metadata import checker_is_unavailable, get_analyzer_name, \ - MetadataInfoParser + Report as DBReport, Run, RunHistory, RunLock +from ..metadata import checker_is_unavailable, MetadataInfoParser from .report_server import ThriftRequestHandler from .thrift_enum_helper import report_extended_data_type_str @@ -49,14 +50,6 @@ # FIXME: when these types are introduced we need to use those. SourceLineComments = List[Any] -ReportType = Any -MainSection = Dict - - -class PathEvents(NamedTuple): - paths: List[ttypes.BugPathPos] - events: List[ttypes.BugPathEvent] - extended_data: List[ttypes.ExtendedReportData] def unzip(b64zip: str, output_dir: str) -> int: @@ -112,141 +105,6 @@ def parse_codechecker_review_comment( return src_comment_data -def collect_paths_events( - report: ReportType, - file_ids: Dict[str, int], - files: Dict[str, str] -) -> PathEvents: - """ - This function creates the BugPathPos and BugPathEvent objects which belong - to a report. - - report -- A report object from the parsed plist file. - file_ids -- A dictionary which maps the file paths to file IDs in the - database. - files -- A list containing the file paths from the parsed plist file. The - order of this list must be the same as in the plist file. - - #TODO Multiple ranges could belong to an event or control node. - Only the first range from the list of ranges is stored into the - database. Further improvement can be to store and view all ranges - if there are more than one. - """ - path_events = PathEvents([], [], []) - - events = [i for i in report.bug_path if i.get('kind') == 'event'] - - # Create remaining data for bugs and send them to the server. 
In plist - # file the source and target of the arrows are provided as starting and - # ending ranges of the arrow. The path A->B->C is given as A->B and - # B->C, thus range B is provided twice. So in the loop only target - # points of the arrows are stored, and an extra insertion is done for - # the source of the first arrow before the loop. - report_path = [i for i in report.bug_path if i.get('kind') == 'control'] - - if report_path: - start_range = report_path[0]['edges'][0]['start'] - start1_line = start_range[0]['line'] - start1_col = start_range[0]['col'] - start2_line = start_range[1]['line'] - start2_col = start_range[1]['col'] - source_file_path = files[start_range[1]['file']] - path_events.paths.append(ttypes.BugPathPos( - start1_line, - start1_col, - start2_line, - start2_col, - file_ids[source_file_path])) - - for path in report_path: - try: - end_range = path['edges'][0]['end'] - end1_line = end_range[0]['line'] - end1_col = end_range[0]['col'] - end2_line = end_range[1]['line'] - end2_col = end_range[1]['col'] - source_file_path = files[end_range[1]['file']] - path_events.paths.append(ttypes.BugPathPos( - end1_line, - end1_col, - end2_line, - end2_col, - file_ids[source_file_path])) - except IndexError: - # Edges might be empty nothing can be stored. - continue - - for event in events: - file_path = files[event['location']['file']] - - start_loc = event['location'] - end_loc = event['location'] - # Range can provide more precise location information. - # Use that if available. 
- ranges = event.get("ranges") - if ranges: - start_loc = ranges[0][0] - end_loc = ranges[0][1] - - path_events.events.append(ttypes.BugPathEvent( - start_loc['line'], - start_loc['col'], - end_loc['line'], - end_loc['col'], - event['message'], - file_ids[file_path])) - - for macro in report.macro_expansions: - if not macro['expansion']: - continue - - file_path = files[macro['location']['file']] - - start_loc = macro['location'] - end_loc = macro['location'] - # Range can provide more precise location information. - # Use that if available. - ranges = macro.get("ranges") - if ranges: - start_loc = ranges[0][0] - end_loc = ranges[0][1] - - path_events.extended_data.append(ttypes.ExtendedReportData( - ttypes.ExtendedReportDataType.MACRO, - start_loc['line'], - start_loc['col'], - end_loc['line'], - end_loc['col'], - macro['expansion'], - file_ids[file_path])) - - for note in report.notes: - if not note['message']: - continue - - file_path = files[note['location']['file']] - - start_loc = note['location'] - end_loc = note['location'] - # Range can provide more precise location information. - # Use that if available. - ranges = note.get("ranges") - if ranges: - start_loc = ranges[0][0] - end_loc = ranges[0][1] - - path_events.extended_data.append(ttypes.ExtendedReportData( - ttypes.ExtendedReportDataType.NOTE, - start_loc['line'], - start_loc['col'], - end_loc['line'], - end_loc['col'], - note['message'], - file_ids[file_path])) - - return path_events - - def add_file_record( session: DBSession, file_path: str, @@ -799,72 +657,71 @@ def __add_report( self, session: DBSession, run_id: int, - file_id: int, - main_section: MainSection, - path_events: PathEvents, + report: Report, + file_path_to_id: Dict[str, int], detection_status: str, detection_time: datetime, analysis_info: AnalysisInfo, analyzer_name: Optional[str] = None ) -> int: """ Add report to the database. """ - def store_bug_events(report_id: int): - """ Add bug path events. 
""" - for i, event in enumerate(path_events.events): - bpe = BugPathEvent( - event.startLine, event.startCol, event.endLine, - event.endCol, i, event.msg, event.fileId, report_id) - session.add(bpe) - - def store_bug_path(report_id: int): - """ Add bug path points. """ - for i, piece in enumerate(path_events.paths): - brp = BugReportPoint( - piece.startLine, piece.startCol, piece.endLine, - piece.endCol, i, piece.fileId, report_id) - session.add(brp) - - def store_extended_bug_data(report_id: int): - """ Add extended bug data objects to the database session. """ - for data in path_events.extended_data: - data_type = report_extended_data_type_str(data.type) - red = ExtendedReportData( - data.startLine, data.startCol, data.endLine, data.endCol, - data.message, data.fileId, report_id, data_type) - session.add(red) - try: - checker_name = main_section['check_name'] + checker_name = report.checker_name severity_name = \ self.__context.checker_labels.severity(checker_name) severity = ttypes.Severity._NAMES_TO_VALUES[severity_name] - report = Report( - run_id, main_section['issue_hash_content_of_line_in_context'], - file_id, main_section['description'], - checker_name or 'NOT FOUND', - main_section['category'], main_section['type'], - main_section['location']['line'], - main_section['location']['col'], + + db_report = DBReport( + run_id, report.report_hash, file_path_to_id[report.file.path], + report.message, checker_name or 'NOT FOUND', + report.category, report.type, report.line, report.column, severity, detection_status, detection_time, - len(path_events.events), analyzer_name) + len(report.bug_path_events), analyzer_name) - session.add(report) + session.add(db_report) session.flush() - LOG.debug("storing bug path") - store_bug_path(report.id) - - LOG.debug("storing events") - store_bug_events(report.id) - - LOG.debug("storing extended report data") - store_extended_bug_data(report.id) + LOG.debug("Storing bug path positions.") + for i, p in 
enumerate(report.bug_path_positions): + session.add(BugReportPoint( + p.range.start_line, p.range.start_col, + p.range.end_line, p.range.end_col, + i, file_path_to_id[p.file.path], db_report.id)) + + LOG.debug("Storing bug path events.") + for i, event in enumerate(report.bug_path_events): + session.add(BugPathEvent( + event.range.start_line, event.range.start_col, + event.range.end_line, event.range.end_col, + i, event.message, file_path_to_id[event.file.path], + db_report.id)) + + LOG.debug("Storing notes.") + for note in report.notes: + data_type = report_extended_data_type_str( + ttypes.ExtendedReportDataType.NOTE) + + session.add(ExtendedReportData( + note.range.start_line, note.range.start_col, + note.range.end_line, note.range.end_col, + note.message, file_path_to_id[note.file.path], + db_report.id, data_type)) + + LOG.debug("Storing macro expansions.") + for macro in report.macro_expansions: + data_type = report_extended_data_type_str( + ttypes.ExtendedReportDataType.MACRO) + + session.add(ExtendedReportData( + macro.range.start_line, macro.range.start_col, + macro.range.end_line, macro.range.end_col, + macro.message, file_path_to_id[macro.file.path], + db_report.id, data_type)) if analysis_info: - report.analysis_info.append(analysis_info) - - return report.id + db_report.analysis_info.append(analysis_info) + return db_report.id except Exception as ex: raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.GENERAL, @@ -884,48 +741,22 @@ def __process_report_file( """ Process and save reports from the given report file to the database. 
""" - try: - files, reports = plist_parser.parse_plist_file(report_file_path) - except Exception as ex: - LOG.warning('Parsing the plist failed: %s', str(ex)) - return False + reports = report_file.get_reports(report_file_path) if not reports: return True - trimmed_files = {} - file_ids = {} - missing_ids_for_files = [] - - for k, v in files.items(): - trimmed_files[k] = \ - util.trim_path_prefixes(v, self.__trim_path_prefixes) - - for file_name in trimmed_files.values(): - file_id = file_path_to_id.get(file_name, -1) - if file_id == -1: - missing_ids_for_files.append(file_name) - continue - - file_ids[file_name] = file_id - - if missing_ids_for_files: - LOG.warning("Failed to get file path id for '%s'!", - ' '.join(missing_ids_for_files)) - return False - - def set_review_status(report: ReportType): + def set_review_status(report: Report): """ Set review status for the given report if there is any source code comment. """ - checker_name = report.main['check_name'] - last_report_event = report.bug_path[-1] + checker_name = report.checker_name + last_report_event = report.bug_path_events[-1] - # The original file path is needed here not the trimmed - # because the source files are extracted as the original - # file path. - file_name = files[last_report_event['location']['file']] + # The original file path is needed here, not the trimmed, because + # the source files are extracted as the original file path. 
+ file_name = report.file.original_path source_file_name = os.path.realpath( os.path.join(source_root, file_name.strip("/"))) @@ -934,14 +765,14 @@ def set_review_status(report: ReportType): if not os.path.isfile(source_file_name): return - report_line = last_report_event['location']['line'] + report_line = last_report_event.range.end_line source_file = os.path.basename(file_name) src_comment_data = parse_codechecker_review_comment( source_file_name, report_line, checker_name) if len(src_comment_data) == 1: - status = src_comment_data[0]['status'] + status = src_comment_data[0].status rw_status = ttypes.ReviewStatus.FALSE_POSITIVE if status == 'confirmed': rw_status = ttypes.ReviewStatus.CONFIRMED @@ -950,7 +781,7 @@ def set_review_status(report: ReportType): self.__report_server._setReviewStatus( session, report.report_hash, rw_status, - src_comment_data[0]['message'], run_history_time) + src_comment_data[0].message, run_history_time) elif len(src_comment_data) > 1: LOG.warning( "Multiple source code comment can be found " @@ -966,9 +797,9 @@ def set_review_status(report: ReportType): analysis_info = self.__analysis_info.get(root_dir_path) for report in reports: - self.__all_report_checkers.add(report.check_name) + self.__all_report_checkers.add(report.checker_name) - if skip_handler.should_skip(report.file_path): + if skip_handler.should_skip(report.file.path): continue report.trim_path_prefixes(self.__trim_path_prefixes) @@ -980,29 +811,24 @@ def set_review_status(report: ReportType): LOG.debug("Storing report to the database...") - bug_id = report.report_hash - detection_status = 'new' detected_at = run_history_time - if bug_id in hash_map_reports: - old_report = hash_map_reports[bug_id][0] + if report.report_hash in hash_map_reports: + old_report = hash_map_reports[report.report_hash][0] old_status = old_report.detection_status detection_status = 'reopened' \ if old_status == 'resolved' else 'unresolved' detected_at = old_report.detected_at - analyzer_name = 
get_analyzer_name( - report.check_name, mip.checker_to_analyzer, report.metadata) - - path_events = collect_paths_events(report, file_ids, trimmed_files) + analyzer_name = mip.checker_to_analyzer.get( + report.checker_name, report.analyzer_name) report_id = self.__add_report( - session, run_id, file_ids[report.file_path], report.main, - path_events, detection_status, detected_at, analysis_info, - analyzer_name) + session, run_id, report, file_path_to_id, + detection_status, detected_at, analysis_info, analyzer_name) - self.__new_report_hashes.add(bug_id) + self.__new_report_hashes.add(report.report_hash) self.__already_added_report_hashes.add(report_path_hash) set_review_status(report) @@ -1046,8 +872,8 @@ def get_skip_handler( self.__new_report_hashes = set() self.__all_report_checkers = set() - all_reports = session.query(Report) \ - .filter(Report.run_id == run_id) \ + all_reports = session.query(DBReport) \ + .filter(DBReport.run_id == run_id) \ .all() hash_map_reports = defaultdict(list) @@ -1068,7 +894,7 @@ def get_skip_handler( disabled_checkers.update(mip.disabled_checkers) for f in report_file_paths: - if not f.endswith('.plist'): + if not report_file.is_supported(f): continue LOG.debug("Parsing input file '%s'", f) diff --git a/web/server/codechecker_server/cmd/server.py b/web/server/codechecker_server/cmd/server.py index a2ba225391..9e3201a298 100644 --- a/web/server/codechecker_server/cmd/server.py +++ b/web/server/codechecker_server/cmd/server.py @@ -26,8 +26,9 @@ from codechecker_api_shared.ttypes import DBStatus +from codechecker_report_converter import twodim + from codechecker_common import arg, logger, util, cmd_config -from codechecker_common.output import twodim from codechecker_server import instance_manager, server from codechecker_server.database import database diff --git a/web/server/codechecker_server/instance_manager.py b/web/server/codechecker_server/instance_manager.py index b80ad8df93..a0fc51d79f 100644 --- 
a/web/server/codechecker_server/instance_manager.py +++ b/web/server/codechecker_server/instance_manager.py @@ -20,8 +20,9 @@ import portalocker +from codechecker_report_converter.util import load_json_or_empty + from codechecker_common.logger import get_logger -from codechecker_common.util import load_json_or_empty LOG = get_logger('system') diff --git a/web/server/codechecker_server/metadata.py b/web/server/codechecker_server/metadata.py index 0c2eacb157..1ce0b4a21d 100644 --- a/web/server/codechecker_server/metadata.py +++ b/web/server/codechecker_server/metadata.py @@ -12,9 +12,10 @@ from typing import Any, Dict, List, Optional, Set, Union import os +from codechecker_report_converter.util import load_json_or_empty from codechecker_common.logger import get_logger -from codechecker_common.util import load_json_or_empty + LOG = get_logger('system') @@ -29,23 +30,6 @@ MetadataCheckers = Dict[str, Union[Dict[str, bool], List[str]]] -def get_analyzer_name( - checker_name: str, - checker_to_analyzer: Optional[CheckerToAnalyzer], - metadata: Dict[str, Any] -) -> Optional[str]: - """ Get analyzer name for the given checker name. 
""" - analyzer_name = checker_to_analyzer.get(checker_name) - if analyzer_name: - return analyzer_name - - if metadata: - return metadata.get("analyzer", {}).get("name") - - if checker_name.startswith('clang-diagnostic-'): - return 'clang-tidy' - - def checker_is_unavailable( checker_name: str, enabled_checkers: EnabledCheckers diff --git a/web/server/codechecker_server/session_manager.py b/web/server/codechecker_server/session_manager.py index 6b2d0f7205..e72adbdcfd 100644 --- a/web/server/codechecker_server/session_manager.py +++ b/web/server/codechecker_server/session_manager.py @@ -18,8 +18,9 @@ from datetime import datetime from typing import Optional +from codechecker_report_converter.util import load_json_or_empty + from codechecker_common.logger import get_logger -from codechecker_common.util import load_json_or_empty from codechecker_web.shared.env import check_file_owner_rw from codechecker_web.shared.version import SESSION_COOKIE_NAME as _SCN diff --git a/web/server/tests/unit/__init__.py b/web/server/tests/unit/__init__.py index 615aa29214..0cbef905f5 100644 --- a/web/server/tests/unit/__init__.py +++ b/web/server/tests/unit/__init__.py @@ -23,4 +23,4 @@ sys.path.append(os.path.join(REPO_ROOT, 'web', 'client')) sys.path.append(os.path.join(REPO_ROOT, 'web', 'server')) -sys.path.append(os.path.join(REPO_ROOT, 'tools', 'codechecker_report_hash')) +sys.path.append(os.path.join(REPO_ROOT, 'tools', 'report-converter')) diff --git a/web/server/tests/unit/test_collect_path_events.py b/web/server/tests/unit/test_collect_path_events.py deleted file mode 100644 index 6745530453..0000000000 --- a/web/server/tests/unit/test_collect_path_events.py +++ /dev/null @@ -1,141 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. 
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- -""" Test Store handler features. """ - - -import os -import unittest - -from codechecker_api.codeCheckerDBAccess_v6 import ttypes - -from codechecker_common import plist_parser - -from codechecker_server.api.mass_store_run import collect_paths_events - - -class CollectPathEventsTest(unittest.TestCase): - """ - Test collecting path events. - """ - - @classmethod - def setup_class(cls): - # Already generated plist files for the tests. - cls.__this_dir = os.path.dirname(__file__) - cls.__plist_test_files = os.path.join( - cls.__this_dir, 'plist_test_files') - - def test_collect_path_events(self): - """ - Test path event collect before store. - """ - clang50_trunk_plist = os.path.join( - self.__plist_test_files, 'clang-5.0-trunk.plist') - files, reports = plist_parser.parse_plist_file(clang50_trunk_plist, - False) - self.assertEqual(len(reports), 3) - - # Generate dummy file_ids which should come from the database. - file_ids = {} - for i, file_name in files.items(): - file_ids[file_name] = i + 1 - - msg = "This test is prepared to handle 3 reports." 
- self.assertEqual(len(reports), 3, msg) - - report1_path = [ - ttypes.BugPathPos(startLine=19, filePath=None, endCol=7, - startCol=5, endLine=19, fileId=1), - ttypes.BugPathPos(startLine=20, filePath=None, endCol=7, - startCol=5, endLine=20, fileId=1), - ttypes.BugPathPos(startLine=21, filePath=None, endCol=13, - startCol=5, endLine=21, fileId=1), - ttypes.BugPathPos(startLine=7, filePath=None, endCol=7, - startCol=5, endLine=7, fileId=1), - ttypes.BugPathPos(startLine=8, filePath=None, endCol=6, - startCol=5, endLine=8, fileId=1), - ttypes.BugPathPos(startLine=8, filePath=None, endCol=25, - startCol=22, endLine=8, fileId=1), - ttypes.BugPathPos(startLine=8, filePath=None, endCol=20, - startCol=10, endLine=8, fileId=1), - ttypes.BugPathPos(startLine=7, filePath=None, endCol=14, - startCol=14, endLine=7, fileId=2) - ] - report1_events = [ - ttypes.BugPathEvent(startLine=20, filePath=None, endCol=12, - startCol=5, msg="'base' initialized to 0", - endLine=20, fileId=1), - ttypes.BugPathEvent(startLine=21, filePath=None, endCol=18, - startCol=15, - msg="Passing the value 0 via " - "1st parameter 'base'", - endLine=21, fileId=1), - ttypes.BugPathEvent(startLine=21, filePath=None, endCol=19, - startCol=5, msg="Calling 'test_func'", - endLine=21, fileId=1), - ttypes.BugPathEvent(startLine=6, filePath=None, endCol=1, - startCol=1, msg="Entered call from 'main'", - endLine=6, fileId=1), - ttypes.BugPathEvent(startLine=8, filePath=None, endCol=25, - startCol=22, - msg="Passing the value 0 via " - "1st parameter 'num'", endLine=8, fileId=1), - ttypes.BugPathEvent(startLine=8, filePath=None, endCol=26, - startCol=10, msg="Calling 'generate_id'", - endLine=8, fileId=1), - ttypes.BugPathEvent(startLine=6, filePath=None, endCol=1, - startCol=1, - msg="Entered call from 'test_func'", - endLine=6, fileId=2), - ttypes.BugPathEvent(startLine=7, filePath=None, endCol=17, - startCol=12, msg='Division by zero', - endLine=7, fileId=2) - ] - - path1, events1, _ = 
collect_paths_events(reports[0], file_ids, files) - - self.assertEqual(path1, report1_path) - self.assertEqual(events1, report1_events) - - report2_path = [] - report2_events = [ - ttypes.BugPathEvent(startLine=8, filePath=None, endCol=26, - startCol=10, - msg="Value stored to 'id' is ""never read", - endLine=8, fileId=1) - ] - - path2, events2, _ = collect_paths_events(reports[1], file_ids, files) - - self.assertEqual(path2, report2_path) - self.assertEqual(events2, report2_events) - - report3_path = [ - ttypes.BugPathPos(startLine=14, filePath=None, endCol=6, - startCol=3, endLine=14, fileId=1), - ttypes.BugPathPos(startLine=15, filePath=None, endCol=3, - startCol=3, endLine=15, fileId=1), - ttypes.BugPathPos(startLine=16, filePath=None, endCol=1, - startCol=1, endLine=16, fileId=1) - ] - report3_events = [ - ttypes.BugPathEvent(startLine=14, filePath=None, endCol=29, - startCol=3, - msg="Address of stack memory associated" - " with local variable 'str'" - " is still referred to by the global " - "variable 'p' upon returning to the " - "caller. This will be a dangling " - "reference", - endLine=14, fileId=1) - ] - - path, events, _ = collect_paths_events(reports[2], file_ids, files) - - self.assertEqual(path, report3_path) - self.assertEqual(events, report3_events) diff --git a/web/server/tests/unit/test_plist_parser.py b/web/server/tests/unit/test_plist_parser.py deleted file mode 100644 index ab3aea1d05..0000000000 --- a/web/server/tests/unit/test_plist_parser.py +++ /dev/null @@ -1,357 +0,0 @@ -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -""" -Test the parsing of the plist generated by multiple clang versions. 
- -With the newer clang releases more information is available in the plist files. - -* Before Clang v3.7: - - Not supported - -* Clang v3.7: - - Checker name is available in the plist - - Report hash is not avilable (generated based on the report path elements - see report handling and plist parsing modules for more details - -* After Clang v3.8: - - Checker name is available - - Report hash is available - -""" - - -import os -import unittest - -from codechecker_common import plist_parser - -# These are the base skeletons for the main report sections where the -# report hash and checker name is missing. -# Before comparison in the tests needs to be extended. -div_zero_skel = \ - {'category': 'Logic error', - 'issue_context': 'generate_id', - 'issue_context_kind': 'function', - 'description': 'Division by zero', - 'type': 'Division by zero', - 'issue_hash': '1', - 'location': { - 'line': 7, - 'col': 14, - 'file': 1 - } - } - -stack_addr_skel = \ - {'category': 'Logic error', - 'issue_context': 'test', - 'issue_context_kind': 'function', - 'description': "Address of stack memory associated with local variable" - " 'str' is still referred to by the global variable 'p'" - " upon returning to the caller." - " This will be a dangling reference", - 'type': 'Stack address stored into global variable', - 'issue_hash': '3', - 'location': { - 'line': 16, - 'col': 1, - 'file': 0 - } - } - -# Base skeletons for reports where the checker name is already available. 
-div_zero_skel_name = \ - {'category': 'Logic error', - 'issue_context': 'generate_id', - 'issue_context_kind': 'function', - 'description': 'Division by zero', - 'check_name': 'core.DivideZero', - 'type': 'Division by zero', - 'issue_hash': '1', - 'location': { - 'line': 7, - 'col': 14, - 'file': 1 - } - } - -stack_addr_skel_name = \ - {'category': 'Logic error', - 'issue_context': 'test', - 'issue_context_kind': 'function', - 'description': "Address of stack memory associated with local variable" - " 'str' is still referred to by the global variable 'p'" - " upon returning to the caller." - " This will be a dangling reference", - 'check_name': 'core.StackAddressEscape', - 'type': 'Stack address stored into global variable', - 'issue_hash': '3', - 'location': { - 'line': 16, - 'col': 1, - 'file': 0 - } - } - -# Main sections for reports where checker name and report hash is available. -div_zero_skel_name_hash = \ - {'category': 'Logic error', - 'check_name': 'core.DivideZero', - 'description': 'Division by zero', - 'issue_context': 'generate_id', - 'issue_context_kind': 'function', - 'issue_hash_content_of_line_in_context': - '79e31a6ba028f0b7d9779faf4a6cb9cf', - 'issue_hash_function_offset': '1', - 'location': { - 'col': 14, - 'file': 1, - 'line': 7 - }, - 'type': 'Division by zero' - } - -stack_addr_skel_name_hash = \ - {'category': 'Logic error', - 'issue_context': 'test', - 'issue_context_kind': 'function', - 'description': "Address of stack memory associated with local variable" - " 'str' is still referred to by the global variable 'p'" - " upon returning to the caller." 
- " This will be a dangling reference", - 'check_name': 'core.StackAddressEscape', - 'type': 'Stack address stored into global variable', - 'issue_hash_content_of_line_in_context': - 'f7b5072d428e890f2d309217f3ead16f', - 'issue_hash_function_offset': '3', - 'location': { - 'line': 16, - 'col': 1, - 'file': 0 - } - } - - -# core.StackAddressEscape hash generated by clang is different -# from the hash generated by the previous clang release. -stack_addr_skel_name_hash_after_v40 = \ - {'category': 'Logic error', - 'issue_context': 'test', - 'issue_context_kind': 'function', - 'description': "Address of stack memory associated with local variable" - " 'str' is still referred to by the global variable 'p'" - " upon returning to the caller." - " This will be a dangling reference", - 'check_name': 'core.StackAddressEscape', - 'type': 'Stack address stored into global variable', - 'issue_hash_content_of_line_in_context': - 'a6d3464f8aab9eb31a8ea7e167e84322', - 'issue_hash_function_offset': '3', - 'location': { - 'line': 16, - 'col': 1, - 'file': 0 - } - } - - -class PlistParserTestCaseNose(unittest.TestCase): - """Test the parsing of the plist generated by multiple clang versions.""" - - @classmethod - def setup_class(cls): - """Initialize test source file.""" - # Bugs found by these checkers in the test source files. - cls.__found_checker_names = [ - 'core.DivideZero', - 'core.StackAddressEscape', - 'deadcode.DeadStores'] - - # Reports were found in these test files. - cls.__found_file_names = {0: 'gen_plist/test.cpp', - 1: './gen_plist/test.h'} - - # Already generated plist files for the tests. - cls.__this_dir = os.path.dirname(__file__) - cls.__plist_test_files = os.path.join( - cls.__this_dir, 'plist_test_files') - - def missing_checker_name_and_hash(self, reports): - """ - The checker name and the report hash is generated - by CodeChecker. - """ - for report in reports: - # Get the checker name detected by CodeChecker based - # on the report description. 
- checker_name = report.main['check_name'] - - if checker_name == 'core.DivideZero': - test_data = div_zero_skel - # Report hash generated by CodeChecker. - test_data['issue_hash_content_of_line_in_context'] = \ - 'e9fb5a280e64610cfa82472117c8d0ac' - test_data['check_name'] = 'core.DivideZero' - self.assertEqual(report.main, test_data) - - if checker_name == 'NOT FOUND': - test_data = stack_addr_skel - # Report hash generated by CodeChecker. - test_data['issue_hash_content_of_line_in_context'] = \ - 'b1bc0e8364a255659522055d1e15cd16' - test_data['check_name'] = 'NOT FOUND' - self.assertEqual(report.main, test_data) - - def missing_hash(self, reports): - """ - Checker name is available but report hash is not available - yet in the generated plist. - """ - - for report in reports: - checker_name = report.main['check_name'] - # Checker name should be available for all the reports. - self.assertNotEqual(checker_name, 'NOT FOUND') - - if checker_name == 'core.DivideZero': - test_data = div_zero_skel_name - # Report hash generated by CodeChecker. - test_data['issue_hash_content_of_line_in_context'] = \ - '51bd152830c2599e98c89cfc78890d0b' - self.assertEqual(report.main, test_data) - - if checker_name == 'core.StackAddressEscape': - test_data = stack_addr_skel_name - # core.StackAddressEscape hash is changed because the checker - # name is available and it is included in the hash. 
- test_data['issue_hash_content_of_line_in_context'] = \ - '3439d5e09aeb5b69a835a6f0a307dfb6' - self.assertEqual(report.main, test_data) - - def test_empty_file(self): - """Plist file is empty.""" - empty_plist = os.path.join(self.__plist_test_files, 'empty_file') - files, reports = plist_parser.parse_plist_file(empty_plist, - False) - self.assertEqual(files, {}) - self.assertEqual(reports, []) - - def test_no_bug_file(self): - """There was no bug in the checked file.""" - no_bug_plist = os.path.join( - self.__plist_test_files, 'clang-3.7-noerror.plist') - files, reports = plist_parser.parse_plist_file(no_bug_plist, - False) - self.assertEqual(files, {}) - self.assertEqual(reports, []) - - def test_clang37_plist(self): - """ - Check plist generated by clang 3.7 checker name should be in the plist - file generating a report hash is still needed. - """ - clang37_plist = os.path.join( - self.__plist_test_files, 'clang-3.7.plist') - files, reports = plist_parser.parse_plist_file(clang37_plist, - False) - - self.assertEqual(files, self.__found_file_names) - self.assertEqual(len(reports), 3) - - self.missing_hash(reports) - - def test_clang38_trunk_plist(self): - """ - Check plist generated by clang 3.8 trunk checker name and report hash - should be in the plist file. - """ - clang38_plist = os.path.join( - self.__plist_test_files, 'clang-3.8-trunk.plist') - files, reports = plist_parser.parse_plist_file(clang38_plist, - False) - - self.assertEqual(files, self.__found_file_names) - self.assertEqual(len(reports), 3) - - valid_checker_names = [] - valid_checker_names.extend(self.__found_checker_names) - - for report in reports: - checker_name = report.main['check_name'] - self.assertIn(checker_name, valid_checker_names) - - if checker_name == 'core.DivideZero': - # Test data is still valid for this version. 
- self.assertEqual(report.main, - div_zero_skel_name_hash) - - if checker_name == 'core.StackAddressEscape': - self.assertEqual(report.main, - stack_addr_skel_name_hash) - - def test_clang40_plist(self): - """ - Check plist generated by clang 4.0 checker name and report hash - should be in the plist file. - """ - clang40_plist = os.path.join( - self.__plist_test_files, 'clang-4.0.plist') - files, reports = plist_parser.parse_plist_file(clang40_plist, - False) - - self.assertEqual(files, self.__found_file_names) - self.assertEqual(len(reports), 3) - - valid_checker_names = [] - valid_checker_names.extend(self.__found_checker_names) - - for report in reports: - checker_name = report.main['check_name'] - # Checker name should be in the plist file. - self.assertNotEqual(checker_name, 'NOT FOUND') - self.assertIn(checker_name, valid_checker_names) - - if checker_name == 'core.DivideZero': - # Test data is still valid for this version. - self.assertEqual(report.main, - div_zero_skel_name_hash) - - if checker_name == 'core.StackAddressEscape': - self.assertEqual(report.main, - stack_addr_skel_name_hash_after_v40) - - def test_clang50_trunk_plist(self): - """ - Check plist generated by clang 5.0 trunk checker name and report hash - should be in the plist file. - """ - clang50_trunk_plist = os.path.join( - self.__plist_test_files, 'clang-5.0-trunk.plist') - files, reports = plist_parser.parse_plist_file(clang50_trunk_plist, - False) - self.assertEqual(files, self.__found_file_names) - self.assertEqual(len(reports), 3) - - valid_checker_names = [] - valid_checker_names.extend(self.__found_checker_names) - - for report in reports: - checker_name = report.main['check_name'] - # Checker name should be in the plist file. - self.assertNotEqual(checker_name, 'NOT FOUND') - self.assertIn(checker_name, valid_checker_names) - - if checker_name == 'core.DivideZero': - # Test data is still valid for this version. 
- self.assertEqual(report.main, - div_zero_skel_name_hash) - - if checker_name == 'core.StackAddressEscape': - self.assertEqual(report.main, - stack_addr_skel_name_hash_after_v40) diff --git a/web/server/tests/unit/test_source_code_comment.py b/web/server/tests/unit/test_source_code_comment.py deleted file mode 100644 index 4cab1d1591..0000000000 --- a/web/server/tests/unit/test_source_code_comment.py +++ /dev/null @@ -1,819 +0,0 @@ -# -*- coding: utf-8 -*- -# -# ------------------------------------------------------------------------- -# -# Part of the CodeChecker project, under the Apache License v2.0 with -# LLVM Exceptions. See LICENSE for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ------------------------------------------------------------------------- - -"""Tests for source code comments in source file.""" - - -import os -import unittest - -from codechecker_common.source_code_comment_handler import \ - SourceCodeCommentHandler - - -class SourceCodeCommentTestCase(unittest.TestCase): - """Tests for source code comments in source file.""" - - @classmethod - def setup_class(cls): - """Initialize test source file references.""" - cls.__test_src_dir = os.path.join( - os.path.dirname(__file__), 'source_code_comment_test_files') - - cls.__tmp_srcfile_1 = open(os.path.join(cls.__test_src_dir, - 'test_file_1'), - encoding='utf-8', errors="ignore") - cls.__tmp_srcfile_2 = open(os.path.join(cls.__test_src_dir, - 'test_file_2'), - encoding='utf-8', errors="ignore") - cls.__tmp_srcfile_3 = open(os.path.join(cls.__test_src_dir, - 'test_file_3'), - encoding='utf-8', errors="ignore") - - @classmethod - def teardown_class(cls): - cls.__tmp_srcfile_1.close() - cls.__tmp_srcfile_2.close() - cls.__tmp_srcfile_3.close() - - def test_src_comment_first_line(self): - """Bug is reported for the first line.""" - bug_line = 3 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - 
bug_line) - self.assertFalse(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 0) - - def test_no_comment(self): - """There is no comment above the bug line.""" - bug_line = 9 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertFalse(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 0) - - def test_no_src_comment_comment(self): - """There is no source comment above the bug line.""" - bug_line = 16 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'all'}, - 'message': 'some comment', - 'status': 'false_positive', - 'line': '// codechecker_suppress [all] some comment\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_multi_liner_all(self): - """There is source code comment above the bug line.""" - bug_line = 23 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'all'}, - 'message': 'some long comment', - 'status': 'false_positive', - 'line': '// codechecker_suppress [all] some long\n ' - '// comment\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_one_liner_all(self): - """There is source code comment above the bug line.""" - bug_line = 29 - sc_handler = SourceCodeCommentHandler() - res = 
sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my_checker_1', 'my_checker_2'}, - 'message': 'some comment', - 'status': 'false_positive', - 'line': '// codechecker_suppress [my_checker_1, ' - 'my_checker_2] some comment\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_multi_liner_all_2(self): - """There is source code comment above the bug line.""" - bug_line = 36 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my.checker_1', 'my.checker_2'}, - 'message': 'some really long comment', - 'status': 'false_positive', - 'line': '// codechecker_suppress [my.checker_1 ' - 'my.checker_2] some really\n // long comment\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_one_liner_some_checkers(self): - """There is source code comment above the bug line.""" - bug_line = 43 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my.Checker_1', 'my.Checker_2'}, - 'message': 'some really really long comment', - 'status': 'false_positive', - 'line': '// codechecker_suppress [my.Checker_1, ' - 'my.Checker_2] some really\n // really\n' - ' // long comment\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_multi_liner_some_checkers(self): - """There is source code 
comment above the bug line.""" - bug_line = 50 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertFalse(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 0) - - def test_comment_characters(self): - """Check for different special comment characters.""" - bug_line = 57 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my.checker_1', 'my.checker_2'}, - 'message': "i/';0 (*&^%$#@!)", - 'status': 'false_positive', - 'line': "// codechecker_suppress [my.checker_1, " - "my.checker_2]\n // i/';0 (*&^%$#@!)\n"} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_fancy_comment_characters(self): - """Check fancy comment.""" - bug_line = 64 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my_checker_1'}, - 'message': "áúőóüöáé [▬▬▬▬▬▬▬▬▬▬ஜ۩۞۩ஜ▬▬▬▬▬▬▬▬▬▬]", - 'status': 'false_positive', - 'line': '// codechecker_suppress [ my_checker_1 ]\n // ' - 'áúőóüöáé [▬▬▬▬▬▬▬▬▬▬ஜ۩۞۩ஜ▬▬▬▬▬▬▬▬▬▬]\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_no_fancy_comment(self): - """Check no fancy comment.""" - bug_line = 70 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - 
sc_handler.get_source_line_comments(self.__tmp_srcfile_1, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my_checker_1'}, - 'message': 'WARNING! source code comment is missing', - 'status': 'false_positive', - 'line': '// codechecker_suppress [ my_checker_1 ]\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_malformed_commment_format(self): - """Check malformed comment.""" - bug_line = 1 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_2, - bug_line) - self.assertFalse(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_2, bug_line) - self.assertEqual(len(source_line_comments), 0) - - def test_source_code_comment(self): - """Check source code comment.""" - bug_line = 2 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'all'}, - 'message': 'some comment', - 'status': 'false_positive', - 'line': '// codechecker_suppress [ all ] some comment\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_false_positive_comment(self): - """Check False positive comment.""" - bug_line = 7 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'all'}, - 'message': 'some comment', - 'status': 'false_positive', - 'line': '// codechecker_false_positive [ all ] ' - 'some comment\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_intentional_comment(self): - 
"""Check Intentional comment.""" - bug_line = 12 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'all'}, - 'message': 'some comment', - 'status': 'intentional', - 'line': '// codechecker_intentional [ all ] ' - 'some comment\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_confirmed_comment(self): - """Check Confirmed comment.""" - bug_line = 17 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'all'}, - 'message': 'some comment', - 'status': 'confirmed', - 'line': '// codechecker_confirmed [ all ] some comment\n'} - self.assertDictEqual(expected, source_line_comments[0]) - - def test_multiple_comments(self): - """Check multiple comment.""" - bug_line = 23 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) - self.assertEqual(len(source_line_comments), 2) - - expected = [{ - 'checkers': {'my.checker_1'}, - 'message': 'intentional comment', - 'status': 'intentional', - 'line': '// codechecker_intentional [ my.checker_1 ] ' - 'intentional comment\n'}, - { - 'checkers': {'my.checker_2'}, - 'message': 'confirmed bug', - 'status': 'confirmed', - 'line': '// codechecker_confirmed [ my.checker_2 ] ' - 'confirmed bug\n' - }] - - self.assertDictEqual(expected[0], source_line_comments[0]) - self.assertDictEqual(expected[1], 
source_line_comments[1]) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.checker_1') - self.assertEqual(len(current_line_comments), 1) - self.assertEqual(current_line_comments[0]['message'], - expected[0]['message']) - self.assertEqual(current_line_comments[0]['status'], - expected[0]['status']) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.checker_2') - self.assertEqual(len(current_line_comments), 1) - - self.assertEqual(current_line_comments[0]['message'], - expected[1]['message']) - self.assertEqual(current_line_comments[0]['status'], - expected[1]['status']) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.dummy') - self.assertEqual(len(current_line_comments), 0) - - def test_multiple_multi_line_comments(self): - """Check multi line long line comments.""" - bug_line = 31 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) - self.assertEqual(len(source_line_comments), 2) - - expected = [{ - 'checkers': {'my.checker_1'}, - 'message': 'long intentional bug comment', - 'status': 'intentional', - 'line': '// codechecker_intentional [ my.checker_1 ] ' - 'long intentional\n // bug comment\n'}, - { - 'checkers': {'my.checker_2'}, - 'message': 'long confirmed bug comment', - 'status': 'confirmed', - 'line': '// codechecker_confirmed [ my.checker_2 ] ' - 'long confirmed\n // bug comment\n' - }] - - self.assertDictEqual(expected[0], source_line_comments[0]) - self.assertDictEqual(expected[1], source_line_comments[1]) - - def test_multiple_all_comments(self): - """Check multiple comment.""" - bug_line = 37 - sc_handler = SourceCodeCommentHandler() - res = 
sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = \ - sc_handler.get_source_line_comments(self.__tmp_srcfile_3, bug_line) - self.assertEqual(len(source_line_comments), 2) - - expected = [{ - 'checkers': {'my.checker_1'}, - 'message': 'intentional comment', - 'status': 'intentional', - 'line': '// codechecker_intentional [ my.checker_1 ] ' - 'intentional comment\n'}, - { - 'checkers': {'all'}, - 'message': 'some comment', - 'status': 'false_positive', - 'line': '// codechecker_false_positive [ all ] ' - 'some comment\n' - }] - - self.assertDictEqual(expected[0], source_line_comments[0]) - self.assertDictEqual(expected[1], source_line_comments[1]) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.checker_1') - self.assertEqual(len(current_line_comments), 2) - - self.assertEqual(current_line_comments[0]['message'], - expected[0]['message']) - self.assertEqual(current_line_comments[0]['status'], - expected[0]['status']) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - '') - self.assertEqual(len(current_line_comments), 1) - self.assertEqual(current_line_comments[0]['message'], - expected[1]['message']) - self.assertEqual(current_line_comments[0]['status'], - expected[1]['status']) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.dummy') - self.assertEqual(len(current_line_comments), 1) - - self.assertEqual(len(current_line_comments), 1) - self.assertEqual(current_line_comments[0]['message'], - expected[1]['message']) - self.assertEqual(current_line_comments[0]['status'], - expected[1]['status']) - - def test_multiple_checker_name_comments(self): - """ - Check multiple comment where same checker name are given for multiple - source code comment. 
- """ - - bug_line = 43 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = sc_handler.get_source_line_comments( - self.__tmp_srcfile_3, - bug_line) - self.assertEqual(len(source_line_comments), 2) - - expected = [{ - 'checkers': {'my.checker_1'}, - 'message': 'intentional comment', - 'status': 'intentional', - 'line': '// codechecker_intentional [ my.checker_1 ] ' - 'intentional comment\n' - }, - { - 'checkers': {'my.checker_2', 'my.checker_1'}, - 'message': 'some comment', - 'status': 'false_positive', - 'line': '// codechecker_false_positive [ ' - 'my.checker_2, my.checker_1 ] some comment\n' - }] - - self.assertDictEqual(expected[0], source_line_comments[0]) - self.assertDictEqual(expected[1], source_line_comments[1]) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.checker_1') - self.assertEqual(len(current_line_comments), 2) - - def test_cstyle_comment(self): - """ - C style comment in one line. - /* codechecker_suppress [ my_checker_1 ] suppress comment */ - """ - - bug_line = 76 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = sc_handler.get_source_line_comments( - self.__tmp_srcfile_1, - bug_line) - - for line in source_line_comments: - print(line) - - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my_checker_1'}, - 'message': 'suppress comment', - 'status': 'false_positive', - 'line': '/* codechecker_suppress [ my_checker_1 ] ' - 'suppress comment */\n'} - - self.assertDictEqual(expected, source_line_comments[0]) - - def test_cstyle_comment_multi_line(self): - """ - Multi line C style comment. 
- /* codechecker_suppress [ my_checker_1 ] - some longer - comment */ - """ - - bug_line = 83 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = sc_handler.get_source_line_comments( - self.__tmp_srcfile_1, - bug_line) - - for line in source_line_comments: - print(line) - - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my_checker_1'}, - 'message': 'some longer comment', - 'status': 'false_positive', - 'line': '/* codechecker_suppress [ my_checker_1 ]\n ' - 'some longer\n comment */\n'} - - self.assertDictEqual(expected, source_line_comments[0]) - - def test_cstyle_comment_multi_nomsg(self): - """ - Multi line C style comment. - /* codechecker_suppress [ my_checker_1 ] - */ - """ - - bug_line = 89 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = sc_handler.get_source_line_comments( - self.__tmp_srcfile_1, - bug_line) - - for line in source_line_comments: - print(line) - - self.assertEqual(len(source_line_comments), 1) - - expected = [{ - 'checkers': {'my_checker_1'}, - 'message': 'WARNING! source code comment is missing', - 'status': 'false_positive', - 'line': '/* codechecker_suppress [ my_checker_1 ]' - '\n */\n' - }] - - self.assertDictEqual(expected[0], source_line_comments[0]) - - def test_cstyle_comment_multi_star(self): - """ - Multi line C style comment. 
- - /* codechecker_suppress [ my_checker_1 ] - * multi line - * comment - * again - */ - """ - - bug_line = 98 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = sc_handler.get_source_line_comments( - self.__tmp_srcfile_1, - bug_line) - - for line in source_line_comments: - print('-======') - print(line) - - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my_checker_1'}, - 'message': 'multi line comment again', - 'status': 'false_positive', - 'line': "/* codechecker_suppress [ my_checker_1 ]\n * " - "multi line\n * comment\n * again\n */\n"} - - self.assertDictEqual(expected, source_line_comments[0]) - - def test_cstyle_comment_multi_line_mismatch(self): - """ - Multi line C style comment start '/*' is in a different line - from the codechecker review status comment. - - /* - codechecker_suppress [ my_checker_1 ] - multi line - comment - again - */ - """ - - bug_line = 108 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_1, - bug_line) - self.assertTrue(res) - - source_line_comments = sc_handler.get_source_line_comments( - self.__tmp_srcfile_1, - bug_line) - - for line in source_line_comments: - print('-======') - print(line) - - self.assertEqual(len(source_line_comments), 1) - - expected = {'checkers': {'my_checker_1'}, - 'message': 'multi line comment again', - 'status': 'false_positive', - 'line': ' codechecker_suppress [ my_checker_1 ]\n ' - 'multi line\n comment\n again\n */\n'} - - self.assertDictEqual(expected, source_line_comments[0]) - - def test_cstyle_multi_comment_multi_line(self): - """ - Multi line C style comment with multiple review status comment. 
- - /* codechecker_false_positive [ my.checker_2, my.checker_1 ] comment - codechecker_intentional [ my.checker_1 ] intentional comment */ - - """ - - bug_line = 49 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = sc_handler.get_source_line_comments( - self.__tmp_srcfile_3, - bug_line) - - for line in source_line_comments: - print(line) - - self.assertEqual(len(source_line_comments), 2) - - expected = [{ - 'checkers': {'my.checker_1'}, - 'message': 'intentional comment', - 'status': 'intentional', - 'line': 'codechecker_intentional [ my.checker_1 ] ' - 'intentional comment */\n'}, - { - 'checkers': {'my.checker_1', 'my.checker_2'}, - 'message': 'some comment', - 'status': 'false_positive', - 'line': '/* codechecker_false_positive [ ' - 'my.checker_2, my.checker_1 ] some comment\n' - }] - - self.assertDictEqual(expected[0], source_line_comments[0]) - self.assertDictEqual(expected[1], source_line_comments[1]) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.checker_1') - self.assertEqual(len(current_line_comments), 2) - self.assertEqual(current_line_comments[0]['message'], - expected[0]['message']) - self.assertEqual(current_line_comments[0]['status'], - expected[0]['status']) - self.assertEqual(current_line_comments[1]['message'], - expected[1]['message']) - self.assertEqual(current_line_comments[1]['status'], - expected[1]['status']) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.checker_2') - self.assertEqual(len(current_line_comments), 1) - - self.assertEqual(current_line_comments[0]['message'], - expected[1]['message']) - self.assertEqual(current_line_comments[0]['status'], - expected[1]['status']) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.dummy') - 
self.assertEqual(len(current_line_comments), 0) - - def test_cstyle_multi_comment_multi_line_long(self): - """ - Multi line C style comment with multiple review status comment. - - /* codechecker_false_positive [ my.checker_2, my.checker_1 ] comment - which - is - long - codechecker_intentional [ my.checker_1 ] intentional comment - long - again */ - - """ - - bug_line = 60 - sc_handler = SourceCodeCommentHandler() - res = sc_handler.has_source_line_comments(self.__tmp_srcfile_3, - bug_line) - self.assertTrue(res) - - source_line_comments = sc_handler.get_source_line_comments( - self.__tmp_srcfile_3, - bug_line) - - for line in source_line_comments: - print(line) - - self.assertEqual(len(source_line_comments), 2) - - expected = [{ - 'checkers': {'my.checker_1'}, - 'message': 'intentional comment long again', - 'status': 'intentional', - 'line': 'codechecker_intentional [ my.checker_1 ] ' - 'intentional comment\n long\n again */\n'}, - { - 'checkers': {'my.checker_1', 'my.checker_2'}, - 'message': 'comment which is long', - 'status': 'false_positive', - 'line': '/* codechecker_false_positive [ ' - 'my.checker_2, my.checker_1 ] comment\n ' - 'which\n is\n long\n' - }] - - self.assertDictEqual(expected[0], source_line_comments[0]) - self.assertDictEqual(expected[1], source_line_comments[1]) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.checker_1') - self.assertEqual(len(current_line_comments), 2) - self.assertEqual(current_line_comments[0]['message'], - expected[0]['message']) - self.assertEqual(current_line_comments[0]['status'], - expected[0]['status']) - self.assertEqual(current_line_comments[1]['message'], - expected[1]['message']) - self.assertEqual(current_line_comments[1]['status'], - expected[1]['status']) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.checker_2') - self.assertEqual(len(current_line_comments), 1) - - 
self.assertEqual(current_line_comments[0]['message'], - expected[1]['message']) - self.assertEqual(current_line_comments[0]['status'], - expected[1]['status']) - - current_line_comments = \ - sc_handler.filter_source_line_comments(self.__tmp_srcfile_3, - bug_line, - 'my.dummy') - self.assertEqual(len(current_line_comments), 0) diff --git a/web/tests/functional/diff_local/test_diff_local.py b/web/tests/functional/diff_local/test_diff_local.py index d3d1743101..d9ffae70fa 100644 --- a/web/tests/functional/diff_local/test_diff_local.py +++ b/web/tests/functional/diff_local/test_diff_local.py @@ -65,7 +65,7 @@ def test_resolved_json(self): print(resolved_results) for resolved in resolved_results: - self.assertEqual(resolved['checkerId'], "core.CallAndMessage") + self.assertEqual(resolved['checker_name'], "core.CallAndMessage") def test_new_json(self): """Get the new reports. @@ -78,7 +78,8 @@ def test_new_json(self): print(new_results) for new_result in new_results: - self.assertEqual(new_result['checkerId'], "core.NullDereference") + self.assertEqual( + new_result['checker_name'], "core.NullDereference") def test_non_existent_reports_directory(self): """Handles non existent directory well @@ -188,6 +189,13 @@ def test_missing_source_file(self): # Rename the file back. os.rename(new_file_path, old_file_path) + # Change files' ctime to the current time in the report directory, + # so the CodeChecker diff command will not see report files which + # reference the previously renamed file older then the source file. 
+ for root, _, files in os.walk(self.new_reports): + for file_name in files: + os.utime(os.path.join(root, file_name)) + def test_suppress_reports(self): """ Check diff command when analysing the same source file which contains @@ -281,7 +289,8 @@ def test_basename_baseline_file_json(self): print(new_results) for new_result in new_results: - self.assertEqual(new_result['checkerId'], "core.NullDereference") + self.assertEqual( + new_result['checker_name'], "core.NullDereference") # Get unresolved results. unresolved_results, _, _ = get_diff_results( @@ -291,12 +300,12 @@ def test_basename_baseline_file_json(self): self.assertTrue(any( r for r in unresolved_results - if r['checkerId'] == 'core.DivideZero')) + if r['checker_name'] == 'core.DivideZero')) self.assertFalse(any( r for r in unresolved_results - if r['checkerId'] == 'core.NullDereference' or - r['checkerId'] == 'core.CallAndMessage')) + if r['checker_name'] == 'core.NullDereference' or + r['checker_name'] == 'core.CallAndMessage')) # Get resolved results. resolved_results, err, returncode = get_diff_results( @@ -342,7 +351,7 @@ def test_newname_baseline_file_json(self): [self.base_reports], [baseline_file_path], '--resolved', 'json') for report in resolved_results: - self.assertEqual(report['checkerId'], "core.CallAndMessage") + self.assertEqual(report['checker_name'], "core.CallAndMessage") def test_multiple_baseline_file_json(self): """ Test multiple baseline file for basename option. """ @@ -366,7 +375,7 @@ def test_multiple_baseline_file_json(self): self.assertTrue(any( r for r in unresolved_results - if r['checkerId'] == 'core.DivideZero')) + if r['checker_name'] == 'core.DivideZero')) # Get resolved results. 
resolved_results, err, returncode = get_diff_results( diff --git a/web/tests/functional/diff_local_remote/test_diff_local_remote.py b/web/tests/functional/diff_local_remote/test_diff_local_remote.py index 72138c7567..6267607a17 100644 --- a/web/tests/functional/diff_local_remote/test_diff_local_remote.py +++ b/web/tests/functional/diff_local_remote/test_diff_local_remote.py @@ -65,7 +65,7 @@ def setUp(self): self._env = self._test_cfg['codechecker_cfg']['check_env'] - def get_local_remote_diff(self, extra_args=None): + def get_local_remote_diff(self, extra_args=None, format_type=None): """Return the unresolved results comparing local to a remote. Returns the text output of the diff command comparing the @@ -79,7 +79,7 @@ def get_local_remote_diff(self, extra_args=None): extra_args = [] return get_diff_results([self._local_reports], [self._run_names[0]], - '--unresolved', None, + '--unresolved', format_type, ['--url', self._url, *extra_args])[0] def test_local_to_remote_compare_count_new(self): @@ -100,7 +100,7 @@ def test_remote_to_local_compare_count_new(self): # 5 new core.CallAndMessage issues. # 1 is suppressed in code count = len(re.findall(r'\[core\.CallAndMessage\]', out)) - self.assertEqual(count, 4) + self.assertEqual(count, 5) # core.NullDereference was disabled in the remote analysis # so no results are new comapared to the local analysis. @@ -167,7 +167,7 @@ def test_local_cmp_filter_unres_filepath(self): # Only 4 bugs can be found in the following file but in the # output the file names are printed again because of the summary. - self.assertEqual(len(re.findall(r'divide_zero.cpp', res)), 5) + self.assertEqual(len(re.findall(r'divide_zero.cpp', res)), 6) self.assertEqual(len(re.findall(r'new_delete.cpp', res)), 0) @@ -178,7 +178,7 @@ def test_local_cmp_filter_unres_filepath(self): # Only 6 bugs can be found in the following file but in the # output the file names are printed again because of the summary. 
- self.assertEqual(len(re.findall(r'new_delete.cpp', res)), 7) + self.assertEqual(len(re.findall(r'new_delete.cpp', res)), 8) def test_local_cmp_filter_unres_checker_name(self): """Filter by checker name.""" @@ -187,7 +187,7 @@ def test_local_cmp_filter_unres_checker_name(self): self.assertEqual(len(re.findall(r'core.NullDereference', res)), 0) res = self.get_local_remote_diff(['--checker-name', 'core.*']) - self.assertEqual(len(re.findall(r'core.*', res)), 13) + self.assertEqual(len(re.findall(r'core.*', res)), 15) # Filter by checker message (case insensitive). res = self.get_local_remote_diff(['--checker-msg', 'division by*']) @@ -200,20 +200,17 @@ def test_local_cmp_filter_unres_filter_mix(self): # Only 2 bugs can be found in the following file but in the # output the file names are printed again because of the summary. - self.assertEqual(len(re.findall(r'divide_zero.cpp', res)), 3) + self.assertEqual(len(re.findall(r'divide_zero.cpp', res)), 4) self.assertEqual(len(re.findall(r'\[HIGH\]', res)), 2) def test_local_cmp_filter_unres_filter_mix_json(self): """Filter by multiple filters file and severity with json output.""" # TODO check if only high severity reports are retuned. 
- res = self.get_local_remote_diff(['--file', '*divide_zero.cpp', - '--severity', 'high', - '-o', 'json']) - reports = json.loads(res) + reports = self.get_local_remote_diff(['--file', '*divide_zero.cpp', + '--severity', 'high'], 'json') for report in reports: - print(report) - self.assertTrue("divide_zero.cpp" in report['checkedFile'], + self.assertTrue("divide_zero.cpp" in report['file']['path'], "Report filename is different from the expected.") def test_local_compare_res_html_output_unresolved(self): @@ -225,24 +222,24 @@ def test_local_compare_res_html_output_unresolved(self): ["--url", self._url, '-e', html_reports, "--verbose", "debug"]) - diff_res = json.loads(self.get_local_remote_diff(['-o', 'json'])) - checked_files = set() - for res in diff_res: - print(res) - checked_files.add(os.path.basename(res['checkedFile'])) + for res in self.get_local_remote_diff(None, 'json'): + checked_files.add(os.path.basename(res['file']['path'])) # Check if index.html file was generated. html_index = os.path.join(html_reports, "index.html") self.assertTrue(os.path.exists(html_index)) + html_statistics = os.path.join(html_reports, "statistics.html") + self.assertTrue(os.path.exists(html_statistics)) + # Check that html files were generated for each reports. 
for html_file_names in os.listdir(html_reports): suffix = html_file_names.rfind("_") file_name = html_file_names[:suffix] \ if suffix != -1 else html_file_names - if file_name == "index.html": + if file_name in ["index.html", "statistics.html"]: continue self.assertIn(file_name, checked_files) @@ -319,7 +316,7 @@ def test_diff_gerrit_output(self): self.assertEqual(lbls["Verified"], -1) self.assertEqual(lbls["Code-Review"], -1) self.assertEqual(review_data["message"], - "CodeChecker found 4 issue(s) in the code.") + "CodeChecker found 5 issue(s) in the code.") self.assertEqual(review_data["tag"], "jenkins") comments = review_data["comments"] @@ -327,7 +324,7 @@ def test_diff_gerrit_output(self): file_path = next(iter(comments)) reports = comments[file_path] - self.assertEqual(len(reports), 4) + self.assertEqual(len(reports), 5) for report in reports: self.assertIn("message", report) @@ -362,7 +359,7 @@ def test_diff_gerrit_stdout(self): self.assertEqual(lbls["Verified"], -1) self.assertEqual(lbls["Code-Review"], -1) self.assertEqual(review_data["message"], - "CodeChecker found 4 issue(s) in the code.") + "CodeChecker found 5 issue(s) in the code.") self.assertEqual(review_data["tag"], "jenkins") comments = review_data["comments"] @@ -370,7 +367,7 @@ def test_diff_gerrit_stdout(self): file_path = next(iter(comments)) reports = comments[file_path] - self.assertEqual(len(reports), 4) + self.assertEqual(len(reports), 5) for report in reports: self.assertIn("message", report) @@ -539,6 +536,7 @@ def test_diff_multiple_output(self): "--url", self._url], env) + print(out) # Check the plaintext output. 
count = len(re.findall(r'\[core\.NullDereference\]', out)) self.assertEqual(count, 4) @@ -573,7 +571,7 @@ def test_local_to_remote_with_baseline_file(self): print(new_results) for report in new_results: - self.assertEqual(report['checkerId'], "core.NullDereference") + self.assertEqual(report['checker_name'], "core.NullDereference") self.assertEqual(returncode, 2) @@ -586,7 +584,7 @@ def test_local_to_remote_with_baseline_file(self): self.assertTrue(unresolved_results) self.assertFalse(any( r for r in unresolved_results - if r['checkerId'] == 'core.CallAndMessage')) + if r['checker_name'] == 'core.CallAndMessage')) self.assertEqual(returncode, 2) # Get resolved reports. @@ -614,7 +612,7 @@ def test_remote_to_local_with_baseline_file(self): '--new', 'json', ["--url", self._url, "--review-status", "unreviewed", "confirmed", "false_positive"]) - new_hashes = sorted(set([n['bugHash'] for n in res])) + new_hashes = sorted(set([n['report_hash'] for n in res])) new_results, err, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], '--new', 'json', @@ -634,7 +632,7 @@ def test_remote_to_local_with_baseline_file(self): '--unresolved', 'json', ["--url", self._url, "--review-status", "unreviewed", "confirmed", "false_positive"]) - unresolved_hashes = sorted(set([n['bugHash'] for n in res])) + unresolved_hashes = sorted(set([n['report_hash'] for n in res])) unresolved_results, err, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], @@ -655,7 +653,7 @@ def test_remote_to_local_with_baseline_file(self): '--resolved', 'json', ["--url", self._url, "--review-status", "unreviewed", "confirmed", "false_positive"]) - resolved_hashes = set([n['bugHash'] for n in res]) + resolved_hashes = set([n['report_hash'] for n in res]) resolved_results, _, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], '--resolved', 'json', @@ -664,5 +662,5 @@ def test_remote_to_local_with_baseline_file(self): 
self.assertTrue(resolved_results) self.assertSetEqual( - {r['bugHash'] for r in resolved_results}, resolved_hashes) + {r['report_hash'] for r in resolved_results}, resolved_hashes) self.assertEqual(returncode, 2) diff --git a/web/tests/functional/source_change/test_source_change.py b/web/tests/functional/source_change/test_source_change.py index dbb538fa27..530c239c65 100644 --- a/web/tests/functional/source_change/test_source_change.py +++ b/web/tests/functional/source_change/test_source_change.py @@ -82,7 +82,7 @@ def test_parse(self): ret, out, _ = codechecker.parse(self._codechecker_cfg) self.assertEqual(ret, 2) - msg = 'did change since the last analysis.' + msg = 'changed or missing since the latest analysis' self.assertTrue(msg in out, '"' + msg + '" was not found in the parse output') diff --git a/web/tests/functional/store/__init__.py b/web/tests/functional/store/__init__.py index 7c859d6a58..7bf23a3bf8 100644 --- a/web/tests/functional/store/__init__.py +++ b/web/tests/functional/store/__init__.py @@ -57,8 +57,12 @@ def setup_package(): dst_dir = os.path.join(TEST_WORKSPACE, "test_proj") shutil.copytree(os.path.join(test_dir, "test_proj"), dst_dir) - report_file = os.path.join(dst_dir, "divide_zero.plist") - plist_test.prefix_file_path(report_file, dst_dir) + prefix_file_paths = [ + os.path.join(dst_dir, "divide_zero", "divide_zero.plist"), + os.path.join(dst_dir, "double_suppress", "double_suppress.plist")] + + for file_name in prefix_file_paths: + plist_test.prefix_file_path(file_name, os.path.dirname(file_name)) def teardown_package(): diff --git a/web/tests/functional/store/test_proj/Makefile b/web/tests/functional/store/test_proj/divide_zero/Makefile similarity index 66% rename from web/tests/functional/store/test_proj/Makefile rename to web/tests/functional/store/test_proj/divide_zero/Makefile index 7e346d58bc..7d74dd5aff 100644 --- a/web/tests/functional/store/test_proj/Makefile +++ b/web/tests/functional/store/test_proj/divide_zero/Makefile @@ 
-3,10 +3,9 @@ build: $(CXX) -c divide_zero.cpp -o divide_zero.o - $(CXX) -c double_suppress.cpp -o double_suppress.o clean: - rm -rf divide_zero.o double_suppress.o + rm -rf divide_zero.o # Using relative path to the source files # it is easier to prefix them with the temporary @@ -14,5 +13,3 @@ clean: analyze: clang --analyze -I. divide_zero.cpp --analyzer-output plist-multi-file \ -o divide_zero.plist - clang --analyze double_suppress.cpp --analyzer-output plist-multi-file \ - -o double_suppress.plist diff --git a/web/tests/functional/store/test_proj/divide_zero.cpp b/web/tests/functional/store/test_proj/divide_zero/divide_zero.cpp similarity index 100% rename from web/tests/functional/store/test_proj/divide_zero.cpp rename to web/tests/functional/store/test_proj/divide_zero/divide_zero.cpp diff --git a/web/tests/functional/store/test_proj/divide_zero.plist b/web/tests/functional/store/test_proj/divide_zero/divide_zero.plist similarity index 99% rename from web/tests/functional/store/test_proj/divide_zero.plist rename to web/tests/functional/store/test_proj/divide_zero/divide_zero.plist index 11826220c8..a633036a92 100644 --- a/web/tests/functional/store/test_proj/divide_zero.plist +++ b/web/tests/functional/store/test_proj/divide_zero/divide_zero.plist @@ -474,7 +474,7 @@ files - ./lib.h + lib.h divide_zero.cpp diff --git a/web/tests/functional/store/test_proj/lib.h b/web/tests/functional/store/test_proj/divide_zero/lib.h similarity index 100% rename from web/tests/functional/store/test_proj/lib.h rename to web/tests/functional/store/test_proj/divide_zero/lib.h diff --git a/web/tests/functional/store/test_proj/project_info.json b/web/tests/functional/store/test_proj/divide_zero/project_info.json similarity index 100% rename from web/tests/functional/store/test_proj/project_info.json rename to web/tests/functional/store/test_proj/divide_zero/project_info.json diff --git a/web/tests/functional/store/test_proj/double_suppress/Makefile 
b/web/tests/functional/store/test_proj/double_suppress/Makefile new file mode 100644 index 0000000000..b86fae20d6 --- /dev/null +++ b/web/tests/functional/store/test_proj/double_suppress/Makefile @@ -0,0 +1,15 @@ +# Makefile to build and analyze the test files required +# for the store update tests. + +build: + $(CXX) -c double_suppress/double_suppress.cpp -o double_suppress/double_suppress.o + +clean: + rm -rf double_suppress/double_suppress.o + +# Using relative path to the source files +# it is easier to prefix them with the temporary +# directory during test preparation. +analyze: + clang --analyze double_suppress.cpp --analyzer-output plist-multi-file \ + -o double_suppress.plist diff --git a/web/tests/functional/store/test_proj/double_suppress.cpp b/web/tests/functional/store/test_proj/double_suppress/double_suppress.cpp similarity index 100% rename from web/tests/functional/store/test_proj/double_suppress.cpp rename to web/tests/functional/store/test_proj/double_suppress/double_suppress.cpp diff --git a/web/tests/functional/store/test_proj/double_suppress.plist b/web/tests/functional/store/test_proj/double_suppress/double_suppress.plist similarity index 100% rename from web/tests/functional/store/test_proj/double_suppress.plist rename to web/tests/functional/store/test_proj/double_suppress/double_suppress.plist diff --git a/web/tests/functional/store/test_proj/double_suppress/project_info.json b/web/tests/functional/store/test_proj/double_suppress/project_info.json new file mode 100644 index 0000000000..2f26cc29b4 --- /dev/null +++ b/web/tests/functional/store/test_proj/double_suppress/project_info.json @@ -0,0 +1,5 @@ +{ + "name": "store", + "clean_cmd": "make clean", + "build_cmd": "make build" +} diff --git a/web/tests/functional/store/test_store.py b/web/tests/functional/store/test_store.py index c3b457fbe6..ddfd73d324 100644 --- a/web/tests/functional/store/test_store.py +++ b/web/tests/functional/store/test_store.py @@ -20,11 +20,11 @@ import 
subprocess import unittest +from codechecker_report_converter import util + from codechecker_api.codeCheckerDBAccess_v6.ttypes import AnalysisInfoFilter from libtest import codechecker, env -from codechecker_common import util - def _call_cmd(command, cwd=None, env=None): try: @@ -46,7 +46,6 @@ class TestStore(unittest.TestCase): """Test storage reports""" def setUp(self): - # Get the test workspace used to cppcheck tests. self._test_workspace = os.environ["TEST_WORKSPACE"] @@ -60,6 +59,11 @@ def setUp(self): self._temp_workspace = os.path.join(self._codechecker_cfg["workspace"], "test_proj") + self._divide_zero_workspace = os.path.join( + self._temp_workspace, "divide_zero") + self._double_suppress_workspace = os.path.join( + self._temp_workspace, "double_suppress") + self.product_name = self._codechecker_cfg['viewer_product'] # Setup a viewer client to test viewer API calls. @@ -81,7 +85,7 @@ def test_product_details(self): run_name = "product_details_test" store_cmd = [ - env.codechecker_cmd(), "store", self._temp_workspace, + env.codechecker_cmd(), "store", self._divide_zero_workspace, "--name", run_name, "--url", env.parts_to_url(self._codechecker_cfg)] @@ -109,45 +113,39 @@ def test_trim_path_prefix_store(self): temporary test directory, the test trims that temporary test directory from the source file path during the storage. 
""" - report_file = os.path.join(self._temp_workspace, "divide_zero.plist") + report_file = os.path.join( + self._divide_zero_workspace, "divide_zero.plist") report_content = {} with open(report_file, mode="rb") as rf: report_content = plistlib.load(rf) trimmed_paths = [ - util.trim_path_prefixes(path, [self._temp_workspace]) + util.trim_path_prefixes(path, [self._divide_zero_workspace]) for path in report_content["files"] ] run_name = "store_test" store_cmd = [ - env.codechecker_cmd(), - "store", - self._temp_workspace, - "--name", - run_name, - "--url", - env.parts_to_url(self._codechecker_cfg), - "--trim-path-prefix", - self._temp_workspace, - "--verbose", - "debug", + env.codechecker_cmd(), "store", + self._divide_zero_workspace, + "--name", run_name, + "--url", env.parts_to_url(self._codechecker_cfg), + "--trim-path-prefix", self._divide_zero_workspace, + "--verbose", "debug", ] - ret, _, _ = _call_cmd(store_cmd) + ret, out, err = _call_cmd(store_cmd) + print(out) + print(err) self.assertEqual(ret, 0, "Plist file could not store.") query_cmd = [ - env.codechecker_cmd(), - "cmd", - "results", + env.codechecker_cmd(), "cmd", "results", run_name, # Use the 'Default' product. - "--url", - env.parts_to_url(self._codechecker_cfg), - "-o", - "json", + "--url", env.parts_to_url(self._codechecker_cfg), + "-o", "json", ] ret, out, _ = _call_cmd(query_cmd) @@ -168,7 +166,7 @@ def test_store_multiple_report_dirs(self): store command to a run. 
""" cfg = dict(self._codechecker_cfg) - codechecker.log(cfg, self._temp_workspace) + codechecker.log(cfg, self._divide_zero_workspace) common_report_dir = os.path.join(self._temp_workspace, 'reports') report_dir1 = \ @@ -181,12 +179,12 @@ def test_store_multiple_report_dirs(self): cfg['reportdir'] = report_dir1 cfg['checkers'] = [ '-d', 'core.DivideZero', '-e', 'deadcode.DeadStores'] - codechecker.analyze(cfg, self._temp_workspace) + codechecker.analyze(cfg, self._divide_zero_workspace) cfg['reportdir'] = report_dir2 cfg['checkers'] = [ '-e', 'core.DivideZero', '-d', 'deadcode.DeadStores'] - codechecker.analyze(cfg, self._temp_workspace) + codechecker.analyze(cfg, self._divide_zero_workspace) def store_multiple_report_dirs(report_dirs): """ """ @@ -292,7 +290,6 @@ def test_suppress_duplicated(self): Test server if recognise duplicated suppress comments in the stored source code. """ - test_plist_file = "double_suppress.plist" store_cmd = [env.codechecker_cmd(), "store", "--url", @@ -300,6 +297,6 @@ def test_suppress_duplicated(self): "never", "--input-format", "plist", "--verbose", "debug", test_plist_file] - ret, _, _ = _call_cmd(store_cmd, self._temp_workspace) + ret, _, _ = _call_cmd(store_cmd, self._double_suppress_workspace) self.assertEqual(ret, 1, "Duplicated suppress comments was not " "recognised.") diff --git a/web/tests/libtest/codechecker.py b/web/tests/libtest/codechecker.py index c576e00ca5..ef122b5be6 100644 --- a/web/tests/libtest/codechecker.py +++ b/web/tests/libtest/codechecker.py @@ -102,7 +102,7 @@ def get_diff_results(basenames, newnames, diff_type, format_type=None, out, err = proc.communicate() if format_type == "json": - return json.loads(out), err, proc.returncode + return json.loads(out)['reports'], err, proc.returncode return out, err, proc.returncode diff --git a/web/tests/libtest/env.py b/web/tests/libtest/env.py index d880af0e7b..3bcaa21b83 100644 --- a/web/tests/libtest/env.py +++ b/web/tests/libtest/env.py @@ -19,6 +19,8 @@ import 
stat import subprocess +from codechecker_report_converter.util import load_json_or_empty + from .thrift_client_to_db import get_auth_client from .thrift_client_to_db import get_config_client from .thrift_client_to_db import get_product_client @@ -27,8 +29,6 @@ from functional import PKG_ROOT from functional import REPO_ROOT -from codechecker_common import util - def get_free_port(): """ @@ -348,7 +348,7 @@ def enable_auth(workspace): server_cfg_file = os.path.join(workspace, server_config_filename) - scfg_dict = util.load_json_or_empty(server_cfg_file, {}) + scfg_dict = load_json_or_empty(server_cfg_file, {}) scfg_dict["authentication"]["enabled"] = True scfg_dict["authentication"]["method_dictionary"]["enabled"] = True scfg_dict["authentication"]["method_dictionary"]["auths"] = \ @@ -388,7 +388,7 @@ def enable_storage_of_analysis_statistics(workspace): server_cfg_file = os.path.join(workspace, server_config_filename) - scfg_dict = util.load_json_or_empty(server_cfg_file, {}) + scfg_dict = load_json_or_empty(server_cfg_file, {}) scfg_dict["store"]["analysis_statistics_dir"] = \ os.path.join(workspace, 'analysis_statistics')
Number of processed plist files${number_of_plist_files}Number of processed analyzer result files${num_of_analyzer_result_files}
Number of analyzer reports