diff --git a/CMakeLists.txt b/CMakeLists.txt
index 29fe210..a7e9d45 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -134,31 +134,47 @@ add_custom_command(TARGET pathfinding_demo POST_BUILD
     $<TARGET_FILE_DIR:pathfinding_demo>/resources
 )
 
-# Test executable
-add_executable(tests cpp/test/test.cpp)
+# Unit tests executable
+add_executable(unit_tests
+    cpp/test/test.cpp
+)
 if(WIN32)
-    target_link_libraries(tests GTest::gtest GTest::gtest_main GTest::gmock GTest::gmock_main)
+    target_link_libraries(unit_tests GTest::gtest GTest::gtest_main GTest::gmock GTest::gmock_main)
 else()
-    target_link_libraries(tests GTest::gtest GTest::gtest_main)
+    target_link_libraries(unit_tests GTest::gtest GTest::gtest_main)
+endif()
+
+# Performance tests executable
+add_executable(performance_tests
+    cpp/test/collision_performance.cpp
+)
+if(WIN32)
+    target_link_libraries(performance_tests GTest::gtest GTest::gtest_main GTest::gmock GTest::gmock_main)
+else()
+    target_link_libraries(performance_tests GTest::gtest GTest::gtest_main)
 endif()
 
 # Enable testing
 enable_testing()
-add_test(NAME unit_tests COMMAND tests)
+add_test(NAME unit_tests COMMAND unit_tests)
+add_test(NAME performance_tests COMMAND performance_tests)
 
 # Compiler-specific options with MSVC support
 if(MSVC)
     # MSVC-specific flags: disable permissive mode, enable high warning level
     target_compile_options(pathfinding_demo PRIVATE /W4 /permissive-)
-    target_compile_options(tests PRIVATE /W4 /permissive-)
+    target_compile_options(unit_tests PRIVATE /W4 /permissive-)
+    target_compile_options(performance_tests PRIVATE /W4 /permissive-)
     # Additional MSVC flags for C++23 and modern standards
     target_compile_options(pathfinding_demo PRIVATE /Zc:__cplusplus /Zc:preprocessor)
-    target_compile_options(tests PRIVATE /Zc:__cplusplus /Zc:preprocessor)
+    target_compile_options(unit_tests PRIVATE /Zc:__cplusplus /Zc:preprocessor)
+    target_compile_options(performance_tests PRIVATE /Zc:__cplusplus /Zc:preprocessor)
 elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
     # GCC/Clang flags
     target_compile_options(pathfinding_demo PRIVATE -Wall -Wextra -Wpedantic)
-    target_compile_options(tests PRIVATE -Wall -Wextra -Wpedantic)
+    target_compile_options(unit_tests PRIVATE -Wall -Wextra -Wpedantic)
+    target_compile_options(performance_tests PRIVATE -Wall -Wextra -Wpedantic)
 endif()
 
 # Platform-specific build configurations
diff --git a/cpp/test/collision_performance.cpp b/cpp/test/collision_performance.cpp
new file mode 100644
index 0000000..3860f9f
--- /dev/null
+++ b/cpp/test/collision_performance.cpp
@@ -0,0 +1,106 @@
+#include <gtest/gtest.h>
+#include <chrono>
+#include <iostream>
+#include <iomanip>
+
+// TODO: Add necessary includes for collision testing
+// #include "collision_shapes.hpp"
+// #include "entities.hpp"
+
+/**
+ * @file collision_performance.cpp
+ * @brief Performance tests for collision detection systems
+ *
+ * This file contains performance benchmarks and tests for various
+ * collision detection algorithms and optimizations.
+ */
+
+/**
+ * @brief Helper class to measure and print execution time
+ */
+class PerformanceTimer {
+public:
+    using Clock = std::chrono::high_resolution_clock;
+    using TimePoint = std::chrono::time_point<Clock>;
+    using Duration = std::chrono::duration<double, std::milli>;
+
+    PerformanceTimer(const std::string& name) : name_(name) {
+        start_ = Clock::now();
+    }
+
+    ~PerformanceTimer() {
+        auto end = Clock::now();
+        Duration duration = end - start_;
+        std::cout << std::fixed << std::setprecision(3)
+                  << "[PERF] " << name_ << ": "
+                  << duration.count() << " ms" << std::endl;
+    }
+
+    double elapsed_ms() const {
+        auto end = Clock::now();
+        Duration duration = end - start_;
+        return duration.count();
+    }
+
+private:
+    std::string name_;
+    TimePoint start_;
+};
+
+/**
+ * @brief Run a function multiple times and measure average execution time
+ * @param name Name of the test for output
+ * @param iterations Number of iterations to run
+ * @param func Function to benchmark
+ */
+template <typename Func>
+void benchmark_function(const std::string& name, int iterations, Func func) {
+    auto start = PerformanceTimer::Clock::now();
+
+    for (int i = 0; i < iterations; ++i) {
+        func();
+    }
+
+    auto end = PerformanceTimer::Clock::now();
+    PerformanceTimer::Duration total_duration = end - start;
+    double avg_duration = total_duration.count() / iterations;
+
+    std::cout << std::fixed << std::setprecision(6)
+              << "[BENCHMARK] " << name << ":\n"
+              << "  Total: " << total_duration.count() << " ms\n"
+              << "  Iterations: " << iterations << "\n"
+              << "  Average: " << avg_duration << " ms\n"
+              << "  Throughput: " << (iterations / (total_duration.count() / 1000.0))
+              << " ops/sec" << std::endl;
+}
+
+// Example test function 1
+void test_function_1() {
+    // TODO: Implement actual collision test
+    volatile int sum = 0;
+    for (int i = 0; i < 1000; ++i) {
+        sum += i;
+    }
+}
+
+// Example test function 2
+void test_function_2() {
+    // TODO: Implement actual collision test
+    volatile unsigned int product = 1u;  // unsigned: overflow wraps instead of invoking UB
+    for (int i = 1; i < 100; ++i) {
+        product *= (i % 10 + 1);
+    }
+}
+
+TEST(CollisionPerformance, CompareAlgorithms) {
+    std::cout << "\n=== Collision Performance Comparison ===\n" << std::endl;
+
+    const int iterations = 10000;
+
+    benchmark_function("Algorithm 1 (test_function_1)", iterations, test_function_1);
+    benchmark_function("Algorithm 2 (test_function_2)", iterations, test_function_2);
+
+    std::cout << "\n======================================\n" << std::endl;
+
+    SUCCEED();
+}
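
Note (outside the patch): the two placeholder bodies only exercise integer arithmetic, so the benchmark currently compares nothing collision-related. Below is a hypothetical sketch of what one real workload could look like once collision_shapes.hpp lands; AABB, overlaps(), and bench_brute_force_aabb are illustrative stand-ins invented here, not this project's API. Only benchmark_function comes from the patch.

    #include <cstddef>
    #include <vector>

    // Minimal stand-in box type; a real test would use the project's shapes.
    struct AABB { float min_x, min_y, max_x, max_y; };

    // Two boxes overlap iff their intervals overlap on both axes.
    inline bool overlaps(const AABB& a, const AABB& b) {
        return a.min_x <= b.max_x && b.min_x <= a.max_x &&
               a.min_y <= b.max_y && b.min_y <= a.max_y;
    }

    void bench_brute_force_aabb() {
        // Build the scene once (static) so setup cost is not measured.
        static const std::vector<AABB> boxes = [] {
            std::vector<AABB> v;
            for (int i = 0; i < 200; ++i) {
                const float x = static_cast<float>(i % 20);
                const float y = static_cast<float>(i / 20);
                v.push_back({x, y, x + 1.5f, y + 1.5f});  // 1.5-wide boxes on a unit grid overlap neighbors
            }
            return v;
        }();

        int hits = 0;
        for (std::size_t i = 0; i < boxes.size(); ++i)
            for (std::size_t j = i + 1; j < boxes.size(); ++j)
                if (overlaps(boxes[i], boxes[j])) ++hits;

        volatile int sink = hits;  // keep the optimizer from deleting the loop
        (void)sink;
    }

    // Plugged into the existing harness exactly like the placeholders:
    //     benchmark_function("Brute-force AABB (n^2)", iterations, bench_brute_force_aabb);

Once merged, `ctest -R performance_tests` runs the new suite on its own; CTest hides test stdout by default, so add `--verbose` (or `-V`) to see the [BENCHMARK] lines.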