/***********************************************************************
 * Hybrid Query Processing Engine for Coprocessing in Database Systems
 * (HyPE)
 ***********************************************************************/
00001 00002 #include <core/statistics_gatherer.hpp> 00003 00004 namespace hype{ 00005 namespace core{ 00006 00007 StatisticsGatherer::StatisticsGatherer(const std::string& operation_name) : operation_name_(operation_name), 00008 number_of_right_decisions_(0), 00009 number_of_total_decisions_(0), 00010 isolated_execution_time_of_algorithms_(), 00011 names_of_algorithms_(), 00012 execution_time_of_ideal_model_(0), 00013 execution_time_of_cpu_only_model_(0), 00014 execution_time_of_gpu_only_model_(0), 00015 execution_time_of_real_model_(0), 00016 total_time_for_overhead_of_addObservation_(0), 00017 total_time_for_overhead_of_getOptimalAlgorithm_(0), 00018 //Feature: inter device parallelism 00019 inter_device_parallel_time_cpu_(0), 00020 inter_device_parallel_time_gpu_(0){ 00021 00022 } 00023 00024 bool StatisticsGatherer::addStatistics(const WorkloadGenerator& w){ 00025 00026 assert(this->operation_name_==w.operation_name_); 00027 00028 this->number_of_right_decisions_ += w.number_of_right_decisions_; 00029 this->number_of_total_decisions_ += w. 
number_of_total_decisions_; 00030 this->execution_time_of_ideal_model_ += w.execution_time_of_ideal_model_; 00031 this->execution_time_of_cpu_only_model_ += w.execution_time_of_cpu_only_model_; 00032 this->execution_time_of_gpu_only_model_ += w.execution_time_of_gpu_only_model_; 00033 this->execution_time_of_real_model_ += w.execution_time_of_real_model_; 00034 this->total_time_for_overhead_of_addObservation_ += w.total_time_for_overhead_of_addObservation_; 00035 this->total_time_for_overhead_of_getOptimalAlgorithm_ += w.total_time_for_overhead_of_getOptimalAlgorithm_; 00036 //Feature: inter device parallelism 00037 this->inter_device_parallel_time_cpu_+=w.inter_device_parallel_time_cpu_; 00038 this->inter_device_parallel_time_gpu_+=w.inter_device_parallel_time_gpu_; 00039 00040 if(this->isolated_execution_time_of_algorithms_.empty()){ 00041 this->isolated_execution_time_of_algorithms_=w.isolated_execution_time_of_algorithms_; 00042 //std::cout << w.offline_algorithms.size() << std::endl; 00043 for(unsigned int i=0;i<w.offline_algorithms.size();i++){ 00044 //std::cout << w.offline_algorithms[i].getAlgorithmName() << std::endl; 00045 this->names_of_algorithms_.push_back(w.offline_algorithms[i].getAlgorithmName()); 00046 } 00047 //exit(-1); 00048 return true; 00049 } 00050 assert(this->isolated_execution_time_of_algorithms_.size() == w.isolated_execution_time_of_algorithms_.size()); 00051 assert(this->names_of_algorithms_.size()==this->isolated_execution_time_of_algorithms_.size()); 00052 for(unsigned int i=0;i<w.offline_algorithms.size();i++){ 00053 this->isolated_execution_time_of_algorithms_[i] += w.isolated_execution_time_of_algorithms_[i]; 00054 } 00055 return true; 00056 } 00057 00058 void StatisticsGatherer::printReport() const throw(){ 00059 //Feature: overhead tracking 00060 double execution_time_of_real_model_with_overhead = execution_time_of_real_model_; 00061 execution_time_of_real_model_with_overhead += total_time_for_overhead_of_addObservation_; 00062 
execution_time_of_real_model_with_overhead += total_time_for_overhead_of_getOptimalAlgorithm_; 00063 00064 //Feature: inter device parallelism 00065 double response_time_with_inter_device_parallelism=std::max(inter_device_parallel_time_cpu_, inter_device_parallel_time_gpu_); 00066 response_time_with_inter_device_parallelism+=total_time_for_overhead_of_addObservation_; 00067 response_time_with_inter_device_parallelism+=total_time_for_overhead_of_getOptimalAlgorithm_; 00068 00069 std::cout << "================================================================================" << std::endl 00070 << "Global Report for operation "<< operation_name_ << ": " << std::endl 00071 << "Number of correct decisions: " << number_of_right_decisions_ << " " 00072 << "Number of total decisions: " << number_of_total_decisions_ << std::endl 00073 << "Precision (Hitrate): " << double(number_of_right_decisions_)/number_of_total_decisions_ << std::endl 00074 << "--------------------------------------------------------------------------------" << std::endl 00075 << "Execution time for workload of ideal model: " << execution_time_of_ideal_model_ << "ns" << std::endl 00076 << "Execution time for workload of real model (without overhead): " << execution_time_of_real_model_ << "ns (model quality: " 00077 << execution_time_of_ideal_model_/execution_time_of_real_model_ << ")" << std::endl 00078 << "Execution time for workload of real model (with overhead): " << execution_time_of_real_model_with_overhead << "ns (model quality: " 00079 << execution_time_of_ideal_model_/execution_time_of_real_model_with_overhead << ")" << std::endl 00080 << "--------------------------------------------------------------------------------" << std::endl 00081 << "Overhead time for workload of real model (addObservation): " << total_time_for_overhead_of_addObservation_ << "ns" << std::endl 00082 << "Overhead time for workload of real model (getOptimalAlgorithm): " << total_time_for_overhead_of_getOptimalAlgorithm_ << 
"ns" << std::endl 00083 << "Total Overhead time for workload of real model: " << total_time_for_overhead_of_addObservation_+total_time_for_overhead_of_getOptimalAlgorithm_ << "ns" << std::endl 00084 << "Precentaged Overhead of total time of real model for workload: " << ((total_time_for_overhead_of_addObservation_+total_time_for_overhead_of_getOptimalAlgorithm_)/execution_time_of_real_model_)*100 << "%" << std::endl 00085 00086 //Feature: inter device parallelism 00087 << "--------------------------------------------------------------------------------" << std::endl 00088 << "Execution Time spend on CPU: " << inter_device_parallel_time_cpu_ << "ns" << std::endl 00089 << "Execution Time spend on GPU: " << inter_device_parallel_time_gpu_ << "ns" << std::endl 00090 << "Response Time with Decision Model (including overhead): " << response_time_with_inter_device_parallelism << "ns" << std::endl 00092 << "(approximative) Ideal Response Time: " << ((inter_device_parallel_time_cpu_+inter_device_parallel_time_gpu_)/2) << std::endl 00093 << "--------------------------------------------------------------------------------" << std::endl 00094 << "CPU Utilization: " << inter_device_parallel_time_cpu_/(inter_device_parallel_time_cpu_+inter_device_parallel_time_gpu_) << "%" << std::endl 00095 << "GPU Utilization: " << inter_device_parallel_time_gpu_/(inter_device_parallel_time_cpu_+inter_device_parallel_time_gpu_) << "%" << std::endl 00096 << "================================================================================" << std::endl; 00097 00098 assert(this->names_of_algorithms_.size()==this->isolated_execution_time_of_algorithms_.size()); 00099 for(unsigned int i=0;i<names_of_algorithms_.size();i++){ 00100 std::cout << "Execution time for workload for model that uses only algorithm " << names_of_algorithms_[i] << ": " 00101 << isolated_execution_time_of_algorithms_[i] << "ns (model quality: " << execution_time_of_ideal_model_/isolated_execution_time_of_algorithms_[i] 00102 
<< ")" << std::endl; 00103 //Feature: inter device parallelism 00104 std::cout << "Speedup compared to Algorithm " << names_of_algorithms_[i] << ": " << isolated_execution_time_of_algorithms_[i]/response_time_with_inter_device_parallelism << std::endl 00105 << "Overall Improvement using decision model (saved time): "<< ((isolated_execution_time_of_algorithms_[i]-response_time_with_inter_device_parallelism)/isolated_execution_time_of_algorithms_[i])*100 << "% (" << isolated_execution_time_of_algorithms_[i]-response_time_with_inter_device_parallelism << "ns)" << std::endl 00106 //<< "Ideal Speedup compared to Algorithm " << names_of_algorithms_[i] << ": " << isolated_execution_time_of_algorithms_[i]/((inter_device_parallel_time_cpu_+inter_device_parallel_time_gpu_)/2) << std::endl 00107 << "--------------------------------------------------------------------------------" << std::endl; 00108 } 00109 00110 } 00111 00112 }; //end namespace core 00113 }; //end namespace hype 00114