目錄
- 簡單版本1
- 優化版本1
- 優化版本2
對于QPS要求很高或者對性能有一定要求的服務器程序,同步寫日志會對服務的關鍵性邏輯的快速執行和及時響應帶來一定的性能損失,因為寫日志時等待磁盤IO完成工作也需要一定時間。為了減少這種損失,一般采用異步寫日志。
本質上仍然是一個生產者與消費者模型,產生日志的線程是生產者,將日志寫入文件的線程是消費者。
如果有多個消費者線程,可能存在寫日志的時間順序錯位,所以一般將日志消費者線程數量設置為1。
簡單版本1
下面給出一個簡單的版本:
#include <iostream>
#include <thread>
#include <mutex>
#include <list>
#include <string>
#include <sstream>
#include <vector>
// Mutex guarding the in-memory log queue.
std::mutex log_mutex;
// Lines produced but not yet flushed to disk (producer -> consumer queue).
std::list<std::string> cached_logs;
// Destination file; opened by init_log_file(), closed by uninit_log_file().
FILE* log_file = nullptr;

// Open "my.log" for appending (the file is created if it does not exist).
// Returns true on success.
bool init_log_file()
{
    log_file = fopen("my.log", "a+");
    return log_file != nullptr;
}

// Close the log file if it was opened.
void uninit_log_file()
{
    if (log_file != nullptr)
        fclose(log_file);
}

// Append one line to the log file and flush it.
// Returns false when the file is not open or the write was short.
bool write_log_tofile(const std::string& line)
{
    if (log_file == nullptr)
        return false;
    const size_t written = fwrite(line.c_str(), 1, line.length(), log_file);
    if (written != line.length())
        return false;
    fflush(log_file);  // push the line out so it is visible immediately
    return true;
}

// Producer thread: builds one log line every 100 ms and queues it.
void log_producer()
{
    int counter = 0;
    while (true) {
        ++counter;
        std::ostringstream stream;
        stream << "This is log, index :" << counter
               << ", producer threadID:" << std::this_thread::get_id() << "\n";
        {
            std::lock_guard<std::mutex> guard(log_mutex);
            cached_logs.emplace_back(stream.str());
        }
        // Sleep 100 ms between produced lines.
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }
}

// Consumer thread: polls the queue; writes a dequeued line to the file,
// or backs off for one second when the queue was empty.
void log_consumer()
{
    std::string pending;
    while (true) {
        {
            std::lock_guard<std::mutex> guard(log_mutex);
            if (!cached_logs.empty()) {
                pending = cached_logs.front();
                cached_logs.pop_front();
            }
        }
        if (pending.empty()) {
            // Nothing was dequeued: the queue was empty, poll again later.
            std::this_thread::sleep_for(std::chrono::milliseconds(1000));
            continue;
        }
        write_log_tofile(pending);
        pending.clear();
    }
}

int main(int argc, char* argv[])
{
    if (!init_log_file()) {
        std::cout << "init log file error." << std::endl;
        return -1;
    }
    // Three producers first, then three consumers; joined in creation order.
    std::vector<std::thread> producers;
    for (int i = 0; i < 3; ++i)
        producers.emplace_back(log_producer);
    std::vector<std::thread> consumers;
    for (int i = 0; i < 3; ++i)
        consumers.emplace_back(log_consumer);
    for (auto& t : producers)
        t.join();
    for (auto& t : consumers)
        t.join();
    uninit_log_file();
    return 0;
}
效果:
This is log, index :1, producer threadID:139910877185792
This is log, index :1, producer threadID:139910868793088
This is log, index :1, producer threadID:139910860400384
This is log, index :2, producer threadID:139910877185792
This is log, index :2, producer threadID:139910860400384
This is log, index :3, producer threadID:139910877185792
This is log, index :3, producer threadID:139910860400384
This is log, index :2, producer threadID:139910868793088
This is log, index :3, producer threadID:139910868793088
This is log, index :4, producer threadID:139910877185792
This is log, index :4, producer threadID:139910860400384
This is log, index :4, producer threadID:139910868793088
This is log, index :5, producer threadID:139910877185792
This is log, index :5, producer threadID:139910860400384
This is log, index :5, producer threadID:139910868793088
This is log, index :6, producer threadID:139910860400384
This is log, index :6, producer threadID:139910868793088
This is log, index :7, producer threadID:139910877185792
This is log, index :7, producer threadID:139910860400384
This is log, index :7, producer threadID:139910868793088
This is log, index :8, producer threadID:139910877185792
This is log, index :8, producer threadID:139910860400384
This is log, index :8, producer threadID:139910868793088
This is log, index :9, producer threadID:139910877185792
This is log, index :9, producer threadID:139910860400384
This is log, index :9, producer threadID:139910868793088
This is log, index :6, producer threadID:139910877185792
This is log, index :10, producer threadID:139910860400384
This is log, index :10, producer threadID:139910868793088
This is log, index :10, producer threadID:139910877185792
This is log, index :11, producer threadID:139910860400384
This is log, index :11, producer threadID:139910868793088
This is log, index :12, producer threadID:139910860400384
This is log, index :12, producer threadID:139910868793088
This is log, index :11, producer threadID:139910877185792
This is log, index :12, producer threadID:139910877185792
This is log, index :13, producer threadID:139910860400384
This is log, index :13, producer threadID:139910868793088
This is log, index :13, producer threadID:139910877185792
This is log, index :14, producer threadID:139910868793088
This is log, index :14, producer threadID:139910877185792
This is log, index :15, producer threadID:139910868793088
This is log, index :15, producer threadID:139910877185792
This is log, index :14, producer threadID:139910860400384
This is log, index :15, producer threadID:139910860400384
This is log, index :16, producer threadID:139910877185792
This is log, index :16, producer threadID:139910860400384
This is log, index :17, producer threadID:139910877185792
This is log, index :17, producer threadID:139910860400384
This is log, index :17, producer threadID:139910868793088
This is log, index :16, producer threadID:139910868793088
This is log, index :18, producer threadID:139910877185792
This is log, index :19, producer threadID:139910860400384
This is log, index :19, producer threadID:139910877185792
This is log, index :19, producer threadID:139910868793088
This is log, index :20, producer threadID:139910860400384
This is log, index :18, producer threadID:139910860400384
This is log, index :20, producer threadID:139910877185792
This is log, index :18, producer threadID:139910868793088
This is log, index :20, producer threadID:139910868793088
This is log, index :21, producer threadID:139910868793088
This is log, index :21, producer threadID:139910877185792
This is log, index :21, producer threadID:139910860400384
This is log, index :22, producer threadID:139910860400384
This is log, index :22, producer threadID:139910877185792
This is log, index :22, producer threadID:139910868793088
This is log, index :23, producer threadID:139910860400384
This is log, index :23, producer threadID:139910877185792
This is log, index :23, producer threadID:139910868793088
This is log, index :24, producer threadID:139910860400384
This is log, index :24, producer threadID:139910868793088
This is log, index :24, producer threadID:139910877185792
This is log, index :25, producer threadID:139910860400384
This is log, index :25, producer threadID:139910868793088
This is log, index :25, producer threadID:139910877185792
This is log, index :26, producer threadID:139910868793088
This is log, index :26, producer threadID:139910860400384
This is log, index :26, producer threadID:139910877185792
This is log, index :27, producer threadID:139910868793088
This is log, index :27, producer threadID:139910860400384
This is log, index :27, producer threadID:139910877185792
This is log, index :28, producer threadID:139910868793088
This is log, index :28, producer threadID:139910877185792
This is log, index :28, producer threadID:139910860400384
This is log, index :29, producer threadID:139910877185792
This is log, index :29, producer threadID:139910868793088
This is log, index :29, producer threadID:139910860400384
This is log, index :30, producer threadID:139910877185792
This is log, index :30, producer threadID:139910868793088
This is log, index :30, producer threadID:139910860400384
This is log, index :31, producer threadID:139910868793088
This is log, index :31, producer threadID:139910877185792
This is log, index :31, producer threadID:139910860400384
This is log, index :32, producer threadID:139910877185792
This is log, index :32, producer threadID:139910868793088
This is log, index :32, producer threadID:139910860400384
This is log, index :33, producer threadID:139910860400384
This is log, index :33, producer threadID:139910868793088
This is log, index :33, producer threadID:139910877185792
This is log, index :34, producer threadID:139910860400384
This is log, index :34, producer threadID:139910877185792
This is log, index :34, producer threadID:139910868793088
This is log, index :35, producer threadID:139910860400384
This is log, index :35, producer threadID:139910868793088
This is log, index :35, producer threadID:139910877185792
This is log, index :36, producer threadID:139910877185792
This is log, index :36, producer threadID:139910868793088
This is log, index :37, producer threadID:139910860400384
This is log, index :37, producer threadID:139910877185792
This is log, index :37, producer threadID:139910868793088
This is log, index :38, producer threadID:139910860400384
This is log, index :38, producer threadID:139910877185792
This is log, index :38, producer threadID:139910868793088
This is log, index :39, producer threadID:139910860400384
This is log, index :39, producer threadID:139910877185792
This is log, index :39, producer threadID:139910868793088
This is log, index :40, producer threadID:139910860400384
This is log, index :40, producer threadID:139910877185792
This is log, index :40, producer threadID:139910868793088
This is log, index :36, producer threadID:139910860400384
This is log, index :41, producer threadID:139910860400384
This is log, index :41, producer threadID:139910877185792
This is log, index :42, producer threadID:139910860400384
This is log, index :42, producer threadID:139910877185792
This is log, index :42, producer threadID:139910868793088
This is log, index :43, producer threadID:139910860400384
This is log, index :43, producer threadID:139910868793088
This is log, index :43, producer threadID:139910877185792
This is log, index :44, producer threadID:139910860400384
This is log, index :44, producer threadID:139910868793088
This is log, index :44, producer threadID:139910877185792
This is log, index :45, producer threadID:139910860400384
This is log, index :45, producer threadID:139910868793088
This is log, index :45, producer threadID:139910877185792
This is log, index :46, producer threadID:139910860400384
This is log, index :46, producer threadID:139910868793088
This is log, index :46, producer threadID:139910877185792
This is log, index :47, producer threadID:139910860400384
This is log, index :47, producer threadID:139910868793088
This is log, index :47, producer threadID:139910877185792
This is log, index :48, producer threadID:139910860400384
This is log, index :48, producer threadID:139910868793088
This is log, index :48, producer threadID:139910877185792
This is log, index :49, producer threadID:139910860400384
This is log, index :49, producer threadID:139910877185792
This is log, index :49, producer threadID:139910868793088
This is log, index :50, producer threadID:139910877185792
This is log, index :50, producer threadID:139910860400384
This is log, index :50, producer threadID:139910868793088
This is log, index :41, producer threadID:139910868793088
優化版本1
上面的代碼,在當前緩存隊列沒有日志記錄的時候,消費日志線程會做無用功。
這里可以使用條件變量,如果當前隊列中沒有日志記錄,就將日志消費者線程掛起;
當產生了新的日志后,signal條件變量,喚醒消費線程,將日志從隊列中取出,并寫入文件。
下面是主要修改
#include <condition_variable>
std::condition_variable log_cv;
void log_producer()
{int index = 0;while (true) {++index;std::ostringstream os;os << "This is log, index :" << index << ", producer threadID:" << std::this_thread::get_id() << "\n";{std::lock_guard<std::mutex> lock(log_mutex);cached_logs.emplace_back(os.str());log_cv.notify_one();}// 生產出一個log之后,休眠100ms再生產std::chrono::milliseconds duration(100);std::this_thread::sleep_for(duration);}
}void log_consumer()
{std::string line;while (true) {{std::unique_lock<std::mutex> lock(log_mutex);while (cached_logs.empty()) {// 無限等待log_cv.wait(lock);}line = cached_logs.front();cached_logs.pop_front();}// 如果取出來的行為空,說明隊列里面是空的,消費者休眠一會兒再去消費if (line.empty()) {std::chrono::milliseconds duration(1000);std::this_thread::sleep_for(duration);continue;}// 否則將line寫入到log_file中write_log_tofile(line);line.clear();}
}
優化版本2
還可以使用信號量來設計異步日志系統。
信號量是帶有資源計數的線程同步對象,每產生一條日志,就將信號量資源計數+1,日志消費線程默認等待這個信號量是否signal,如果signal,就喚醒一個日志消費線程,信號量計數自動-1。如果當前資源計數為0,則將消費者自動掛起。
在C++20之前,標準庫沒有提供跨平臺的信號量封裝(C++20引入了std::counting_semaphore),這里以Linux系統為例:
相比前面的版本,運行相同的時間,能明顯感覺寫入的日志更多了。
#include <unistd.h>
#include <iostream>
#include <thread>
#include <mutex>
#include <list>
#include <string>
#include <sstream>
#include <semaphore.h>

// Mutex guarding the log queue. It is statically initialized here; the
// redundant pthread_mutex_init() call that used to run in init() was
// removed, because POSIX says initializing an already-initialized mutex
// is undefined behavior.
pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;
// Counting semaphore: its count equals the number of queued log lines.
sem_t log_semphore;
std::list<std::string> cached_logs;
FILE* log_file = nullptr;

// Create the semaphore and open the log file. Returns true on success.
bool init()
{
    // pshared = 0: shared between threads of this process only;
    // initial count 0: nothing to consume yet.
    sem_init(&log_semphore, 0, 0);
    // Append mode; the file is created if it does not exist.
    // Fixed: the mode string was "a++", which is not a valid fopen() mode.
    log_file = fopen("my.log", "a+");
    return log_file != nullptr;
}

// Release the synchronization objects and close the log file.
void uninit()
{
    pthread_mutex_destroy(&log_mutex);
    sem_destroy(&log_semphore);
    if (log_file != nullptr)
        fclose(log_file);
}

// Append one line to the log file and flush. Returns false on failure.
bool write_log_tofile(const std::string& line)
{
    if (log_file == nullptr)
        return false;
    // A long line should really be written in chunks, because a single
    // fwrite() may write only part of the data; kept simple here.
    if (fwrite((void *)line.c_str(), 1, line.length(), log_file) != line.length())
        return false;
    fflush(log_file);
    return true;
}

// Producer thread: queue one log line, then post the semaphore so exactly
// one consumer is released for it.
void* log_producer(void* arg)
{
    int index = 0;
    while (true) {
        ++index;
        std::ostringstream os;
        os << "This is log, index :" << index << ", producer threadID:"
           << std::this_thread::get_id() << "\n";
        pthread_mutex_lock(&log_mutex);
        cached_logs.emplace_back(os.str());
        pthread_mutex_unlock(&log_mutex);
        sem_post(&log_semphore);  // one more queued line
        usleep(1000);             // produce every 1 ms
    }
}

// Consumer thread: block on the semaphore until a line is available, then
// dequeue it under the mutex and write it to the file.
void* log_consumer(void* arg)
{
    std::string line;
    while (true) {
        // Blocks while the semaphore count is 0.
        sem_wait(&log_semphore);
        pthread_mutex_lock(&log_mutex);
        if (!cached_logs.empty()) {
            line = cached_logs.front();
            cached_logs.pop_front();
        }
        pthread_mutex_unlock(&log_mutex);
        // Defensive: if nothing was dequeued, back off and retry.
        if (line.empty()) {
            sleep(1);
            continue;
        }
        write_log_tofile(line);
        line.clear();
    }
}

int main(int argc, char* argv[])
{
    if (!init()) {
        std::cout << "init log file error." << std::endl;
        return -1;
    }
    // Three producer threads.
    pthread_t producer_thread_id[3];
    for (size_t i = 0; i < sizeof(producer_thread_id) / sizeof(producer_thread_id[0]); ++i) {
        pthread_create(&producer_thread_id[i], NULL, log_producer, NULL);
    }
    // Three consumer threads.
    pthread_t consumer_thread_id[3];
    for (size_t i = 0; i < sizeof(consumer_thread_id) / sizeof(consumer_thread_id[0]); ++i) {
        pthread_create(&consumer_thread_id[i], NULL, log_consumer, NULL);
    }
    // Wait for producers, then consumers. In practice this never returns:
    // the worker loops are infinite.
    for (size_t i = 0; i < sizeof(producer_thread_id) / sizeof(producer_thread_id[0]); ++i) {
        pthread_join(producer_thread_id[i], NULL);
    }
    for (size_t i = 0; i < sizeof(consumer_thread_id) / sizeof(consumer_thread_id[0]); ++i) {
        pthread_join(consumer_thread_id[i], NULL);
    }
    uninit();
    return 0;
}