由于在內網發送http請求同一個token會限制次數,所以很容易達到網關流量上限。
業務中使用了多線程并發,一個線程發起一次http請求,得到正確結果后返回。這里采用的策略是,如果解析出來達到流量上限,那么該線程休眠一段時間,然后重試請求,如果還是失敗,那么繼續休眠,每次休眠的時間隨著重試輪次增加:
# Probe whether the gateway rate limit has been hit
def probe_func(m_url, m_headers, m_json, m_timeout):
    """POST once and check the gateway's application-level status code.

    Args:
        m_url: target URL.
        m_headers: HTTP headers (carries the token subject to rate limiting).
        m_json: JSON request body.
        m_timeout: request timeout in seconds.

    Returns:
        The requests.Response when the payload's 'code' field is 0,
        otherwise None (treated by callers as "rate limit hit / failed").
    """
    response = requests.post(url=m_url, headers=m_headers, json=m_json, timeout=m_timeout)
    payload = response.json()
    # A non-zero 'code' in the body signals failure (e.g. traffic cap reached).
    if payload['code'] == 0:
        return response
    return None
# Parse the payload; does not repeat the checks done in probe_func
def parse(json_rep, room_name, metric_name):if json_rep == None: logging.info(room_name + " json_rep == None")return 0if (json_rep.content and json_rep.status_code != 204 and json_rep.headers["content-type"].strip().startswith("application/json")):zhiyan_data = json_rep.json()if len(zhiyan_data['data']) == 0:logging.warning(zhiyan_data['日志信息拉取無結果'])return 0else:res = zhiyan_data['data']['chart_info'][0]['key_data_list'][3]['current']logging.info(room_name + str(res))if str(res) == "None":logging.warning(room_name + ":拉取zhiyan_data:" + metric_name + " 出現了問題,拉取數據為None")return 0else:return reselse:return 0# 具有可靠性地獲取數據
def request_post_reliable(m_url, m_headers, m_json, m_timeout):sleep_time_s = 1sleep_time_max = 60res = probe_func(m_url, m_headers, m_json, m_timeout)# 如果探測失敗則線程睡眠一段時間后再嘗試while (res == None):logging.info("探測失敗,線程睡眠"+str(sleep_time_s)+"秒")time.sleep(sleep_time_s)tmp = sleep_time_s * 2if tmp < sleep_time_max:sleep_time_s = tmpelse:sleep_time_s = sleep_time_maxlogging.info("睡眠結束,線程重新探測")res = probe_func(m_url, m_headers, m_json, m_timeout)# 直到探測成功,返回正確結果return res