I read through the librtmp source code and took some brief notes.
First, some background on the AMF format.
1 AMF Format
AMF is short for Action Message Format, a binary data format. It was designed to serialize ActionScript data (Object, Array, Boolean, Number, and so on) into a byte stream that can be sent to another program, for example a remote server, which can then deserialize it back into the original values, thereby transferring data between the two sides.
1.1 AMFObject
AMF comes in two flavors: 1. AMF0, the basic encoding rules; 2. AMF3, an extension of AMF0.
AMF0 data types:
// AMF0 data types
typedef enum
{
    AMF_NUMBER = 0,       // number (double)
    AMF_BOOLEAN,          // boolean
    AMF_STRING,           // string
    AMF_OBJECT,           // object
    AMF_MOVIECLIP,        // reserved, not used
    AMF_NULL,             // null
    AMF_UNDEFINED,        // undefined
    AMF_REFERENCE,        // reference
    AMF_ECMA_ARRAY,       // array
    AMF_OBJECT_END,       // object end marker
    AMF_STRICT_ARRAY,     // strict array
    AMF_DATE,             // date
    AMF_LONG_STRING,      // long string
    AMF_UNSUPPORTED,      // unsupported
    AMF_RECORDSET,        // reserved, not used
    AMF_XML_DOC,          // XML document
    AMF_TYPED_OBJECT,     // typed object
    AMF_AVMPLUS,          // switch to AMF3
    AMF_INVALID = 0xff    // invalid
} AMFDataType;
AMF3 data types:
// AMF3 data types
typedef enum
{
    AMF3_UNDEFINED = 0,   // undefined
    AMF3_NULL,            // null
    AMF3_FALSE,           // false
    AMF3_TRUE,            // true
    AMF3_INTEGER,         // integer
    AMF3_DOUBLE,          // double
    AMF3_STRING,          // string
    AMF3_XML_DOC,         // XML document
    AMF3_DATE,            // date
    AMF3_ARRAY,           // array
    AMF3_OBJECT,          // object
    AMF3_XML,             // XML
    AMF3_BYTE_ARRAY       // byte array
} AMF3DataType;
AMF defines its own string type:
// AMF's own string type
typedef struct AVal
{
    char *av_val;
    int av_len;
} AVal;

// quick initialization of an AVal from a string literal
#define AVC(str)    {str, sizeof(str)-1}

// compare two AVal strings
#define AVMATCH(a1,a2)    ((a1)->av_len == (a2)->av_len && !memcmp((a1)->av_val,(a2)->av_val,(a1)->av_len))
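A minimal usage sketch of AVC and AVMATCH (the "connect" literals are just example data):

#include <string.h>

/* example: two AVal constants built with AVC and compared with AVMATCH */
static const AVal av_connect_a = AVC("connect");
static const AVal av_connect_b = AVC("connect");

static int is_connect(void)
{
    /* AVMATCH compares the lengths first, then the raw bytes */
    return AVMATCH(&av_connect_a, &av_connect_b);   /* evaluates to 1 here */
}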
AMFObject represents an AMF object; o_num is the number of entries in o_props, and an object can contain any number of properties.
// An AMF object is simply a list of properties
typedef struct AMFObject
{
    int o_num;                          // number of properties
    struct AMFObjectProperty *o_props;  // property array
} AMFObject;
AMFObjectProperty represents one property of an AMF object, i.e. a key-value pair: p_name is the key, p_type is the type of the value, and p_vu holds the value itself.
// a property of an AMF object
typedef struct AMFObjectProperty
{
    AVal p_name;             // property name
    AMFDataType p_type;      // property type
    union
    {
        double p_number;
        AVal p_aval;
        AMFObject p_object;
    } p_vu;                  // property value
    int16_t p_UTCoffset;     // UTC offset
} AMFObjectProperty;
p_vu is a union so that one field can hold whichever representation p_type calls for:
when p_type is a number, p_vu holds the double p_number;
when p_type is a string, p_vu holds the AVal p_aval;
when p_type is an object, p_vu holds the AMFObject p_object.
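A minimal sketch of reading p_vu according to p_type (dump_prop is a hypothetical helper; librtmp's own accessors such as AMFProp_GetNumber and AMFProp_GetString in amf.c do the equivalent):

#include <stdio.h>

/* sketch: always dispatch on p_type before touching the union */
static void dump_prop(const AMFObjectProperty *p)
{
    switch (p->p_type)
    {
    case AMF_NUMBER:
        printf("number: %f\n", p->p_vu.p_number);
        break;
    case AMF_STRING:
        printf("string: %.*s\n", p->p_vu.p_aval.av_len, p->p_vu.p_aval.av_val);
        break;
    case AMF_OBJECT:
        printf("object with %d properties\n", p->p_vu.p_object.o_num);
        break;
    default:
        break;
    }
}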
1.2 Encoding Format
Number (double):
0x00 + 8-byte double
Boolean:
0x01 + 1-byte boolean value
Short string:
0x02 + 2-byte length + string bytes
Long string:
0x0C + 4-byte length + string bytes
Object:
0x03 + 2-byte property-1 name length + property-1 name + 1-byte property-1 type + n-byte property-1 value + 2-byte property-2 name length + property-2 name + 1-byte property-2 type + n-byte property-2 value + ... + 3-byte end marker (0x00 0x00 0x09)
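To make the object layout concrete, here is a small hand-encoding sketch that builds the AMF0 object { app: "live" } with librtmp's encoder helpers (the "app"/"live" values and the 128-byte buffer are example data; the byte comments show what ends up in the buffer):

#include "amf.h"    /* AVal, AVC, AMF_OBJECT, AMF_OBJECT_END, AMF_EncodeNamedString */

/* sketch: encode { app: "live" } into buf and return the encoded length */
static int encode_example(char *buf, int bufsize)
{
    char *enc = buf, *pend = buf + bufsize;
    AVal name  = AVC("app");
    AVal value = AVC("live");

    *enc++ = AMF_OBJECT;                                    /* 0x03 */
    enc = AMF_EncodeNamedString(enc, pend, &name, &value);
    /* -> 0x00 0x03 'a' 'p' 'p'   0x02 0x00 0x04 'l' 'i' 'v' 'e' */
    *enc++ = 0;                                             /* object end marker: */
    *enc++ = 0;
    *enc++ = AMF_OBJECT_END;                                /* 0x00 0x00 0x09 */
    return (int)(enc - buf);
}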
2 librtmp Source Code Analysis
2.1 RTMP_ParseURL
Parses the URL and extracts the protocol, host name (host), port, application name (app) and playpath.
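A minimal call sketch, assuming the prototype declared in rtmp.h (the URL is a made-up example):

#include "rtmp.h"

/* sketch: split an RTMP URL into its components */
static void parse_example(void)
{
    AVal host, playpath, app;
    unsigned int port = 0;
    int protocol = 0;

    if (RTMP_ParseURL("rtmp://example.com:1935/live/stream1",
                      &protocol, &host, &port, &playpath, &app))
    {
        /* roughly: host = "example.com", port = 1935, app = "live", playpath = "stream1" */
    }
}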
2.2 HandShake
Part of the HandShake in handshake.h deals with the encrypted RTMP variants; since only plain RTMP is considered here, the HandShake in rtmp.c is analyzed instead.
static int
HandShake(RTMP *r, int FP9HandShake)
{
    int i;
    uint32_t uptime, suptime;
    int bMatch;
    char type;
    char clientbuf[RTMP_SIG_SIZE + 1], *clientsig = clientbuf + 1;
    char serversig[RTMP_SIG_SIZE];

    clientbuf[0] = 0x03;                /* not encrypted */

    uptime = htonl(RTMP_GetTime());
    memcpy(clientsig, &uptime, 4);

    memset(&clientsig[4], 0, 4);
#ifdef _DEBUG
    for (i = 8; i < RTMP_SIG_SIZE; i++)
        clientsig[i] = 0xff;
#else
    for (i = 8; i < RTMP_SIG_SIZE; i++)
        clientsig[i] = (char)(rand() % 256);
#endif

    if (!WriteN(r, clientbuf, RTMP_SIG_SIZE + 1))
        return FALSE;

    if (ReadN(r, &type, 1) != 1)        /* 0x03 or 0x06 */
        return FALSE;

    RTMP_Log(RTMP_LOGDEBUG, "%s: Type Answer   : %02X", __FUNCTION__, type);

    if (type != clientbuf[0])
        RTMP_Log(RTMP_LOGWARNING, "%s: Type mismatch: client sent %d, server answered %d",
                 __FUNCTION__, clientbuf[0], type);

    if (ReadN(r, serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE)
        return FALSE;

    /* decode server response */
    memcpy(&suptime, serversig, 4);
    suptime = ntohl(suptime);

    RTMP_Log(RTMP_LOGDEBUG, "%s: Server Uptime : %d", __FUNCTION__, suptime);
    RTMP_Log(RTMP_LOGDEBUG, "%s: FMS Version   : %d.%d.%d.%d", __FUNCTION__,
             serversig[4], serversig[5], serversig[6], serversig[7]);

    /* 2nd part of handshake */
    if (!WriteN(r, serversig, RTMP_SIG_SIZE))
        return FALSE;

    if (ReadN(r, serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE)
        return FALSE;

    bMatch = (memcmp(serversig, clientsig, RTMP_SIG_SIZE) == 0);
    if (!bMatch)
    {
        RTMP_Log(RTMP_LOGWARNING, "%s, client signature does not match!", __FUNCTION__);
    }
    return TRUE;
}
1) Fill C0 = 0x03; fill C1 with a timestamp plus random bytes, 1536 bytes in total.
2) Send C0 and C1 to the server.
3) Read S0 from the server and check that it is 0x03, then read S1.
4) Send S1 back to the server as C2.
5) Read S2 from the server and compare it against C1; if they match, the handshake succeeded.
2.3 RTMP_Connect
Establishes the NetConnection.
It mainly calls two functions: RTMP_Connect0 and RTMP_Connect1.
RTMP_Connect0
Establishes the TCP socket connection.
RTMP_Connect1
Establishes the RTMP connection: HandShake performs the handshake, then SendConnectPacket sends the "connect" command to set up the RTMP connection.
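For context, a minimal sketch of a pull-side client built on these entry points (the URL is a made-up example; error handling omitted):

#include "rtmp.h"

/* sketch of the typical librtmp pull flow */
static void pull_example(void)
{
    char url[] = "rtmp://example.com/live/stream1";
    char buf[4096];
    int n;

    RTMP *r = RTMP_Alloc();
    RTMP_Init(r);

    RTMP_SetupURL(r, url);          /* RTMP_ParseURL happens inside */
    RTMP_Connect(r, NULL);          /* Connect0 + Connect1 (handshake + "connect") */
    RTMP_ConnectStream(r, 0);       /* "createStream" / "play" */

    while ((n = RTMP_Read(r, buf, sizeof(buf))) > 0)
    {
        /* buf now holds FLV data (header + tags), ready to be written to a file */
    }

    RTMP_Close(r);
    RTMP_Free(r);
}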
SendConnectPacket
Fill in the packet header:
m_nChannel --> chunk stream ID
m_headerType --> the fmt field of the basic header in the chunk header
m_packetType --> Message Type ID; 0x14 here, i.e. a command message
m_nTimeStamp --> timestamp
m_nInfoField2 --> when the chunk fmt is 0, the last four bytes of the header, i.e. the Message Stream ID
m_hasAbsTimestamp --> whether the timestamp is absolute or a delta: absolute for chunk type 0, a timestamp delta for the other types
packet.m_nChannel = 0x03;       /* control channel (invoke) */
packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
packet.m_packetType = RTMP_PACKET_TYPE_INVOKE;
packet.m_nTimeStamp = 0;
packet.m_nInfoField2 = 0;
packet.m_hasAbsTimestamp = 0;
packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;
The SAVC macro turns av_x into the string "x"; for example, av_connect becomes "connect":
#define SAVC(x)    static const AVal av_##x = AVC(#x)

SAVC(app);
SAVC(connect);
SAVC(flashVer);
SAVC(swfUrl);
SAVC(pageUrl);
SAVC(tcUrl);
SAVC(fpad);
SAVC(capabilities);
SAVC(audioCodecs);
SAVC(videoCodecs);
SAVC(videoFunction);
SAVC(objectEncoding);
SAVC(secureToken);
SAVC(secureTokenResponse);
SAVC(type);
SAVC(nonprivate);
按照RTMP協議規范?7.2.1.1 發送connect命令
命令名"connect"AMF編碼
enc = AMF_EncodeString(enc, pend, &av_connect);?
Transaction ID AMF編碼
enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);?
設置connect 命令中使用的名值對對象
對象格式起始(0x3)
*enc++ = AMF_OBJECT;
屬性"app",名字為r->Link.app
enc = AMF_EncodeNamedString(enc, pend, &av_app, &r->Link.app);?
屬性"flashver"
enc = AMF_EncodeNamedString(enc, pend, &av_flashVer, &r->Link.flashVer);
屬性"swfUrl"
enc = AMF_EncodeNamedString(enc, pend, &av_swfUrl, &r->Link.swfUrl);
屬性"tcUrl"
enc = AMF_EncodeNamedString(enc, pend, &av_tcUrl, &r->Link.tcUrl);
屬性"fpad"
enc = AMF_EncodeNamedBoolean(enc, pend, &av_fpad, FALSE);
屬性"capabilities"
enc = AMF_EncodeNamedNumber(enc, pend, &av_capabilities, 15.0);
屬性"audioCodecs"
enc = AMF_EncodeNamedNumber(enc, pend, &av_audioCodecs, r->m_fAudioCodecs);
屬性"videoCodecs"
enc = AMF_EncodeNamedNumber(enc, pend, &av_videoCodecs, r->m_fVideoCodecs);
屬性"videoFunction"
enc = AMF_EncodeNamedNumber(enc, pend, &av_videoFunction, 1.0);
屬性"pageUrl"
enc = AMF_EncodeNamedString(enc, pend, &av_pageUrl, &r->Link.pageUrl);
屬性"objectEncoding"
enc = AMF_EncodeNamedNumber(enc, pend, &av_objectEncoding, r->m_fEncoding);
對象格式結束(0x00 0x00 0x09)
*enc++ = 0;
*enc++ = 0;? ? ? ? ? ??
*enc++ = AMF_OBJECT_END;
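After the body has been encoded, SendConnectPacket derives the body size from the write pointer and hands the packet to RTMP_SendPacket, roughly like this (optional auth/extras handling omitted):

packet.m_nBodySize = enc - packet.m_body;
return RTMP_SendPacket(r, &packet, TRUE);   /* chunk the packet and send it on the socket */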
Wireshark capture of the connect command (screenshot not reproduced here).
2.4 RTMP_ConnectStream
Creates a NetStream on top of the NetConnection.
It mainly calls two functions: RTMP_ReadPacket and RTMP_ClientPacket.
RTMP_ReadPacket
Receives a chunk from the socket and parses it into an RTMPPacket. Note that the unit here is a chunk rather than a message: messages are split into chunks for transmission, so one message may span several chunks, and the message is only processed once all of its chunks have been read.
The chunk is parsed as described in section 5.3.1 of the RTMP specification.
int
RTMP_ReadPacket(RTMP *r, RTMPPacket *packet)
{
    uint8_t hbuf[RTMP_MAX_HEADER_SIZE] = { 0 };
    char *header = (char *)hbuf;
    int nSize, hSize, nToRead, nChunk;
    int didAlloc = FALSE;
    int extendedTimestamp;

    RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d", __FUNCTION__, r->m_sb.sb_socket);

    if (ReadN(r, (char *)hbuf, 1) == 0)
    {
        RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header", __FUNCTION__);
        return FALSE;
    }
    packet->m_headerType = (hbuf[0] & 0xc0) >> 6;   // fmt (2 bits)
    packet->m_nChannel = (hbuf[0] & 0x3f);          // chunk stream ID (2-63)
    header++;
    if (packet->m_nChannel == 0)    // first byte 0: the chunk stream ID takes 2 bytes, range 64-319 (second byte + 64)
    {
        if (ReadN(r, (char *)&hbuf[1], 1) != 1)
        {
            RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 2nd byte",
                     __FUNCTION__);
            return FALSE;
        }
        packet->m_nChannel = hbuf[1];
        packet->m_nChannel += 64;
        header++;
    }
    else if (packet->m_nChannel == 1)   // first byte 1: the chunk stream ID takes 3 bytes, range 64-65599 (third byte * 256 + second byte + 64)
    {
        int tmp;
        if (ReadN(r, (char *)&hbuf[1], 2) != 2)
        {
            RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 3nd byte",
                     __FUNCTION__);
            return FALSE;
        }
        tmp = (hbuf[2] << 8) + hbuf[1];
        packet->m_nChannel = tmp + 64;
        RTMP_Log(RTMP_LOGDEBUG, "%s, m_nChannel: %0x", __FUNCTION__, packet->m_nChannel);
        header += 2;
    }

    nSize = packetSize[packet->m_headerType];   // 4 chunk message header types with sizes 11/7/3/0; the table values are each one larger

    if (packet->m_nChannel >= r->m_channelsAllocatedIn)
    {
        int n = packet->m_nChannel + 10;
        int *timestamp = realloc(r->m_channelTimestamp, sizeof(int) * n);
        RTMPPacket **packets = realloc(r->m_vecChannelsIn, sizeof(RTMPPacket*) * n);
        if (!timestamp)
            free(r->m_channelTimestamp);
        if (!packets)
            free(r->m_vecChannelsIn);
        r->m_channelTimestamp = timestamp;
        r->m_vecChannelsIn = packets;
        if (!timestamp || !packets) {
            r->m_channelsAllocatedIn = 0;
            return FALSE;
        }
        memset(r->m_channelTimestamp + r->m_channelsAllocatedIn, 0,
               sizeof(int) * (n - r->m_channelsAllocatedIn));
        memset(r->m_vecChannelsIn + r->m_channelsAllocatedIn, 0,
               sizeof(RTMPPacket*) * (n - r->m_channelsAllocatedIn));
        r->m_channelsAllocatedIn = n;
    }

    if (nSize == RTMP_LARGE_HEADER_SIZE)    /* if we get a full header the timestamp is absolute */
        packet->m_hasAbsTimestamp = TRUE;   // the 11-byte full chunk message header carries an absolute timestamp
    else if (nSize < RTMP_LARGE_HEADER_SIZE)
    {   /* using values from the last message of this channel */
        if (r->m_vecChannelsIn[packet->m_nChannel])
            memcpy(packet, r->m_vecChannelsIn[packet->m_nChannel],
                   sizeof(RTMPPacket));
    }
    nSize--;    // actual size of the chunk message header; minus 1 because the table above includes one extra byte

    if (nSize > 0 && ReadN(r, header, nSize) != nSize)
    {
        RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header. type: %x",
                 __FUNCTION__, (unsigned int)hbuf[0]);
        return FALSE;
    }

    hSize = nSize + (header - (char *)hbuf);    // bytes read so far = basic header + chunk message header

    if (nSize >= 3)     // chunk message header is 11/7/3 bytes for fmt 0/1/2
    {
        packet->m_nTimeStamp = AMF_DecodeInt24(header);     // first 3 bytes of the header: timestamp
        /*RTMP_Log(RTMP_LOGDEBUG, "%s, reading RTMP packet chunk on channel %x, headersz %i, timestamp %i, abs timestamp %i", __FUNCTION__, packet.m_nChannel, nSize, packet.m_nTimeStamp, packet.m_hasAbsTimestamp); */

        if (nSize >= 6)     // chunk message header is 11 or 7 bytes, fmt 0 or 1
        {
            packet->m_nBodySize = AMF_DecodeInt24(header + 3);  // msg length
            packet->m_nBytesRead = 0;

            if (nSize > 6)
            {
                packet->m_packetType = header[6];   // msg type id

                if (nSize == 11)
                    packet->m_nInfoField2 = DecodeInt32LE(header + 7);  // msg stream id, little-endian
            }
        }
    }

    extendedTimestamp = packet->m_nTimeStamp == 0xffffff;  // timestamp 0xffffff means a 4-byte extended timestamp follows
    if (extendedTimestamp)
    {
        if (ReadN(r, header + nSize, 4) != 4)
        {
            RTMP_Log(RTMP_LOGERROR, "%s, failed to read extended timestamp",
                     __FUNCTION__);
            return FALSE;
        }
        packet->m_nTimeStamp = AMF_DecodeInt32(header + nSize);
        hSize += 4;
    }

    RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)hbuf, hSize);

    if (packet->m_nBodySize > 0 && packet->m_body == NULL)
    {
        if (!RTMPPacket_Alloc(packet, packet->m_nBodySize))
        {
            RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__);
            return FALSE;
        }
        didAlloc = TRUE;
        packet->m_headerType = (hbuf[0] & 0xc0) >> 6;
    }

    nToRead = packet->m_nBodySize - packet->m_nBytesRead;
    nChunk = r->m_inChunkSize;
    if (nToRead < nChunk)
        nChunk = nToRead;

    /* Does the caller want the raw chunk? */
    if (packet->m_chunk)
    {
        packet->m_chunk->c_headerSize = hSize;                              // chunk header size
        memcpy(packet->m_chunk->c_header, hbuf, hSize);                     // chunk header bytes
        packet->m_chunk->c_chunk = packet->m_body + packet->m_nBytesRead;   // chunk data buffer
        packet->m_chunk->c_chunkSize = nChunk;                              // chunk size
    }

    // read one chunk's worth of data into the message body buffer
    if (ReadN(r, packet->m_body + packet->m_nBytesRead, nChunk) != nChunk)
    {
        RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet body. len: %u",
                 __FUNCTION__, packet->m_nBodySize);
        return FALSE;
    }

    RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)packet->m_body + packet->m_nBytesRead, nChunk);

    packet->m_nBytesRead += nChunk;

    /* keep the packet as ref for other packets on this channel */
    if (!r->m_vecChannelsIn[packet->m_nChannel])
        r->m_vecChannelsIn[packet->m_nChannel] = malloc(sizeof(RTMPPacket));
    memcpy(r->m_vecChannelsIn[packet->m_nChannel], packet, sizeof(RTMPPacket));
    if (extendedTimestamp)
    {
        r->m_vecChannelsIn[packet->m_nChannel]->m_nTimeStamp = 0xffffff;
    }

    if (RTMPPacket_IsReady(packet))     // the whole message has been read
    {
        /* make packet's timestamp absolute */
        if (!packet->m_hasAbsTimestamp)
            packet->m_nTimeStamp += r->m_channelTimestamp[packet->m_nChannel]; /* timestamps seem to be always relative!! */

        r->m_channelTimestamp[packet->m_nChannel] = packet->m_nTimeStamp;

        /* reset the data from the stored packet. we keep the header since we may use it later if a new packet for this channel */
        /* arrives and requests to re-use some info (small packet header) */
        r->m_vecChannelsIn[packet->m_nChannel]->m_body = NULL;
        r->m_vecChannelsIn[packet->m_nChannel]->m_nBytesRead = 0;
        r->m_vecChannelsIn[packet->m_nChannel]->m_hasAbsTimestamp = FALSE;  /* can only be false if we reuse header */
    }
    else
    {
        packet->m_body = NULL;  /* so it won't be erased on free */
    }

    return TRUE;
}
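To illustrate the chunk-versus-message point above, a caller normally keeps reading chunks of the same packet until the whole message is assembled. A minimal sketch of such a loop (this is roughly what RTMP_GetNextMediaPacket in rtmp.c does; r is assumed to be an already-connected RTMP handle):

/* sketch: keep reading chunks; only dispatch once the message is complete */
static void read_loop(RTMP *r)
{
    RTMPPacket packet = { 0 };
    while (RTMP_ReadPacket(r, &packet))
    {
        if (!RTMPPacket_IsReady(&packet))
            continue;                   /* message not fully assembled yet */
        RTMP_ClientPacket(r, &packet);  /* complete message: handle it */
        RTMPPacket_Free(&packet);
    }
}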
RTMP_ClientPacket
Dispatches on the type of the received message and reacts accordingly.
int
RTMP_ClientPacket(RTMP *r, RTMPPacket *packet)
{
    int bHasMediaPacket = 0;
    switch (packet->m_packetType)
    {
    case RTMP_PACKET_TYPE_CHUNK_SIZE:           // msg type 0x01: set chunk size
        /* chunk size */
        HandleChangeChunkSize(r, packet);
        break;

    case RTMP_PACKET_TYPE_BYTES_READ_REPORT:    // msg type 0x03: acknowledgement
        /* bytes read report */
        RTMP_Log(RTMP_LOGDEBUG, "%s, received: bytes read report", __FUNCTION__);
        break;

    case RTMP_PACKET_TYPE_CONTROL:              // msg type 0x04: user control
        /* ctrl */
        HandleCtrl(r, packet);
        break;

    case RTMP_PACKET_TYPE_SERVER_BW:            // msg type 0x05: window acknowledgement size
        /* server bw */
        HandleServerBW(r, packet);
        break;

    case RTMP_PACKET_TYPE_CLIENT_BW:            // msg type 0x06: set peer bandwidth
        /* client bw */
        HandleClientBW(r, packet);
        break;

    case RTMP_PACKET_TYPE_AUDIO:                // msg type 0x08: audio
        /* audio data */
        /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: audio %lu bytes", __FUNCTION__, packet.m_nBodySize); */
        HandleAudio(r, packet);
        bHasMediaPacket = 1;
        if (!r->m_mediaChannel)
            r->m_mediaChannel = packet->m_nChannel;
        if (!r->m_pausing)
            r->m_mediaStamp = packet->m_nTimeStamp;
        break;

    case RTMP_PACKET_TYPE_VIDEO:                // msg type 0x09: video
        /* video data */
        /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: video %lu bytes", __FUNCTION__, packet.m_nBodySize); */
        HandleVideo(r, packet);
        bHasMediaPacket = 1;
        if (!r->m_mediaChannel)
            r->m_mediaChannel = packet->m_nChannel;
        if (!r->m_pausing)
            r->m_mediaStamp = packet->m_nTimeStamp;
        break;

    case RTMP_PACKET_TYPE_FLEX_STREAM_SEND:     // msg type 0x0f: AMF3-encoded data message
        /* flex stream send */
        RTMP_Log(RTMP_LOGDEBUG,
                 "%s, flex stream send, size %u bytes, not supported, ignoring",
                 __FUNCTION__, packet->m_nBodySize);
        break;

    case RTMP_PACKET_TYPE_FLEX_SHARED_OBJECT:   // msg type 0x10: AMF3-encoded shared object message
        /* flex shared object */
        RTMP_Log(RTMP_LOGDEBUG,
                 "%s, flex shared object, size %u bytes, not supported, ignoring",
                 __FUNCTION__, packet->m_nBodySize);
        break;

    case RTMP_PACKET_TYPE_FLEX_MESSAGE:         // msg type 0x11: AMF3-encoded command message
        /* flex message */
        {
            RTMP_Log(RTMP_LOGDEBUG,
                     "%s, flex message, size %u bytes, not fully supported",
                     __FUNCTION__, packet->m_nBodySize);
            /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */

            /* some DEBUG code */
#if 0
            RTMP_LIB_AMFObject obj;
            int nRes = obj.Decode(packet.m_body+1, packet.m_nBodySize-1);
            if (nRes < 0) {
                RTMP_Log(RTMP_LOGERROR, "%s, error decoding AMF3 packet", __FUNCTION__);
                /*return; */
            }
            obj.Dump();
#endif

            if (HandleInvoke(r, packet->m_body + 1, packet->m_nBodySize - 1) == 1)
                bHasMediaPacket = 2;
            break;
        }
    case RTMP_PACKET_TYPE_INFO:                 // msg type 0x12: AMF0-encoded data message
        /* metadata (notify) */
        RTMP_Log(RTMP_LOGDEBUG, "%s, received: notify %u bytes", __FUNCTION__,
                 packet->m_nBodySize);
        if (HandleMetadata(r, packet->m_body, packet->m_nBodySize))
            bHasMediaPacket = 1;
        break;

    case RTMP_PACKET_TYPE_SHARED_OBJECT:        // msg type 0x13: AMF0-encoded shared object message
        RTMP_Log(RTMP_LOGDEBUG, "%s, shared object, not supported, ignoring",
                 __FUNCTION__);
        break;

    case RTMP_PACKET_TYPE_INVOKE:               // msg type 0x14: AMF0-encoded command message
        /* invoke */
        RTMP_Log(RTMP_LOGDEBUG, "%s, received: invoke %u bytes", __FUNCTION__,
                 packet->m_nBodySize);
        /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */

        if (HandleInvoke(r, packet->m_body, packet->m_nBodySize) == 1)
            bHasMediaPacket = 2;
        break;

    case RTMP_PACKET_TYPE_FLASH_VIDEO:          // msg type 0x16: aggregate message
        {
            /* go through FLV packets and handle metadata packets */
            unsigned int pos = 0;
            uint32_t nTimeStamp = packet->m_nTimeStamp;

            while (pos + 11 < packet->m_nBodySize)
            {
                uint32_t dataSize = AMF_DecodeInt24(packet->m_body + pos + 1);  /* size without header (11) and prevTagSize (4) */

                if (pos + 11 + dataSize + 4 > packet->m_nBodySize)
                {
                    RTMP_Log(RTMP_LOGWARNING, "Stream corrupt?!");
                    break;
                }
                if (packet->m_body[pos] == 0x12)
                {
                    HandleMetadata(r, packet->m_body + pos + 11, dataSize);
                }
                else if (packet->m_body[pos] == 8 || packet->m_body[pos] == 9)
                {
                    nTimeStamp = AMF_DecodeInt24(packet->m_body + pos + 4);
                    nTimeStamp |= (packet->m_body[pos + 7] << 24);
                }
                pos += (11 + dataSize + 4);
            }
            if (!r->m_pausing)
                r->m_mediaStamp = nTimeStamp;

            /* FLV tag(s) */
            /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: FLV tag(s) %lu bytes", __FUNCTION__, packet.m_nBodySize); */
            bHasMediaPacket = 1;
            break;
        }
    default:
        RTMP_Log(RTMP_LOGDEBUG, "%s, unknown packet type received: 0x%02x", __FUNCTION__,
                 packet->m_packetType);
#ifdef _DEBUG
        RTMP_LogHex(RTMP_LOGDEBUG, packet->m_body, packet->m_nBodySize);
#endif
    }

    return bHasMediaPacket;
}
The case of most interest is msg type 0x14, the AMF0-encoded command message. These are very common on an RTMP connection and carry the various control commands such as play, pause, and stop. They are processed by HandleInvoke.
HandleInvoke
Here we focus on the "createStream" flow.
After RTMP_Connect establishes the network connection, the server returns "_result" to the client.
If the "_result" answers "connect", the client calls RTMP_SendCreateStream to send "createStream" to the server, and the server again replies with "_result".
if (AVMATCH(&methodInvoked, &av_connect))
{
    ...
    RTMP_SendCreateStream(r);
    ...
}
If the "_result" answers "createStream": when publishing is enabled, SendPublish sends "publish"; otherwise SendPlaylist is called (if the playlist flag is set) to request the playlist, and SendPlay sends "play" to start receiving the media stream.
else if (AVMATCH(&methodInvoked, &av_createStream))
{
    r->m_stream_id = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 3));

    if (r->Link.protocol & RTMP_FEATURE_WRITE)
    {
        SendPublish(r);
    }
    else
    {
        if (r->Link.lFlags & RTMP_LF_PLST)
            SendPlaylist(r);
        SendPlay(r);
        RTMP_SendCtrl(r, 3, r->m_stream_id, r->m_nBufferMS);
    }
}
The debug output of an rtmpdump pull session shows this call sequence very clearly (log output not reproduced here).
2.5 RTMP_Read
Reads data at the FLV layer. It mainly calls Read_1_Packet, which pulls RTMPPacket data from the network; that data carries no FLV file header, so RTMP_Read prepends one: FLV header plus the first PreviousTagSize, 13 bytes in total.
static const char flvHeader[] = { 'F', 'L', 'V', 0x01,
    0x00,                       /* 0x04 == audio, 0x01 == video */
    0x00, 0x00, 0x00, 0x09,
    0x00, 0x00, 0x00, 0x00
};
Read_1_Packet
Mainly calls RTMP_ReadPacket and RTMP_ClientPacket(): the former reads data from the network, the latter processes it. The RTMPPacket data read by RTMP_ReadPacket is bare FLV Tag Data, so Read_1_Packet prepends the Tag Header.
/* audio (0x08), video (0x09) or metadata (0x12) packets :
 * construct 11 byte header then add rtmp packet's data */
if (packet.m_packetType == RTMP_PACKET_TYPE_AUDIO
    || packet.m_packetType == RTMP_PACKET_TYPE_VIDEO
    || packet.m_packetType == RTMP_PACKET_TYPE_INFO)
{
    nTimeStamp = r->m_read.nResumeTS + packet.m_nTimeStamp;
    prevTagSize = 11 + nPacketLen;

    *ptr = packet.m_packetType;
    ptr++;
    ptr = AMF_EncodeInt24(ptr, pend, nPacketLen);

#if 0
    if (packet.m_packetType == RTMP_PACKET_TYPE_VIDEO) {
        /* H264 fix: */
        if ((packetBody[0] & 0x0f) == 7) {  /* CodecId = H264 */
            uint8_t packetType = *(packetBody+1);
            uint32_t ts = AMF_DecodeInt24(packetBody+2);    /* composition time */
            int32_t cts = (ts+0xff800000)^0xff800000;
            RTMP_Log(RTMP_LOGDEBUG, "cts : %d\n", cts);

            nTimeStamp -= cts;
            /* get rid of the composition time */
            CRTMP::EncodeInt24(packetBody+2, 0);
        }
        RTMP_Log(RTMP_LOGDEBUG, "VIDEO: nTimeStamp: 0x%08X (%d)\n", nTimeStamp, nTimeStamp);
    }
#endif

    ptr = AMF_EncodeInt24(ptr, pend, nTimeStamp);
    *ptr = (char)((nTimeStamp & 0xFF000000) >> 24);
    ptr++;

    /* stream id */
    ptr = AMF_EncodeInt24(ptr, pend, 0);
}
2.6 RTMP_Write
Parses the FLV header and Tag headers, then calls RTMP_SendPacket to send the Tag Data over the network.
RTMP_SendPacket
It packs the Tag Data into chunks and sends them out; this is essentially the inverse of RTMP_ReadPacket, so it is not analyzed further here.
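For completeness, a minimal publish-side sketch built on these calls (the URL and file name are made-up examples; FLV data read from a file is fed straight into RTMP_Write, and error handling is mostly omitted):

#include <stdio.h>
#include "rtmp.h"

/* sketch: push an FLV file to a server via RTMP_Write */
static void publish_example(void)
{
    char url[] = "rtmp://example.com/live/stream1";
    char buf[4096];
    size_t n;
    FILE *fp = fopen("input.flv", "rb");
    if (!fp)
        return;

    RTMP *r = RTMP_Alloc();
    RTMP_Init(r);
    RTMP_SetupURL(r, url);
    RTMP_EnableWrite(r);            /* mark the session as publishing */
    RTMP_Connect(r, NULL);
    RTMP_ConnectStream(r, 0);       /* leads to "createStream" + "publish" */

    while ((n = fread(buf, 1, sizeof(buf), fp)) > 0)
        RTMP_Write(r, buf, (int)n); /* parses FLV/Tag headers, chunks and sends the data */

    fclose(fp);
    RTMP_Close(r);
    RTMP_Free(r);
}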
2.7 Field Annotations
/*
 * A raw chunk.
 * c_header, c_headerSize: the chunk header bytes and their size
 * c_chunk,  c_chunkSize : the chunk data bytes and their size
 */
typedef struct RTMPChunk
{
    int c_headerSize;
    int c_chunkSize;
    char *c_chunk;
    char c_header[RTMP_MAX_HEADER_SIZE];
} RTMPChunk;

/*
 * A Message.
 * m_headerType      : chunk type, i.e. the fmt field of the basic header in the chunk header
 * m_packetType      : Message Type ID
 * m_hasAbsTimestamp : whether the timestamp is absolute (chunk type 0) or a delta (other types)
 * m_nChannel        : chunk stream ID
 * m_nTimeStamp      : timestamp
 * m_nInfoField2     : for chunk fmt 0, the last four bytes of the header, i.e. the Message Stream ID
 * m_nBodySize       : size of the message body
 * m_nBytesRead      : number of body bytes already read
 * m_chunk           : if not NULL, the caller wants the raw chunk, and this is filled in while reading the message
 * m_body            : the message body
 */
typedef struct RTMPPacket
{
    uint8_t m_headerType;
    uint8_t m_packetType;
    uint8_t m_hasAbsTimestamp;      /* timestamp absolute or relative? */
    int m_nChannel;
    uint32_t m_nTimeStamp;          /* timestamp */
    int32_t m_nInfoField2;          /* last 4 bytes in a long header */
    uint32_t m_nBodySize;
    uint32_t m_nBytesRead;
    RTMPChunk *m_chunk;
    char *m_body;
} RTMPPacket;

/*
 * RTMPSockBuf: the transport-layer socket and its read buffer.
 * sb_socket   : the socket
 * sb_size     : number of unprocessed bytes in the buffer
 * sb_start    : pointer to the next unprocessed byte in the buffer
 * sb_buf      : the read buffer
 * sb_timedout : whether the socket has timed out
 * sb_ssl      : SSL-related data
 */
typedef struct RTMPSockBuf
{
    int sb_socket;
    int sb_size;            /* number of unprocessed bytes in buffer */
    char *sb_start;         /* pointer into sb_pBuffer of next byte to process */
    char sb_buf[RTMP_BUFFER_CACHE_SIZE];    /* data read from socket */
    int sb_timedout;
    void *sb_ssl;
} RTMPSockBuf;

/*
 * RTMP connection parameters: everything needed to establish an RTMP connection.
 * These are supplied by the client user; their meanings map closely onto rtmpdump's
 * command-line options, which can be consulted to understand them.
 */
typedef struct RTMP_LNK
{
    AVal hostname;          // host name of the server to connect to
    AVal sockshost;         // SOCKS proxy host name
    AVal playpath0;         /* parsed from URL */
    AVal playpath;          /* passed in explicitly */
    AVal tcUrl;             // URL of the target stream; defaults to rtmp[e]://host[:port]/app/playpath, assembled from the parsed fields
    AVal swfUrl;            // URL of the SWF player for the media; empty by default
    AVal pageUrl;           // URL of the web page embedding the media; empty by default
    AVal app;               // app on the server to connect to
    AVal auth;
    AVal flashVer;          // version of the Flash plugin running the SWF player; defaults to "LNX 10,0,32,18"
    AVal subscribepath;     // name of the stream to subscribe to
    AVal usherToken;
    AVal token;             // key used for the SecureToken response, when the server requires SecureToken authentication
    AVal pubUser;
    AVal pubPasswd;
    AMFObject extras;
    int edepth;

    int seekTime;
    int stopTime;

#define RTMP_LF_AUTH	0x0001	/* using auth param */
#define RTMP_LF_LIVE 0x0002 /* stream is live */
#define RTMP_LF_SWFV 0x0004 /* do SWF verification */
#define RTMP_LF_PLST 0x0008 /* send playlist before play */
#define RTMP_LF_BUFX 0x0010 /* toggle stream on BufferEmpty msg */
#define RTMP_LF_FTCU 0x0020 /* free tcUrl on close */
#define RTMP_LF_FAPU	0x0040	/* free app on close */
    int lFlags;

    int swfAge;

    int protocol;               // RTMP protocol variant of the server
    int timeout;                /* connection timeout in seconds */

    int pFlags;                 /* unused, but kept to avoid breaking ABI */

    unsigned short socksport;   // SOCKS proxy port
    unsigned short port;        // server port

#ifdef CRYPTO
#define RTMP_SWF_HASHLEN	32
    void *dh;                   /* for encryption */
    void *rc4keyIn;
    void *rc4keyOut;
    uint32_t SWFSize;
    uint8_t SWFHash[RTMP_SWF_HASHLEN];
    char SWFVerificationResponse[RTMP_SWF_HASHLEN+10];
#endif
} RTMP_LNK;

// RTMP session-level state: parameters needed to operate on an established RTMP stream,
// e.g. for seek and resume.
/* state for read() wrapper */
typedef struct RTMP_READ
{
    char *buf;              // read buffer
    char *bufpos;           // pointer to the unprocessed data
    unsigned int buflen;    // size of the unprocessed data
    uint32_t timestamp;     // current timestamp of the RTMP stream
    uint8_t dataType;       // data type of the RTMP stream, i.e. whether it contains audio and/or video; 0x04 audio, 0x01 video, in FLV notation
    uint8_t flags;          // parsing flags, one or more of the following
#define RTMP_READ_HEADER	0x01    // whether the FLV header has been inserted at the start of the stream; unset by default, set once the header has been written
#define RTMP_READ_RESUME	0x02    // whether a resume is requested
#define RTMP_READ_NO_IGNORE	0x04
#define RTMP_READ_GOTKF		0x08    // whether the resume has completed
#define RTMP_READ_GOTFLVK	0x10
#define RTMP_READ_SEEKING	0x20    // whether a seek is to be performed
    int8_t status;          // current read status, i.e. the result of analyzing the stream; one of the four values below, 0 means normal
#define RTMP_READ_COMPLETE -3
#define RTMP_READ_ERROR -2
#define RTMP_READ_EOF -1
#define RTMP_READ_IGNORE	0

    /* if bResume == TRUE */
    // fields required for resume; they tell the stream where to resume from
    uint8_t initialFrameType;       // type of the frame to locate, i.e. video or audio frame
    uint32_t nResumeTS;             // timestamp of the frame to locate
    char *metaHeader;               // metadata of the stream to resume
    char *initialFrame;             // data of the frame to locate
    uint32_t nMetaHeaderSize;       // size of the metadata of the stream to resume
    uint32_t nInitialFrameSize;     // data length of the frame to locate
    uint32_t nIgnoredFrameCounter;
    uint32_t nIgnoredFlvFrameCounter;
} RTMP_READ;

typedef struct RTMP_METHOD
{
    AVal name;
    int num;
} RTMP_METHOD;

// Represents an RTMP stream and holds all of its state.
typedef struct RTMP
{
    int m_inChunkSize;      // max chunk size for receiving
    int m_outChunkSize;     // max chunk size for sending
    int m_nBWCheckCounter;  // bandwidth check counter
    int m_nBytesIn;         // total number of bytes received
    int m_nBytesInSent;     // received byte count last reported (acknowledged) to the server
    int m_nBufferMS;        // current buffer length, in milliseconds
    int m_stream_id;        /* returned in _result from createStream */    // Message Stream ID
    int m_mediaChannel;     // chunk stream ID used by the current media
    uint32_t m_mediaStamp;  // timestamp of the current media
    uint32_t m_pauseStamp;  // timestamp of the current media when paused
    int m_pausing;          // whether we are paused
    int m_nServerBW;        // window size
    int m_nClientBW;        // window size from the Set Peer Bandwidth message
    uint8_t m_nClientBW2;   // limit type from the Set Peer Bandwidth message
    uint8_t m_bPlaying;     // whether we are currently playing
    uint8_t m_bSendEncoding;
    uint8_t m_bSendCounter;

    int m_numInvokes;       // number of invokes issued over this RTMP session
    int m_numCalls;         // number of entries in m_methodCalls
    RTMP_METHOD *m_methodCalls;     /* remote method calls queue */

    int m_channelsAllocatedIn;
    int m_channelsAllocatedOut;
    RTMPPacket **m_vecChannelsIn;
    RTMPPacket **m_vecChannelsOut;
    int *m_channelTimestamp;        /* abs timestamp of last packet */

    double m_fAudioCodecs;  /* audioCodecs for the connect packet */
    double m_fVideoCodecs;  /* videoCodecs for the connect packet */
    double m_fEncoding;     /* AMF0 or AMF3 */

    double m_fDuration;     /* duration of stream in seconds */

    int m_msgCounter;       /* RTMPT stuff */
    int m_polling;
    int m_resplen;
    int m_unackd;
    AVal m_clientID;

    RTMP_READ m_read;       // context for RTMP_Read()
    RTMPPacket m_write;     // reusable packet used by RTMP_Write()
    RTMPSockBuf m_sb;       // context for RTMP_ReadPacket() reads
    RTMP_LNK Link;          // RTMP connection context
} RTMP;