Improve support for high-resolution stats

* This patch adds a new microsecond-resolution function call, LiGetMicroseconds(), to complement
the existing LiGetMillis(). Many variables used by stats have been updated to work at this
higher resolution and now provide better results when displaying e.g. sub-millisecond frametime stats.
To try to avoid confusion, variables that now contain microseconds have been renamed with a suffix
of 'Us', and those ending in 'Ms' contain milliseconds. I originally experimented with nanoseconds but it
felt like overkill for our needs.

Public API in Limelight.h:
uint64_t LiGetMicroseconds(void);
uint64_t LiGetMillis(void);
const RTP_AUDIO_STATS* LiGetRTPAudioStats(void);  // provides access to RTP data for the overlay stats
const RTP_VIDEO_STATS* LiGetRTPVideoStats(void);

Note: Users of this library may need to make changes. If using LiGetMillis() to track the duration of
something that is shown to the user, consider switching to LiGetMicroseconds(). Remember to divide by
1000 at time of display to show in milliseconds.
This commit is contained in:
Andy Grundman 2024-09-12 03:24:29 -04:00 committed by Cameron Gutman
commit 82ee2d6590
15 changed files with 354 additions and 108 deletions

View file

@ -17,9 +17,9 @@ static bool decodingFrame;
static int frameType;
static uint16_t lastPacketPayloadLength;
static bool strictIdrFrameWait;
static uint64_t syntheticPtsBase;
static uint64_t syntheticPtsBaseUs;
static uint16_t frameHostProcessingLatency;
static uint64_t firstPacketReceiveTime;
static uint64_t firstPacketReceiveTimeUs;
static unsigned int firstPacketPresentationTime;
static bool dropStatePending;
static bool idrFrameProcessed;
@ -68,9 +68,9 @@ void initializeVideoDepacketizer(int pktSize) {
waitingForRefInvalFrame = false;
lastPacketInStream = UINT32_MAX;
decodingFrame = false;
syntheticPtsBase = 0;
syntheticPtsBaseUs = 0;
frameHostProcessingLatency = 0;
firstPacketReceiveTime = 0;
firstPacketReceiveTimeUs = 0;
firstPacketPresentationTime = 0;
lastPacketPayloadLength = 0;
dropStatePending = false;
@ -483,9 +483,9 @@ static void reassembleFrame(int frameNumber) {
qdu->decodeUnit.frameType = frameType;
qdu->decodeUnit.frameNumber = frameNumber;
qdu->decodeUnit.frameHostProcessingLatency = frameHostProcessingLatency;
qdu->decodeUnit.receiveTimeMs = firstPacketReceiveTime;
qdu->decodeUnit.receiveTimeUs = firstPacketReceiveTimeUs;
qdu->decodeUnit.presentationTimeMs = firstPacketPresentationTime;
qdu->decodeUnit.enqueueTimeMs = LiGetMillis();
qdu->decodeUnit.enqueueTimeUs = PltGetMicroseconds();
// These might be wrong for a few frames during a transition between SDR and HDR,
// but the effects shouldn't be very noticeable since that's an infrequent operation.
@ -714,16 +714,16 @@ static void processAvcHevcRtpPayloadSlow(PBUFFER_DESC currentPos, PLENTRY_INTERN
// Force a decoder resync: discard all pending decode units, schedule a
// depacketizer state reset, and ask the host to send a fresh IDR frame.
// NOTE(review): appears intended to be callable while the receive thread
// is running — state is reset lazily via dropStatePending rather than
// here — confirm against callers.
void requestDecoderRefresh(void) {
// Wait for the next IDR frame
waitingForIdrFrame = true;
// Flush the decode unit queue
freeDecodeUnitList(LbqFlushQueueItems(&decodeUnitQueue));
// Request the receive thread drop its state
// on the next call. We can't do it here because
// it may be trying to queue DUs and we'll nuke
// the state out from under it.
dropStatePending = true;
// Request the IDR frame
LiRequestIdrFrame();
}
@ -740,7 +740,7 @@ static bool isFirstPacket(uint8_t flags, uint8_t fecBlockNumber) {
// Process an RTP Payload
// The caller will free *existingEntry unless we NULL it
static void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length,
uint64_t receiveTimeMs, unsigned int presentationTimeMs,
uint64_t receiveTimeUs, uint64_t presentationTimeMs,
PLENTRY_INTERNAL* existingEntry) {
BUFFER_DESC currentPos;
uint32_t frameIndex;
@ -768,7 +768,7 @@ static void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length,
LC_ASSERT_VT((flags & ~(FLAG_SOF | FLAG_EOF | FLAG_CONTAINS_PIC_DATA)) == 0);
streamPacketIndex = videoPacket->streamPacketIndex;
// Drop packets from a previously corrupt frame
if (isBefore32(frameIndex, nextFrameNumber)) {
return;
@ -791,10 +791,10 @@ static void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length,
}
return;
}
// Verify that we didn't receive an incomplete frame
LC_ASSERT(firstPacket ^ decodingFrame);
// Check sequencing of this frame to ensure we didn't
// miss one in between
if (firstPacket) {
@ -823,19 +823,19 @@ static void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length,
// We're now decoding a frame
decodingFrame = true;
frameType = FRAME_TYPE_PFRAME;
firstPacketReceiveTime = receiveTimeMs;
firstPacketReceiveTimeUs = receiveTimeUs;
// Some versions of Sunshine don't send a valid PTS, so we will
// synthesize one using the receive time as the time base.
if (!syntheticPtsBase) {
syntheticPtsBase = receiveTimeMs;
if (!syntheticPtsBaseUs) {
syntheticPtsBaseUs = receiveTimeUs;
}
if (!presentationTimeMs && frameIndex > 0) {
firstPacketPresentationTime = (unsigned int)(receiveTimeMs - syntheticPtsBase);
firstPacketPresentationTime = (unsigned int)((receiveTimeUs - syntheticPtsBaseUs) / 1000);
}
else {
firstPacketPresentationTime = presentationTimeMs;
firstPacketPresentationTime = (unsigned int)presentationTimeMs;
}
}
@ -1154,7 +1154,7 @@ void queueRtpPacket(PRTPV_QUEUE_ENTRY queueEntryPtr) {
RTPV_QUEUE_ENTRY queueEntry = *queueEntryPtr;
LC_ASSERT(!queueEntry.isParity);
LC_ASSERT(queueEntry.receiveTimeMs != 0);
LC_ASSERT(queueEntry.receiveTimeUs != 0);
dataOffset = sizeof(*queueEntry.packet);
if (queueEntry.packet->header & FLAG_EXTENSION) {
@ -1173,7 +1173,7 @@ void queueRtpPacket(PRTPV_QUEUE_ENTRY queueEntryPtr) {
processRtpPayload((PNV_VIDEO_PACKET)(((char*)queueEntry.packet) + dataOffset),
queueEntry.length - dataOffset,
queueEntry.receiveTimeMs,
queueEntry.receiveTimeUs,
queueEntry.presentationTimeMs,
&existingEntry);