author     Paweł Redman <pawel.redman@gmail.com>  2017-03-22 17:56:34 +0100
committer  Paweł Redman <pawel.redman@gmail.com>  2017-03-22 17:56:34 +0100
commit     6a777afc079c2a8d3af3ecd2145fe8dd50567a39 (HEAD, master)
tree       520f4489cebf8564ef6cb27064ceea45cbc005b3 /src/server/sv_snapshot.c

Funko sources as released by Rotacak.
Diffstat (limited to 'src/server/sv_snapshot.c')
-rw-r--r-- src/server/sv_snapshot.c | 693
 1 file changed, 693 insertions(+), 0 deletions(-)
diff --git a/src/server/sv_snapshot.c b/src/server/sv_snapshot.c
new file mode 100644
index 0000000..47471ba
--- /dev/null
+++ b/src/server/sv_snapshot.c
@@ -0,0 +1,693 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+Copyright (C) 2000-2006 Tim Angus
+
+This file is part of Tremulous.
+
+Tremulous is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Tremulous is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Tremulous; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+
+#include "server.h"
+
+
+/*
+=============================================================================
+
+Delta encode a client frame onto the network channel
+
+A normal server packet will look like:
+
+4 sequence number (high bit set if an oversize fragment)
+4 last client reliable command
+<optional reliable commands>
+1 svc_snapshot
+4 serverTime
+1 lastframe for delta compression
+1 snapFlags
+1 areaBytes
+<areabytes>
+<playerstate>
+<packetentities>
+
+=============================================================================
+*/
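+
+/*
+A rough client-side sketch (not part of this file) of reading the snapshot
+portion of the layout above. MSG_ReadByte, MSG_ReadLong and MSG_ReadData
+follow the engine's msg.c conventions; their use here is illustrative:
+
+ serverTime = MSG_ReadLong( msg ); // 4 serverTime
+ lastframe = MSG_ReadByte( msg ); // 1 delta base, 0 = uncompressed
+ snapFlags = MSG_ReadByte( msg ); // 1 snapFlags
+ areabytes = MSG_ReadByte( msg ); // 1 areaBytes
+ MSG_ReadData( msg, areabits, areabytes ); // <areabytes>
+ // the delta'd playerstate and packet entities follow
+*/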
+
+/*
+=============
+SV_EmitPacketEntities
+
+Writes a delta update of an entityState_t list to the message.
+=============
+*/
+static void SV_EmitPacketEntities( clientSnapshot_t *from, clientSnapshot_t *to, msg_t *msg ) {
+ entityState_t *oldent, *newent;
+ int oldindex, newindex;
+ int oldnum, newnum;
+ int from_num_entities;
+
+ // generate the delta update
+ if ( !from ) {
+ from_num_entities = 0;
+ } else {
+ from_num_entities = from->num_entities;
+ }
+
+ newent = NULL;
+ oldent = NULL;
+ newindex = 0;
+ oldindex = 0;
+ while ( newindex < to->num_entities || oldindex < from_num_entities ) {
+ if ( newindex >= to->num_entities ) {
+ newnum = 9999;
+ } else {
+ newent = &svs.snapshotEntities[(to->first_entity+newindex) % svs.numSnapshotEntities];
+ newnum = newent->number;
+ }
+
+ if ( oldindex >= from_num_entities ) {
+ oldnum = 9999;
+ } else {
+ oldent = &svs.snapshotEntities[(from->first_entity+oldindex) % svs.numSnapshotEntities];
+ oldnum = oldent->number;
+ }
+
+ if ( newnum == oldnum ) {
+ // delta update from old position
+ // because the force parm is qfalse, this will not result
+ // in any bytes being emitted if the entity has not changed at all
+ MSG_WriteDeltaEntity (msg, oldent, newent, qfalse );
+ oldindex++;
+ newindex++;
+ continue;
+ }
+
+ if ( newnum < oldnum ) {
+ // this is a new entity, send it from the baseline
+ MSG_WriteDeltaEntity (msg, &sv.svEntities[newnum].baseline, newent, qtrue );
+ newindex++;
+ continue;
+ }
+
+ if ( newnum > oldnum ) {
+ // the old entity isn't present in the new message
+ MSG_WriteDeltaEntity (msg, oldent, NULL, qtrue );
+ oldindex++;
+ continue;
+ }
+ }
+
+ MSG_WriteBits( msg, (MAX_GENTITIES-1), GENTITYNUM_BITS ); // end of packetentities
+}
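+
+/*
+The loop above is a two-way merge over entity lists sorted by entity
+number, with 9999 as an end-of-list sentinel. A minimal standalone sketch
+of the same control flow, with emitChanged/emitNew/emitRemoved standing in
+for the three MSG_WriteDeltaEntity calls:
+
+ int i = 0, j = 0;
+ while ( i < numNew || j < numOld ) {
+ int nn = ( i < numNew ) ? newList[i] : 9999;
+ int on = ( j < numOld ) ? oldList[j] : 9999;
+ if ( nn == on ) { emitChanged( nn ); i++; j++; } // in both: delta from old
+ else if ( nn < on ) { emitNew( nn ); i++; } // only in new: from baseline
+ else { emitRemoved( on ); j++; } // only in old: remove
+ }
+*/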
+
+
+
+/*
+==================
+SV_WriteSnapshotToClient
+==================
+*/
+static void SV_WriteSnapshotToClient( client_t *client, msg_t *msg ) {
+ clientSnapshot_t *frame, *oldframe;
+ int lastframe;
+ int i;
+ int snapFlags;
+
+ // this is the snapshot we are creating
+ frame = &client->frames[ client->netchan.outgoingSequence & PACKET_MASK ];
+
+ // try to use a previous frame as the source for delta compressing the snapshot
+ if ( client->deltaMessage <= 0 || client->state != CS_ACTIVE ) {
+ // client is asking for a retransmit
+ oldframe = NULL;
+ lastframe = 0;
+ } else if ( client->netchan.outgoingSequence - client->deltaMessage
+ >= (PACKET_BACKUP - 3) ) {
+ // client hasn't gotten a good message through in a long time
+ Com_DPrintf ("%s: Delta request from out of date packet.\n", client->name);
+ oldframe = NULL;
+ lastframe = 0;
+ } else {
+ // we have a valid snapshot to delta from
+ oldframe = &client->frames[ client->deltaMessage & PACKET_MASK ];
+ lastframe = client->netchan.outgoingSequence - client->deltaMessage;
+
+ // the snapshot's entities may still have rolled off the buffer, though
+ if ( oldframe->first_entity <= svs.nextSnapshotEntities - svs.numSnapshotEntities ) {
+ Com_DPrintf ("%s: Delta request from out of date entities.\n", client->name);
+ oldframe = NULL;
+ lastframe = 0;
+ }
+ }
+
+ MSG_WriteByte (msg, svc_snapshot);
+
+ // NOTE, MRE: now sent at the start of every message from server to client
+ // let the client know which reliable clientCommands we have received
+ //MSG_WriteLong( msg, client->lastClientCommand );
+
+ // send over the current server time so the client can drift
+ // its view of time to try to match
+ if( client->oldServerTime ) {
+ // The server has not yet got an acknowledgement of the
+ // new gamestate from this client, so continue to send it
+ // a time as if the server has not restarted. Note from
+ // the client's perspective this time is strictly speaking
+ // incorrect, but since it'll be busy loading a map at
+ // the time it doesn't really matter.
+ MSG_WriteLong (msg, sv.time + client->oldServerTime);
+ } else {
+ MSG_WriteLong (msg, sv.time);
+ }
+
+ // what we are delta'ing from
+ MSG_WriteByte (msg, lastframe);
+
+ snapFlags = svs.snapFlagServerBit;
+ if ( client->rateDelayed ) {
+ snapFlags |= SNAPFLAG_RATE_DELAYED;
+ }
+ if ( client->state != CS_ACTIVE ) {
+ snapFlags |= SNAPFLAG_NOT_ACTIVE;
+ }
+
+ MSG_WriteByte (msg, snapFlags);
+
+ // send over the areabits
+ MSG_WriteByte (msg, frame->areabytes);
+ MSG_WriteData (msg, frame->areabits, frame->areabytes);
+
+ // delta encode the playerstate
+ if ( oldframe ) {
+ MSG_WriteDeltaPlayerstate( msg, &oldframe->ps, &frame->ps );
+ } else {
+ MSG_WriteDeltaPlayerstate( msg, NULL, &frame->ps );
+ }
+
+ // delta encode the entities
+ SV_EmitPacketEntities (oldframe, frame, msg);
+
+ // padding for rate debugging
+ if ( sv_padPackets->integer ) {
+ for ( i = 0 ; i < sv_padPackets->integer ; i++ ) {
+ MSG_WriteByte (msg, svc_nop);
+ }
+ }
+}
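+
+/*
+Note on the delta bookkeeping above: the byte written as "lastframe" is not
+a frame number but the offset (outgoingSequence - deltaMessage) into the
+PACKET_BACKUP ring of stored frames. With the stock PACKET_BACKUP of 32, a
+delta base 29 or more messages old (PACKET_BACKUP - 3) no longer fits and
+the client gets a full snapshot instead (oldframe = NULL, lastframe = 0).
+*/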
+
+
+/*
+==================
+SV_UpdateServerCommandsToClient
+
+(re)send all server commands the client hasn't acknowledged yet
+==================
+*/
+void SV_UpdateServerCommandsToClient( client_t *client, msg_t *msg ) {
+ int i;
+
+ // write any unacknowledged serverCommands
+ for ( i = client->reliableAcknowledge + 1 ; i <= client->reliableSequence ; i++ ) {
+ MSG_WriteByte( msg, svc_serverCommand );
+ MSG_WriteLong( msg, i );
+ MSG_WriteString( msg, client->reliableCommands[ i & (MAX_RELIABLE_COMMANDS-1) ] );
+ }
+ client->reliableSent = client->reliableSequence;
+}
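+
+/*
+The "i & (MAX_RELIABLE_COMMANDS-1)" above is the usual power-of-two ring
+index: with MAX_RELIABLE_COMMANDS at its stock value of 64, the mask is 63
+and growing sequence numbers wrap in place without a modulo, e.g.:
+
+ 65 & 63 == 1 // sequence 65 reuses buffer slot 1
+ 130 & 63 == 2 // sequence 130 reuses buffer slot 2
+*/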
+
+/*
+=============================================================================
+
+Build a client snapshot structure
+
+=============================================================================
+*/
+
+#define MAX_SNAPSHOT_ENTITIES 1024
+typedef struct {
+ int numSnapshotEntities;
+ int snapshotEntities[MAX_SNAPSHOT_ENTITIES];
+} snapshotEntityNumbers_t;
+
+/*
+=======================
+SV_QsortEntityNumbers
+=======================
+*/
+static int QDECL SV_QsortEntityNumbers( const void *a, const void *b ) {
+ const int *ea, *eb;
+
+ ea = (const int *)a;
+ eb = (const int *)b;
+
+ if ( *ea == *eb ) {
+ Com_Error( ERR_DROP, "SV_QsortEntityNumbers: duplicated entity" );
+ }
+
+ if ( *ea < *eb ) {
+ return -1;
+ }
+
+ return 1;
+}
+
+
+/*
+===============
+SV_AddEntToSnapshot
+===============
+*/
+static void SV_AddEntToSnapshot( svEntity_t *svEnt, sharedEntity_t *gEnt, snapshotEntityNumbers_t *eNums ) {
+ // if we have already added this entity to this snapshot, don't add again
+ if ( svEnt->snapshotCounter == sv.snapshotCounter ) {
+ return;
+ }
+ svEnt->snapshotCounter = sv.snapshotCounter;
+
+ // if we are full, silently discard entities
+ if ( eNums->numSnapshotEntities == MAX_SNAPSHOT_ENTITIES ) {
+ return;
+ }
+
+ eNums->snapshotEntities[ eNums->numSnapshotEntities ] = gEnt->s.number;
+ eNums->numSnapshotEntities++;
+}
+
+/*
+===============
+SV_AddEntitiesVisibleFromPoint
+===============
+*/
+static void SV_AddEntitiesVisibleFromPoint( vec3_t origin, clientSnapshot_t *frame,
+ snapshotEntityNumbers_t *eNums, qboolean portal ) {
+ int e, i;
+ sharedEntity_t *ent;
+ svEntity_t *svEnt;
+ int l;
+ int clientarea, clientcluster;
+ int leafnum;
+ int c_fullsend;
+ byte *clientpvs;
+ byte *bitvector;
+
+ // during an error shutdown message we may need to transmit
+ // the shutdown message after the server has shut down, so
+ // specifically check for it
+ if ( !sv.state ) {
+ return;
+ }
+
+ leafnum = CM_PointLeafnum (origin);
+ clientarea = CM_LeafArea (leafnum);
+ clientcluster = CM_LeafCluster (leafnum);
+
+ // calculate the visible areas
+ frame->areabytes = CM_WriteAreaBits( frame->areabits, clientarea );
+
+ clientpvs = CM_ClusterPVS (clientcluster);
+
+ c_fullsend = 0;
+
+ for ( e = 0 ; e < sv.num_entities ; e++ ) {
+ ent = SV_GentityNum(e);
+
+ // never send entities that aren't linked in
+ if ( !ent->r.linked ) {
+ continue;
+ }
+
+ if (ent->s.number != e) {
+ Com_DPrintf ("FIXING ENT->S.NUMBER!!!\n");
+ ent->s.number = e;
+ }
+
+ // entities can be flagged to explicitly not be sent to the client
+ if ( ent->r.svFlags & SVF_NOCLIENT ) {
+ continue;
+ }
+
+ // entities can be flagged to be sent to only one client
+ if ( ent->r.svFlags & SVF_SINGLECLIENT ) {
+ if ( ent->r.singleClient != frame->ps.clientNum ) {
+ continue;
+ }
+ }
+ // entities can be flagged to be sent to everyone but one client
+ if ( ent->r.svFlags & SVF_NOTSINGLECLIENT ) {
+ if ( ent->r.singleClient == frame->ps.clientNum ) {
+ continue;
+ }
+ }
+ // entities can be flagged to be sent to a given mask of clients
+ if ( ent->r.svFlags & SVF_CLIENTMASK ) {
+ if (frame->ps.clientNum >= 32)
+ Com_Error( ERR_DROP, "SVF_CLIENTMASK: clientNum >= 32\n" );
+ if (~ent->r.singleClient & (1 << frame->ps.clientNum))
+ continue;
+ }
+
+ svEnt = SV_SvEntityForGentity( ent );
+
+ // don't double add an entity through portals
+ if ( svEnt->snapshotCounter == sv.snapshotCounter ) {
+ continue;
+ }
+
+ // broadcast entities are always sent
+ if ( ent->r.svFlags & SVF_BROADCAST ) {
+ SV_AddEntToSnapshot( svEnt, ent, eNums );
+ continue;
+ }
+
+ // ignore if not touching a PV leaf
+ // check area
+ if ( !CM_AreasConnected( clientarea, svEnt->areanum ) ) {
+ // doors can legally straddle two areas, so
+ // we may need to check another one
+ if ( !CM_AreasConnected( clientarea, svEnt->areanum2 ) ) {
+ continue; // blocked by a door
+ }
+ }
+
+ bitvector = clientpvs;
+
+ // check individual leafs
+ if ( !svEnt->numClusters ) {
+ continue;
+ }
+ l = 0;
+ for ( i=0 ; i < svEnt->numClusters ; i++ ) {
+ l = svEnt->clusternums[i];
+ if ( bitvector[l >> 3] & (1 << (l&7) ) ) {
+ break;
+ }
+ }
+
+ // if we haven't found it to be visible,
+ // check overflow clusters that couldn't be stored
+ if ( i == svEnt->numClusters ) {
+ if ( svEnt->lastCluster ) {
+ for ( ; l <= svEnt->lastCluster ; l++ ) {
+ if ( bitvector[l >> 3] & (1 << (l&7) ) ) {
+ break;
+ }
+ }
+ if ( l == svEnt->lastCluster ) {
+ continue; // not visible
+ }
+ } else {
+ continue;
+ }
+ }
+
+ // add it
+ SV_AddEntToSnapshot( svEnt, ent, eNums );
+
+ // if its a portal entity, add everything visible from its camera position
+ if ( ent->r.svFlags & SVF_PORTAL ) {
+ if ( ent->s.generic1 ) {
+ vec3_t dir;
+ VectorSubtract(ent->s.origin, origin, dir);
+ if ( VectorLengthSquared(dir) > (float) ent->s.generic1 * ent->s.generic1 ) {
+ continue;
+ }
+ }
+ SV_AddEntitiesVisibleFromPoint( ent->s.origin2, frame, eNums, qtrue );
+ }
+
+ }
+}
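+
+/*
+The PVS test in the loop above stores one bit per cluster: byte l >> 3,
+bit l & 7. The same check as a self-contained helper, for illustration
+only:
+
+ static qboolean SV_ClusterVisible( const byte *pvs, int cluster ) {
+ return ( pvs[cluster >> 3] & ( 1 << ( cluster & 7 ) ) ) != 0;
+ }
+
+For example, cluster 19 is byte 2 (19 >> 3), bit 3 (19 & 7).
+*/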
+
+/*
+=============
+SV_BuildClientSnapshot
+
+Decides which entities are going to be visible to the client, and
+copies off the playerstate and areabits.
+
+This properly handles multiple recursive portals, but the renderer
+currently doesn't.
+
+For viewing through another player's eyes, clent can be something other than client->gentity
+=============
+*/
+static void SV_BuildClientSnapshot( client_t *client ) {
+ vec3_t org;
+ clientSnapshot_t *frame;
+ snapshotEntityNumbers_t entityNumbers;
+ int i;
+ sharedEntity_t *ent;
+ entityState_t *state;
+ svEntity_t *svEnt;
+ sharedEntity_t *clent;
+ int clientNum;
+ playerState_t *ps;
+
+ // bump the counter used to prevent double adding
+ sv.snapshotCounter++;
+
+ // this is the frame we are creating
+ frame = &client->frames[ client->netchan.outgoingSequence & PACKET_MASK ];
+
+ // clear everything in this snapshot
+ entityNumbers.numSnapshotEntities = 0;
+ Com_Memset( frame->areabits, 0, sizeof( frame->areabits ) );
+
+ // https://zerowing.idsoftware.com/bugzilla/show_bug.cgi?id=62
+ frame->num_entities = 0;
+
+ clent = client->gentity;
+ if ( !clent || client->state == CS_ZOMBIE ) {
+ return;
+ }
+
+ // grab the current playerState_t
+ ps = SV_GameClientNum( client - svs.clients );
+ frame->ps = *ps;
+
+ // never send client's own entity, because it can
+ // be regenerated from the playerstate
+ clientNum = frame->ps.clientNum;
+ if ( clientNum < 0 || clientNum >= MAX_GENTITIES ) {
+ Com_Error( ERR_DROP, "SV_BuildClientSnapshot: bad clientNum %i", clientNum );
+ }
+ svEnt = &sv.svEntities[ clientNum ];
+
+ svEnt->snapshotCounter = sv.snapshotCounter;
+
+ // find the client's viewpoint
+ VectorCopy( ps->origin, org );
+ org[2] += ps->viewheight;
+
+ // add all the entities directly visible to the eye, which
+ // may include portal entities that merge other viewpoints
+ SV_AddEntitiesVisibleFromPoint( org, frame, &entityNumbers, qfalse );
+
+ // if there were portals visible, there may be out of order entities
+ // in the list which will need to be resorted for the delta compression
+ // to work correctly. This also catches the error condition
+ // of an entity being included twice.
+ qsort( entityNumbers.snapshotEntities, entityNumbers.numSnapshotEntities,
+ sizeof( entityNumbers.snapshotEntities[0] ), SV_QsortEntityNumbers );
+
+ // now that all viewpoint's areabits have been OR'd together, invert
+ // all of them to make it a mask vector, which is what the renderer wants
+ for ( i = 0 ; i < MAX_MAP_AREA_BYTES/4 ; i++ ) {
+ ((int *)frame->areabits)[i] = ((int *)frame->areabits)[i] ^ -1;
+ }
+
+ // copy the entity states out
+ frame->num_entities = 0;
+ frame->first_entity = svs.nextSnapshotEntities;
+ for ( i = 0 ; i < entityNumbers.numSnapshotEntities ; i++ ) {
+ ent = SV_GentityNum(entityNumbers.snapshotEntities[i]);
+ state = &svs.snapshotEntities[svs.nextSnapshotEntities % svs.numSnapshotEntities];
+ *state = ent->s;
+ svs.nextSnapshotEntities++;
+ // this should never hit, map should always be restarted first in SV_Frame
+ if ( svs.nextSnapshotEntities >= 0x7FFFFFFE ) {
+ Com_Error(ERR_FATAL, "svs.nextSnapshotEntities wrapped");
+ }
+ frame->num_entities++;
+ }
+}
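+
+/*
+Aging example, tying the copy-out above to the staleness check in
+SV_WriteSnapshotToClient: the ring holds svs.numSnapshotEntities slots, so
+a frame is unusable as a delta base once
+first_entity <= nextSnapshotEntities - numSnapshotEntities. E.g. with a
+hypothetical 1024-slot ring, a frame with first_entity 100 goes stale as
+soon as nextSnapshotEntities reaches 1124.
+*/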
+
+
+/*
+====================
+SV_RateMsec
+
+Return the number of msec a given size message is supposed
+to take to clear, based on the current rate
+====================
+*/
+#define HEADER_RATE_BYTES 48 // include our header, IP header, and some overhead
+static int SV_RateMsec( client_t *client, int messageSize ) {
+ int rate;
+ int rateMsec;
+
+ // individual messages will never be larger than fragment size
+ if ( messageSize > 1500 ) {
+ messageSize = 1500;
+ }
+ rate = client->rate;
+ if ( sv_maxRate->integer ) {
+ if ( sv_maxRate->integer < 1000 ) {
+ Cvar_Set( "sv_maxRate", "1000" );
+ }
+ if ( sv_maxRate->integer < rate ) {
+ rate = sv_maxRate->integer;
+ }
+ }
+ if ( sv_minRate->integer ) {
+ if ( sv_minRate->integer < 1000 )
+ Cvar_Set( "sv_minRate", "1000" );
+ if ( sv_minRate->integer > rate )
+ rate = sv_minRate->integer;
+ }
+
+ rateMsec = ( messageSize + HEADER_RATE_BYTES ) * 1000 / rate * com_timescale->value;
+
+ return rateMsec;
+}
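+
+/*
+Worked example of the formula above, assuming the common default rate of
+25000 bytes/sec and com_timescale 1.0: a 952-byte snapshot costs
+( 952 + 48 ) * 1000 / 25000 = 40 msec to clear, so the next snapshot is
+held back at least that long.
+*/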
+
+/*
+=======================
+SV_SendMessageToClient
+
+Called by SV_SendClientSnapshot and SV_SendClientGameState
+=======================
+*/
+void SV_SendMessageToClient( msg_t *msg, client_t *client ) {
+ int rateMsec;
+
+ // record information about the message
+ client->frames[client->netchan.outgoingSequence & PACKET_MASK].messageSize = msg->cursize;
+ client->frames[client->netchan.outgoingSequence & PACKET_MASK].messageSent = svs.time;
+ client->frames[client->netchan.outgoingSequence & PACKET_MASK].messageAcked = -1;
+
+ // send the datagram
+ SV_Netchan_Transmit( client, msg ); //msg->cursize, msg->data );
+
+ // set nextSnapshotTime based on rate and requested number of updates
+
+ // local clients get snapshots every server frame
+ // TTimo - https://zerowing.idsoftware.com/bugzilla/show_bug.cgi?id=491
+ // added sv_lanForceRate check
+ if ( client->netchan.remoteAddress.type == NA_LOOPBACK || (sv_lanForceRate->integer && Sys_IsLANAddress (client->netchan.remoteAddress)) ) {
+ client->nextSnapshotTime = svs.time + (1000.0 / sv_fps->integer * com_timescale->value);
+ return;
+ }
+
+ // normal rate / snapshotMsec calculation
+ rateMsec = SV_RateMsec(client, msg->cursize);
+
+ if ( rateMsec < client->snapshotMsec * com_timescale->value) {
+ // never send more packets than this, no matter what the rate is at
+ rateMsec = client->snapshotMsec * com_timescale->value;
+ client->rateDelayed = qfalse;
+ } else {
+ client->rateDelayed = qtrue;
+ }
+
+ client->nextSnapshotTime = svs.time + rateMsec * com_timescale->value;
+
+ // don't pile up empty snapshots while connecting
+ if ( client->state != CS_ACTIVE ) {
+ // a gigantic connection message may have already put the nextSnapshotTime
+ // more than a second away, so don't shorten it
+ // do shorten if client is downloading
+ if (!*client->downloadName && client->nextSnapshotTime < svs.time + 1000 * com_timescale->value)
+ client->nextSnapshotTime = svs.time + 1000 * com_timescale->value;
+ }
+}
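+
+/*
+Example of the rate/interval branch above, assuming client->snapshotMsec
+is 50 (a client requesting 20 snaps/sec) and com_timescale 1.0: if
+SV_RateMsec returns 40, the 50 msec interval wins and rateDelayed stays
+qfalse; if it returns 80, the connection is the bottleneck and the next
+snapshot is flagged SNAPFLAG_RATE_DELAYED (see SV_WriteSnapshotToClient).
+*/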
+
+
+/*
+=======================
+SV_SendClientSnapshot
+
+Also called by SV_FinalMessage
+
+=======================
+*/
+void SV_SendClientSnapshot( client_t *client ) {
+ byte msg_buf[MAX_MSGLEN];
+ msg_t msg;
+
+ // build the snapshot
+ SV_BuildClientSnapshot( client );
+
+ MSG_Init (&msg, msg_buf, sizeof(msg_buf));
+ msg.allowoverflow = qtrue;
+
+ // NOTE, MRE: all server->client messages now acknowledge
+ // let the client know which reliable clientCommands we have received
+ MSG_WriteLong( &msg, client->lastClientCommand );
+
+ // (re)send any reliable server commands
+ SV_UpdateServerCommandsToClient( client, &msg );
+
+ // send over all the relevant entityState_t
+ // and the playerState_t
+ SV_WriteSnapshotToClient( client, &msg );
+
+ // Add any download data if the client is downloading
+ SV_WriteDownloadToClient( client, &msg );
+
+ // check for overflow
+ if ( msg.overflowed ) {
+ Com_Printf ("WARNING: msg overflowed for %s\n", client->name);
+ MSG_Clear (&msg);
+ }
+
+ SV_SendMessageToClient( &msg, client );
+}
+
+
+/*
+=======================
+SV_SendClientMessages
+=======================
+*/
+void SV_SendClientMessages( void ) {
+ int i;
+ client_t *c;
+
+ // send a message to each connected client
+ for (i=0, c = svs.clients ; i < sv_maxclients->integer ; i++, c++) {
+ if (!c->state) {
+ continue; // not connected
+ }
+
+ if ( svs.time < c->nextSnapshotTime ) {
+ continue; // not time yet
+ }
+
+ // send additional message fragments if the last message
+ // was too large to send at once
+ if ( c->netchan.unsentFragments ) {
+ c->nextSnapshotTime = svs.time +
+ SV_RateMsec( c, c->netchan.unsentLength - c->netchan.unsentFragmentStart );
+ SV_Netchan_TransmitNextFragment( c );
+ continue;
+ }
+
+ // generate and send a new message
+ SV_SendClientSnapshot( c );
+ }
+}
+