Commit current work

This is a temporary branch because I'm missing a USB drive, huehue

parent 311e2a545d
commit 9e3341a32a

@@ -0,0 +1,121 @@
// Copyright (C) 2017 Jérôme Leclercq
|
||||
// This file is part of the "Nazara Engine - Network module"
|
||||
// For conditions of distribution and use, see copyright notice in Config.hpp
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef NAZARA_ENETHOST_HPP
|
||||
#define NAZARA_ENETHOST_HPP
|
||||
|
||||
#include <Nazara/Prerequesites.hpp>
|
||||
#include <Nazara/Core/Bitset.hpp>
|
||||
#include <Nazara/Core/Clock.hpp>
|
||||
#include <Nazara/Core/MemoryPool.hpp>
|
||||
#include <Nazara/Network/ENetPacket.hpp>
|
||||
#include <Nazara/Network/ENetProtocol.hpp>
|
||||
#include <Nazara/Network/IpAddress.hpp>
|
||||
#include <Nazara/Network/NetPacket.hpp>
|
||||
#include <Nazara/Network/SocketPoller.hpp>
|
||||
#include <Nazara/Network/UdpSocket.hpp>
|
||||
#include <array>
#include <deque>
#include <queue>
#include <random>
#include <set>
#include <unordered_map>
#include <vector>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
class ENetPeer;
|
||||
|
||||
class NAZARA_NETWORK_API ENetHost
|
||||
{
|
||||
friend ENetPeer;
|
||||
friend class Network;
|
||||
|
||||
public:
|
||||
inline ENetHost();
|
||||
ENetHost(const ENetHost&) = delete;
|
||||
ENetHost(ENetHost&&) = default;
|
||||
inline ~ENetHost();
|
||||
|
||||
void Broadcast(UInt8 channelId, ENetPacketFlags flags, NetPacket&& packet);
|
||||
|
||||
bool Connect(const IpAddress& remoteAddress, std::size_t channelCount = 0, UInt32 data = 0);
|
||||
bool Connect(const String& hostName, NetProtocol protocol = NetProtocol_Any, const String& service = "http", ResolveError* error = nullptr, std::size_t channelCount = 0, UInt32 data = 0);
|
||||
|
||||
inline bool Create(NetProtocol protocol, UInt16 port, std::size_t peerCount, std::size_t channelCount = 0);
|
||||
bool Create(const IpAddress& address, std::size_t peerCount, std::size_t channelCount = 0);
|
||||
bool Create(const IpAddress& address, std::size_t peerCount, std::size_t channelCount, UInt32 incomingBandwidth, UInt32 outgoingBandwidth);
|
||||
void Destroy();
|
||||
|
||||
void Flush();
|
||||
|
||||
int Service(ENetEvent* event, UInt32 timeout);
|
||||
|
||||
ENetHost& operator=(const ENetHost&) = delete;
|
||||
ENetHost& operator=(ENetHost&&) = default;
|
||||
|
||||
private:
|
||||
bool InitSocket(const IpAddress& address);
|
||||
|
||||
inline void AddToDispatchQueue(ENetPeer* peer);
|
||||
inline void RemoveFromDispatchQueue(ENetPeer* peer);
|
||||
|
||||
bool DispatchIncomingCommands(ENetEvent* event);
|
||||
|
||||
ENetPeer* HandleConnect(ENetProtocolHeader* header, ENetProtocol* command);
|
||||
bool HandleIncomingCommands(ENetEvent* event);
|
||||
bool HandleSendReliable(ENetPeer& peer, const ENetProtocol& command, UInt8** currentData);
|
||||
|
||||
int ReceiveIncomingCommands(ENetEvent* event);
|
||||
|
||||
void NotifyConnect(ENetPeer* peer, ENetEvent* event);
|
||||
void NotifyDisconnect(ENetPeer*, ENetEvent* event);
|
||||
|
||||
void ThrottleBandwidth();

int SendOutgoingCommands(ENetEvent* event, bool checkForTimeouts); //< Called by Flush/Service, implementation still in progress in this commit
|
||||
|
||||
static bool Initialize();
|
||||
static void Uninitialize();
|
||||
|
||||
std::array<ENetProtocol, ENetConstants::ENetProtocol_MaximumPacketCommands> m_commands;
|
||||
std::array<UInt8, ENetConstants::ENetProtocol_MaximumMTU> m_packetData[2];
|
||||
std::bernoulli_distribution m_packetLossProbability;
|
||||
std::size_t m_bandwidthLimitedPeers;
|
||||
std::size_t m_bufferCount;
|
||||
std::size_t m_channelLimit;
|
||||
std::size_t m_commandCount;
|
||||
std::size_t m_duplicatePeers;
|
||||
std::size_t m_maximumPacketSize;
|
||||
std::size_t m_maximumWaitingData;
|
||||
std::size_t m_receivedDataLength;
|
||||
std::vector<ENetPeer> m_peers;
|
||||
Bitset<UInt64> m_dispatchQueue;
|
||||
MemoryPool m_packetPool;
|
||||
IpAddress m_address;
|
||||
IpAddress m_receivedAddress;
|
||||
SocketPoller m_poller;
|
||||
UdpSocket m_socket;
|
||||
UInt32 m_bandwidthThrottleEpoch;
|
||||
UInt32 m_connectedPeers;
|
||||
UInt32 m_mtu;
|
||||
UInt32 m_randomSeed;
|
||||
UInt32 m_incomingBandwidth;
|
||||
UInt32 m_outgoingBandwidth;
|
||||
UInt32 m_serviceTime;
|
||||
UInt32 m_totalSentData;
|
||||
UInt32 m_totalSentPackets;
|
||||
UInt32 m_totalReceivedData;
|
||||
UInt32 m_totalReceivedPackets;
|
||||
UInt8* m_receivedData;
|
||||
bool m_isSimulationEnabled;
|
||||
bool m_shouldAcceptConnections;
|
||||
bool m_recalculateBandwidthLimits;
|
||||
|
||||
static std::mt19937 s_randomGenerator;
|
||||
static std::mt19937_64 s_randomGenerator64;
|
||||
};
|
||||
}
|
||||
|
||||
#include <Nazara/Network/ENetHost.inl>
|
||||
|
||||
#endif // NAZARA_ENETHOST_HPP
|
||||
|
|
@@ -0,0 +1,65 @@
|
|||
// Copyright (C) 2017 Jérôme Leclercq
|
||||
// This file is part of the "Nazara Engine - Network module"
|
||||
// For conditions of distribution and use, see copyright notice in Config.hpp
|
||||
|
||||
#include <Nazara/Network/ENetHost.hpp>
|
||||
#include <utility>
|
||||
#include <Nazara/Network/Debug.hpp>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
inline ENetHost::ENetHost() :
|
||||
m_packetPool(sizeof(ENetPacket))
|
||||
{
|
||||
}
|
||||
|
||||
inline ENetHost::~ENetHost()
|
||||
{
|
||||
Destroy();
|
||||
}
|
||||
|
||||
inline bool ENetHost::Create(NetProtocol protocol, UInt16 port, std::size_t peerCount, std::size_t channelCount)
|
||||
{
|
||||
NazaraAssert(protocol != NetProtocol_Any, "Any protocol not supported for Listen"); //< TODO
|
||||
NazaraAssert(protocol != NetProtocol_Unknown, "Invalid protocol");
|
||||
|
||||
IpAddress any;
|
||||
switch (protocol)
|
||||
{
|
||||
case NetProtocol_Any:
|
||||
case NetProtocol_Unknown:
|
||||
NazaraInternalError("Invalid protocol Any at this point");
|
||||
return false;
|
||||
|
||||
case NetProtocol_IPv4:
|
||||
any = IpAddress::AnyIpV4;
|
||||
break;
|
||||
|
||||
case NetProtocol_IPv6:
|
||||
any = IpAddress::AnyIpV6;
|
||||
break;
|
||||
}
|
||||
|
||||
any.SetPort(port);
|
||||
return Create(any, peerCount, channelCount);
|
||||
}
|
||||
|
||||
inline void ENetHost::Destroy()
|
||||
{
|
||||
m_poller.Clear();
|
||||
m_peers.clear();
|
||||
m_socket.Close();
|
||||
}
|
||||
|
||||
inline void ENetHost::AddToDispatchQueue(ENetPeer* peer)
|
||||
{
|
||||
m_dispatchQueue.UnboundedSet(peer->m_incomingPeerID);
|
||||
}
|
||||
|
||||
inline void ENetHost::RemoveFromDispatchQueue(ENetPeer* peer)
|
||||
{
|
||||
m_dispatchQueue.UnboundedReset(peer->m_incomingPeerID);
|
||||
}
|
||||
}
|
||||
|
||||
#include <Nazara/Network/DebugOff.hpp>
|
||||
|
|
@@ -0,0 +1,78 @@
|
|||
// Copyright (C) 2017 Jérôme Leclercq
|
||||
// This file is part of the "Nazara Engine - Network module"
|
||||
// For conditions of distribution and use, see copyright notice in Config.hpp
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef NAZARA_ENETPACKET_HPP
|
||||
#define NAZARA_ENETPACKET_HPP
|
||||
|
||||
#include <Nazara/Prerequesites.hpp>
|
||||
#include <Nazara/Network/NetPacket.hpp>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
enum ENetPacketFlag
|
||||
{
|
||||
ENetPacketFlag_NoAllocate,
|
||||
ENetPacketFlag_Reliable,
|
||||
ENetPacketFlag_Unsequenced,
|
||||
ENetPacketFlag_UnreliableFragment,
|
||||
ENetPacketFlag_Sent
|
||||
};
|
||||
|
||||
template<>
|
||||
struct EnumAsFlags<ENetPacketFlag>
|
||||
{
|
||||
static constexpr bool value = true;
|
||||
static constexpr int max = ENetPacketFlag_Sent;
|
||||
};
|
||||
|
||||
using ENetPacketFlags = Flags<ENetPacketFlag>;
|
||||
|
||||
class MemoryPool;
|
||||
|
||||
struct ENetPacket
|
||||
{
|
||||
MemoryPool* owner;
|
||||
ENetPacketFlags flags;
|
||||
NetPacket data;
|
||||
std::size_t referenceCount = 0;
|
||||
};
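
// ENetPacketRef is a small intrusive reference handle: taking a reference bumps
// ENetPacket::referenceCount, and the packet is returned to its owning MemoryPool
// once the count drops back to zero (see ENetPacketRef::Reset in ENetPacket.cpp).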
|
||||
|
||||
struct ENetPacketRef
|
||||
{
|
||||
ENetPacketRef() = default;
|
||||
|
||||
ENetPacketRef(ENetPacket* packet)
|
||||
{
|
||||
Reset(packet);
|
||||
}
|
||||
|
||||
~ENetPacketRef()
|
||||
{
|
||||
Reset();
|
||||
}
|
||||
|
||||
void Reset(ENetPacket* packet = nullptr);
|
||||
|
||||
operator ENetPacket*() const
|
||||
{
|
||||
return m_packet;
|
||||
}
|
||||
|
||||
ENetPacket* operator->() const
|
||||
{
|
||||
return m_packet;
|
||||
}
|
||||
|
||||
ENetPacketRef& operator=(ENetPacket* packet)
|
||||
{
|
||||
Reset(packet);

return *this;
}
|
||||
|
||||
ENetPacket* m_packet = nullptr;
|
||||
};
|
||||
}
|
||||
|
||||
#endif // NAZARA_ENETPACKET_HPP
|
||||
|
|
@@ -0,0 +1,205 @@
|
|||
// Copyright (C) 2017 Jérôme Leclercq
|
||||
// This file is part of the "Nazara Engine - Network module"
|
||||
// For conditions of distribution and use, see copyright notice in Config.hpp
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef NAZARA_ENETPEER_HPP
|
||||
#define NAZARA_ENETPEER_HPP
|
||||
|
||||
#include <Nazara/Prerequesites.hpp>
|
||||
#include <Nazara/Core/Bitset.hpp>
|
||||
#include <Nazara/Core/Clock.hpp>
|
||||
#include <Nazara/Core/MemoryPool.hpp>
|
||||
#include <Nazara/Network/ENetPacket.hpp>
|
||||
#include <Nazara/Network/ENetProtocol.hpp>
|
||||
#include <Nazara/Network/IpAddress.hpp>
|
||||
#include <Nazara/Network/NetPacket.hpp>
|
||||
#include <Nazara/Network/UdpSocket.hpp>
|
||||
#include <array>
#include <deque>
#include <list>
#include <queue>
#include <random>
#include <set>
#include <unordered_map>
#include <vector>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
class ENetHost;
|
||||
|
||||
class NAZARA_NETWORK_API ENetPeer
|
||||
{
|
||||
friend ENetHost;
|
||||
friend struct ENetPacketRef;
|
||||
|
||||
public:
|
||||
ENetPeer(const ENetPeer&) = delete;
|
||||
ENetPeer(ENetPeer&&) = default;
|
||||
~ENetPeer() = default;
|
||||
|
||||
void Disconnect(UInt32 data);
|
||||
void DisconnectLater(UInt32 data);
|
||||
void DisconnectNow(UInt32 data);
|
||||
|
||||
void Ping();
|
||||
|
||||
bool Receive(ENetPacketRef* packet, UInt8* channelId);
|
||||
void Reset();
|
||||
|
||||
bool Send(UInt8 channelId, ENetPacketRef packetRef);
|
||||
bool Send(UInt8 channelId, ENetPacketFlags flags, NetPacket&& packet);
|
||||
|
||||
void ThrottleConfigure(UInt32 interval, UInt32 acceleration, UInt32 deceleration);
|
||||
|
||||
ENetPeer& operator=(const ENetPeer&) = delete;
|
||||
ENetPeer& operator=(ENetPeer&&) = default;
|
||||
|
||||
private:
|
||||
ENetPeer(ENetHost* host, UInt16 peerId);
|
||||
|
||||
void InitIncoming(std::size_t channelCount, const IpAddress& address, ENetProtocolConnect& incomingCommand);
|
||||
void InitOutgoing(std::size_t channelCount, const IpAddress& address, UInt32 connectId, UInt32 windowSize);
|
||||
|
||||
struct Acknowledgement;
|
||||
struct Channel;
|
||||
struct IncomingCommmand;
|
||||
struct OutgoingCommand;
|
||||
|
||||
// Protocol functions
|
||||
inline void ChangeState(ENetPeerState state);
|
||||
inline void DispatchState(ENetPeerState state);
|
||||
|
||||
void DispatchIncomingReliableCommands(Channel& channel);
|
||||
void DispatchIncomingUnreliableCommands(Channel& channel);
|
||||
|
||||
void OnConnect();
|
||||
void OnDisconnect();
|
||||
|
||||
ENetProtocolCommand RemoveSentReliableCommands(UInt16 reliableSequenceNumber, UInt8 channelId);
|
||||
void RemoveSentUnreliableCommands();
|
||||
|
||||
void ResetQueues();
|
||||
|
||||
bool QueueAcknowledgement(ENetProtocol& command, UInt16 sentTime);
|
||||
IncomingCommmand* QueueIncomingCommand(ENetProtocol& command, const void* data, std::size_t dataLength, UInt32 flags, UInt32 fragmentCount);
|
||||
void QueueOutgoingCommand(ENetProtocol& command, ENetPacketRef packet, UInt32 offset, UInt16 length);
|
||||
|
||||
void SetupOutgoingCommand(OutgoingCommand& outgoingCommand);
|
||||
|
||||
int Throttle(UInt32 rtt);
|
||||
|
||||
struct Acknowledgement
|
||||
{
|
||||
ENetProtocol command;
|
||||
UInt32 sentTime;
|
||||
};
|
||||
|
||||
struct Channel
|
||||
{
|
||||
Channel()
|
||||
{
|
||||
incomingReliableSequenceNumber = 0;
|
||||
incomingUnreliableSequenceNumber = 0;
|
||||
outgoingReliableSequenceNumber = 0;
|
||||
outgoingUnreliableSequenceNumber = 0;
|
||||
usedReliableWindows = 0;
|
||||
reliableWindows.fill(0);
|
||||
}
|
||||
|
||||
std::array<UInt16, ENetPeer_ReliableWindows> reliableWindows;
|
||||
std::list<IncomingCommmand> incomingReliableCommands;
|
||||
std::list<IncomingCommmand> incomingUnreliableCommands;
|
||||
UInt16 incomingReliableSequenceNumber;
|
||||
UInt16 incomingUnreliableSequenceNumber;
|
||||
UInt16 outgoingReliableSequenceNumber;
|
||||
UInt16 outgoingUnreliableSequenceNumber;
|
||||
UInt16 usedReliableWindows;
|
||||
};
|
||||
|
||||
struct IncomingCommmand
|
||||
{
|
||||
ENetProtocol command;
|
||||
UInt16 reliableSequenceNumber;
|
||||
UInt16 unreliableSequenceNumber;
|
||||
UInt32 fragmentsRemaining;
|
||||
std::vector<UInt32> fragments;
|
||||
ENetPacketRef packet;
|
||||
};
|
||||
|
||||
struct OutgoingCommand
|
||||
{
|
||||
ENetProtocol command;
|
||||
ENetPacketRef packet;
|
||||
UInt16 fragmentLength;
|
||||
UInt16 reliableSequenceNumber;
|
||||
UInt16 sendAttempts;
|
||||
UInt16 unreliableSequenceNumber;
|
||||
UInt32 fragmentOffset;
|
||||
UInt32 roundTripTimeout;
|
||||
UInt32 roundTripTimeoutLimit;
|
||||
UInt32 sentTime;
|
||||
};
|
||||
|
||||
ENetHost* m_host;
|
||||
IpAddress m_address; /**< Internet address of the peer */
|
||||
std::vector<Channel> m_channels;
|
||||
std::list<Acknowledgement> m_acknowledgements;
|
||||
std::list<IncomingCommmand> m_dispatchedCommands;
|
||||
std::list<OutgoingCommand> m_outgoingReliableCommands;
|
||||
std::list<OutgoingCommand> m_outgoingUnreliableCommands;
|
||||
std::list<OutgoingCommand> m_sentReliableCommands;
|
||||
std::list<OutgoingCommand> m_sentUnreliableCommands;
|
||||
MemoryPool m_packetPool;
|
||||
//ENetListNode m_dispatchList;
|
||||
ENetPeerState m_state;
|
||||
UInt8 m_incomingSessionID;
|
||||
UInt8 m_outgoingSessionID;
|
||||
UInt16 m_incomingPeerID;
|
||||
UInt16 m_incomingUnsequencedGroup;
|
||||
UInt16 m_outgoingPeerID;
|
||||
UInt16 m_outgoingReliableSequenceNumber;
|
||||
UInt16 m_outgoingUnsequencedGroup;
|
||||
UInt32 m_connectID;
|
||||
UInt32 m_earliestTimeout;
|
||||
UInt32 m_eventData;
|
||||
UInt32 m_highestRoundTripTimeVariance;
|
||||
UInt32 m_incomingBandwidth; /**< Downstream bandwidth of the client in bytes/second */
|
||||
UInt32 m_incomingBandwidthThrottleEpoch;
|
||||
UInt32 m_incomingDataTotal;
|
||||
UInt32 m_lastReceiveTime;
|
||||
UInt32 m_lastRoundTripTime;
|
||||
UInt32 m_lastRoundTripTimeVariance;
|
||||
UInt32 m_lastSendTime;
|
||||
UInt32 m_lowestRoundTripTime;
|
||||
UInt32 m_mtu;
|
||||
UInt32 m_nextTimeout;
|
||||
UInt32 m_outgoingBandwidth; /**< Upstream bandwidth of the client in bytes/second */
|
||||
UInt32 m_outgoingBandwidthThrottleEpoch;
|
||||
UInt32 m_outgoingDataTotal;
|
||||
UInt32 m_packetLoss; /**< mean packet loss of reliable packets as a ratio with respect to the constant ENET_PEER_PACKET_LOSS_SCALE */
|
||||
UInt32 m_packetLossEpoch;
|
||||
UInt32 m_packetLossVariance;
|
||||
UInt32 m_packetThrottle;
|
||||
UInt32 m_packetThrottleAcceleration;
|
||||
UInt32 m_packetThrottleCounter;
|
||||
UInt32 m_packetThrottleDeceleration;
|
||||
UInt32 m_packetThrottleEpoch;
|
||||
UInt32 m_packetThrottleInterval;
|
||||
UInt32 m_packetThrottleLimit;
|
||||
UInt32 m_packetsLost;
|
||||
UInt32 m_packetsSent;
|
||||
UInt32 m_pingInterval;
|
||||
UInt32 m_reliableDataInTransit;
|
||||
UInt32 m_roundTripTime; /**< mean round trip time (RTT), in milliseconds, between sending a reliable packet and receiving its acknowledgment */
|
||||
UInt32 m_roundTripTimeVariance;
|
||||
UInt32 m_timeoutLimit;
|
||||
UInt32 m_timeoutMaximum;
|
||||
UInt32 m_timeoutMinimum;
|
||||
UInt32 m_unsequencedWindow[ENetPeer_ReliableWindowSize / 32];
|
||||
UInt32 m_windowSize;
|
||||
std::size_t m_totalWaitingData;
|
||||
};
|
||||
}
|
||||
|
||||
#include <Nazara/Network/ENetPeer.inl>
|
||||
|
||||
#endif // NAZARA_ENETPEER_HPP
|
||||
|
|
@@ -0,0 +1,29 @@
|
|||
// Copyright (C) 2017 Jérôme Leclercq
|
||||
// This file is part of the "Nazara Engine - Network module"
|
||||
// For conditions of distribution and use, see copyright notice in Config.hpp
|
||||
|
||||
#include <Nazara/Network/ENetPeer.hpp>
|
||||
#include <utility>
|
||||
#include <Nazara/Network/Debug.hpp>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
inline void ENetPeer::ChangeState(ENetPeerState state)
|
||||
{
|
||||
if (state == ENetPeerState::Connected || state == ENetPeerState::DisconnectLater)
|
||||
OnConnect();
|
||||
else
|
||||
OnDisconnect();
|
||||
|
||||
m_state = state;
|
||||
}
|
||||
|
||||
inline void ENetPeer::DispatchState(ENetPeerState state)
|
||||
{
|
||||
ChangeState(state);
|
||||
|
||||
m_host->AddToDispatchQueue(this);
|
||||
}
|
||||
}
|
||||
|
||||
#include <Nazara/Network/DebugOff.hpp>
|
||||
|
|
@@ -0,0 +1,272 @@
|
|||
// Copyright (C) 2017 Jérôme Leclercq
|
||||
// This file is part of the "Nazara Engine - Network module"
|
||||
// For conditions of distribution and use, see copyright notice in Config.hpp
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef NAZARA_ENETPROTOCOL_HPP
|
||||
#define NAZARA_ENETPROTOCOL_HPP
|
||||
|
||||
#include <Nazara/Prerequesites.hpp>
|
||||
#include <Nazara/Network/ENetPacket.hpp>
|
||||
#include <vector>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
// Constants for the ENet implementation and protocol
|
||||
enum ENetConstants
|
||||
{
|
||||
ENetHost_BandwidthThrottleInterval = 1000,
|
||||
ENetHost_DefaultMaximumPacketSize = 32 * 1024 * 1024,
|
||||
ENetHost_DefaultMaximumWaitingData = 32 * 1024 * 1024,
|
||||
ENetHost_DefaultMTU = 1400,
|
||||
ENetHost_ReceiveBufferSize = 256 * 1024,
|
||||
ENetHost_SendBufferSize = 256 * 1024,
|
||||
|
||||
ENetPeer_DefaultPacketThrottle = 32,
|
||||
ENetPeer_DefaultRoundTripTime = 500,
|
||||
ENetPeer_FreeReliableWindows = 8,
|
||||
ENetPeer_FreeUnsequencedWindows = 32,
|
||||
ENetPeer_PacketLossInterval = 10000,
|
||||
ENetPeer_PacketLossScale = (1 << 16),
|
||||
ENetPeer_PacketThrottleAcceleration = 2,
|
||||
ENetPeer_PacketThrottleCounter = 7,
|
||||
ENetPeer_PacketThrottleDeceleration = 2,
|
||||
ENetPeer_PacketThrottleInterval = 5000,
|
||||
ENetPeer_PacketThrottleScale = 32,
|
||||
ENetPeer_PingInterval = 500,
|
||||
ENetPeer_ReliableWindows = 16,
|
||||
ENetPeer_ReliableWindowSize = 0x1000,
|
||||
ENetPeer_TimeoutLimit = 32,
|
||||
ENetPeer_TimeoutMaximum = 30000,
|
||||
ENetPeer_TimeoutMinimum = 5000,
|
||||
ENetPeer_UnsequencedWindows = 64,
|
||||
ENetPeer_UnsequencedWindowSize = 1024,
|
||||
ENetPeer_WindowSizeScale = 64 * 1024,
|
||||
|
||||
ENetProtocol_MaximumChannelCount = 255,
|
||||
ENetProtocol_MaximumFragmentCount = 1024 * 1024,
|
||||
ENetProtocol_MaximumMTU = 4096,
|
||||
ENetProtocol_MaximumPacketCommands = 32,
|
||||
ENetProtocol_MaximumPeerId = 0xFFF,
|
||||
ENetProtocol_MaximumWindowSize = 65536,
|
||||
ENetProtocol_MinimumChannelCount = 1,
|
||||
ENetProtocol_MinimumMTU = 576,
|
||||
ENetProtocol_MinimumWindowSize = 4096
|
||||
};
|
||||
|
||||
enum class ENetPeerState
|
||||
{
|
||||
AcknowledgingConnect = 2,
|
||||
AcknowledgingDisconnect = 8,
|
||||
Connecting = 1,
|
||||
ConnectionPending = 3,
|
||||
ConnectionSucceeded = 4,
|
||||
Connected = 5,
|
||||
Disconnected = 0,
|
||||
Disconnecting = 7,
|
||||
DisconnectLater = 6,
|
||||
Zombie = 9
|
||||
};
|
||||
|
||||
enum ENetProtocolCommand
|
||||
{
|
||||
// Keeping the values is important for compatibility with the native ENet protocol
|
||||
ENetProtocolCommand_Acknowledge = 1,
|
||||
ENetProtocolCommand_BandwidthLimit = 10,
|
||||
ENetProtocolCommand_Connect = 2,
|
||||
ENetProtocolCommand_Disconnect = 4,
|
||||
ENetProtocolCommand_None = 0,
|
||||
ENetProtocolCommand_Ping = 5,
|
||||
ENetProtocolCommand_SendFragment = 8,
|
||||
ENetProtocolCommand_SendReliable = 6,
|
||||
ENetProtocolCommand_SendUnreliable = 7,
|
||||
ENetProtocolCommand_SendUnreliableFragment = 12,
|
||||
ENetProtocolCommand_SendUnsequenced = 9,
|
||||
ENetProtocolCommand_ThrottleConfigure = 11,
|
||||
ENetProtocolCommand_VerifyConnect = 3,
|
||||
ENetProtocolCommand_Count = 13,
|
||||
|
||||
ENetProtocolCommand_Mask = 0x0F
|
||||
};
|
||||
|
||||
enum ENetProtocolFlag
|
||||
{
|
||||
ENetProtocolFlag_Acknowledge = (1 << 7),
|
||||
ENetProtocolFlag_Unsequenced = (1 << 6),
|
||||
|
||||
ENetProtocolHeaderFlag_Compressed = (1 << 14),
|
||||
ENetProtocolHeaderFlag_SentTime = (1 << 15),
|
||||
ENetProtocolHeaderFlag_Mask = ENetProtocolHeaderFlag_Compressed | ENetProtocolHeaderFlag_SentTime,
|
||||
|
||||
ENetProtocolHeaderSessionMask = (3 << 12),
|
||||
ENetProtocolHeaderSessionShift = 12
|
||||
};
|
||||
|
||||
enum class ENetEventType
|
||||
{
|
||||
/** no event occurred within the specified time limit */
|
||||
None,
|
||||
|
||||
/** a connection request initiated by enet_host_connect has completed.
|
||||
* The peer field contains the peer which successfully connected.
|
||||
*/
|
||||
Connect,
|
||||
|
||||
/** a peer has disconnected. This event is generated on a successful
|
||||
* completion of a disconnect initiated by enet_peer_disconnect, if
|
||||
* a peer has timed out, or if a connection request initialized by
|
||||
* enet_host_connect has timed out. The peer field contains the peer
|
||||
* which disconnected. The data field contains user supplied data
|
||||
* describing the disconnection, or 0, if none is available.
|
||||
*/
|
||||
Disconnect,
|
||||
|
||||
/** a packet has been received from a peer. The peer field specifies the
|
||||
* peer which sent the packet. The channelID field specifies the channel
|
||||
* number upon which the packet was received. The packet field contains
|
||||
* the packet that was received; this packet must be destroyed with
|
||||
* enet_packet_destroy after use.
|
||||
*/
|
||||
Receive
|
||||
};
|
||||
|
||||
struct ENetEvent
|
||||
{
|
||||
ENetEventType type;
|
||||
ENetPeer* peer;
|
||||
UInt8 channelId;
|
||||
UInt32 data;
|
||||
ENetPacketRef packet;
|
||||
};
|
||||
|
||||
struct ENetProtocolHeader
|
||||
{
|
||||
UInt16 peerID;
|
||||
UInt16 sentTime;
|
||||
};
|
||||
|
||||
struct ENetProtocolCommandHeader
|
||||
{
|
||||
UInt8 command;
|
||||
UInt8 channelID;
|
||||
UInt16 reliableSequenceNumber;
|
||||
};
|
||||
|
||||
struct ENetProtocolAcknowledge
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt16 receivedReliableSequenceNumber;
|
||||
UInt16 receivedSentTime;
|
||||
};
|
||||
|
||||
struct ENetProtocolConnect
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt16 outgoingPeerID;
|
||||
UInt8 incomingSessionID;
|
||||
UInt8 outgoingSessionID;
|
||||
UInt32 mtu;
|
||||
UInt32 windowSize;
|
||||
UInt32 channelCount;
|
||||
UInt32 incomingBandwidth;
|
||||
UInt32 outgoingBandwidth;
|
||||
UInt32 packetThrottleInterval;
|
||||
UInt32 packetThrottleAcceleration;
|
||||
UInt32 packetThrottleDeceleration;
|
||||
UInt32 connectID;
|
||||
UInt32 data;
|
||||
};
|
||||
|
||||
struct ENetProtocolBandwidthLimit
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt32 incomingBandwidth;
|
||||
UInt32 outgoingBandwidth;
|
||||
};
|
||||
|
||||
struct ENetProtocolDisconnect
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt32 data;
|
||||
};
|
||||
|
||||
struct ENetProtocolPing
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
};
|
||||
|
||||
struct ENetProtocolSendFragment
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt16 startSequenceNumber;
|
||||
UInt16 dataLength;
|
||||
UInt32 fragmentCount;
|
||||
UInt32 fragmentNumber;
|
||||
UInt32 totalLength;
|
||||
UInt32 fragmentOffset;
|
||||
};
|
||||
|
||||
struct ENetProtocolSendReliable
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt16 dataLength;
|
||||
};
|
||||
|
||||
struct ENetProtocolSendUnreliable
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt16 unreliableSequenceNumber;
|
||||
UInt16 dataLength;
|
||||
};
|
||||
|
||||
struct ENetProtocolSendUnsequenced
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt16 unsequencedGroup;
|
||||
UInt16 dataLength;
|
||||
};
|
||||
|
||||
struct ENetProtocolThrottleConfigure
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt32 packetThrottleInterval;
|
||||
UInt32 packetThrottleAcceleration;
|
||||
UInt32 packetThrottleDeceleration;
|
||||
};
|
||||
|
||||
struct ENetProtocolVerifyConnect
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
UInt16 outgoingPeerID;
|
||||
UInt8 incomingSessionID;
|
||||
UInt8 outgoingSessionID;
|
||||
UInt32 mtu;
|
||||
UInt32 windowSize;
|
||||
UInt32 channelCount;
|
||||
UInt32 incomingBandwidth;
|
||||
UInt32 outgoingBandwidth;
|
||||
UInt32 packetThrottleInterval;
|
||||
UInt32 packetThrottleAcceleration;
|
||||
UInt32 packetThrottleDeceleration;
|
||||
UInt32 connectID;
|
||||
};
|
||||
|
||||
union ENetProtocol
|
||||
{
|
||||
ENetProtocolCommandHeader header;
|
||||
ENetProtocolAcknowledge acknowledge;
|
||||
ENetProtocolBandwidthLimit bandwidthLimit;
|
||||
ENetProtocolConnect connect;
|
||||
ENetProtocolDisconnect disconnect;
|
||||
ENetProtocolPing ping;
|
||||
ENetProtocolSendReliable sendReliable;
|
||||
ENetProtocolSendUnreliable sendUnreliable;
|
||||
ENetProtocolSendUnsequenced sendUnsequenced;
|
||||
ENetProtocolSendFragment sendFragment;
|
||||
ENetProtocolThrottleConfigure throttleConfigure;
|
||||
ENetProtocolVerifyConnect verifyConnect;
|
||||
};
|
||||
}
|
||||
|
||||
#endif // NAZARA_ENETPROTOCOL_HPP
|
||||
|
|
@@ -0,0 +1,743 @@
|
|||
#include <Nazara/Network/ENetHost.hpp>
|
||||
#include <Nazara/Core/Clock.hpp>
|
||||
#include <Nazara/Core/Endianness.hpp>
|
||||
#include <Nazara/Core/OffsetOf.hpp>
|
||||
#include <Nazara/Network/ENetPeer.hpp>
|
||||
#include <Nazara/Network/NetPacket.hpp>
|
||||
#include <Nazara/Network/Debug.hpp>
|
||||
|
||||
#define ENET_TIME_OVERFLOW 86400000
|
||||
|
||||
#define ENET_TIME_LESS(a, b) ((a) - (b) >= ENET_TIME_OVERFLOW)
|
||||
#define ENET_TIME_GREATER(a, b) ((b) - (a) >= ENET_TIME_OVERFLOW)
|
||||
#define ENET_TIME_LESS_EQUAL(a, b) (! ENET_TIME_GREATER (a, b))
|
||||
#define ENET_TIME_GREATER_EQUAL(a, b) (! ENET_TIME_LESS (a, b))
|
||||
|
||||
#define ENET_TIME_DIFFERENCE(a, b) ((a) - (b) >= ENET_TIME_OVERFLOW ? (b) - (a) : (a) - (b))
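
// The macros above compare 32-bit millisecond timestamps in a wrap-around-safe way:
// a time counts as "less" than another when their unsigned difference exceeds the
// 24-hour overflow window, mirroring the reference ENet time macros.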
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
/// Temporary
|
||||
template<typename T>
|
||||
T HostToNet(T value)
|
||||
{
|
||||
#ifdef NAZARA_LITTLE_ENDIAN
|
||||
return SwapBytes(value);
|
||||
#else
|
||||
return value;
|
||||
#endif
|
||||
}
|
||||
|
||||
/// Temporary
|
||||
template<typename T>
|
||||
T NetToHost(T value)
|
||||
{
|
||||
#ifdef NAZARA_LITTLE_ENDIAN
|
||||
return SwapBytes(value);
|
||||
#else
|
||||
return value;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void ENetHost::Broadcast(UInt8 channelId, ENetPacketFlags flags, NetPacket&& packet)
|
||||
{
|
||||
ENetPacket* enetPacket = m_packetPool.New<ENetPacket>();
|
||||
enetPacket->flags = flags;
|
||||
enetPacket->data = std::move(packet);
|
||||
enetPacket->owner = &m_packetPool;
|
||||
|
||||
for (ENetPeer& peer : m_peers)
|
||||
{
|
||||
if (peer.m_state != ENetPeerState::Connected)
|
||||
continue;
|
||||
|
||||
peer.Send(channelId, enetPacket);
|
||||
}
|
||||
}
|
||||
|
||||
bool ENetHost::Connect(const IpAddress& remoteAddress, std::size_t channelCount, UInt32 data)
|
||||
{
|
||||
NazaraAssert(remoteAddress.IsValid(), "Invalid remote address");
|
||||
NazaraAssert(remoteAddress.GetPort() != 0, "Remote address has no port");
|
||||
|
||||
std::size_t peerId;
|
||||
for (peerId = 0; peerId < m_peers.size(); ++peerId)
|
||||
{
|
||||
if (m_peers[peerId].m_state == ENetPeerState::Disconnected)
|
||||
break;
|
||||
}
|
||||
|
||||
if (peerId >= m_peers.size())
|
||||
{
|
||||
NazaraError("Insufficient peers");
|
||||
return false;
|
||||
}
|
||||
|
||||
m_channelLimit = Clamp<std::size_t>(channelCount, ENetConstants::ENetProtocol_MinimumChannelCount, ENetConstants::ENetProtocol_MaximumChannelCount);
|
||||
|
||||
UInt32 windowSize;
|
||||
if (m_outgoingBandwidth == 0)
|
||||
windowSize = ENetProtocol_MaximumWindowSize;
|
||||
else
|
||||
windowSize = (m_outgoingBandwidth / ENetConstants::ENetPeer_WindowSizeScale) * ENetProtocol_MinimumWindowSize;
|
||||
|
||||
ENetPeer& peer = m_peers[peerId];
|
||||
peer.InitOutgoing(channelCount, remoteAddress, ++m_randomSeed, windowSize);
|
||||
|
||||
ENetProtocol command;
|
||||
command.header.command = ENetProtocolCommand_Connect | ENetProtocolFlag_Acknowledge;
|
||||
command.header.channelID = 0xFF;
|
||||
|
||||
command.connect.channelCount = HostToNet(static_cast<UInt32>(channelCount));
|
||||
command.connect.connectID = peer.m_connectID;
|
||||
command.connect.data = HostToNet(data);
|
||||
command.connect.incomingBandwidth = HostToNet(m_incomingBandwidth);
|
||||
command.connect.incomingSessionID = peer.m_incomingSessionID;
|
||||
command.connect.mtu = HostToNet(peer.m_mtu);
|
||||
command.connect.outgoingBandwidth = HostToNet(m_outgoingBandwidth);
|
||||
command.connect.outgoingPeerID = HostToNet(peer.m_incomingPeerID);
|
||||
command.connect.outgoingSessionID = peer.m_outgoingSessionID;
|
||||
command.connect.packetThrottleAcceleration = HostToNet(peer.m_packetThrottleAcceleration);
|
||||
command.connect.packetThrottleDeceleration = HostToNet(peer.m_packetThrottleDeceleration);
|
||||
command.connect.packetThrottleInterval = HostToNet(peer.m_packetThrottleInterval);
|
||||
command.connect.windowSize = HostToNet(peer.m_windowSize);
|
||||
|
||||
peer.QueueOutgoingCommand(command, nullptr, 0, 0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ENetHost::Connect(const String& hostName, NetProtocol protocol, const String& service, ResolveError* error, std::size_t channelCount, UInt32 data)
|
||||
{
|
||||
std::vector<HostnameInfo> results = IpAddress::ResolveHostname(protocol, hostName, service, error);
|
||||
if (results.empty())
|
||||
return false;
|
||||
|
||||
IpAddress hostnameAddress;
|
||||
for (const HostnameInfo& result : results)
|
||||
{
|
||||
if (!result.address)
|
||||
continue;
|
||||
|
||||
if (result.socketType != SocketType_UDP)
|
||||
continue;
|
||||
|
||||
hostnameAddress = result.address;
|
||||
break; //< Take first valid address
|
||||
}
|
||||
|
||||
if (!hostnameAddress.IsValid())
return false; //< No UDP-capable address was resolved

return Connect(hostnameAddress, channelCount, data);
|
||||
}
|
||||
|
||||
bool ENetHost::Create(const IpAddress& address, std::size_t peerCount, std::size_t channelCount)
|
||||
{
|
||||
return Create(address, peerCount, channelCount, 0, 0);
|
||||
}
|
||||
|
||||
bool ENetHost::Create(const IpAddress& address, std::size_t peerCount, std::size_t channelCount, UInt32 incomingBandwidth, UInt32 outgoingBandwidth)
|
||||
{
|
||||
NazaraAssert(address.IsValid(), "Invalid listening address");
|
||||
|
||||
if (peerCount > ENetConstants::ENetProtocol_MaximumPeerId)
|
||||
{
|
||||
NazaraError("Peer count exceeds maximum peer count supported by protocol (" + String::Number(ENetConstants::ENetProtocol_MaximumPeerId) + ")");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!InitSocket(address))
|
||||
return false;
|
||||
|
||||
m_peers.resize(peerCount);
|
||||
|
||||
m_address = address;
|
||||
m_randomSeed = *reinterpret_cast<UInt32*>(this);
|
||||
m_randomSeed += s_randomGenerator();
|
||||
m_randomSeed = (m_randomSeed << 16) | (m_randomSeed >> 16);
|
||||
m_channelLimit = Clamp<std::size_t>(channelCount, ENetConstants::ENetProtocol_MinimumChannelCount, ENetConstants::ENetProtocol_MaximumChannelCount);
|
||||
m_incomingBandwidth = incomingBandwidth;
|
||||
m_outgoingBandwidth = outgoingBandwidth;
|
||||
m_bandwidthThrottleEpoch = 0;
|
||||
m_recalculateBandwidthLimits = false;
|
||||
m_mtu = ENetConstants::ENetHost_DefaultMTU;
|
||||
m_commandCount = 0;
|
||||
m_bufferCount = 0;
|
||||
m_receivedAddress = IpAddress::AnyIpV4;
|
||||
m_receivedData = nullptr;
|
||||
m_receivedDataLength = 0;
|
||||
|
||||
m_totalSentData = 0;
|
||||
m_totalSentPackets = 0;
|
||||
m_totalReceivedData = 0;
|
||||
m_totalReceivedPackets = 0;
|
||||
|
||||
m_bandwidthLimitedPeers = 0;
|
||||
m_connectedPeers = 0;
|
||||
m_duplicatePeers = ENetConstants::ENetProtocol_MaximumPeerId;
|
||||
m_maximumPacketSize = ENetConstants::ENetHost_DefaultMaximumPacketSize;
|
||||
m_maximumWaitingData = ENetConstants::ENetHost_DefaultMaximumWaitingData;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void ENetHost::Flush()
|
||||
{
|
||||
m_serviceTime = GetElapsedMilliseconds();
|
||||
|
||||
SendOutgoingCommands(nullptr, 0);
|
||||
}
|
||||
|
||||
int ENetHost::Service(ENetEvent* event, UInt32 timeout)
|
||||
{
|
||||
UInt32 waitCondition;
|
||||
|
||||
if (event)
|
||||
{
|
||||
event->type = ENetEventType::None;
|
||||
event->peer = nullptr;
|
||||
event->packet = nullptr;
|
||||
|
||||
if (DispatchIncomingCommands(event))
|
||||
return 1;
|
||||
}
|
||||
|
||||
m_serviceTime = GetElapsedMilliseconds();
|
||||
timeout += m_serviceTime;
|
||||
|
||||
do
|
||||
{
|
||||
if (ENET_TIME_DIFFERENCE(m_serviceTime, m_bandwidthThrottleEpoch) >= ENetConstants::ENetHost_BandwidthThrottleInterval)
|
||||
ThrottleBandwidth();
|
||||
|
||||
switch (SendOutgoingCommands(event, true))
|
||||
{
|
||||
case 1:
|
||||
return 1;
|
||||
|
||||
case -1:
|
||||
#ifdef ENET_DEBUG
|
||||
perror("Error sending outgoing packets");
|
||||
#endif
|
||||
return -1;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
switch (ReceiveIncomingCommands(event))
|
||||
{
|
||||
case 1:
|
||||
return 1;
|
||||
|
||||
case -1:
|
||||
#ifdef ENET_DEBUG
|
||||
perror("Error receiving incoming packets");
|
||||
#endif
|
||||
|
||||
return -1;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
switch (SendOutgoingCommands(event, 1))
|
||||
{
|
||||
case 1:
|
||||
return 1;
|
||||
|
||||
case -1:
|
||||
#ifdef ENET_DEBUG
|
||||
perror("Error sending outgoing packets");
|
||||
#endif
|
||||
|
||||
return -1;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (event)
|
||||
{
|
||||
switch (DispatchIncomingCommands(event))
|
||||
{
|
||||
case 1:
|
||||
return 1;
|
||||
|
||||
case -1:
|
||||
#ifdef ENET_DEBUG
|
||||
perror("Error dispatching incoming packets");
|
||||
#endif
|
||||
|
||||
return -1;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (ENET_TIME_GREATER_EQUAL(m_serviceTime, timeout))
|
||||
return 0;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
m_serviceTime = GetElapsedMilliseconds();
|
||||
|
||||
if (ENET_TIME_GREATER_EQUAL(m_serviceTime, timeout))
|
||||
return 0;
|
||||
|
||||
SocketError error;
|
||||
if (m_poller.Wait(ENET_TIME_DIFFERENCE(timeout, m_serviceTime), &error))
|
||||
break;
|
||||
|
||||
if (error != SocketError_NoError)
|
||||
return -1;
|
||||
}
|
||||
|
||||
m_serviceTime = GetElapsedMilliseconds();
|
||||
}
|
||||
while (m_poller.IsReady(m_socket));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool ENetHost::InitSocket(const IpAddress& address)
|
||||
{
|
||||
if (!m_socket.Create(address.GetProtocol()))
|
||||
return false;
|
||||
|
||||
m_socket.EnableBlocking(false);
|
||||
m_socket.EnableBroadcasting(true);
|
||||
m_socket.SetReceiveBufferSize(ENetConstants::ENetHost_ReceiveBufferSize);
|
||||
m_socket.SetSendBufferSize(ENetConstants::ENetHost_SendBufferSize);
|
||||
|
||||
if (!address.IsLoopback())
|
||||
{
|
||||
if (m_socket.Bind(address) != SocketState_Bound)
|
||||
{
|
||||
NazaraError("Failed to bind address " + address.ToString());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
m_poller.RegisterSocket(m_socket);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ENetHost::DispatchIncomingCommands(ENetEvent* event)
|
||||
{
|
||||
for (std::size_t bit = m_dispatchQueue.FindFirst(); bit != m_dispatchQueue.npos; bit = m_dispatchQueue.FindNext(bit))
|
||||
{
|
||||
m_dispatchQueue.Reset(bit);
|
||||
|
||||
ENetPeer& peer = m_peers[bit];
|
||||
switch (peer.m_state)
|
||||
{
|
||||
case ENetPeerState::ConnectionPending:
|
||||
case ENetPeerState::ConnectionSucceeded:
|
||||
peer.ChangeState(ENetPeerState::Connected);
|
||||
|
||||
event->type = ENetEventType::Connect;
|
||||
event->peer = &peer;
|
||||
event->data = peer.m_eventData;
|
||||
return true;
|
||||
|
||||
case ENetPeerState::Zombie:
|
||||
m_recalculateBandwidthLimits = true;
|
||||
|
||||
event->type = ENetEventType::Disconnect;
|
||||
event->peer = &peer;
|
||||
event->data = peer.m_eventData;
|
||||
|
||||
peer.Reset();
|
||||
return true;
|
||||
|
||||
case ENetPeerState::Connected:
|
||||
if (peer.m_dispatchedCommands.empty())
|
||||
continue;
|
||||
|
||||
if (!peer.Receive(&event->packet, &event->channelId))
|
||||
continue;
|
||||
|
||||
event->type = ENetEventType::Receive;
|
||||
event->peer = &peer;
|
||||
|
||||
if (!peer.m_dispatchedCommands.empty())
|
||||
AddToDispatchQueue(&peer);
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
ENetPeer* ENetHost::HandleConnect(ENetProtocolHeader* header, ENetProtocol* command)
|
||||
{
|
||||
UInt32 channelCount = NetToHost(command->connect.channelCount);
|
||||
|
||||
if (channelCount < ENetProtocol_MinimumChannelCount || channelCount > ENetProtocol_MaximumChannelCount)
|
||||
return nullptr;
|
||||
|
||||
std::size_t duplicatePeers = 0;
|
||||
ENetPeer* peer = nullptr;
|
||||
for (ENetPeer& currentPeer : m_peers)
|
||||
{
|
||||
if (currentPeer.m_state == ENetPeerState::Disconnected)
|
||||
{
|
||||
if (!peer)
|
||||
peer = &currentPeer;
|
||||
}
|
||||
else if (currentPeer.m_state != ENetPeerState::Connecting)
|
||||
{
|
||||
// Compare users without comparing their port
|
||||
IpAddress first(currentPeer.m_address);
|
||||
first.SetPort(0);
|
||||
|
||||
IpAddress second(m_receivedAddress);
|
||||
second.SetPort(0);
|
||||
|
||||
if (first == second)
|
||||
{
|
||||
if (currentPeer.m_address.GetPort() == m_receivedAddress.GetPort() && currentPeer.m_connectID == command->connect.connectID)
|
||||
return nullptr;
|
||||
|
||||
++duplicatePeers;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!peer || duplicatePeers >= m_duplicatePeers)
|
||||
return nullptr;
|
||||
|
||||
channelCount = std::min<UInt32>(channelCount, static_cast<UInt32>(m_channelLimit));
|
||||
|
||||
peer->InitIncoming(channelCount, m_receivedAddress, command->connect);
|
||||
|
||||
UInt32 windowSize;
|
||||
if (m_incomingBandwidth == 0)
|
||||
windowSize = ENetConstants::ENetProtocol_MaximumWindowSize;
|
||||
else
|
||||
windowSize = (m_incomingBandwidth / ENetConstants::ENetPeer_WindowSizeScale) * ENetConstants::ENetProtocol_MinimumWindowSize;
|
||||
|
||||
windowSize = std::max(windowSize, NetToHost(command->connect.windowSize));
|
||||
windowSize = Clamp<UInt32>(windowSize, ENetConstants::ENetProtocol_MinimumWindowSize, ENetConstants::ENetProtocol_MaximumWindowSize);
|
||||
|
||||
ENetProtocol verifyCommand;
|
||||
verifyCommand.header.command = ENetProtocolCommand_VerifyConnect | ENetProtocolFlag_Acknowledge;
|
||||
verifyCommand.header.channelID = 0xFF;
|
||||
verifyCommand.verifyConnect.outgoingPeerID = HostToNet(peer->m_incomingPeerID);
|
||||
verifyCommand.verifyConnect.incomingSessionID = peer->m_outgoingSessionID;
|
||||
verifyCommand.verifyConnect.outgoingSessionID = peer->m_incomingSessionID;
|
||||
verifyCommand.verifyConnect.mtu = HostToNet(peer->m_mtu);
|
||||
verifyCommand.verifyConnect.windowSize = HostToNet(windowSize);
|
||||
verifyCommand.verifyConnect.channelCount = HostToNet(channelCount);
|
||||
verifyCommand.verifyConnect.incomingBandwidth = HostToNet(m_incomingBandwidth);
|
||||
verifyCommand.verifyConnect.outgoingBandwidth = HostToNet(m_outgoingBandwidth);
|
||||
verifyCommand.verifyConnect.packetThrottleInterval = HostToNet(peer->m_packetThrottleInterval);
|
||||
verifyCommand.verifyConnect.packetThrottleAcceleration = HostToNet(peer->m_packetThrottleAcceleration);
|
||||
verifyCommand.verifyConnect.packetThrottleDeceleration = HostToNet(peer->m_packetThrottleDeceleration);
|
||||
verifyCommand.verifyConnect.connectID = peer->m_connectID;
|
||||
|
||||
peer->QueueOutgoingCommand(verifyCommand, nullptr, 0, 0);
|
||||
|
||||
return peer;
|
||||
}
|
||||
|
||||
bool ENetHost::HandleIncomingCommands(ENetEvent* event)
|
||||
{
|
||||
if (m_receivedDataLength < NazaraOffsetOf(ENetProtocolHeader, sentTime))
|
||||
return false;
|
||||
|
||||
ENetProtocolHeader* header = reinterpret_cast<ENetProtocolHeader*>(m_receivedData);
|
||||
|
||||
// Adapted from the reference ENet dispatch code; handling of the decoded commands is still to be ported
UInt16 peerID = NetToHost(header->peerID);
UInt8 sessionID = static_cast<UInt8>((peerID & ENetProtocolHeaderSessionMask) >> ENetProtocolHeaderSessionShift);
UInt16 flags = peerID & ENetProtocolHeaderFlag_Mask;
peerID &= ~(ENetProtocolHeaderFlag_Mask | ENetProtocolHeaderSessionMask);

std::size_t headerSize = (flags & ENetProtocolHeaderFlag_SentTime) ? sizeof(ENetProtocolHeader) : NazaraOffsetOf(ENetProtocolHeader, sentTime);

ENetPeer* peer = nullptr;
if (peerID != ENetConstants::ENetProtocol_MaximumPeerId)
{
if (peerID >= m_peers.size())
return false;

peer = &m_peers[peerID];

if (peer->m_state == ENetPeerState::Disconnected ||
peer->m_state == ENetPeerState::Zombie ||
!(peer->m_address == m_receivedAddress) ||
(peer->m_outgoingPeerID < ENetConstants::ENetProtocol_MaximumPeerId &&
sessionID != peer->m_incomingSessionID))
return false;
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ENetHost::HandleSendReliable(ENetPeer& peer, const ENetProtocol& command, UInt8** currentData)
|
||||
{
|
||||
if (command.header.channelID >= peer.m_channels.size() || (peer.m_state != ENetPeerState::Connected && peer.m_state != ENetPeerState::DisconnectLater))
|
||||
return false;
|
||||
|
||||
UInt16 dataLength = NetToHost(command.sendReliable.dataLength);
|
||||
*currentData += dataLength;
|
||||
if (dataLength >= m_maximumPacketSize || *currentData < m_receivedData || *currentData > &m_receivedData[m_receivedDataLength])
|
||||
return false;
|
||||
|
||||
// WIP: argument list reconstructed from the reference ENet implementation
if (!peer.QueueIncomingCommand(command, reinterpret_cast<const UInt8*>(&command) + sizeof(ENetProtocolSendReliable), dataLength, ENetPacketFlag_Reliable, 0))
return false;

return true;
|
||||
}
|
||||
|
||||
int ENetHost::ReceiveIncomingCommands(ENetEvent* event)
|
||||
{
|
||||
for (unsigned int i = 0; i < 256; ++i)
|
||||
{
|
||||
NetPacket packet;
|
||||
|
||||
std::size_t receivedLength;
|
||||
if (!m_socket.Receive(m_packetData[0].data(), m_packetData[0].size(), &m_receivedAddress, &receivedLength))
|
||||
return -1; //< Error
|
||||
|
||||
if (receivedLength == 0)
|
||||
return 0;
|
||||
|
||||
m_receivedData = m_packetData[0].data();
|
||||
m_receivedDataLength = receivedLength;
|
||||
|
||||
m_totalReceivedData += receivedLength;
|
||||
m_totalReceivedPackets++;
|
||||
|
||||
// Intercept
|
||||
|
||||
switch (HandleIncomingCommands(event))
|
||||
{
|
||||
case 1:
|
||||
return 1;
|
||||
|
||||
case -1:
|
||||
return -1;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
void ENetHost::NotifyConnect(ENetPeer* peer, ENetEvent* event)
|
||||
{
|
||||
m_recalculateBandwidthLimits = true;
|
||||
|
||||
if (event)
|
||||
{
|
||||
peer->ChangeState(ENetPeerState::Connected);
|
||||
|
||||
event->type = ENetEventType::Connect;
|
||||
event->peer = peer;
|
||||
event->data = peer->m_eventData;
|
||||
}
|
||||
else
|
||||
peer->DispatchState(peer->m_state == ENetPeerState::Connecting ? ENetPeerState::ConnectionSucceeded : ENetPeerState::ConnectionPending);
|
||||
}
|
||||
|
||||
void ENetHost::NotifyDisconnect(ENetPeer* peer, ENetEvent* event)
|
||||
{
|
||||
if (peer->m_state >= ENetPeerState::ConnectionPending)
|
||||
m_recalculateBandwidthLimits = true;
|
||||
|
||||
if (peer->m_state != ENetPeerState::Connecting && (peer->m_state < ENetPeerState::ConnectionSucceeded))
|
||||
peer->Reset();
|
||||
else if (event)
|
||||
{
|
||||
event->type = ENetEventType::Disconnect;
|
||||
event->peer = peer;
|
||||
event->data = peer->m_eventData;
|
||||
|
||||
peer->Reset();
|
||||
}
|
||||
else
|
||||
{
|
||||
peer->m_eventData = 0;
|
||||
|
||||
peer->DispatchState(ENetPeerState::Zombie);
|
||||
}
|
||||
}
|
||||
|
||||
void ENetHost::ThrottleBandwidth()
|
||||
{
|
||||
UInt32 currentTime = GetElapsedMilliseconds();
|
||||
UInt32 elapsedTime = currentTime - m_bandwidthThrottleEpoch;
|
||||
|
||||
if (elapsedTime < ENetConstants::ENetHost_BandwidthThrottleInterval)
|
||||
return;
|
||||
|
||||
m_bandwidthThrottleEpoch = currentTime;
|
||||
|
||||
if (m_connectedPeers == 0)
|
||||
return;
|
||||
|
||||
UInt32 dataTotal = ~0;
|
||||
UInt32 bandwidth = ~0;
|
||||
|
||||
if (m_outgoingBandwidth != 0)
|
||||
{
|
||||
bandwidth = (m_outgoingBandwidth * elapsedTime) / 1000;
|
||||
|
||||
dataTotal = 0;
|
||||
for (ENetPeer& peer : m_peers)
|
||||
{
|
||||
if (peer.m_state != ENetPeerState::Connected && peer.m_state != ENetPeerState::DisconnectLater)
|
||||
continue;
|
||||
|
||||
dataTotal += peer.m_outgoingDataTotal;
|
||||
}
|
||||
}
|
||||
|
||||
UInt32 peersRemaining = m_connectedPeers;
|
||||
UInt32 bandwidthLimit = ~0;
|
||||
UInt32 throttle = ~0;
|
||||
bool needsAdjustment = m_bandwidthLimitedPeers > 0;
|
||||
|
||||
while (peersRemaining > 0 && needsAdjustment)
|
||||
{
|
||||
needsAdjustment = false;
|
||||
|
||||
if (dataTotal <= bandwidth)
|
||||
throttle = ENetConstants::ENetPeer_PacketThrottleScale;
|
||||
else
|
||||
throttle = (bandwidth * ENetConstants::ENetPeer_PacketThrottleScale) / dataTotal;
|
||||
|
||||
for (ENetPeer& peer : m_peers)
|
||||
{
|
||||
if ((peer.m_state != ENetPeerState::Connected && peer.m_state != ENetPeerState::DisconnectLater) ||
|
||||
peer.m_incomingBandwidth == 0 || peer.m_outgoingBandwidthThrottleEpoch == currentTime)
|
||||
continue;
|
||||
|
||||
UInt32 peerBandwidth = (peer.m_incomingBandwidth * elapsedTime) / 1000;
|
||||
if ((throttle * peer.m_outgoingDataTotal) / ENetConstants::ENetPeer_PacketThrottleScale <= peerBandwidth)
|
||||
continue;
|
||||
|
||||
peer.m_packetThrottleLimit = (peerBandwidth * ENetConstants::ENetPeer_PacketThrottleScale) / peer.m_outgoingDataTotal;
|
||||
|
||||
if (peer.m_packetThrottleLimit == 0)
|
||||
peer.m_packetThrottleLimit = 1;
|
||||
|
||||
if (peer.m_packetThrottle > peer.m_packetThrottleLimit)
|
||||
peer.m_packetThrottle = peer.m_packetThrottleLimit;
|
||||
|
||||
peer.m_outgoingBandwidthThrottleEpoch = currentTime;
|
||||
|
||||
peer.m_incomingDataTotal = 0;
|
||||
peer.m_outgoingDataTotal = 0;
|
||||
|
||||
needsAdjustment = true;
|
||||
--peersRemaining;
|
||||
bandwidth -= peerBandwidth;
|
||||
dataTotal -= peerBandwidth;
|
||||
}
|
||||
}
|
||||
|
||||
if (peersRemaining > 0)
|
||||
{
|
||||
if (dataTotal <= bandwidth)
|
||||
throttle = ENetConstants::ENetPeer_PacketThrottleScale;
|
||||
else
|
||||
throttle = (bandwidth * ENetConstants::ENetPeer_PacketThrottleScale) / dataTotal;
|
||||
|
||||
for (ENetPeer& peer : m_peers)
|
||||
{
|
||||
if ((peer.m_state != ENetPeerState::Connected && peer.m_state != ENetPeerState::DisconnectLater) ||
|
||||
peer.m_outgoingBandwidthThrottleEpoch == currentTime)
|
||||
continue;
|
||||
|
||||
peer.m_packetThrottleLimit = throttle;
|
||||
|
||||
if (peer.m_packetThrottle > peer.m_packetThrottleLimit)
|
||||
peer.m_packetThrottle = peer.m_packetThrottleLimit;
|
||||
|
||||
peer.m_incomingDataTotal = 0;
|
||||
peer.m_outgoingDataTotal = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (m_recalculateBandwidthLimits)
|
||||
{
|
||||
m_recalculateBandwidthLimits = false;
|
||||
|
||||
peersRemaining = m_connectedPeers;
|
||||
bandwidth = m_incomingBandwidth;
|
||||
needsAdjustment = true;
|
||||
|
||||
if (bandwidth == 0)
|
||||
bandwidthLimit = 0;
|
||||
else
|
||||
{
|
||||
while (peersRemaining > 0 && needsAdjustment)
|
||||
{
|
||||
needsAdjustment = false;
|
||||
bandwidthLimit = bandwidth / peersRemaining;
|
||||
|
||||
for (ENetPeer& peer : m_peers)
|
||||
{
|
||||
if ((peer.m_state != ENetPeerState::Connected && peer.m_state != ENetPeerState::DisconnectLater) ||
|
||||
peer.m_incomingBandwidthThrottleEpoch == currentTime)
|
||||
continue;
|
||||
|
||||
if (peer.m_outgoingBandwidth > 0 && peer.m_outgoingBandwidth >= bandwidthLimit)
|
||||
continue;
|
||||
|
||||
peer.m_incomingBandwidthThrottleEpoch = currentTime;
|
||||
|
||||
needsAdjustment = true;
|
||||
--peersRemaining;
|
||||
bandwidth -= peer.m_outgoingBandwidth;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (ENetPeer& peer : m_peers)
|
||||
{
|
||||
if (peer.m_state != ENetPeerState::Connected && peer.m_state != ENetPeerState::DisconnectLater)
|
||||
continue;
|
||||
|
||||
ENetProtocol command;
|
||||
command.header.command = ENetProtocolCommand_BandwidthLimit | ENetProtocolFlag_Acknowledge;
|
||||
command.header.channelID = 0xFF;
|
||||
command.bandwidthLimit.outgoingBandwidth = HostToNet(m_outgoingBandwidth);
|
||||
|
||||
if (peer.m_incomingBandwidthThrottleEpoch == currentTime)
|
||||
command.bandwidthLimit.incomingBandwidth = HostToNet(peer.m_outgoingBandwidth);
|
||||
else
|
||||
command.bandwidthLimit.incomingBandwidth = HostToNet(bandwidthLimit);
|
||||
|
||||
peer.QueueOutgoingCommand(command, nullptr, 0, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool ENetHost::Initialize()
|
||||
{
|
||||
std::random_device device;
|
||||
s_randomGenerator.seed(device());
|
||||
s_randomGenerator64.seed(device());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void ENetHost::Uninitialize()
|
||||
{
|
||||
}
|
||||
|
||||
std::mt19937 ENetHost::s_randomGenerator;
|
||||
std::mt19937_64 ENetHost::s_randomGenerator64;
|
||||
}
|
||||
|
|
@@ -0,0 +1,23 @@
|
|||
#include <Nazara/Network/ENetPacket.hpp>
|
||||
#include <Nazara/Core/MemoryPool.hpp>
|
||||
#include <Nazara/Network/Debug.hpp>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
/// Temporary
|
||||
void ENetPacketRef::Reset(ENetPacket* packet)
|
||||
{
|
||||
if (m_packet)
|
||||
{
|
||||
if (--m_packet->referenceCount == 0)
m_packet->owner->Delete(m_packet);
|
||||
}
|
||||
|
||||
m_packet = packet;
|
||||
if (m_packet)
|
||||
m_packet->referenceCount++;
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,885 @@
|
|||
#include <Nazara/Network/ENetPeer.hpp>
|
||||
#include <Nazara/Core/Endianness.hpp>
|
||||
#include <Nazara/Network/ENetHost.hpp>
|
||||
#include <Nazara/Network/NetPacket.hpp>
|
||||
#include <Nazara/Network/Debug.hpp>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
/// Temporary
|
||||
template<typename T>
|
||||
T HostToNet(T value)
|
||||
{
|
||||
#ifdef NAZARA_LITTLE_ENDIAN
|
||||
return SwapBytes(value);
|
||||
#else
|
||||
return value;
|
||||
#endif
|
||||
}
|
||||
|
||||
/// Temporary
|
||||
template<typename T>
|
||||
T NetToHost(T value)
|
||||
{
|
||||
#ifdef NAZARA_LITTLE_ENDIAN
|
||||
return SwapBytes(value);
|
||||
#else
|
||||
return value;
|
||||
#endif
|
||||
}
|
||||
|
||||
namespace
|
||||
{
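// Size in bytes of each wire command structure, indexed by its ENetProtocolCommand value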
|
||||
static std::size_t commandSizes[ENetProtocolCommand_Count] =
|
||||
{
|
||||
0,
|
||||
sizeof(ENetProtocolAcknowledge),
|
||||
sizeof(ENetProtocolConnect),
|
||||
sizeof(ENetProtocolVerifyConnect),
|
||||
sizeof(ENetProtocolDisconnect),
|
||||
sizeof(ENetProtocolPing),
|
||||
sizeof(ENetProtocolSendReliable),
|
||||
sizeof(ENetProtocolSendUnreliable),
|
||||
sizeof(ENetProtocolSendFragment),
|
||||
sizeof(ENetProtocolSendUnsequenced),
|
||||
sizeof(ENetProtocolBandwidthLimit),
|
||||
sizeof(ENetProtocolThrottleConfigure),
|
||||
sizeof(ENetProtocolSendFragment)
|
||||
};
|
||||
|
||||
std::size_t enet_protocol_command_size(UInt8 commandNumber)
|
||||
{
|
||||
return commandSizes[commandNumber & ENetProtocolCommand_Mask];
|
||||
}
|
||||
}
|
||||
|
||||
ENetPeer::ENetPeer(ENetHost* host, UInt16 peerId) :
|
||||
m_packetPool(sizeof(ENetPacket)),
|
||||
m_host(host),
|
||||
m_incomingPeerID(peerId),
|
||||
m_incomingSessionID(0xFF),
|
||||
m_outgoingSessionID(0xFF)
|
||||
{
|
||||
Reset();
|
||||
}
|
||||
|
||||
void ENetPeer::Disconnect(UInt32 data)
|
||||
{
|
||||
if (m_state == ENetPeerState::Disconnecting ||
|
||||
m_state == ENetPeerState::Disconnected ||
|
||||
m_state == ENetPeerState::AcknowledgingDisconnect ||
|
||||
m_state == ENetPeerState::Zombie)
|
||||
return;
|
||||
|
||||
ResetQueues();
|
||||
|
||||
ENetProtocol command;
|
||||
command.header.command = ENetProtocolCommand_Disconnect;
|
||||
command.header.channelID = 0xFF;
|
||||
command.disconnect.data = HostToNet(data);
|
||||
|
||||
if (m_state == ENetPeerState::Connected || m_state == ENetPeerState::DisconnectLater)
|
||||
command.header.command |= ENetProtocolFlag_Acknowledge;
|
||||
else
|
||||
command.header.command |= ENetProtocolFlag_Unsequenced;
|
||||
|
||||
QueueOutgoingCommand(command, nullptr, 0, 0);
|
||||
|
||||
if (m_state == ENetPeerState::Connected || m_state == ENetPeerState::DisconnectLater)
|
||||
{
|
||||
OnDisconnect();
|
||||
|
||||
m_state = ENetPeerState::Disconnecting;
|
||||
}
|
||||
else
|
||||
{
|
||||
m_host->Flush();
|
||||
|
||||
Reset();
|
||||
}
|
||||
}
|
||||
|
||||
void ENetPeer::DisconnectLater(UInt32 data)
|
||||
{
|
||||
if ((m_state == ENetPeerState::Connected || m_state == ENetPeerState::DisconnectLater) &&
|
||||
(!m_outgoingReliableCommands.empty() ||
!m_outgoingUnreliableCommands.empty() ||
!m_sentReliableCommands.empty()))
|
||||
{
|
||||
m_state = ENetPeerState::DisconnectLater;
|
||||
m_eventData = data;
|
||||
}
|
||||
else
|
||||
Disconnect(data);
|
||||
}
|
||||
|
||||
void ENetPeer::DisconnectNow(UInt32 data)
|
||||
{
|
||||
if (m_state == ENetPeerState::Disconnected)
|
||||
return;
|
||||
|
||||
if (m_state != ENetPeerState::Zombie && m_state != ENetPeerState::Disconnecting)
|
||||
{
|
||||
ResetQueues();
|
||||
|
||||
ENetProtocol command;
|
||||
command.header.command = ENetProtocolCommand_Disconnect | ENetProtocolFlag_Unsequenced;
|
||||
command.header.channelID = 0xFF;
|
||||
command.disconnect.data = HostToNet(data);
|
||||
|
||||
QueueOutgoingCommand(command, nullptr, 0, 0);
|
||||
|
||||
m_host->Flush();
|
||||
}
|
||||
|
||||
Reset();
|
||||
}
|
||||
|
||||
void ENetPeer::Ping()
|
||||
{
|
||||
if (m_state != ENetPeerState::Connected)
|
||||
return;
|
||||
|
||||
ENetProtocol command;
|
||||
command.header.command = ENetProtocolCommand_Ping | ENetProtocolFlag_Acknowledge;
|
||||
command.header.channelID = 0xFF;
|
||||
|
||||
QueueOutgoingCommand(command, nullptr, 0, 0);
|
||||
}
|
||||
|
||||
bool ENetPeer::Receive(ENetPacketRef* packet, UInt8* channelId)
|
||||
{
|
||||
if (m_dispatchedCommands.empty())
|
||||
return false;
|
||||
|
||||
IncomingCommmand& incomingCommand = m_dispatchedCommands.front();
|
||||
|
||||
m_totalWaitingData -= incomingCommand.packet->data.GetSize();
|
||||
|
||||
if (packet)
|
||||
*packet = std::move(incomingCommand.packet);
|
||||
|
||||
if (channelId)
|
||||
*channelId = incomingCommand.command.header.channelID;
|
||||
|
||||
m_dispatchedCommands.pop_front();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void ENetPeer::Reset()
|
||||
{
|
||||
OnDisconnect();
|
||||
|
||||
m_outgoingPeerID = ENetConstants::ENetProtocol_MaximumPeerId;
|
||||
m_connectID = 0;
|
||||
|
||||
m_state = ENetPeerState::Disconnected;
|
||||
|
||||
m_incomingBandwidth = 0;
|
||||
m_outgoingBandwidth = 0;
|
||||
m_incomingBandwidthThrottleEpoch = 0;
|
||||
m_outgoingBandwidthThrottleEpoch = 0;
|
||||
m_incomingDataTotal = 0;
|
||||
m_outgoingDataTotal = 0;
|
||||
m_lastSendTime = 0;
|
||||
m_lastReceiveTime = 0;
|
||||
m_nextTimeout = 0;
|
||||
m_earliestTimeout = 0;
|
||||
m_packetLossEpoch = 0;
|
||||
m_packetsSent = 0;
|
||||
m_packetsLost = 0;
|
||||
m_packetLoss = 0;
|
||||
m_packetLossVariance = 0;
|
||||
m_packetThrottle = ENetConstants::ENetPeer_DefaultPacketThrottle;
|
||||
m_packetThrottleLimit = ENetConstants::ENetPeer_PacketThrottleScale;
|
||||
m_packetThrottleCounter = 0;
|
||||
m_packetThrottleEpoch = 0;
|
||||
m_packetThrottleAcceleration = ENetConstants::ENetPeer_PacketThrottleAcceleration;
|
||||
m_packetThrottleDeceleration = ENetConstants::ENetPeer_PacketThrottleDeceleration;
|
||||
m_packetThrottleInterval = ENetConstants::ENetPeer_PacketThrottleInterval;
|
||||
m_pingInterval = ENetConstants::ENetPeer_PingInterval;
|
||||
m_timeoutLimit = ENetConstants::ENetPeer_TimeoutLimit;
|
||||
m_timeoutMinimum = ENetConstants::ENetPeer_TimeoutMinimum;
|
||||
m_timeoutMaximum = ENetConstants::ENetPeer_TimeoutMaximum;
|
||||
m_lastRoundTripTime = ENetConstants::ENetPeer_DefaultRoundTripTime;
|
||||
m_lowestRoundTripTime = ENetConstants::ENetPeer_DefaultRoundTripTime;
|
||||
m_lastRoundTripTimeVariance = 0;
|
||||
m_highestRoundTripTimeVariance = 0;
|
||||
m_roundTripTime = ENetConstants::ENetPeer_DefaultRoundTripTime;
|
||||
m_roundTripTimeVariance = 0;
|
||||
m_mtu = m_host->m_mtu;
|
||||
m_reliableDataInTransit = 0;
|
||||
m_outgoingReliableSequenceNumber = 0;
|
||||
m_windowSize = ENetConstants::ENetProtocol_MaximumWindowSize;
|
||||
m_incomingUnsequencedGroup = 0;
|
||||
m_outgoingUnsequencedGroup = 0;
|
||||
m_eventData = 0;
|
||||
m_totalWaitingData = 0;
|
||||
|
||||
std::memset(m_unsequencedWindow, 0, sizeof(m_unsequencedWindow));
|
||||
|
||||
ResetQueues();
|
||||
}
|
||||
|
||||
bool ENetPeer::Send(UInt8 channelId, ENetPacketFlags flags, NetPacket&& packet)
|
||||
{
|
||||
ENetPacket* enetPacket = m_packetPool.New<ENetPacket>();
|
||||
enetPacket->flags = flags;
|
||||
enetPacket->data = std::move(packet);
|
||||
enetPacket->owner = &m_packetPool;
|
||||
|
||||
return Send(channelId, enetPacket);
|
||||
}
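// Illustrative usage sketch (application side, names are hypothetical): build a
// NetPacket and hand it over; the ENetPacket wrapper above takes ownership of the
// moved data through the packet pool.
//
//     NetPacket packet(MyOpcode_Chat); //< MyOpcode_Chat is application-defined
//     packet << message;
//     peer.Send(0, ENetPacketFlag_Reliable, std::move(packet));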
|
||||
|
||||
bool ENetPeer::Send(UInt8 channelId, ENetPacketRef packetRef)
|
||||
{
|
||||
if (m_state != ENetPeerState::Connected || channelId >= m_channels.size() || packetRef->data.GetSize() > m_host->m_maximumPacketSize)
|
||||
return false;
|
||||
|
||||
Channel& channel = m_channels[channelId];
|
||||
|
||||
std::size_t fragmentLength = m_mtu - sizeof(ENetProtocolHeader) - sizeof(ENetProtocolSendFragment);
|
||||
//if (m_host->m_checksum != nullptr)
|
||||
// fragmentLength -= sizeof(UInt32);
|
||||
|
||||
UInt32 packetSize = static_cast<UInt32>(packetRef->data.GetSize());
|
||||
if (packetSize > fragmentLength)
|
||||
{
|
||||
UInt32 fragmentCount = (packetSize + fragmentLength - 1) / fragmentLength;
|
||||
UInt32 fragmentNumber;
|
||||
UInt32 fragmentOffset;
|
||||
|
||||
UInt8 commandNumber;
|
||||
UInt16 startSequenceNumber;
|
||||
|
||||
if (fragmentCount > ENetConstants::ENetProtocol_MaximumFragmentCount)
|
||||
return false;
|
||||
|
||||
if ((packetRef->flags & (ENetPacketFlag_Reliable | ENetPacketFlag_UnreliableFragment)) == ENetPacketFlag_UnreliableFragment &&
|
||||
channel.outgoingUnreliableSequenceNumber < 0xFFFF)
|
||||
{
|
||||
commandNumber = ENetProtocolCommand_SendUnreliableFragment; //< Unreliable fragments must use the dedicated fragment command
|
||||
startSequenceNumber = HostToNet(channel.outgoingUnreliableSequenceNumber + 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
commandNumber = ENetProtocolCommand_SendFragment | ENetProtocolFlag_Acknowledge;
|
||||
startSequenceNumber = HostToNet(channel.outgoingReliableSequenceNumber + 1);
|
||||
}
|
||||
|
||||
for (fragmentNumber = 0,
|
||||
fragmentOffset = 0;
|
||||
fragmentOffset < packetSize;
|
||||
++fragmentNumber,
|
||||
fragmentOffset += fragmentLength)
|
||||
{
|
||||
if (packetSize - fragmentOffset < fragmentLength)
|
||||
fragmentLength = packetSize - fragmentOffset;
|
||||
|
||||
OutgoingCommand outgoingCommand;
|
||||
outgoingCommand.fragmentOffset = fragmentOffset;
|
||||
outgoingCommand.fragmentLength = fragmentLength;
|
||||
outgoingCommand.packet = packetRef;
|
||||
outgoingCommand.command.header.command = commandNumber;
|
||||
outgoingCommand.command.header.channelID = channelId;
|
||||
outgoingCommand.command.sendFragment.startSequenceNumber = startSequenceNumber;
|
||||
outgoingCommand.command.sendFragment.dataLength = HostToNet(fragmentLength);
|
||||
outgoingCommand.command.sendFragment.fragmentCount = HostToNet(fragmentCount);
|
||||
outgoingCommand.command.sendFragment.fragmentNumber = HostToNet(fragmentNumber);
|
||||
outgoingCommand.command.sendFragment.totalLength = HostToNet(packetSize);
|
||||
outgoingCommand.command.sendFragment.fragmentOffset = HostToNet(fragmentOffset);
|
||||
|
||||
SetupOutgoingCommand(outgoingCommand);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
ENetProtocol command;
|
||||
command.header.channelID = channelId;
|
||||
|
||||
if ((packetRef->flags & (ENetPacketFlag_Reliable | ENetPacketFlag_Unsequenced)) == ENetPacketFlag_Unsequenced)
|
||||
command.header.command = ENetProtocolCommand_SendUnsequenced | ENetProtocolFlag_Unsequenced;
|
||||
else if (packetRef->flags & ENetPacketFlag_Reliable || channel.outgoingUnreliableSequenceNumber >= 0xFFFF)
|
||||
command.header.command = ENetProtocolCommand_SendReliable | ENetProtocolFlag_Acknowledge;
|
||||
else
|
||||
command.header.command = ENetProtocolCommand_SendUnreliable;
|
||||
|
||||
QueueOutgoingCommand(command, packetRef, 0, packetSize);
|
||||
|
||||
return true;
|
||||
}
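// Fragmentation sketch: the payload is cut into ceil(packetSize / fragmentLength)
// pieces, each carrying its fragmentNumber/fragmentOffset so the remote side can
// reassemble it. Numbers below are illustrative only (the real fragmentLength
// depends on the negotiated MTU and the protocol header sizes):
//
//     fragmentLength = 1370 (assumed)
//     packetSize     = 4000
//     fragmentCount  = (4000 + 1370 - 1) / 1370 = 3
//     fragments      = 1370 + 1370 + 1260 bytes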
|
||||
|
||||
void ENetPeer::ThrottleConfigure(UInt32 interval, UInt32 acceleration, UInt32 deceleration)
|
||||
{
|
||||
m_packetThrottleInterval = interval;
|
||||
m_packetThrottleAcceleration = acceleration;
|
||||
m_packetThrottleDeceleration = deceleration;
|
||||
|
||||
ENetProtocol command;
|
||||
command.header.command = ENetProtocolCommand_ThrottleConfigure | ENetProtocolFlag_Acknowledge;
|
||||
command.header.channelID = 0xFF;
|
||||
|
||||
command.throttleConfigure.packetThrottleInterval = HostToNet(interval);
|
||||
command.throttleConfigure.packetThrottleAcceleration = HostToNet(acceleration);
|
||||
command.throttleConfigure.packetThrottleDeceleration = HostToNet(deceleration);
|
||||
|
||||
QueueOutgoingCommand(command, nullptr, 0, 0);
|
||||
}
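// Illustrative call (values are examples, not engine defaults): shorten the
// measurement interval and raise the acceleration/deceleration steps to make the
// packet throttle react faster. The same values are also sent to the remote peer
// through the ThrottleConfigure command queued above.
//
//     peer.ThrottleConfigure(1000 /*interval*/, 4 /*acceleration*/, 4 /*deceleration*/);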
|
||||
|
||||
void ENetPeer::InitIncoming(std::size_t channelCount, const IpAddress& address, ENetProtocolConnect& incomingCommand)
|
||||
{
|
||||
m_channels.resize(channelCount);
|
||||
m_address = address;
|
||||
|
||||
m_connectID = incomingCommand.connectID;
|
||||
m_eventData = NetToHost(incomingCommand.data);
|
||||
m_incomingBandwidth = NetToHost(incomingCommand.incomingBandwidth);
|
||||
m_outgoingBandwidth = NetToHost(incomingCommand.outgoingBandwidth);
|
||||
m_packetThrottleInterval = NetToHost(incomingCommand.packetThrottleInterval);
|
||||
m_packetThrottleAcceleration = NetToHost(incomingCommand.packetThrottleAcceleration);
|
||||
m_packetThrottleDeceleration = NetToHost(incomingCommand.packetThrottleDeceleration);
|
||||
m_outgoingPeerID = NetToHost(incomingCommand.outgoingPeerID);
|
||||
m_state = ENetPeerState::AcknowledgingConnect;
|
||||
|
||||
UInt8 incomingSessionId, outgoingSessionId;
|
||||
|
||||
incomingSessionId = incomingCommand.incomingSessionID == 0xFF ? m_outgoingSessionID : incomingCommand.incomingSessionID;
|
||||
incomingSessionId = (incomingSessionId + 1) & (ENetProtocolHeaderSessionMask >> ENetProtocolHeaderSessionShift);
|
||||
if (incomingSessionId == m_outgoingSessionID)
|
||||
incomingSessionId = (incomingSessionId + 1) & (ENetProtocolHeaderSessionMask >> ENetProtocolHeaderSessionShift);
|
||||
m_outgoingSessionID = incomingSessionId;
|
||||
|
||||
outgoingSessionId = incomingCommand.outgoingSessionID == 0xFF ? m_incomingSessionID : incomingCommand.outgoingSessionID;
|
||||
outgoingSessionId = (outgoingSessionId + 1) & (ENetProtocolHeaderSessionMask >> ENetProtocolHeaderSessionShift);
|
||||
if (outgoingSessionId == m_incomingSessionID)
|
||||
outgoingSessionId = (outgoingSessionId + 1) & (ENetProtocolHeaderSessionMask >> ENetProtocolHeaderSessionShift);
|
||||
m_incomingSessionID = outgoingSessionId;
|
||||
|
||||
m_mtu = Clamp<UInt32>(NetToHost(incomingCommand.mtu), ENetConstants::ENetProtocol_MinimumMTU, ENetConstants::ENetProtocol_MaximumMTU);
|
||||
|
||||
if (m_host->m_outgoingBandwidth == 0 && m_incomingBandwidth == 0)
|
||||
m_windowSize = ENetConstants::ENetProtocol_MaximumWindowSize;
|
||||
else if (m_host->m_outgoingBandwidth == 0 || m_incomingBandwidth == 0)
|
||||
m_windowSize = (std::max(m_host->m_outgoingBandwidth, m_incomingBandwidth) / ENetConstants::ENetPeer_WindowSizeScale) * ENetConstants::ENetProtocol_MinimumWindowSize;
|
||||
else
|
||||
m_windowSize = (std::min(m_host->m_outgoingBandwidth, m_incomingBandwidth) / ENetConstants::ENetPeer_WindowSizeScale) * ENetConstants::ENetProtocol_MinimumWindowSize;
|
||||
|
||||
m_windowSize = Clamp<UInt32>(m_windowSize, ENetConstants::ENetProtocol_MinimumWindowSize, ENetConstants::ENetProtocol_MaximumWindowSize);
|
||||
}
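// Worked example for the window-size computation above, assuming the stock ENet
// constants (WindowSizeScale = 64 KiB, MinimumWindowSize = 4096); these values are
// assumptions, check ENetConstants for the actual ones:
//
//     host outgoing bandwidth = 0, peer incoming bandwidth = 128000 B/s
//     => second branch: (max(0, 128000) / 65536) * 4096 = 1 * 4096 = 4096
//     => clamped to [MinimumWindowSize, MaximumWindowSize] -> 4096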
|
||||
|
||||
void ENetPeer::InitOutgoing(std::size_t channelCount, const IpAddress& address, UInt32 connectId, UInt32 windowSize)
|
||||
{
|
||||
m_channels.resize(channelCount);
|
||||
|
||||
m_address = address;
|
||||
m_connectID = connectId;
|
||||
m_state = ENetPeerState::Connecting;
|
||||
m_windowSize = Clamp<UInt32>(windowSize, ENetConstants::ENetProtocol_MinimumWindowSize, ENetConstants::ENetProtocol_MaximumWindowSize);
|
||||
}
|
||||
|
||||
void ENetPeer::DispatchIncomingReliableCommands(Channel& channel)
|
||||
{
|
||||
auto currentCommand = channel.incomingReliableCommands.begin();
|
||||
for (; currentCommand != channel.incomingReliableCommands.end(); ++currentCommand)
|
||||
{
|
||||
IncomingCommmand& incomingCommand = *currentCommand;
|
||||
|
||||
if (incomingCommand.fragmentsRemaining > 0 || incomingCommand.reliableSequenceNumber != (channel.incomingReliableSequenceNumber + 1))
|
||||
break;
|
||||
|
||||
channel.incomingReliableSequenceNumber = incomingCommand.reliableSequenceNumber;
|
||||
|
||||
if (!incomingCommand.fragments.empty())
|
||||
channel.incomingReliableSequenceNumber += incomingCommand.fragments.size() - 1;
|
||||
}
|
||||
|
||||
if (currentCommand == channel.incomingReliableCommands.begin())
|
||||
return;
|
||||
|
||||
channel.incomingUnreliableSequenceNumber = 0;
|
||||
|
||||
m_dispatchedCommands.splice(m_dispatchedCommands.end(), channel.incomingReliableCommands, channel.incomingReliableCommands.begin(), currentCommand); //< The iterators belong to the channel queue, so it must be the splice source
|
||||
|
||||
m_host->AddToDispatchQueue(this);
|
||||
|
||||
if (!channel.incomingUnreliableCommands.empty())
|
||||
DispatchIncomingUnreliableCommands(channel);
|
||||
}
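// Behaviour sketch of the loop above: reliable commands are only released to the
// dispatch queue while they form a contiguous run after the last delivered sequence
// number. For example, with incomingReliableSequenceNumber = 4 and the channel
// holding sequences {5, 6, 8}, 5 and 6 are spliced out and 8 stays queued until 7
// arrives (or until its fragments are complete).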
|
||||
|
||||
void ENetPeer::DispatchIncomingUnreliableCommands(Channel& channel)
|
||||
{
|
||||
std::list<IncomingCommmand>::iterator currentCommand;
|
||||
std::list<IncomingCommmand>::iterator droppedCommand;
|
||||
std::list<IncomingCommmand>::iterator startCommand;
|
||||
|
||||
for (droppedCommand = startCommand = currentCommand = channel.incomingUnreliableCommands.begin();
|
||||
currentCommand != channel.incomingUnreliableCommands.end();
|
||||
++currentCommand)
|
||||
{
|
||||
IncomingCommmand& incomingCommand = *currentCommand;
|
||||
|
||||
if ((incomingCommand.command.header.command & ENetProtocolCommand_Mask) == ENetProtocolCommand_SendUnsequenced)
|
||||
continue;
|
||||
|
||||
if (incomingCommand.reliableSequenceNumber == channel.incomingReliableSequenceNumber)
|
||||
{
|
||||
if (incomingCommand.fragmentsRemaining <= 0)
|
||||
{
|
||||
channel.incomingUnreliableSequenceNumber = incomingCommand.unreliableSequenceNumber;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (startCommand != currentCommand)
|
||||
{
|
||||
m_dispatchedCommands.splice(m_dispatchedCommands.end(), channel.incomingUnreliableCommands, startCommand, currentCommand);
|
||||
|
||||
m_host->AddToDispatchQueue(this);
|
||||
|
||||
droppedCommand = currentCommand;
|
||||
}
|
||||
else if (droppedCommand != currentCommand)
|
||||
{
|
||||
droppedCommand = currentCommand;
|
||||
--droppedCommand;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
UInt16 reliableWindow = incomingCommand.reliableSequenceNumber / ENetConstants::ENetPeer_ReliableWindowSize;
|
||||
UInt16 currentWindow = channel.incomingReliableSequenceNumber / ENetConstants::ENetPeer_ReliableWindowSize;
|
||||
|
||||
if (incomingCommand.reliableSequenceNumber < channel.incomingReliableSequenceNumber)
|
||||
reliableWindow += ENetConstants::ENetPeer_ReliableWindows;
|
||||
|
||||
if (reliableWindow >= currentWindow && reliableWindow < currentWindow + ENetConstants::ENetPeer_FreeReliableWindows - 1)
|
||||
break;
|
||||
|
||||
droppedCommand = currentCommand;
|
||||
++droppedCommand;
|
||||
|
||||
if (startCommand != currentCommand)
|
||||
{
|
||||
m_dispatchedCommands.splice(m_dispatchedCommands.end(), channel.incomingUnreliableCommands, startCommand, currentCommand);
|
||||
|
||||
m_host->AddToDispatchQueue(this);
|
||||
}
|
||||
}
|
||||
|
||||
startCommand = currentCommand;
|
||||
++startCommand;
|
||||
}
|
||||
|
||||
if (startCommand != currentCommand)
|
||||
{
|
||||
m_dispatchedCommands.splice(m_dispatchedCommands.end(), channel.incomingUnreliableCommands, startCommand, currentCommand);
|
||||
|
||||
m_host->AddToDispatchQueue(this);
|
||||
|
||||
droppedCommand = currentCommand;
|
||||
}
|
||||
|
||||
channel.incomingUnreliableCommands.erase(channel.incomingUnreliableCommands.begin(), droppedCommand);
|
||||
}
|
||||
|
||||
void ENetPeer::OnConnect()
|
||||
{
|
||||
if (m_state != ENetPeerState::Connected && m_state != ENetPeerState::DisconnectLater)
|
||||
{
|
||||
if (m_incomingBandwidth != 0)
|
||||
++m_host->m_bandwidthLimitedPeers;
|
||||
|
||||
++m_host->m_connectedPeers;
|
||||
}
|
||||
}
|
||||
|
||||
void ENetPeer::OnDisconnect()
|
||||
{
|
||||
if (m_state == ENetPeerState::Connected || m_state == ENetPeerState::DisconnectLater)
|
||||
{
|
||||
if (m_incomingBandwidth != 0)
|
||||
--m_host->m_bandwidthLimitedPeers;
|
||||
|
||||
--m_host->m_connectedPeers;
|
||||
}
|
||||
}
|
||||
|
||||
ENetProtocolCommand ENetPeer::RemoveSentReliableCommands(UInt16 reliableSequenceNumber, UInt8 channelId)
|
||||
{
|
||||
std::list<OutgoingCommand>* commandList = nullptr;
|
||||
|
||||
bool found = true;
|
||||
auto currentCommand = m_sentReliableCommands.begin();
|
||||
commandList = &m_sentReliableCommands;
|
||||
for (; currentCommand != m_sentReliableCommands.end(); ++currentCommand)
|
||||
{
|
||||
found = true;
|
||||
|
||||
if (currentCommand->reliableSequenceNumber == reliableSequenceNumber &&
|
||||
currentCommand->command.header.channelID == channelId)
|
||||
break;
|
||||
}
|
||||
|
||||
bool wasSent = true;
|
||||
if (currentCommand == m_sentReliableCommands.end())
|
||||
{
|
||||
currentCommand = m_outgoingReliableCommands.begin();
|
||||
commandList = &m_outgoingReliableCommands; //< We iterate the outgoing queue here, so erase from it as well
|
||||
for (; currentCommand != m_outgoingReliableCommands.end(); ++currentCommand)
|
||||
{
|
||||
found = true;
|
||||
|
||||
if (currentCommand->sendAttempts < 1)
|
||||
return ENetProtocolCommand_None;
|
||||
|
||||
if (currentCommand->reliableSequenceNumber == reliableSequenceNumber &&
|
||||
currentCommand->command.header.channelID == channelId)
|
||||
break;
|
||||
}
|
||||
|
||||
if (currentCommand == m_outgoingReliableCommands.end())
|
||||
return ENetProtocolCommand_None;
|
||||
|
||||
wasSent = false;
|
||||
}
|
||||
|
||||
if (!found) //< found is never cleared above, so this check is currently dead code
|
||||
return ENetProtocolCommand_None;
|
||||
|
||||
if (channelId < m_channels.size())
|
||||
{
|
||||
Channel& channel = m_channels[channelId];
|
||||
|
||||
UInt16 reliableWindow = reliableSequenceNumber / ENetConstants::ENetPeer_ReliableWindowSize;
|
||||
if (channel.reliableWindows[reliableWindow] > 0)
|
||||
{
|
||||
--channel.reliableWindows[reliableWindow];
|
||||
if (!channel.reliableWindows[reliableWindow])
|
||||
channel.usedReliableWindows &= ~(1 << reliableWindow);
|
||||
}
|
||||
}
|
||||
|
||||
ENetProtocolCommand commandNumber = static_cast<ENetProtocolCommand>(currentCommand->command.header.command & ENetProtocolCommand_Mask);
|
||||
|
||||
if (currentCommand->packet && wasSent)
|
||||
m_reliableDataInTransit -= currentCommand->fragmentLength;
|
||||
|
||||
commandList->erase(currentCommand);
|
||||
|
||||
if (m_sentReliableCommands.empty())
|
||||
return commandNumber;
|
||||
|
||||
currentCommand = m_sentReliableCommands.begin();
|
||||
m_nextTimeout = currentCommand->sentTime + currentCommand->roundTripTimeout;
|
||||
|
||||
return commandNumber;
|
||||
}
|
||||
|
||||
void ENetPeer::RemoveSentUnreliableCommands()
|
||||
{
|
||||
m_sentUnreliableCommands.clear();
|
||||
}
|
||||
|
||||
void ENetPeer::ResetQueues()
|
||||
{
|
||||
m_host->RemoveFromDispatchQueue(this);
|
||||
|
||||
m_acknowledgements.clear();
|
||||
m_dispatchedCommands.clear();
|
||||
m_outgoingReliableCommands.clear();
|
||||
m_outgoingUnreliableCommands.clear();
|
||||
m_sentReliableCommands.clear();
|
||||
m_sentUnreliableCommands.clear();
|
||||
|
||||
m_channels.clear();
|
||||
}
|
||||
|
||||
bool ENetPeer::QueueAcknowledgement(ENetProtocol& command, UInt16 sentTime)
|
||||
{
|
||||
if (command.header.channelID < m_channels.size())
|
||||
{
|
||||
Channel& channel = m_channels[command.header.channelID];
|
||||
|
||||
UInt16 reliableWindow = command.header.reliableSequenceNumber / ENetConstants::ENetPeer_ReliableWindowSize;
|
||||
UInt16 currentWindow = channel.incomingReliableSequenceNumber / ENetConstants::ENetPeer_ReliableWindowSize;
|
||||
|
||||
if (command.header.reliableSequenceNumber < channel.incomingReliableSequenceNumber)
|
||||
reliableWindow += ENetConstants::ENetPeer_ReliableWindows;
|
||||
|
||||
if (reliableWindow >= currentWindow + ENetConstants::ENetPeer_FreeReliableWindows - 1 && reliableWindow <= currentWindow + ENetConstants::ENetPeer_FreeReliableWindows)
|
||||
return false;
|
||||
}
|
||||
|
||||
Acknowledgement acknowledgment;
|
||||
acknowledgment.command = command;
|
||||
acknowledgment.sentTime = sentTime;
|
||||
|
||||
m_outgoingDataTotal += sizeof(Acknowledgement);
|
||||
|
||||
m_acknowledgements.emplace_back(acknowledgment);
|
||||
|
||||
return true;
|
||||
}
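// Window arithmetic sketch, assuming the usual ENet constants
// (ReliableWindowSize = 0x1000, ReliableWindows = 16, FreeReliableWindows = 8);
// these values are assumptions:
//
//     incomingReliableSequenceNumber = 0x7000 -> currentWindow  = 7
//     command sequence number        = 0x1000 -> reliableWindow = 1
//     0x1000 < 0x7000, so reliableWindow += 16 -> 17
//     drop test: 17 >= 14 && 17 <= 15 -> false, so the acknowledgement is queued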
|
||||
|
||||
ENetPeer::IncomingCommmand* ENetPeer::QueueIncomingCommand(ENetProtocol& command, const void* data, std::size_t dataLength, UInt32 flags, UInt32 fragmentCount)
|
||||
{
|
||||
static IncomingCommmand dummyCommand;
|
||||
|
||||
UInt32 reliableSequenceNumber = 0;
|
||||
UInt32 unreliableSequenceNumber = 0;
|
||||
UInt16 reliableWindow;
|
||||
UInt16 currentWindow;
|
||||
|
||||
auto discardCommand = [&]() -> IncomingCommmand*
|
||||
{
|
||||
if (fragmentCount > 0)
|
||||
return nullptr; //< Error
|
||||
|
||||
return &dummyCommand;
|
||||
};
|
||||
|
||||
if (m_state == ENetPeerState::DisconnectLater)
|
||||
return discardCommand();
|
||||
|
||||
Channel& channel = m_channels[command.header.channelID];
|
||||
|
||||
if ((command.header.command & ENetProtocolCommand_Mask) != ENetProtocolCommand_SendUnsequenced)
|
||||
{
|
||||
reliableSequenceNumber = command.header.reliableSequenceNumber;
|
||||
reliableWindow = reliableSequenceNumber / ENetConstants::ENetPeer_ReliableWindowSize;
|
||||
currentWindow = channel.incomingReliableSequenceNumber / ENetConstants::ENetPeer_ReliableWindowSize;
|
||||
|
||||
if (reliableSequenceNumber < channel.incomingReliableSequenceNumber)
|
||||
reliableWindow += ENetConstants::ENetPeer_ReliableWindows;
|
||||
|
||||
if (reliableWindow < currentWindow || reliableWindow >= currentWindow + ENetConstants::ENetPeer_FreeReliableWindows - 1)
|
||||
return discardCommand();
|
||||
}
|
||||
|
||||
std::list<IncomingCommmand>* commandList = nullptr;
|
||||
std::list<IncomingCommmand>::reverse_iterator currentCommand;
|
||||
|
||||
switch (command.header.command & ENetProtocolCommand_Mask)
|
||||
{
|
||||
case ENetProtocolCommand_SendFragment:
|
||||
case ENetProtocolCommand_SendReliable:
|
||||
{
|
||||
if (reliableSequenceNumber == channel.incomingReliableSequenceNumber)
|
||||
return discardCommand();
|
||||
|
||||
commandList = &channel.incomingReliableCommands;
|
||||
|
||||
for (currentCommand = channel.incomingReliableCommands.rbegin(); currentCommand != channel.incomingReliableCommands.rend(); ++currentCommand)
|
||||
{
|
||||
IncomingCommmand& incomingCommand = *currentCommand;
|
||||
|
||||
if (reliableSequenceNumber >= channel.incomingReliableSequenceNumber)
|
||||
{
|
||||
if (incomingCommand.reliableSequenceNumber < channel.incomingReliableSequenceNumber)
|
||||
continue;
|
||||
}
|
||||
else
|
||||
if (incomingCommand.reliableSequenceNumber >= channel.incomingReliableSequenceNumber)
|
||||
break;
|
||||
|
||||
if (incomingCommand.reliableSequenceNumber <= reliableSequenceNumber)
|
||||
{
|
||||
if (incomingCommand.reliableSequenceNumber < reliableSequenceNumber)
|
||||
break;
|
||||
|
||||
return discardCommand();
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case ENetProtocolCommand_SendUnreliable:
|
||||
case ENetProtocolCommand_SendUnreliableFragment:
|
||||
{
|
||||
unreliableSequenceNumber = NetToHost(command.sendUnreliable.unreliableSequenceNumber);
|
||||
|
||||
if (reliableSequenceNumber == channel.incomingReliableSequenceNumber && unreliableSequenceNumber <= channel.incomingUnreliableSequenceNumber)
|
||||
return discardCommand();
|
||||
|
||||
commandList = &channel.incomingUnreliableCommands;
|
||||
|
||||
for (currentCommand = channel.incomingUnreliableCommands.rbegin(); currentCommand != channel.incomingUnreliableCommands.rend(); ++currentCommand)
|
||||
{
|
||||
IncomingCommmand& incomingCommand = *currentCommand;
|
||||
|
||||
if ((command.header.command & ENetProtocolCommand_Mask) == ENetProtocolCommand_SendUnsequenced) //< Same test as the reference ENet implementation (checks the incoming command, not the stored one)
|
||||
continue;
|
||||
|
||||
if (reliableSequenceNumber >= channel.incomingReliableSequenceNumber)
|
||||
{
|
||||
if (incomingCommand.reliableSequenceNumber < channel.incomingReliableSequenceNumber)
|
||||
continue;
|
||||
}
|
||||
else
|
||||
if (incomingCommand.reliableSequenceNumber >= channel.incomingReliableSequenceNumber)
|
||||
break;
|
||||
|
||||
if (incomingCommand.reliableSequenceNumber < reliableSequenceNumber)
|
||||
break;
|
||||
|
||||
if (incomingCommand.reliableSequenceNumber > reliableSequenceNumber)
|
||||
continue;
|
||||
|
||||
if (incomingCommand.unreliableSequenceNumber <= unreliableSequenceNumber)
|
||||
{
|
||||
if (incomingCommand.unreliableSequenceNumber < unreliableSequenceNumber)
|
||||
break;
|
||||
|
||||
return discardCommand();
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case ENetProtocolCommand_SendUnsequenced:
|
||||
{
|
||||
commandList = &channel.incomingUnreliableCommands;
|
||||
|
||||
currentCommand = channel.incomingUnreliableCommands.rend();
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
return discardCommand();
|
||||
}
|
||||
|
||||
if (m_totalWaitingData >= m_host->m_maximumWaitingData)
|
||||
return nullptr;
|
||||
|
||||
ENetPacket* packet = m_packetPool.New<ENetPacket>();
|
||||
packet->flags = flags;
|
||||
packet->data.Reset(0, data, dataLength);
|
||||
packet->owner = &m_packetPool;
|
||||
|
||||
IncomingCommmand incomingCommand;
|
||||
incomingCommand.reliableSequenceNumber = command.header.reliableSequenceNumber;
|
||||
incomingCommand.unreliableSequenceNumber = unreliableSequenceNumber & 0xFFFF;
|
||||
incomingCommand.command = command;
|
||||
incomingCommand.packet = packet;
|
||||
incomingCommand.fragments.resize(fragmentCount, 0);
|
||||
incomingCommand.fragmentsRemaining = fragmentCount;
|
||||
|
||||
auto it = commandList->insert(currentCommand.base(), incomingCommand);
|
||||
|
||||
switch (command.header.command & ENetProtocolCommand_Mask)
|
||||
{
|
||||
case ENetProtocolCommand_SendFragment:
|
||||
case ENetProtocolCommand_SendReliable:
|
||||
DispatchIncomingReliableCommands(channel);
|
||||
break;
|
||||
|
||||
default:
|
||||
DispatchIncomingUnreliableCommands(channel);
|
||||
break;
|
||||
}
|
||||
|
||||
return &(*it);
|
||||
}
|
||||
|
||||
void ENetPeer::QueueOutgoingCommand(ENetProtocol& command, ENetPacketRef packet, UInt32 offset, UInt16 length)
|
||||
{
|
||||
OutgoingCommand outgoingCommand;
|
||||
outgoingCommand.command = command;
|
||||
outgoingCommand.fragmentLength = length;
|
||||
outgoingCommand.fragmentOffset = offset;
|
||||
outgoingCommand.packet = packet;
|
||||
|
||||
SetupOutgoingCommand(outgoingCommand);
|
||||
}
|
||||
|
||||
void ENetPeer::SetupOutgoingCommand(OutgoingCommand& outgoingCommand)
|
||||
{
|
||||
m_outgoingDataTotal += enet_protocol_command_size(outgoingCommand.command.header.command) + outgoingCommand.fragmentLength;
|
||||
|
||||
if (outgoingCommand.command.header.channelID == 0xFF)
|
||||
{
|
||||
++m_outgoingReliableSequenceNumber;
|
||||
|
||||
outgoingCommand.reliableSequenceNumber = m_outgoingReliableSequenceNumber;
|
||||
outgoingCommand.unreliableSequenceNumber = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
Channel* channel = &m_channels[outgoingCommand.command.header.channelID];
|
||||
if (outgoingCommand.command.header.command & ENetProtocolFlag_Acknowledge)
|
||||
{
|
||||
++channel->outgoingReliableSequenceNumber;
|
||||
channel->outgoingUnreliableSequenceNumber = 0;
|
||||
|
||||
outgoingCommand.reliableSequenceNumber = channel->outgoingReliableSequenceNumber;
|
||||
outgoingCommand.unreliableSequenceNumber = 0;
|
||||
}
|
||||
else if (outgoingCommand.command.header.command & ENetProtocolFlag_Unsequenced)
|
||||
{
|
||||
++m_outgoingUnsequencedGroup;
|
||||
|
||||
outgoingCommand.reliableSequenceNumber = 0;
|
||||
outgoingCommand.unreliableSequenceNumber = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (outgoingCommand.fragmentOffset == 0)
|
||||
++channel->outgoingUnreliableSequenceNumber;
|
||||
|
||||
outgoingCommand.reliableSequenceNumber = channel->outgoingReliableSequenceNumber;
|
||||
outgoingCommand.unreliableSequenceNumber = channel->outgoingUnreliableSequenceNumber;
|
||||
}
|
||||
}
|
||||
|
||||
outgoingCommand.sendAttempts = 0;
|
||||
outgoingCommand.sentTime = 0;
|
||||
outgoingCommand.roundTripTimeout = 0;
|
||||
outgoingCommand.roundTripTimeoutLimit = 0;
|
||||
outgoingCommand.command.header.reliableSequenceNumber = HostToNet(outgoingCommand.reliableSequenceNumber);
|
||||
|
||||
switch (outgoingCommand.command.header.command & ENetProtocolCommand_Mask)
|
||||
{
|
||||
case ENetProtocolCommand_SendUnreliable:
|
||||
outgoingCommand.command.sendUnreliable.unreliableSequenceNumber = HostToNet(outgoingCommand.unreliableSequenceNumber);
|
||||
break;
|
||||
|
||||
case ENetProtocolCommand_SendUnsequenced:
|
||||
outgoingCommand.command.sendUnsequenced.unsequencedGroup = HostToNet(m_outgoingUnsequencedGroup);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (outgoingCommand.command.header.command & ENetProtocolFlag_Acknowledge)
|
||||
m_outgoingReliableCommands.emplace_back(outgoingCommand);
|
||||
else
|
||||
m_outgoingUnreliableCommands.emplace_back(outgoingCommand);
|
||||
}
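// Routing summary for the function above: commands carrying the Acknowledge flag
// (reliable traffic) end up in m_outgoingReliableCommands, everything else in
// m_outgoingUnreliableCommands. For instance a Ping (Acknowledge flag set) is
// queued reliably, while a SendUnsequenced command goes to the unreliable queue.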
|
||||
|
||||
int ENetPeer::Throttle(UInt32 rtt)
|
||||
{
|
||||
if (m_lastRoundTripTime <= m_lastRoundTripTimeVariance)
|
||||
m_packetThrottle = m_packetThrottleLimit;
|
||||
else
|
||||
{
|
||||
if (rtt < m_lastRoundTripTime)
|
||||
{
|
||||
m_packetThrottle = std::min(m_packetThrottle + m_packetThrottleAcceleration, m_packetThrottleLimit); //< Cap the throttle at its limit
|
||||
return 1;
|
||||
}
|
||||
else if (rtt > m_lastRoundTripTime + 2 * m_lastRoundTripTimeVariance)
|
||||
{
|
||||
if (m_packetThrottle > m_packetThrottleDeceleration)
|
||||
m_packetThrottle -= m_packetThrottleDeceleration;
|
||||
else
|
||||
m_packetThrottle = 0;
|
||||
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
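// Worked example for the throttle update above:
//
//     m_lastRoundTripTime = 100 ms, m_lastRoundTripTimeVariance = 10 ms
//     rtt =  80 -> faster than the last measurement: throttle grows by
//                  m_packetThrottleAcceleration (capped at the limit), returns 1
//     rtt = 130 -> above 100 + 2*10: throttle shrinks by
//                  m_packetThrottleDeceleration (floored at 0), returns -1
//     rtt = 110 -> inside the tolerance band: throttle unchanged, returns 0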
|
||||
}
|
||||
|
|
@ -0,0 +1,642 @@
|
|||
// Copyright (C) 2017 Jérôme Leclercq
|
||||
// This file is part of the "Nazara Engine - Utility module"
|
||||
// For conditions of distribution and use, see copyright notice in Config.hpp
|
||||
|
||||
#include <Nazara/Network/ENetHost.hpp>
|
||||
#include <Nazara/Core/CallOnExit.hpp>
|
||||
#include <Nazara/Core/Log.hpp>
|
||||
#include <Nazara/Network/NetPacket.hpp>
|
||||
#include <Nazara/Network/Debug.hpp>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
/*!
|
||||
* \ingroup network
|
||||
* \class Nz::ENetConnection
|
||||
* \brief Network class that represents a reliable UDP connection, based on the ENet library
|
||||
*/
|
||||
|
||||
/*!
|
||||
* \brief Constructs an ENetSocket object by default
|
||||
*/
|
||||
|
||||
ENetSocket::ENetSocket() :
|
||||
m_bandwidthThrottleEpoch(0),
|
||||
m_mtu(ENetConstants::DefaultMTU),
|
||||
m_isSimulationEnabled(false),
|
||||
m_shouldAcceptConnections(true)
|
||||
{
|
||||
m_randomSeed = static_cast<UInt32>(reinterpret_cast<std::uintptr_t>(this)); //< Use the object address as extra entropy instead of dereferencing it
|
||||
m_randomSeed += s_randomGenerator();
|
||||
m_randomSeed = (m_randomSeed << 16) | (m_randomSeed >> 16);
|
||||
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Connects to the IpAddress
|
||||
* \return true
|
||||
*
|
||||
* \param remoteAddress Address to connect to
|
||||
*
|
||||
* \remark Produces a NazaraAssert if socket is not bound
|
||||
* \remark Produces a NazaraAssert if remote is invalid
|
||||
* \remark Produces a NazaraAssert if port is not specified
|
||||
*/
|
||||
|
||||
bool RUdpConnection::Connect(const IpAddress& remoteAddress)
|
||||
{
|
||||
NazaraAssert(m_socket.GetState() == SocketState_Bound, "Socket must be bound first");
|
||||
NazaraAssert(remoteAddress.IsValid(), "Invalid remote address");
|
||||
NazaraAssert(remoteAddress.GetPort() != 0, "Remote address has no port");
|
||||
|
||||
PeerData& client = RegisterPeer(remoteAddress, PeerState_Connecting);
|
||||
client.stateData1 = s_randomGenerator();
|
||||
|
||||
NetPacket connectionRequestPacket(NetCode_RequestConnection);
|
||||
connectionRequestPacket << client.stateData1;
|
||||
|
||||
EnqueuePacket(client, PacketPriority_Immediate, PacketReliability_Reliable, connectionRequestPacket);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Connects to the hostname
|
||||
* \return true If successful
|
||||
*
|
||||
* \param hostName Hostname of the remote
|
||||
* \param protocol Net protocol to use
|
||||
* \param service Name of the service (used to resolve the port)
|
||||
* \param error Optional argument to get the error
|
||||
*/
|
||||
|
||||
bool RUdpConnection::Connect(const String& hostName, NetProtocol protocol, const String& service, ResolveError* error)
|
||||
{
|
||||
std::vector<HostnameInfo> results = IpAddress::ResolveHostname(protocol, hostName, service, error);
|
||||
if (results.empty())
|
||||
{
|
||||
m_lastError = SocketError_ResolveError;
|
||||
return false;
|
||||
}
|
||||
|
||||
IpAddress hostnameAddress;
|
||||
for (const HostnameInfo& result : results)
|
||||
{
|
||||
if (!result.address)
|
||||
continue;
|
||||
|
||||
if (result.socketType != SocketType_UDP)
|
||||
continue;
|
||||
|
||||
hostnameAddress = result.address;
|
||||
break; //< Take first valid address
|
||||
}
|
||||
|
||||
return Connect(hostnameAddress);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Listens to a socket
|
||||
* \return true If successfully bound
|
||||
*
|
||||
* \param address Address to listen to
|
||||
*/
|
||||
|
||||
bool RUdpConnection::Listen(const IpAddress& address)
|
||||
{
|
||||
if (!InitSocket(address.GetProtocol()))
|
||||
return false;
|
||||
|
||||
return m_socket.Bind(address) == SocketState_Bound;
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Polls the message
|
||||
* \return true If there is a message
|
||||
*
|
||||
* \param message Message to poll
|
||||
*
|
||||
* \remark Produces a NazaraAssert if message is invalid
|
||||
*/
|
||||
|
||||
bool RUdpConnection::PollMessage(RUdpMessage* message)
|
||||
{
|
||||
NazaraAssert(message, "Invalid message");
|
||||
|
||||
if (m_receivedMessages.empty())
|
||||
return false;
|
||||
|
||||
*message = std::move(m_receivedMessages.front());
|
||||
m_receivedMessages.pop();
|
||||
return true;
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Sends the packet to a peer
|
||||
* \return true If peer exists (false may result from disconnected client)
|
||||
*
|
||||
* \param peerIp IpAddress of the peer
|
||||
* \param priority Priority of the packet
|
||||
* \param reliability Policy of reliability of the packet
|
||||
* \param packet Packet to send
|
||||
*/
|
||||
|
||||
bool RUdpConnection::Send(const IpAddress& peerIp, PacketPriority priority, PacketReliability reliability, const NetPacket& packet)
|
||||
{
|
||||
auto it = m_peerByIP.find(peerIp);
|
||||
if (it == m_peerByIP.end())
|
||||
return false; /// Silently fail (probably a disconnected client)
|
||||
|
||||
EnqueuePacket(m_peers[it->second], priority, reliability, packet);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Updates the reliable connection
|
||||
*/
|
||||
|
||||
void RUdpConnection::Update()
|
||||
{
|
||||
m_currentTime = m_clock.GetMicroseconds();
|
||||
|
||||
NetPacket receivedPacket;
|
||||
IpAddress senderIp;
|
||||
while (m_socket.ReceivePacket(&receivedPacket, &senderIp))
|
||||
OnPacketReceived(senderIp, std::move(receivedPacket));
|
||||
|
||||
//for (unsigned int i = m_activeClients.FindFirst(); i != m_activeClients.npos; i = m_activeClients.FindNext(i))
|
||||
//{
|
||||
// PeerData& clientData = m_peers[i];
|
||||
|
||||
CallOnExit resetIterator([this] () { m_peerIterator = m_peers.size(); });
|
||||
|
||||
for (m_peerIterator = 0; m_peerIterator < m_peers.size(); ++m_peerIterator)
|
||||
{
|
||||
PeerData& peer = m_peers[m_peerIterator];
|
||||
|
||||
UInt32 timeSinceLastPacket = static_cast<UInt32>(m_currentTime - peer.lastPacketTime);
|
||||
if (timeSinceLastPacket > m_timeBeforeTimeOut)
|
||||
{
|
||||
DisconnectPeer(peer.index);
|
||||
continue;
|
||||
}
|
||||
else if (timeSinceLastPacket > m_timeBeforePing)
|
||||
{
|
||||
if (m_currentTime - peer.lastPingTime > m_pingInterval)
|
||||
{
|
||||
NetPacket pingPacket(NetCode_Ping);
|
||||
EnqueuePacket(peer, PacketPriority_Low, PacketReliability_Unreliable, pingPacket);
peer.lastPingTime = m_currentTime; //< Remember when the last ping went out, to respect m_pingInterval
|
||||
}
|
||||
}
|
||||
|
||||
if (peer.state == PeerState_WillAck && m_currentTime - peer.stateData1 > m_forceAckSendTime)
|
||||
{
|
||||
NetPacket acknowledgePacket(NetCode_Acknowledge);
|
||||
EnqueuePacket(peer, PacketPriority_Low, PacketReliability_Reliable, acknowledgePacket);
|
||||
}
|
||||
|
||||
for (unsigned int priority = PacketPriority_Highest; priority <= PacketPriority_Lowest; ++priority)
|
||||
{
|
||||
std::vector<PendingPacket>& pendingPackets = peer.pendingPackets[priority];
|
||||
for (PendingPacket& packetData : pendingPackets)
|
||||
SendPacket(peer, std::move(packetData));
|
||||
|
||||
pendingPackets.clear();
|
||||
}
|
||||
|
||||
auto it = peer.pendingAckQueue.begin();
|
||||
while (it != peer.pendingAckQueue.end())
|
||||
{
|
||||
if (m_currentTime - it->timeSent > 2 * peer.roundTripTime)
|
||||
{
|
||||
OnPacketLost(peer, std::move(*it));
|
||||
it = peer.pendingAckQueue.erase(it);
|
||||
}
|
||||
else
|
||||
++it;
|
||||
}
|
||||
}
|
||||
//m_activeClients.Reset();
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Disconnects a peer
|
||||
*
|
||||
* \param peerIndex Index of the peer
|
||||
*
|
||||
* \remark Produces a NazaraNotice
|
||||
*/
|
||||
|
||||
void RUdpConnection::DisconnectPeer(std::size_t peerIndex)
|
||||
{
|
||||
PeerData& peer = m_peers[peerIndex];
|
||||
NazaraNotice(m_socket.GetBoundAddress().ToString() + ": " + peer.address.ToString() + " has been disconnected due to time-out");
|
||||
|
||||
OnPeerDisconnected(this, peer.address);
|
||||
|
||||
// Remove from IP lookup table
|
||||
m_peerByIP.erase(peer.address);
|
||||
|
||||
// Can we safely "remove" this slot?
|
||||
if (m_peerIterator >= m_peers.size() - 1 || peerIndex > m_peerIterator)
|
||||
{
|
||||
// Yes we can
|
||||
PeerData& newSlot = m_peers[peerIndex];
|
||||
newSlot = std::move(m_peers.back());
|
||||
newSlot.index = peerIndex; //< Update the moved slot index before resizing (in case it's the last one)
|
||||
}
|
||||
else
|
||||
{
|
||||
// Nope, let's be tricky
|
||||
PeerData& current = m_peers[m_peerIterator];
|
||||
PeerData& newSlot = m_peers[peerIndex];
|
||||
|
||||
newSlot = std::move(current);
|
||||
newSlot.index = peerIndex; //< Update the moved slot index
|
||||
|
||||
current = std::move(m_peers.back());
|
||||
current.index = m_peerIterator; //< Update the moved slot index
|
||||
|
||||
--m_peerIterator;
|
||||
}
|
||||
|
||||
// Pop the last entry (from where we moved our slot)
|
||||
m_peers.pop_back();
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Enqueues a packet in the sending list
|
||||
*
|
||||
* \param peer Data relative to the peer
|
||||
* \param priority Priority of the packet
|
||||
* \param reliability Policy of reliability of the packet
|
||||
* \param packet Packet to send
|
||||
*/
|
||||
|
||||
void RUdpConnection::EnqueuePacket(PeerData& peer, PacketPriority priority, PacketReliability reliability, const NetPacket& packet)
|
||||
{
|
||||
UInt16 protocolBegin = static_cast<UInt16>(m_protocol & 0xFFFF);
|
||||
UInt16 protocolEnd = static_cast<UInt16>((m_protocol & 0xFFFF0000) >> 16);
|
||||
|
||||
NetPacket data(packet.GetNetCode(), MessageHeader + packet.GetDataSize() + MessageFooter);
|
||||
data << protocolBegin;
|
||||
|
||||
data.GetStream()->SetCursorPos(NetPacket::HeaderSize + MessageHeader);
|
||||
data.Write(packet.GetConstData() + NetPacket::HeaderSize, packet.GetDataSize());
|
||||
|
||||
data << protocolEnd;
|
||||
EnqueuePacketInternal(peer, priority, reliability, std::move(data));
|
||||
}
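// Framing sketch: the 32-bit protocol identifier is split in two, the low word is
// written right after the NetPacket header and the high word at the very end of the
// message, which lets OnPacketReceived reject foreign datagrams cheaply. Example
// with an illustrative protocol value:
//
//     m_protocol     = 0xDEADBEEF
//     protocolBegin  = 0xBEEF (start of message)
//     protocolEnd    = 0xDEAD (footer)
//     receiver check = (0xDEAD << 16) | 0xBEEF == 0xDEADBEEF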
|
||||
|
||||
/*!
|
||||
* \brief Enqueues internally a packet in the sending list
|
||||
*
|
||||
* \param peer Data relative to the peer
|
||||
* \param priority Priority of the packet
|
||||
* \param reliability Policy of reliability of the packet
|
||||
* \param packet Packet to send
|
||||
*/
|
||||
|
||||
void RUdpConnection::EnqueuePacketInternal(PeerData& peer, PacketPriority priority, PacketReliability reliability, NetPacket&& data)
|
||||
{
|
||||
PendingPacket pendingPacket;
|
||||
pendingPacket.data = std::move(data);
|
||||
pendingPacket.priority = priority;
|
||||
pendingPacket.reliability = reliability;
|
||||
|
||||
peer.pendingPackets[priority].emplace_back(std::move(pendingPacket));
|
||||
m_activeClients.UnboundedSet(peer.index);
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Inits the internal socket
|
||||
* \return true If successful
|
||||
*
|
||||
* \param protocol Net protocol to use
|
||||
*/
|
||||
|
||||
bool ENetSocket::InitSocket(NetProtocol protocol)
|
||||
{
|
||||
CallOnExit updateLastError([this]
|
||||
{
|
||||
m_lastError = m_socket.GetLastError();
|
||||
});
|
||||
|
||||
if (!m_socket.Create(protocol))
|
||||
return false;
|
||||
|
||||
m_socket.EnableBlocking(false);
|
||||
m_socket.SetReceiveBufferSize(ENetConstants::ReceiveBufferSize);
|
||||
m_socket.SetSendBufferSize(ENetConstants::SendBufferSize);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Processes the acks
|
||||
*
|
||||
* \param peer Data relative to the peer
|
||||
* \param lastAck Last index of the ack
|
||||
* \param ackBits Bits for acking
|
||||
*/
|
||||
|
||||
void RUdpConnection::ProcessAcks(PeerData& peer, SequenceIndex lastAck, UInt32 ackBits)
|
||||
{
|
||||
auto it = peer.pendingAckQueue.begin();
|
||||
while (it != peer.pendingAckQueue.end())
|
||||
{
|
||||
bool acked = false;
|
||||
if (lastAck == it->sequenceId)
|
||||
acked = true;
|
||||
else if (!IsAckMoreRecent(it->sequenceId, lastAck))
|
||||
{
|
||||
unsigned int difference = ComputeSequenceDifference(lastAck, it->sequenceId);
|
||||
if (difference <= 32)
|
||||
acked = (ackBits >> (difference - 1)) & 1;
|
||||
}
|
||||
|
||||
if (acked)
|
||||
{
|
||||
it = peer.pendingAckQueue.erase(it);
|
||||
}
|
||||
else
|
||||
++it;
|
||||
}
|
||||
}
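// Ack decoding sketch: lastAck acknowledges one sequence number directly and
// ackBits covers the 32 previous ones, bit (difference - 1) per sequence. For
// example, lastAck = 100 with ackBits = 0b101 acknowledges 100, 99 (bit 0) and
// 97 (bit 2); any pending packet matching one of those is removed from the
// pending-ack queue, the rest stay and may later be reported as lost.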
|
||||
|
||||
/*!
|
||||
* \brief Registers a peer
|
||||
* \return Data relative to the peer
|
||||
*
|
||||
* \param address Address of the peer
|
||||
* \param state Status of the peer
|
||||
*/
|
||||
|
||||
RUdpConnection::PeerData& RUdpConnection::RegisterPeer(const IpAddress& address, PeerState state)
|
||||
{
|
||||
PeerData data;
|
||||
data.address = address;
|
||||
data.localSequence = 0;
|
||||
data.remoteSequence = 0;
|
||||
data.index = m_peers.size();
|
||||
data.lastPacketTime = m_currentTime;
|
||||
data.lastPingTime = m_currentTime;
|
||||
data.roundTripTime = 1'000'000; ///< Okay that's quite a lot
|
||||
data.state = state;
|
||||
|
||||
m_activeClients.UnboundedSet(data.index);
|
||||
m_peerByIP[address] = data.index;
|
||||
|
||||
m_peers.emplace_back(std::move(data));
|
||||
return m_peers.back();
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Operation to do when client requests a connection
|
||||
*
|
||||
* \param address Address of the peer
|
||||
* \param sequenceId Sequence index for the ack
|
||||
* \param token Token for connection
|
||||
*/
|
||||
|
||||
void RUdpConnection::OnClientRequestingConnection(const IpAddress& address, SequenceIndex sequenceId, UInt64 token)
|
||||
{
|
||||
// Call hook to check if client should be accepted or not
|
||||
OnPeerConnection(this, address);
|
||||
|
||||
PeerData& client = RegisterPeer(address, PeerState_Aknowledged);
|
||||
client.remoteSequence = sequenceId;
|
||||
|
||||
/// Acknowledge connection
|
||||
NetPacket connectionAcceptedPacket(NetCode_AcknowledgeConnection);
|
||||
//connectionAcceptedPacket << address;
|
||||
connectionAcceptedPacket << ~token;
|
||||
|
||||
EnqueuePacket(client, PacketPriority_Immediate, PacketReliability_Reliable, connectionAcceptedPacket);
|
||||
}
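// Handshake sketch: the client sends a random 64-bit token with its connection
// request and the server answers with the bitwise complement; the client then
// compares the reply against ~stateData1 in OnPacketReceived. Illustrative values:
//
//     client token = 0x0123456789ABCDEF
//     server reply = ~token = 0xFEDCBA9876543210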
|
||||
|
||||
/*!
|
||||
* \brief Operation to do when a packet is lost
|
||||
*
|
||||
* \param peer Data relative to the peer
|
||||
* \param packet Pending packet
|
||||
*/
|
||||
|
||||
void RUdpConnection::OnPacketLost(PeerData& peer, PendingAckPacket&& packet)
|
||||
{
|
||||
//NazaraNotice(m_socket.GetBoundAddress().ToString() + ": Lost packet " + String::Number(packet.sequenceId));
|
||||
|
||||
if (IsReliable(packet.reliability))
|
||||
EnqueuePacketInternal(peer, packet.priority, packet.reliability, std::move(packet.data));
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Operation to do when receiving a packet
|
||||
*
|
||||
* \param peerIndex Index of the peer
|
||||
*
|
||||
* \remark Produces a NazaraNotice
|
||||
*/
|
||||
|
||||
void RUdpConnection::OnPacketReceived(const IpAddress& peerIp, NetPacket&& packet)
|
||||
{
|
||||
UInt16 protocolBegin;
|
||||
UInt16 protocolEnd;
|
||||
SequenceIndex sequenceId;
|
||||
SequenceIndex lastAck;
|
||||
UInt32 ackBits;
|
||||
|
||||
packet.GetStream()->SetCursorPos(packet.GetSize() - MessageFooter);
|
||||
packet >> protocolEnd;
|
||||
|
||||
packet.GetStream()->SetCursorPos(NetPacket::HeaderSize);
|
||||
packet >> protocolBegin;
|
||||
|
||||
UInt32 protocolId = static_cast<UInt32>(protocolEnd) << 16 | protocolBegin;
|
||||
if (protocolId != m_protocol)
|
||||
return; ///< Ignore
|
||||
|
||||
packet >> sequenceId >> lastAck >> ackBits;
|
||||
|
||||
auto it = m_peerByIP.find(peerIp);
|
||||
if (it == m_peerByIP.end())
|
||||
{
|
||||
switch (packet.GetNetCode())
|
||||
{
|
||||
case NetCode_RequestConnection:
|
||||
{
|
||||
UInt64 token;
|
||||
packet >> token;
|
||||
|
||||
NazaraNotice(m_socket.GetBoundAddress().ToString() + ": Received NetCode_RequestConnection from " + peerIp.ToString() + ": " + String::Number(token));
|
||||
if (!m_shouldAcceptConnections)
|
||||
return; //< Ignore
|
||||
|
||||
OnClientRequestingConnection(peerIp, sequenceId, token);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
return; //< Ignore
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
PeerData& peer = m_peers[it->second];
|
||||
peer.lastPacketTime = m_currentTime;
|
||||
|
||||
if (peer.receivedQueue.find(sequenceId) != peer.receivedQueue.end())
|
||||
return; //< Ignore
|
||||
|
||||
if (m_isSimulationEnabled && m_packetLossProbability(s_randomGenerator))
|
||||
{
|
||||
NazaraNotice(m_socket.GetBoundAddress().ToString() + ": Lost packet " + String::Number(sequenceId) + " from " + peerIp.ToString() + " for simulation purpose");
|
||||
return;
|
||||
}
|
||||
|
||||
///< Receiving a packet from an acknowledged client means the connection works in both ways
|
||||
if (peer.state == PeerState_Aknowledged && packet.GetNetCode() != NetCode_RequestConnection)
|
||||
{
|
||||
peer.state = PeerState_Connected;
|
||||
OnPeerAcknowledged(this, peerIp);
|
||||
}
|
||||
|
||||
if (IsAckMoreRecent(sequenceId, peer.remoteSequence))
|
||||
peer.remoteSequence = sequenceId;
|
||||
|
||||
ProcessAcks(peer, lastAck, ackBits);
|
||||
|
||||
peer.receivedQueue.insert(sequenceId);
|
||||
|
||||
switch (packet.GetNetCode())
|
||||
{
|
||||
case NetCode_Acknowledge:
|
||||
return; //< Do not switch to will ack mode (to prevent infinite replies, just let's ping/pong do that)
|
||||
|
||||
case NetCode_AcknowledgeConnection:
|
||||
{
|
||||
if (peer.state == PeerState_Connected)
|
||||
break;
|
||||
|
||||
IpAddress externalAddress;
|
||||
UInt64 token;
|
||||
packet /*>> externalAddress*/ >> token;
|
||||
|
||||
NazaraNotice(m_socket.GetBoundAddress().ToString() + ": Received NetCode_AcknowledgeConnection from " + peerIp.ToString() + ": " + String::Number(token));
|
||||
if (token == ~peer.stateData1)
|
||||
{
|
||||
peer.state = PeerState_Connected;
|
||||
OnConnectedToPeer(this);
|
||||
}
|
||||
else
|
||||
{
|
||||
NazaraNotice("Received wrong token (" + String::Number(token) + " instead of " + String::Number(~peer.stateData1) + ") from client " + peer.address);
|
||||
return; //< Ignore
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case NetCode_RequestConnection:
|
||||
NazaraNotice(m_socket.GetBoundAddress().ToString() + ": Received NetCode_RequestConnection from " + peerIp.ToString());
|
||||
return; //< Ignore
|
||||
|
||||
case NetCode_Ping:
|
||||
{
|
||||
NazaraNotice(m_socket.GetBoundAddress().ToString() + ": Received NetCode_Ping from " + peerIp.ToString());
|
||||
|
||||
NetPacket pongPacket(NetCode_Pong);
|
||||
EnqueuePacket(peer, PacketPriority_Low, PacketReliability_Unreliable, pongPacket);
|
||||
break;
|
||||
}
|
||||
|
||||
case NetCode_Pong:
|
||||
NazaraNotice(m_socket.GetBoundAddress().ToString() + ": Received NetCode_Pong from " + peerIp.ToString());
|
||||
break;
|
||||
|
||||
default:
|
||||
{
|
||||
NazaraNotice(m_socket.GetBoundAddress().ToString() + ": Received 0x" + String::Number(packet.GetNetCode(), 16) + " from " + peerIp.ToString());
|
||||
RUdpMessage receivedMessage;
|
||||
receivedMessage.from = peerIp;
|
||||
receivedMessage.data = std::move(packet);
|
||||
|
||||
m_receivedMessages.emplace(std::move(receivedMessage));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!HasPendingPackets(peer))
|
||||
{
|
||||
peer.state = PeerState_WillAck;
|
||||
peer.stateData1 = m_currentTime;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Sends a packet to a peer
|
||||
*
|
||||
* \param peer Data relative to the peer
|
||||
* \param packet Pending packet
|
||||
*/
|
||||
|
||||
void RUdpConnection::SendPacket(PeerData& peer, PendingPacket&& packet)
|
||||
{
|
||||
if (peer.state == PeerState_WillAck)
|
||||
peer.state = PeerState_Connected;
|
||||
|
||||
SequenceIndex remoteSequence = peer.remoteSequence;
|
||||
|
||||
UInt32 previousAcks = 0;
|
||||
for (SequenceIndex ack : peer.receivedQueue)
|
||||
{
|
||||
if (ack == remoteSequence)
|
||||
continue;
|
||||
|
||||
unsigned int difference = ComputeSequenceDifference(remoteSequence, ack);
|
||||
if (difference <= 32U)
|
||||
previousAcks |= (1U << (difference - 1));
|
||||
}
|
||||
|
||||
SequenceIndex sequenceId = ++peer.localSequence;
|
||||
|
||||
packet.data.GetStream()->SetCursorPos(NetPacket::HeaderSize + sizeof(UInt16)); ///< Protocol begin has already been filled
|
||||
packet.data << sequenceId;
|
||||
packet.data << remoteSequence;
|
||||
packet.data << previousAcks;
|
||||
|
||||
m_socket.SendPacket(peer.address, packet.data);
|
||||
|
||||
PendingAckPacket pendingAckPacket;
|
||||
pendingAckPacket.data = std::move(packet.data);
|
||||
pendingAckPacket.priority = packet.priority;
|
||||
pendingAckPacket.reliability = packet.reliability;
|
||||
pendingAckPacket.sequenceId = sequenceId;
|
||||
pendingAckPacket.timeSent = m_currentTime;
|
||||
|
||||
peer.pendingAckQueue.emplace_back(std::move(pendingAckPacket));
|
||||
}
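// Ack encoding sketch (mirror of ProcessAcks): with remoteSequence = 100 and a
// received queue containing {100, 99, 97}, the loop above sets bit 0 (for 99) and
// bit 2 (for 97), so previousAcks = 0b101; sequence 100 itself travels as the
// explicit remoteSequence field.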
|
||||
|
||||
/*!
|
||||
* \brief Initializes the RUdpConnection class
|
||||
* \return true
|
||||
*/
|
||||
|
||||
bool RUdpConnection::Initialize()
|
||||
{
|
||||
std::random_device device;
|
||||
s_randomGenerator.seed(device());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Uninitializes the RUdpConnection class
|
||||
*/
|
||||
|
||||
void RUdpConnection::Uninitialize()
|
||||
{
|
||||
}
|
||||
|
||||
std::mt19937_64 RUdpConnection::s_randomGenerator;
|
||||
}
|
||||