package udp

import (
	"bytes"
	"eq2emu/internal/opcodes"
	"errors"
)

// PacketCombiner groups small packets together to reduce UDP overhead
type PacketCombiner struct {
	pendingPackets []*ProtocolPacket // Packets awaiting combination
	maxSize        int               // Maximum combined packet size
	timeout        int               // Combination timeout in milliseconds
}

// NewPacketCombiner creates a combiner with default settings
func NewPacketCombiner() *PacketCombiner {
	return &PacketCombiner{
		maxSize: 256, // Default size threshold for combining
		timeout: 10,  // Default timeout in ms
	}
}

// NewPacketCombinerWithConfig creates a combiner with custom settings
func NewPacketCombinerWithConfig(maxSize, timeout int) *PacketCombiner {
	return &PacketCombiner{
		maxSize: maxSize,
		timeout: timeout,
	}
}

// AddPacket queues a packet for potential combining
func (pc *PacketCombiner) AddPacket(packet *ProtocolPacket) {
	pc.pendingPackets = append(pc.pendingPackets, packet)
}

// FlushCombined returns combined packets and clears the queue
func (pc *PacketCombiner) FlushCombined() []*ProtocolPacket {
	if len(pc.pendingPackets) == 0 {
		return nil
	}

	if len(pc.pendingPackets) == 1 {
		// Single packet - no combining needed
		packet := pc.pendingPackets[0]
		pc.pendingPackets = nil
		return []*ProtocolPacket{packet}
	}

	// Combine multiple packets
	combined := pc.combineProtocolPackets(pc.pendingPackets)
	pc.pendingPackets = nil
	return []*ProtocolPacket{combined}
}

// combineProtocolPackets merges multiple packets into a single combined packet
func (pc *PacketCombiner) combineProtocolPackets(packets []*ProtocolPacket) *ProtocolPacket {
	var buf bytes.Buffer

	for _, packet := range packets {
		serialized := packet.Serialize()
		pc.writeSizeHeader(&buf, len(serialized))
		buf.Write(serialized)
	}

	return &ProtocolPacket{
		Opcode: opcodes.OpCombined,
		Data:   buf.Bytes(),
	}
}

// writeSizeHeader writes packet size using variable-length encoding
func (pc *PacketCombiner) writeSizeHeader(buf *bytes.Buffer, size int) {
	if size >= 255 {
		// Large packet - use 3-byte header [0xFF][low][high]
		buf.WriteByte(0xFF)
		buf.WriteByte(byte(size))
		buf.WriteByte(byte(size >> 8))
	} else {
		// Small packet - use 1-byte header
		buf.WriteByte(byte(size))
	}
}

// ParseCombinedPacket splits combined packet into individual packets
func ParseCombinedPacket(data []byte) ([]*ProtocolPacket, error) {
	var packets []*ProtocolPacket
	offset := 0

	for offset < len(data) {
		size, headerSize, err := readSizeHeader(data, offset)
		if err != nil {
			break
		}
		offset += headerSize

		if offset+size > len(data) {
			break // Incomplete packet
		}

		// Parse individual packet
		packetData := data[offset : offset+size]
		if packet, err := ParseProtocolPacket(packetData); err == nil {
			packets = append(packets, packet)
		}
		offset += size
	}

	return packets, nil
}

// readSizeHeader reads variable-length size header
func readSizeHeader(data []byte, offset int) (size, headerSize int, err error) {
	if offset >= len(data) {
		return 0, 0, errors.New("insufficient data for size header")
	}

	if data[offset] == 0xFF {
		// 3-byte size header
		if offset+2 >= len(data) {
			return 0, 0, errors.New("insufficient data for 3-byte size header")
		}
		size = int(data[offset+1]) | (int(data[offset+2]) << 8)
		headerSize = 3
	} else {
		// 1-byte size header
		size = int(data[offset])
		headerSize = 1
	}

	return size, headerSize, nil
}

// ShouldCombine determines if packets should be combined based on total size
func (pc *PacketCombiner) ShouldCombine() bool {
	if len(pc.pendingPackets) < 2 {
		return false
	}

	totalSize := 0
	for _, packet := range pc.pendingPackets {
		serialized := packet.Serialize()
		totalSize += len(serialized)

		// Add size header overhead
		if len(serialized) >= 255 {
			totalSize += 3
		} else {
			totalSize++
		}
	}

	return totalSize <= pc.maxSize
}

// HasPendingPackets returns true if packets are waiting to be combined
func (pc *PacketCombiner) HasPendingPackets() bool {
	return len(pc.pendingPackets) > 0
}

// GetPendingCount returns the number of packets waiting to be combined
func (pc *PacketCombiner) GetPendingCount() int {
	return len(pc.pendingPackets)
}

// Clear removes all pending packets without combining
func (pc *PacketCombiner) Clear() {
	pc.pendingPackets = nil
}

// SetMaxSize updates the maximum combined packet size
func (pc *PacketCombiner) SetMaxSize(maxSize int) {
	pc.maxSize = maxSize
}

// SetTimeout updates the combination timeout
func (pc *PacketCombiner) SetTimeout(timeout int) {
	pc.timeout = timeout
}

// GetStats returns packet combination statistics
func (pc *PacketCombiner) GetStats() CombinerStats {
	return CombinerStats{
		PendingCount: len(pc.pendingPackets),
		MaxSize:      pc.maxSize,
		Timeout:      pc.timeout,
	}
}

// CombinerStats contains packet combiner statistics
type CombinerStats struct {
	PendingCount int // Number of packets waiting to be combined
	MaxSize      int // Maximum combined packet size
	Timeout      int // Combination timeout in milliseconds
}

// EstimateCombinedSize calculates the size if current packets were combined
func (pc *PacketCombiner) EstimateCombinedSize() int {
	if len(pc.pendingPackets) == 0 {
		return 0
	}

	totalSize := 0
	for _, packet := range pc.pendingPackets {
		serialized := packet.Serialize()
		packetSize := len(serialized)
		totalSize += packetSize

		// Add size header overhead
		if packetSize >= 255 {
			totalSize += 3
		} else {
			totalSize++
		}
	}

	return totalSize
}

// ValidateCombinedPacket checks if combined packet data is well-formed
func ValidateCombinedPacket(data []byte) error {
	offset := 0
	count := 0

	for offset < len(data) {
		size, headerSize, err := readSizeHeader(data, offset)
		if err != nil {
			return err
		}
		offset += headerSize

		if offset+size > len(data) {
			return errors.New("packet extends beyond data boundary")
		}
		offset += size
		count++

		if count > 100 { // Sanity check
			return errors.New("too many packets in combined packet")
		}
	}

	return nil
}
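
// Usage sketch (illustrative only, not part of the wire protocol): the
// sequence below shows how a sender might drive the combiner. The packet
// values pktA and pktB and the sendUDP function are hypothetical and stand
// in for whatever transport layer owns the connection.
//
//	combiner := NewPacketCombiner()
//	combiner.AddPacket(pktA)
//	combiner.AddPacket(pktB)
//	if combiner.ShouldCombine() {
//		for _, p := range combiner.FlushCombined() {
//			// p is a single OpCombined packet whose Data holds pktA and
//			// pktB back to back, each prefixed with its size header.
//			sendUDP(p) // hypothetical transport send
//		}
//	}
//
// On the receiving side, ValidateCombinedPacket followed by
// ParseCombinedPacket recovers the original packets from p.Data.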