-- I, 20kdc, release this into the public domain.
-- No warranty is provided, implied or otherwise.

-- Copper Reliability Layer
-- Notably, this should be instantiated rather than the normal Copper instance.

local culib = require("culib")

-- onRReceive is now: (from, to, port, data, unreliablePacket)
-- where to can be anything for unreliable packets, but otherwise is the current hostname.
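-- Rough usage sketch (illustrative only; the transmit callback, the clock
-- function and the module path are assumptions about the host environment,
-- not part of this file):
--   local makeRelib = require("relib") -- however this file is packaged
--   local relib = makeRelib("myhost", transmit, function (from, to, port, data, unreliable)
--     -- handle a delivered packet here
--   end, os.time)
--   relib.output("otherhost", 80, "hello", false,
--     function () print("acknowledged") end,
--     function () print("gave up") end)
--   -- Feed incoming traffic to relib.input (same form culib expects)
--   -- and call relib.refresh() regularly so retries and timeouts happen.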
return function (hostname, transmit, onRReceive, time)
  -- node.hostname should be used for hostname generally.
  local node
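
  -- Tuning notes (time values are in whatever units time() returns):
  --   tuningClearAntiduplicate - how long a received globalId is remembered for duplicate suppression
  --   tuningAttempts           - transmission attempts before onFailure is called
  --   tuningAttemptTime        - delay between attempts
  -- If the timer cap is hit, addTimer returns nil and duplicate suppression or
  -- retries silently degrade rather than erroring.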
  -- The maximum amount of timers (used to cap memory usage)
  local tuningMaxTimers = 0x200
  local tuningClearAntiduplicate = 120
  local tuningAttempts = 8
  local tuningAttemptTime = 4

  -- Just an array, no special index.
  -- Contents : {
  --  trigger = trigger function,
  --  expiry = absolute expiry time
  -- }
  local timers = {}
  -- Indexes are hostname .. globalId, values are timers for deleting entries out of this table.
  local weAcked = {}
  -- Indexes are hostname .. globalId, values are { successFunc, deathTimer }
  local needsAck = {}

  local function addTimer(trig, expi)
    if #timers < tuningMaxTimers then
      -- Store an absolute expiry time so refresh() can compare it against time().
      local t = {trigger = trig, expiry = time() + expi}
      table.insert(timers, t)
      return t
    end
    return nil
  end
  local function killTimer(t)
    for i = 1, #timers do
      if timers[i] == t then
        table.remove(timers, i)
        return
      end
    end
  end
  local function gen3Random()
    return string.char(math.random(256) - 1) .. string.char(math.random(256) - 1) .. string.char(math.random(256) - 1)
  end
  local function genGlobalId(port)
    local low = math.abs(math.floor(port)) % 256
    local high = math.abs(math.floor(port / 256)) % 256
    local portD = string.char(high) .. string.char(low)
    return portD .. gen3Random()
  end
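
  -- Packet layout, as produced by relib.output and consumed below:
  --   bytes 1-2 : port (big-endian); together with bytes 3-5 (random) this is the 5-byte globalId
  --   byte 6    : attempt counter (0 for unreliable packets)
  --   byte 7    : type - 0x00 unreliable, 0x01 reliable data, 0x02 acknowledgement
  --   bytes 8+  : payload (absent in acknowledgements)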
  local onReceive = function (nfrom, nto, data)
    if data:len() < 6 then return end
    local port = data:byte(2) + (data:byte(1) * 256)
    local tp = data:byte(7)
    local globalId = data:sub(1, 5)
    if tp == 0x00 then
      onRReceive(nfrom, nto, port, data, true)
      return
    end
    if nto ~= node.hostname then return end
    if (tp == 0x01) and (not weAcked[nto .. globalId]) then
      -- Only deliver and acknowledge each packet once,
      -- so retransmissions don't generate extra timers.
      -- Not a perfect system but, eh.
      weAcked[nto .. globalId] = addTimer(function ()
        weAcked[nto .. globalId] = nil
      end, tuningClearAntiduplicate)
      onRReceive(nfrom, nto, port, data, false)
      node.output(nto, nfrom, data:sub(1, 6) .. "\x02")
    end
    if (tp == 0x02) and needsAck[nfrom .. globalId] then
      needsAck[nfrom .. globalId][1](nfrom)
      killTimer(needsAck[nfrom .. globalId][2])
      needsAck[nfrom .. globalId] = nil
    end
  end
  -- The reliability layer wraps a plain Copper node; onReceive above filters its traffic.
  node = culib(hostname, transmit, onReceive, time)

  local relib = {}
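  -- Calls node.refresh() and fires any expired timers (retransmissions and
  -- anti-duplicate cleanup); the host must call this regularly.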
  relib.refresh = function ()
    node.refresh()
    local i = 1
    local t = time()
    while i <= #timers do
      if timers[i].expiry < t then
        -- Remove the expired timer first, then fire it; the trigger may add
        -- new timers (the retry timer re-arms itself this way).
        local expired = table.remove(timers, i)
        expired.trigger()
      else
        i = i + 1
      end
    end
  end
  relib.input = node.input
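  -- Send data to nto on the given port. Unreliable sends go out once and are
  -- forgotten; reliable sends are transmitted up to tuningAttempts times at
  -- tuningAttemptTime intervals, calling onSucceed when an acknowledgement
  -- arrives or onFailure once the attempts run out.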
  relib.output = function (nto, port, data, unreliable, onSucceed, onFailure)
    local gid = genGlobalId(port)
    if unreliable then
      node.output(node.hostname, nto, gid .. "\x00\x00" .. data)
      return
    end
    local na = {onSucceed}
    local attempt = -1
    local doAttempt
    doAttempt = function ()
      attempt = attempt + 1
      if attempt == tuningAttempts then
        -- Out of attempts: drop the pending entry so a late ack can't fire onSucceed.
        needsAck[nto .. gid] = nil
        onFailure()
        return
      end
      node.output(node.hostname, nto, gid .. string.char(attempt) .. "\x01" .. data)
      na[2] = addTimer(doAttempt, tuningAttemptTime)
    end
    needsAck[nto .. gid] = na
    doAttempt()
  end
  return relib
end