mirror of https://github.com/20kdc/OC-KittenOS.git
synced 2024-11-27 04:48:05 +11:00
memopt & bugfixes: Clean up session system while keeping init flexible
Everest now lives and dies like the other side of the moon to sys-init, in order to get rid of the session nonsense that nobody was using anyway. While this means applications not using neoux will act slightly differently to applications that do, I don't think this will be a problem.
This commit is contained in:
parent dc0feff44d
commit 1802ec6464
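In practice (reading the hunks below), sys-init no longer calls startSession on x.neo.sys.session; it launches whatever shell the new sys-init.shell setting names and polls for the session provider to appear, while endSession now simply tells Everest to shut down and, if asked, to relaunch sys-init itself. A minimal sketch of a deliberate logout under the new model, assuming only the KittenOS NEO names that appear in this diff (not runnable outside the NEO sandbox):

local everest = neo.requestAccess("x.neo.sys.session")
if everest then
 -- Passing true asks Everest to relaunch sys-init (Bristol) on its way out,
 -- mirroring the new endSession(gotoBristol) body below; savingThrow only
 -- covers error-death, so a deliberate logout has to request this explicitly.
 everest.endSession(true)
end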
@@ -57,14 +57,14 @@ monitors[0] = {nil, nil, 160, 50}
-- line y
local surfaces = {}

-- Stops the main loop
local shuttingDown = false

local savingThrow = neo.requestAccess("x.neo.sys.manage")
if savingThrow then
 savingThrow.registerForShutdownEvent()
 savingThrow.registerSavingThrow(function ()
  if #monitors > 0 then
   neo.executeAsync("sys-init", monitors[1][2])
  end
  neo.executeAsync("sys-everest")
  -- In this case the surfaces are leaked and hold references here. They have to be removed manually.
  -- Do this via a "primary event" (k.deregistration) and "deathtrap events"
  -- If a process evades the deathtrap then it clearly has reason to stay alive regardless of Everest status.
@@ -77,9 +77,6 @@ if savingThrow then
 end)
end

-- Grab all available monitors when they become available
local inSession = false

local function renderingAllowed()
 -- This is a safety feature to prevent implosion due to missing monitors.
 return #monitors > 0
@@ -370,7 +367,7 @@ everestProvider(function (pkg, pid, sendSig)
 local m = 0
 if renderingAllowed() then m = 1 end
 if surfaces[1] then m = surfaces[1][1] end
 local surf = {m, 1, 1, w, h}
 local surf = {m, 1, 2, w, h}
 local focusState = false
 local llid = lid
 lid = lid + 1
@@ -423,8 +420,6 @@ everestProvider(function (pkg, pid, sendSig)
    handleSpan(surf, 1, 1, vtitle, bg, fg)
    return
   end
   -- WCHAX : Wide-char-cleanup has to be done left-to-right, so this handles the important part of that.
   handleSpan(surf, surf[4], a, " ", 0, 0)
   a = a - 1
  end
  sendSig(llid, ev, a, b, c, d, e)
@@ -476,26 +471,11 @@ end)
-- THE EVEREST USER API ENDS (now for the session API, which just does boring stuff)
everestSessionProvider(function (pkg, pid, sendSig)
 return {
  startSession = function ()
   inSession = true
  end,
  endSession = function (startBristol)
   if not inSession then return end
   local m = nil
   if monitors[1] then
    m = monitors[1][2]
   end
   inSession = false
   for k = 1, #monitors do
    screens.disclaim(monitors[k][2])
    monitors[k] = nil
   end
   if startBristol then
    neo.executeAsync("sys-init", m)
   end
   reconcileAll()
   if not startBristol then
    return m
  endSession = function (gotoBristol)
   shuttingDown = true
   if gotoBristol then
    -- Notably, savingThrow only triggers for error-death...
    neo.executeAsync("sys-init", (monitors[1] or {})[2])
   end
  end
 }
@@ -562,7 +542,24 @@ local function key(ka, kc, down)
 end
end

while true do
-- take all displays!
local function performClaim(s3)
 local gpu = screens.claim(s3)
 local gpucb = gpu and (gpu())
 if gpucb then
  local w, h = gpucb.getResolution()
  table.insert(monitors, {gpu, s3, w, h, -1, -1})
  -- This is required to ensure windows are moved off of the null monitor.
  -- Luckily, there's an obvious sign if they aren't - everest will promptly crash.
  reconcileAll()
 end
end

for _, v in ipairs(screens.getClaimable()) do
 performClaim(v)
end

while not shuttingDown do
 local s = {coroutine.yield()}
 if renderingAllowed() then
  if s[1] == "h.key_down" then
@@ -634,17 +631,7 @@ while true do
  end
  if s[1] == "x.neo.sys.screens" then
   if s[2] == "available" then
    if inSession then
     local gpu = screens.claim(s[3])
     local gpucb = gpu and (gpu())
     if gpucb then
      local w, h = gpucb.getResolution()
      table.insert(monitors, {gpu, s[3], w, h, -1, -1})
      -- This is required to ensure windows are moved off of the null monitor.
      -- Luckily, there's an obvious sign if they aren't - everest will promptly crash.
      reconcileAll()
     end
    end
    performClaim(s[3])
   end
   if s[2] == "lost" then
    for k, v in ipairs(monitors) do
@@ -39,7 +39,7 @@ local settings = {
 -- The list of settings is here:
 -- password
 password = "",
 ["run.sys-everest"] = "yes",
 ["sys-init.shell"] = "sys-everest",
 ["run.sys-icecap"] = "yes",
 -- scr.w/h/d.<uuid>
}
@@ -78,6 +78,8 @@ local monitorPool = {}
local monitorClaims = {}
-- [gpuAddr] = monitorAddr
local currentGPUBinding = {}
-- [gpuAddr] = userCount
local currentGPUUsers = {}

local function announceFreeMonitor(address, except)
 for k, v in pairs(targsRD) do
@@ -92,10 +94,18 @@ local function getGPU(monitor)
 local bestD = 0
 for v in gpus.list() do
  v.bind(monitor.address, false)
  currentGPUBinding[v.address] = monitor.address
  local d = v.maxDepth()
  if d > bestD then
   bestG = v
   bestD = d
   bestU = currentGPUUsers[v.address] or 0
  elseif d == bestD then
   if (currentGPUUsers[v.address] or 0) < bestU then
    bestG = v
    bestD = d
    bestU = currentGPUUsers[v.address] or 0
   end
  end
 end
 return bestG
@@ -124,6 +134,7 @@ local function getMonitorSettings(a)
end
local function setupMonitor(gpu, monitor)
 gpu.bind(monitor.address, false)
 currentGPUBinding[gpu.address] = monitor.address
 local maxW, maxH = gpu.maxResolution()
 local maxD = gpu.maxDepth()
 local w, h, d = getMonitorSettings(monitor.address)
@@ -208,8 +219,7 @@ donkonitRDProvider(function (pkg, pid, sendSig)
 targsRD[pid] = {sendSig, function ()
  for k, v in pairs(claimed) do
   -- Nothing to really do here
   monitorClaims[k] = nil
   announceFreeMonitor(k, pid)
   v(false)
  end
 end}
 return {
@@ -231,11 +241,13 @@ donkonitRDProvider(function (pkg, pid, sendSig)
   setupMonitor(gpu, v)
   gpu = gpu.address
   currentGPUBinding[gpu] = address
   currentGPUUsers[gpu] = (currentGPUUsers[gpu] or 0) + 1
   local disclaimer = function (wasDevLoss)
    -- we lost it
    monitorClaims[address] = nil
    claimed[address] = nil
    if not wasDevLoss then
     currentGPUUsers[gpu] = currentGPUUsers[gpu] - 1
     table.insert(monitorPool, v)
     announceFreeMonitor(address, pid)
    else
@@ -249,10 +261,7 @@ donkonitRDProvider(function (pkg, pid, sendSig)
   for v in gpus.list() do
    if v.address == gpu then
     if currentGPUBinding[gpu] ~= address then
      local _, v2 = v.bind(address, false)
      if v2 then
       return
      end
      v.bind(address, false)
     end
     currentGPUBinding[gpu] = address
     return v
@@ -277,6 +286,7 @@ loadSettings()
local function rescanDevs()
 monitorPool = {}
 currentGPUBinding = {}
 currentGPUUsers = {}
 local hasGPU = gpus.list()()
 for k, v in pairs(monitorClaims) do
  v[2](true)
@@ -58,7 +58,8 @@ local function retrieveNssMonitor(nss)
 local subpool = {}
 while not gpuG do
  if performDisclaim then
   performDisclaim()
   performDisclaim(true)
   performDisclaim = nil
  end
  -- nss available - this means the monitor pool is now ready.
  -- If no monitors are available, shut down now.
@@ -98,7 +99,9 @@ local function retrieveNssMonitor(nss)
   end
  end

  if not subpool[1] then error("Unable to claim any monitor.") end
  if not subpool[1] then
   error("None of the GPUs we got were actually usable")
  end
  gpuG = subpool[1][1]
  screen = subpool[1][2]
 end
@@ -107,11 +110,14 @@ local function retrieveNssMonitor(nss)
 scrW, scrH = gpu.getResolution()
 rstfbDraw(gpu)
 gpu.fill(1, 1, scrW, scrH, " ")
 performDisclaim = function ()
 performDisclaim = function (full)
  nss.disclaim(subpool[1][2])
  if full then
   for _, v in ipairs(subpool) do
    nss.disclaim(v[2])
   end
  end
 end
 return gpu
end
@@ -137,6 +143,8 @@ local function finalPrompt()
 local nss = neo.requestAccess("x.neo.sys.screens")
 if nss then
  retrieveNssMonitor(nss)
 else
  error("no glacier to provide GPU for the prompt")
 end
 -- This is nsm's final chance to make itself available and thus allow the password to be set
 local nsm = neo.requestAccess("x.neo.sys.manage")
@@ -266,27 +274,28 @@ local function finalPrompt()
end
local function postPrompt()
 local gpu = gpuG()
 rstfbDraw(gpu)
 -- Begin to finish login, or fail
 local everests = neo.requestAccess("x.neo.sys.session")
 if everests then
  local s, e = pcall(everests.startSession)
  if not s then
   table.insert(warnings, "Everest failed to create a session")
   table.insert(warnings, tostring(e))
  else
   warnings = {"Transferring to Everest..."}
   advDraw(gpu)
   if performDisclaim then
    performDisclaim()
   -- Give Everest time (this isn't perceptible, and is really just a safety measure)
   sleep(1)
 local nsm = neo.requestAccess("x.neo.sys.manage")
 local sh = "sys-everest"
 warnings = {"Unable to get sys-init.shell due to no NSM, using sys-everest"}
 if nsm then
  sh = nsm.getSetting("sys-init.shell") or sh
  warnings = {"Starting " .. sh}
 end
 rstfbDraw(gpu)
 advDraw(gpu)
 performDisclaim()
 neo.executeAsync(sh)
 sleep(0.5)
 for i = 1, 9 do
  local v = neo.requestAccess("x.neo.sys.session")
  sleep(0.5) -- Important timing - allows it to take the monitor
  if v then
   return
  end
 else
  table.insert(warnings, "Couldn't communicate with Everest...")
 end
 -- ...oh. hope this works then?
 warnings = {"That wasn't a shell. Try Safe Mode."}
 rstfbDraw(gpu)
 advDraw(gpu)
 sleep(1)
 shutdown(true)
@@ -305,17 +314,13 @@ local function initializeSystem()
  for s in screenAc.list() do
   for g in gpuAc.list() do
    g.bind(s.address, false)
    local w, h = g.maxResolution()
    local whd = w * h * g.maxDepth()
    local whd = g.maxDepth()
    if whd > scrBestWHD then
     screen = s
     gpu = g
     scrBestWHD = whd
    end
   end
   if screen then
    break
   end
  end
 end
 if gpu then
@@ -413,7 +418,7 @@ end

if callerPkg ~= nil then
 -- Everest can call into this to force a login screen
 -- In this case it locks Everest, then starts Bristol.
 -- In this case Everest dies, then starts Bristol.
 --
 if callerPkg ~= "sys-everest" then
  return