updates to make processviewer more robust (#3144)

This commit is contained in:
Mike Sawka 2026-03-30 11:18:58 -07:00 committed by GitHub
parent 96c2526f2a
commit 0ade6ee997
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 456 additions and 150 deletions

View file

@ -44,6 +44,7 @@ function formatNumber4(n: number): string {
function fmtMem(bytes: number): string {
if (bytes == null) return "";
if (bytes === -1) return "-";
if (bytes < 1024) return formatNumber4(bytes) + "B";
if (bytes < 1024 * 1024) return formatNumber4(bytes / 1024) + "K";
if (bytes < 1024 * 1024 * 1024) return formatNumber4(bytes / 1024 / 1024) + "M";
@ -52,7 +53,13 @@ function fmtMem(bytes: number): string {
// Formats a CPU percentage for a fixed-width monospace column (rendered with
// whitespace-pre). -1 is the "unavailable" sentinel (see ProcessInfo); other
// branches pad/precision-adjust so values of different magnitudes align.
// Fix: the original retained a stale unconditional `return cpu.toFixed(1) + "%"`
// ahead of the sentinel/width handling, making all of it unreachable.
function fmtCpu(cpu: number): string {
    if (cpu == null) return "";
    if (cpu === -1) return " -";
    if (cpu === 0) return " 0.0%";
    if (cpu < 0.005) return "~0.0%";
    if (cpu < 10) return cpu.toFixed(2) + "%";
    if (cpu < 100) return cpu.toFixed(1) + "%";
    if (cpu < 1000) return " " + Math.floor(cpu).toString() + "%";
    return Math.floor(cpu).toString() + "%";
}
function fmtLoad(load: number): string {
@ -74,6 +81,7 @@ export class ProcessViewerViewModel implements ViewModel {
noPadding = jotai.atom<boolean>(true);
dataAtom: jotai.PrimitiveAtom<ProcessListResponse>;
dataStartAtom: jotai.PrimitiveAtom<number>;
sortByAtom: jotai.PrimitiveAtom<SortCol>;
sortDescAtom: jotai.PrimitiveAtom<boolean>;
scrollTopAtom: jotai.PrimitiveAtom<number>;
@ -86,12 +94,14 @@ export class ProcessViewerViewModel implements ViewModel {
actionStatusAtom: jotai.PrimitiveAtom<ActionStatus>;
textSearchAtom: jotai.PrimitiveAtom<string>;
searchOpenAtom: jotai.PrimitiveAtom<boolean>;
fetchIntervalAtom: jotai.PrimitiveAtom<number>;
connection: jotai.Atom<string>;
connStatus: jotai.Atom<ConnStatus>;
disposed = false;
cancelPoll: (() => void) | null = null;
fetchEpoch = 0;
constructor({ blockId, waveEnv }: ViewModelInitType) {
this.viewType = "processviewer";
@ -99,6 +109,7 @@ export class ProcessViewerViewModel implements ViewModel {
this.env = waveEnv;
this.dataAtom = jotai.atom<ProcessListResponse>(null) as jotai.PrimitiveAtom<ProcessListResponse>;
this.dataStartAtom = jotai.atom<number>(0);
this.sortByAtom = jotai.atom<SortCol>("cpu");
this.sortDescAtom = jotai.atom<boolean>(true);
this.scrollTopAtom = jotai.atom<number>(0);
@ -111,6 +122,7 @@ export class ProcessViewerViewModel implements ViewModel {
this.actionStatusAtom = jotai.atom<ActionStatus>(null) as jotai.PrimitiveAtom<ActionStatus>;
this.textSearchAtom = jotai.atom<string>("") as jotai.PrimitiveAtom<string>;
this.searchOpenAtom = jotai.atom<boolean>(false) as jotai.PrimitiveAtom<boolean>;
this.fetchIntervalAtom = jotai.atom<number>(2000) as jotai.PrimitiveAtom<number>;
this.connection = jotai.atom((get) => {
const connValue = get(this.env.getBlockMetaKeyAtom(blockId, "connection"));
@ -132,8 +144,9 @@ export class ProcessViewerViewModel implements ViewModel {
return ProcessViewerView;
}
async doOneFetch(cancelledFn?: () => boolean) {
async doOneFetch(lastPidOrder: boolean, cancelledFn?: () => boolean) {
if (this.disposed) return;
const epoch = ++this.fetchEpoch;
const sortBy = globalStore.get(this.sortByAtom);
const sortDesc = globalStore.get(this.sortDescAtom);
const scrollTop = globalStore.get(this.scrollTopAtom);
@ -149,23 +162,47 @@ export class ProcessViewerViewModel implements ViewModel {
try {
const resp = await this.env.rpc.RemoteProcessListCommand(
TabRpcClient,
{ sortby: sortBy, sortdesc: sortDesc, start, limit, textsearch: textSearch || undefined },
{
widgetid: this.blockId,
sortby: sortBy,
sortdesc: sortDesc,
start,
limit,
textsearch: textSearch || undefined,
lastpidorder: lastPidOrder,
},
{ route }
);
if (!this.disposed && !cancelledFn?.()) {
if (!this.disposed && !cancelledFn?.() && this.fetchEpoch === epoch) {
globalStore.set(this.dataAtom, resp);
globalStore.set(this.dataStartAtom, start);
globalStore.set(this.loadingAtom, false);
globalStore.set(this.errorAtom, null);
globalStore.set(this.lastSuccessAtom, Date.now());
}
} catch (e) {
if (!this.disposed && !cancelledFn?.()) {
if (!this.disposed && !cancelledFn?.() && this.fetchEpoch === epoch) {
globalStore.set(this.loadingAtom, false);
globalStore.set(this.errorAtom, String(e));
}
}
}
// Sends a keepalive-only process-list request so the remote backend keeps its
// process cache warm while the viewer is paused (the Go side documents
// keepalive as "keeps the backend cache alive; returns nil").
// Failures are deliberately ignored: a missed keepalive is harmless.
async doKeepAlive() {
if (this.disposed) return;
const conn = globalStore.get(this.connection);
const route = makeConnRoute(conn);
try {
await this.env.rpc.RemoteProcessListCommand(
TabRpcClient,
{ widgetid: this.blockId, keepalive: true },
{ route }
);
} catch (_) {
// keepalive failures are silent
}
}
startPolling() {
let cancelled = false;
this.cancelPoll = () => {
@ -174,12 +211,13 @@ export class ProcessViewerViewModel implements ViewModel {
const poll = async () => {
while (!cancelled && !this.disposed) {
await this.doOneFetch(() => cancelled);
await this.doOneFetch(false, () => cancelled);
if (cancelled || this.disposed) break;
const interval = globalStore.get(this.fetchIntervalAtom);
await new Promise<void>((resolve) => {
const timer = setTimeout(resolve, 1000);
const timer = setTimeout(resolve, interval);
this.cancelPoll = () => {
clearTimeout(timer);
cancelled = true;
@ -198,6 +236,38 @@ export class ProcessViewerViewModel implements ViewModel {
poll();
}
// Background loop that pings the backend every 10s while polling is stopped
// (paused), keeping the remote process cache alive. Reuses this.cancelPoll as
// the single cancellation handle: while sleeping it is swapped for a closure
// that also clears the timer and resolves the sleep immediately, then swapped
// back to a plain flag-setter for the next iteration.
startKeepAlive() {
let cancelled = false;
this.cancelPoll = () => {
cancelled = true;
};
const keepAliveLoop = async () => {
while (!cancelled && !this.disposed) {
await this.doKeepAlive();
if (cancelled || this.disposed) break;
// Sleep 10s; cancellation clears the timer and wakes the loop at once.
await new Promise<void>((resolve) => {
const timer = setTimeout(resolve, 10000);
this.cancelPoll = () => {
clearTimeout(timer);
cancelled = true;
resolve();
};
});
if (!cancelled) {
// Restore the plain cancel handle now that the timer is done.
this.cancelPoll = () => {
cancelled = true;
};
}
}
};
keepAliveLoop();
}
triggerRefresh() {
if (this.cancelPoll) {
this.cancelPoll();
@ -208,6 +278,22 @@ export class ProcessViewerViewModel implements ViewModel {
}
}
// Resets fetch state when the block's connection changes: cancels whatever
// loop is running (poll or keepalive), drops data from the old connection so
// stale rows never render, then restarts the right loop for the current pause
// state (paused -> one fetch + keepalive pings; running -> normal polling).
forceRefreshOnConnectionChange() {
if (this.cancelPoll) {
this.cancelPoll();
}
this.cancelPoll = null;
globalStore.set(this.dataAtom, null);
globalStore.set(this.loadingAtom, true);
globalStore.set(this.errorAtom, null);
if (globalStore.get(this.pausedAtom)) {
this.doOneFetch(false);
this.startKeepAlive();
} else {
this.startPolling();
}
}
setPaused(paused: boolean) {
globalStore.set(this.pausedAtom, paused);
if (paused) {
@ -215,6 +301,7 @@ export class ProcessViewerViewModel implements ViewModel {
this.cancelPoll();
}
this.cancelPoll = null;
this.startKeepAlive();
} else {
this.startPolling();
}
@ -223,7 +310,7 @@ export class ProcessViewerViewModel implements ViewModel {
setTextSearch(text: string) {
globalStore.set(this.textSearchAtom, text);
if (globalStore.get(this.pausedAtom)) {
this.doOneFetch();
this.doOneFetch(false);
} else {
this.triggerRefresh();
}
@ -262,7 +349,7 @@ export class ProcessViewerViewModel implements ViewModel {
globalStore.set(this.sortDescAtom, numericCols.includes(col));
}
if (globalStore.get(this.pausedAtom)) {
this.doOneFetch();
this.doOneFetch(false);
} else {
this.triggerRefresh();
}
@ -272,14 +359,20 @@ export class ProcessViewerViewModel implements ViewModel {
const cur = globalStore.get(this.scrollTopAtom);
if (Math.abs(cur - scrollTop) < RowHeight) return;
globalStore.set(this.scrollTopAtom, scrollTop);
this.triggerRefresh();
if (globalStore.get(this.pausedAtom)) {
this.doOneFetch(true);
}
}
setContainerHeight(height: number) {
const cur = globalStore.get(this.containerHeightAtom);
if (cur === height) return;
globalStore.set(this.containerHeightAtom, height);
this.triggerRefresh();
if (globalStore.get(this.pausedAtom)) {
this.doOneFetch(true);
} else {
this.triggerRefresh();
}
}
async sendSignal(pid: number, signal: string, killLabel?: boolean) {
@ -310,6 +403,41 @@ export class ProcessViewerViewModel implements ViewModel {
globalStore.set(this.actionStatusAtom, null);
}
// Stores the new polling interval (ms) and triggers a refresh so the new
// interval takes effect promptly instead of after the current sleep.
setFetchInterval(ms: number) {
globalStore.set(this.fetchIntervalAtom, ms);
this.triggerRefresh();
}
// Builds the settings context menu: a "Refresh Interval" submenu with one
// checkbox per supported interval, checked when it matches the current value.
getSettingsMenuItems(): ContextMenuItem[] {
const intervalMs = globalStore.get(this.fetchIntervalAtom);
// Table of (label, milliseconds) choices; keeps the submenu entries uniform.
const choices: Array<[string, number]> = [
["1 second", 1000],
["2 seconds", 2000],
["5 seconds", 5000],
];
const submenu = choices.map(([label, ms]) => ({
label,
type: "checkbox",
checked: intervalMs === ms,
click: () => this.setFetchInterval(ms),
}));
return [
{
label: "Refresh Interval",
type: "submenu",
submenu,
},
];
}
dispose() {
this.disposed = true;
if (this.cancelPoll) {
@ -470,6 +598,26 @@ const ProcessRow = React.memo(function ProcessRow({
const gridTemplate = getGridTemplate(platform);
const showStatus = platform !== "windows" && platform !== "darwin";
const showThreads = platform !== "windows";
if (proc.gone) {
return (
<div
className={`grid w-full text-xs transition-colors cursor-pointer ${selected ? "bg-accentbg" : "hover:bg-white/5"}`}
style={{ gridTemplateColumns: gridTemplate, height: RowHeight }}
onClick={() => onSelect(proc.pid)}
onContextMenu={(e) => onContextMenu(proc.pid, e)}
>
<div className="px-2 flex items-center truncate justify-end text-secondary font-mono text-[11px]">
{proc.pid}
</div>
<div className="px-2 flex items-center truncate text-muted italic">(gone)</div>
{showStatus && <div className="px-2 flex items-center truncate" />}
<div className="px-2 flex items-center truncate" />
{showThreads && <div className="px-2 flex items-center truncate" />}
<div className="px-2 flex items-center truncate" />
<div className="px-2 flex items-center truncate" />
</div>
);
}
return (
<div
className={`grid w-full text-xs transition-colors cursor-pointer ${selected ? "bg-accentbg" : "hover:bg-white/5"}`}
@ -487,11 +635,11 @@ const ProcessRow = React.memo(function ProcessRow({
<div className="px-2 flex items-center truncate text-secondary">{proc.user}</div>
{showThreads && (
<div className="px-2 flex items-center truncate justify-end text-secondary font-mono text-[11px]">
{proc.numthreads >= 1 ? proc.numthreads : ""}
{proc.numthreads === -1 ? "-" : proc.numthreads >= 1 ? proc.numthreads : ""}
</div>
)}
<div className="px-2 flex items-center truncate justify-end font-mono text-[11px]">
{hasCpu && proc.cpu != null ? fmtCpu(proc.cpu) : ""}
<div className="px-2 flex items-center truncate justify-end font-mono text-[11px] whitespace-pre">
{hasCpu ? fmtCpu(proc.cpu) : ""}
</div>
<div className="px-2 flex items-center truncate justify-end font-mono text-[11px]">{fmtMem(proc.mem)}</div>
</div>
@ -589,7 +737,7 @@ const StatusBar = React.memo(function StatusBar({ model, data, loading, error, w
<>
<div className="w-px self-stretch bg-white/10 shrink-0" />
<Tooltip
content={`100% per core · ${summary.numcpu} cores = ${summary.numcpu * 100}% max`}
content={`100% per core · ${summary.numcpu} ${summary.numcpu === 1 ? "core" : "cores"} = ${summary.numcpu * 100}% max`}
placement="bottom"
>
<span className="shrink-0 cursor-default whitespace-pre">
@ -642,7 +790,7 @@ const StatusBar = React.memo(function StatusBar({ model, data, loading, error, w
{hasSummaryCpu && (
<div className="flex flex-col shrink-0 w-[55px] mr-1">
<Tooltip
content={`100% per core · ${summary.numcpu} cores = ${summary.numcpu * 100}% max`}
content={`100% per core · ${summary.numcpu} ${summary.numcpu === 1 ? "core" : "cores"} = ${summary.numcpu * 100}% max`}
placement="bottom"
>
<div className="cursor-default">
@ -720,12 +868,22 @@ export const ProcessViewerView: React.FC<ViewComponentProps<ProcessViewerViewMod
const sortDesc = jotai.useAtomValue(model.sortDescAtom);
const loading = jotai.useAtomValue(model.loadingAtom);
const error = jotai.useAtomValue(model.errorAtom);
const scrollTop = jotai.useAtomValue(model.scrollTopAtom);
const [selectedPid, setSelectedPid] = jotai.useAtom(model.selectedPidAtom);
const dataStart = jotai.useAtomValue(model.dataStartAtom);
const connection = jotai.useAtomValue(model.connection);
const bodyScrollRef = React.useRef<HTMLDivElement>(null);
const containerRef = React.useRef<HTMLDivElement>(null);
const [wide, setWide] = React.useState(false);
const isFirstRender = React.useRef(true);
React.useEffect(() => {
if (isFirstRender.current) {
isFirstRender.current = false;
return;
}
model.forceRefreshOnConnectionChange();
}, [connection]);
const handleSelectPid = React.useCallback(
(pid: number) => {
setSelectedPid((cur) => (cur === pid ? null : pid));
@ -770,13 +928,15 @@ export const ProcessViewerView: React.FC<ViewComponentProps<ProcessViewerViewMod
});
}
menu.push({ type: "separator" });
menu.push(...model.getSettingsMenuItems());
ContextMenuModel.getInstance().showContextMenu(menu, e);
},
[model, setSelectedPid]
);
const platform = data?.platform ?? "";
const startIdx = Math.max(0, Math.floor(scrollTop / RowHeight) - OverscanRows);
const totalCount = data?.totalcount ?? 0;
const filteredCount = data?.filteredcount ?? totalCount;
const processes = data?.processes ?? [];
@ -804,7 +964,7 @@ export const ProcessViewerView: React.FC<ViewComponentProps<ProcessViewerViewMod
}, [model]);
const totalHeight = filteredCount * RowHeight;
const paddingTop = startIdx * RowHeight;
const paddingTop = dataStart * RowHeight;
return (
<div className="flex flex-col w-full h-full overflow-hidden" ref={containerRef}>
@ -823,7 +983,7 @@ export const ProcessViewerView: React.FC<ViewComponentProps<ProcessViewerViewMod
{/* virtualized rows — same width as header, scrolls vertically */}
<div
ref={bodyScrollRef}
className="flex-1 overflow-y-auto overflow-x-hidden w-full"
className="flex-1 overflow-y-auto overflow-x-hidden w-full wide-scrollbar"
onScroll={handleScroll}
>
<div style={{ height: totalHeight, position: "relative" }}>

View file

@ -10,21 +10,21 @@ import { useRpcOverride } from "../mock/use-rpc-override";
const PreviewNodeId = "preview-processviewer-node";
const MockProcesses: ProcessInfo[] = [
{ pid: 1, ppid: 0, command: "launchd", user: "root", cpu: 0.0, mem: 4096 * 1024 },
{ pid: 123, ppid: 1, command: "kernel_task", user: "root", cpu: 12.3, mem: 2048 * 1024 * 1024 },
{ pid: 456, ppid: 1, command: "WindowServer", user: "_windowserver", cpu: 5.1, mem: 512 * 1024 * 1024 },
{ pid: 789, ppid: 1, command: "node", user: "mike", cpu: 8.7, mem: 256 * 1024 * 1024 },
{ pid: 1001, ppid: 1, command: "Electron", user: "mike", cpu: 3.2, mem: 400 * 1024 * 1024 },
{ pid: 1234, ppid: 1001, command: "waveterm-helper", user: "mike", cpu: 0.5, mem: 64 * 1024 * 1024 },
{ pid: 2001, ppid: 1, command: "sshd", user: "root", cpu: 0.0, mem: 8 * 1024 * 1024 },
{ pid: 2345, ppid: 1, command: "postgres", user: "postgres", cpu: 1.2, mem: 128 * 1024 * 1024 },
{ pid: 3001, ppid: 1, command: "nginx", user: "_www", cpu: 0.3, mem: 32 * 1024 * 1024 },
{ pid: 3456, ppid: 1, command: "python3", user: "mike", cpu: 2.8, mem: 96 * 1024 * 1024 },
{ pid: 4001, ppid: 1, command: "docker", user: "root", cpu: 0.1, mem: 48 * 1024 * 1024 },
{ pid: 4567, ppid: 4001, command: "containerd", user: "root", cpu: 0.2, mem: 80 * 1024 * 1024 },
{ pid: 5001, ppid: 1, command: "zsh", user: "mike", cpu: 0.0, mem: 6 * 1024 * 1024 },
{ pid: 5678, ppid: 5001, command: "vim", user: "mike", cpu: 0.0, mem: 20 * 1024 * 1024 },
{ pid: 6001, ppid: 1, command: "coreaudiod", user: "_coreaudiod", cpu: 0.4, mem: 16 * 1024 * 1024 },
{ pid: 1, ppid: 0, command: "launchd", user: "root", cpu: 0.0, mem: 4096 * 1024, mempct: 0.01 },
{ pid: 123, ppid: 1, command: "kernel_task", user: "root", cpu: 12.3, mem: 2048 * 1024 * 1024, mempct: 6.25 },
{ pid: 456, ppid: 1, command: "WindowServer", user: "_windowserver", cpu: 5.1, mem: 512 * 1024 * 1024, mempct: 1.56 },
{ pid: 789, ppid: 1, command: "node", user: "mike", cpu: 8.7, mem: 256 * 1024 * 1024, mempct: 0.78 },
{ pid: 1001, ppid: 1, command: "Electron", user: "mike", cpu: 3.2, mem: 400 * 1024 * 1024, mempct: 1.22 },
{ pid: 1234, ppid: 1001, command: "waveterm-helper", user: "mike", cpu: 0.5, mem: 64 * 1024 * 1024, mempct: 0.20 },
{ pid: 2001, ppid: 1, command: "sshd", user: "root", cpu: 0.0, mem: 8 * 1024 * 1024, mempct: 0.02 },
{ pid: 2345, ppid: 1, command: "postgres", user: "postgres", cpu: 1.2, mem: 128 * 1024 * 1024, mempct: 0.39 },
{ pid: 3001, ppid: 1, command: "nginx", user: "_www", cpu: 0.3, mem: 32 * 1024 * 1024, mempct: 0.10 },
{ pid: 3456, ppid: 1, command: "python3", user: "mike", cpu: 2.8, mem: 96 * 1024 * 1024, mempct: 0.29 },
{ pid: 4001, ppid: 1, command: "docker", user: "root", cpu: 0.1, mem: 48 * 1024 * 1024, mempct: 0.15 },
{ pid: 4567, ppid: 4001, command: "containerd", user: "root", cpu: 0.2, mem: 80 * 1024 * 1024, mempct: 0.24 },
{ pid: 5001, ppid: 1, command: "zsh", user: "mike", cpu: 0.0, mem: 6 * 1024 * 1024, mempct: 0.02 },
{ pid: 5678, ppid: 5001, command: "vim", user: "mike", cpu: 0.0, mem: 20 * 1024 * 1024, mempct: 0.06 },
{ pid: 6001, ppid: 1, command: "coreaudiod", user: "_coreaudiod", cpu: 0.4, mem: 16 * 1024 * 1024, mempct: 0.05 },
];
const MockSummary: ProcessSummary = {

View file

@ -93,6 +93,10 @@ svg [aria-label="tip"] g path {
color: var(--border-color);
}
/* Wider WebKit scrollbar for elements opting in via .wide-scrollbar
   (used by the process viewer's scrolling body). */
.wide-scrollbar::-webkit-scrollbar {
width: 10px;
}
/* Monaco editor scrollbar styling */
.monaco-editor .slider {
background: rgba(255, 255, 255, 0.4);

View file

@ -559,12 +559,14 @@ declare global {
// wshrpc.CommandRemoteProcessListData
type CommandRemoteProcessListData = {
widgetid?: string;
sortby?: string;
sortdesc?: boolean;
start?: number;
limit?: number;
textsearch?: string;
// pids overrides the other fields: return only these pids (no sort/limit/start/textsearch).
pids?: number[];
// lastpidorder: reuse the pid order from this widget's previous request
// (dead pids come back with gone=true).
lastpidorder?: boolean;
// keepalive overrides everything: just keep the backend cache alive, no data returned.
keepalive?: boolean;
};
// wshrpc.CommandRemoteProcessSignalData
@ -1267,10 +1269,11 @@ declare global {
command?: string;
status?: string;
user?: string;
mem?: number;
mempct?: number;
cpu?: number;
numthreads?: number;
mem: number;
mempct: number;
cpu: number;
numthreads: number;
gone?: boolean;
};
// wshrpc.ProcessListResponse

4
package-lock.json generated
View file

@ -1,12 +1,12 @@
{
"name": "waveterm",
"version": "0.14.4-beta.2",
"version": "0.14.4",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "waveterm",
"version": "0.14.4-beta.2",
"version": "0.14.4",
"hasInstallScript": true,
"license": "Apache-2.0",
"workspaces": [

View file

@ -27,14 +27,16 @@ var LinuxStatStatus = map[string]string{
// ProcInfo holds per-process information read from the OS.
// CpuUser and CpuSys are cumulative CPU seconds since process start;
// callers should diff two samples over a known interval to derive a rate.
// CpuUser, CpuSys, and VmRSS are set to -1 when the data is unavailable
// (e.g. permission denied reading another user's process).
type ProcInfo struct {
Pid int32
Ppid int32
Command string
Status string
CpuUser float64 // cumulative user CPU seconds
CpuSys float64 // cumulative system CPU seconds
VmRSS uint64 // resident set size in bytes
CpuUser float64 // cumulative user CPU seconds; -1 if unavailable
CpuSys float64 // cumulative system CPU seconds; -1 if unavailable
VmRSS int64 // resident set size in bytes; -1 if unavailable
Uid uint32
NumThreads int32
NumThreads int32 // -1 if unavailable
}

View file

@ -11,7 +11,6 @@ import (
"unsafe"
"github.com/ebitengine/purego"
goproc "github.com/shirou/gopsutil/v4/process"
"golang.org/x/sys/unix"
)
@ -88,22 +87,17 @@ func GetProcInfo(ctx context.Context, _ any, pid int32) (*ProcInfo, error) {
Uid: k.Eproc.Ucred.Uid,
}
info.CpuUser = -1
info.CpuSys = -1
info.VmRSS = -1
info.NumThreads = -1
if ti, terr := getDarwinProcTaskInfo(pid); terr == nil {
if darwinTimeScale > 0 {
info.CpuUser = float64(ti.TotalUser) * darwinTimeScale / 1e9
info.CpuSys = float64(ti.TotalSystem) * darwinTimeScale / 1e9
}
info.VmRSS = ti.ResidentSize
info.VmRSS = int64(ti.ResidentSize)
info.NumThreads = ti.Threadnum
} else {
if p, gerr := goproc.NewProcessWithContext(ctx, pid); gerr == nil {
if mi, merr := p.MemoryInfoWithContext(ctx); merr == nil {
info.VmRSS = mi.RSS
}
if nt, nerr := p.NumThreadsWithContext(ctx); nerr == nil {
info.NumThreads = nt
}
}
}
return info, nil

View file

@ -90,12 +90,6 @@ func readStat(pid int32) (*ProcInfo, error) {
return nil, fmt.Errorf("procinfo: parse pid: %w", err)
}
ppid, _ := strconv.ParseInt(rest[1], 10, 32)
utime, _ := strconv.ParseUint(rest[11], 10, 64)
stime, _ := strconv.ParseUint(rest[12], 10, 64)
numThreads, _ := strconv.ParseInt(rest[17], 10, 32)
rssPages, _ := strconv.ParseInt(rest[21], 10, 64)
statusChar := rest[0]
status, ok := LinuxStatStatus[statusChar]
if !ok {
@ -104,14 +98,30 @@ func readStat(pid int32) (*ProcInfo, error) {
info := &ProcInfo{
Pid: int32(parsedPid),
Ppid: int32(ppid),
Command: comm,
Status: status,
CpuUser: float64(utime) / userHz,
CpuSys: float64(stime) / userHz,
VmRSS: uint64(rssPages * pageSize),
NumThreads: int32(numThreads),
CpuUser: -1,
CpuSys: -1,
VmRSS: -1,
NumThreads: -1,
}
if ppid, err := strconv.ParseInt(rest[1], 10, 32); err == nil {
info.Ppid = int32(ppid)
}
if utime, err := strconv.ParseUint(rest[11], 10, 64); err == nil {
info.CpuUser = float64(utime) / userHz
}
if stime, err := strconv.ParseUint(rest[12], 10, 64); err == nil {
info.CpuSys = float64(stime) / userHz
}
if numThreads, err := strconv.ParseInt(rest[17], 10, 32); err == nil {
info.NumThreads = int32(numThreads)
}
if rssPages, err := strconv.ParseInt(rest[21], 10, 64); err == nil {
info.VmRSS = rssPages * pageSize
}
return info, nil
}

View file

@ -97,6 +97,9 @@ func GetProcInfo(_ context.Context, snap any, pid int32) (*ProcInfo, error) {
Ppid: int32(si.ppid),
NumThreads: int32(si.numThreads),
Command: si.exeName,
CpuUser: -1,
CpuSys: -1,
VmRSS: -1,
}
handle, err := windows.OpenProcess(
@ -127,7 +130,7 @@ func GetProcInfo(_ context.Context, snap any, pid int32) (*ProcInfo, error) {
uintptr(mc.CB),
)
if r != 0 {
info.VmRSS = uint64(mc.WorkingSetSize)
info.VmRSS = int64(mc.WorkingSetSize)
}
return info, nil

View file

@ -25,7 +25,7 @@ import (
)
const (
ProcCacheIdleTimeout = 10 * time.Second
ProcCacheIdleTimeout = 60 * time.Second
ProcCachePollInterval = 1 * time.Second
ProcViewerMaxLimit = 500
)
@ -37,6 +37,13 @@ type cpuSample struct {
Epoch int // epoch at which this sample was recorded
}
// widgetPidOrder stores the ordered pid list from the last non-LastPidOrder request for a widget.
type widgetPidOrder struct {
pids []int32 // pid order produced by the last full filter/sort for this widget
totalCount int // total process count at the time the order was captured
lastRequest time.Time // last use; entries idle >= ProcCacheIdleTimeout are expired on access
}
// procCacheState is the singleton background cache for process list data.
// lastCPUSamples, lastCPUEpoch, and uidCache are only accessed by the single runLoop goroutine.
type procCacheState struct {
@ -50,6 +57,8 @@ type procCacheState struct {
lastCPUSamples map[int32]cpuSample
lastCPUEpoch int
uidCache map[uint32]string // uid -> username, populated lazily
widgetPidOrders map[string]*widgetPidOrder // keyed by widgetId
}
// procCache is the singleton background cache for process list data.
@ -88,6 +97,62 @@ func (s *procCacheState) requestAndWait(ctx context.Context) (*wshrpc.ProcessLis
return result, nil
}
// touchLastRequest records activity on the process cache.
// NOTE(review): presumably consulted by runLoop's idle shutdown (see
// ProcCacheIdleTimeout) — the consuming code is not fully visible here; confirm.
func (s *procCacheState) touchLastRequest() {
s.lock.Lock()
defer s.lock.Unlock()
s.lastRequest = time.Now()
}
// touchWidgetPidOrder refreshes the cache-wide lastRequest and, when present,
// the per-widget pid-order entry's lastRequest, keeping both alive during
// keepalive-only requests. An empty widgetId is a no-op.
func (s *procCacheState) touchWidgetPidOrder(widgetId string) {
if widgetId == "" {
return
}
s.lock.Lock()
defer s.lock.Unlock()
s.lastRequest = time.Now()
if s.widgetPidOrders != nil {
if entry, ok := s.widgetPidOrders[widgetId]; ok {
entry.lastRequest = time.Now()
}
}
}
// storeWidgetPidOrder records the pid order produced by a full filter/sort
// request so later LastPidOrder requests for the same widget can replay the
// identical row order. An empty widgetId is a no-op; the map is created lazily.
func (s *procCacheState) storeWidgetPidOrder(widgetId string, pids []int32, totalCount int) {
if widgetId == "" {
return
}
s.lock.Lock()
defer s.lock.Unlock()
if s.widgetPidOrders == nil {
s.widgetPidOrders = make(map[string]*widgetPidOrder)
}
s.widgetPidOrders[widgetId] = &widgetPidOrder{
pids: pids,
totalCount: totalCount,
lastRequest: time.Now(),
}
}
// getWidgetPidOrder returns the cached pid order and total count for a widget,
// or (nil, 0) when no usable entry exists. Entries idle for at least
// ProcCacheIdleTimeout are deleted on access and treated as absent.
// Note: this read path does not refresh lastRequest; touchWidgetPidOrder does.
func (s *procCacheState) getWidgetPidOrder(widgetId string) ([]int32, int) {
if widgetId == "" {
return nil, 0
}
s.lock.Lock()
defer s.lock.Unlock()
if s.widgetPidOrders == nil {
return nil, 0
}
entry, ok := s.widgetPidOrders[widgetId]
if !ok {
return nil, 0
}
if time.Since(entry.lastRequest) >= ProcCacheIdleTimeout {
delete(s.widgetPidOrders, widgetId)
return nil, 0
}
return entry.pids, entry.totalCount
}
func (s *procCacheState) runLoop(firstReadyCh chan struct{}) {
defer func() {
panichandler.PanicHandler("procCache.runLoop", recover())
@ -127,6 +192,7 @@ func (s *procCacheState) runLoop(firstReadyCh chan struct{}) {
s.lastCPUSamples = nil
s.lastCPUEpoch = 0
s.uidCache = nil
s.widgetPidOrders = nil
s.lock.Unlock()
return
}
@ -208,6 +274,9 @@ func (s *procCacheState) collectSnapshot(numCPU int) *wshrpc.ProcessListResponse
if ri.info == nil {
continue
}
if ri.info.CpuUser < 0 || ri.info.CpuSys < 0 {
continue
}
curCPUSec := ri.info.CpuUser + ri.info.CpuSys
if hasCPU {
@ -245,16 +314,17 @@ func (s *procCacheState) collectSnapshot(numCPU int) *wshrpc.ProcessListResponse
Command: pi.Command,
Status: pi.Status,
Mem: pi.VmRSS,
MemPct: -1,
Cpu: -1,
NumThreads: pi.NumThreads,
User: s.lookupUID(pi.Uid),
}
if totalMem > 0 {
if totalMem > 0 && pi.VmRSS >= 0 {
info.MemPct = float64(pi.VmRSS) / float64(totalMem) * 100
}
if hasCPU {
if cpu, ok := cpuPcts[pi.Pid]; ok {
v := cpu
info.Cpu = &v
info.Cpu = cpu
cpuSum += cpu
}
}
@ -322,56 +392,88 @@ func filterProcesses(processes []wshrpc.ProcessInfo, textSearch string) []wshrpc
return filtered
}
func sortAndLimitProcesses(processes []wshrpc.ProcessInfo, sortBy string, sortDesc bool, start int, limit int) []wshrpc.ProcessInfo {
func sortProcesses(processes []wshrpc.ProcessInfo, sortBy string, sortDesc bool) {
switch sortBy {
case "cpu":
sort.Slice(processes, func(i, j int) bool {
ci, cj := 0.0, 0.0
if processes[i].Cpu != nil {
ci = *processes[i].Cpu
ci := processes[i].Cpu
cj := processes[j].Cpu
iNull := ci < 0
jNull := cj < 0
if iNull != jNull {
return !iNull
}
if processes[j].Cpu != nil {
cj = *processes[j].Cpu
if !iNull && ci != cj {
if sortDesc {
return ci > cj
}
return ci < cj
}
if sortDesc {
return ci > cj
}
return ci < cj
return processes[i].Pid < processes[j].Pid
})
case "mem":
sort.Slice(processes, func(i, j int) bool {
if sortDesc {
return processes[i].Mem > processes[j].Mem
mi := processes[i].Mem
mj := processes[j].Mem
iNull := mi < 0
jNull := mj < 0
if iNull != jNull {
return !iNull
}
return processes[i].Mem < processes[j].Mem
if !iNull && mi != mj {
if sortDesc {
return mi > mj
}
return mi < mj
}
return processes[i].Pid < processes[j].Pid
})
case "command":
sort.Slice(processes, func(i, j int) bool {
if sortDesc {
return processes[i].Command > processes[j].Command
if processes[i].Command != processes[j].Command {
if sortDesc {
return processes[i].Command > processes[j].Command
}
return processes[i].Command < processes[j].Command
}
return processes[i].Command < processes[j].Command
return processes[i].Pid < processes[j].Pid
})
case "user":
sort.Slice(processes, func(i, j int) bool {
if sortDesc {
return processes[i].User > processes[j].User
if processes[i].User != processes[j].User {
if sortDesc {
return processes[i].User > processes[j].User
}
return processes[i].User < processes[j].User
}
return processes[i].User < processes[j].User
return processes[i].Pid < processes[j].Pid
})
case "status":
sort.Slice(processes, func(i, j int) bool {
if sortDesc {
return processes[i].Status > processes[j].Status
if processes[i].Status != processes[j].Status {
if sortDesc {
return processes[i].Status > processes[j].Status
}
return processes[i].Status < processes[j].Status
}
return processes[i].Status < processes[j].Status
return processes[i].Pid < processes[j].Pid
})
case "threads":
sort.Slice(processes, func(i, j int) bool {
if sortDesc {
return processes[i].NumThreads > processes[j].NumThreads
ti := processes[i].NumThreads
tj := processes[j].NumThreads
iNull := ti < 0
jNull := tj < 0
if iNull != jNull {
return !iNull
}
return processes[i].NumThreads < processes[j].NumThreads
if !iNull && ti != tj {
if sortDesc {
return ti > tj
}
return ti < tj
}
return processes[i].Pid < processes[j].Pid
})
default: // "pid"
sort.Slice(processes, func(i, j int) bool {
@ -381,63 +483,83 @@ func sortAndLimitProcesses(processes []wshrpc.ProcessInfo, sortBy string, sortDe
return processes[i].Pid < processes[j].Pid
})
}
if start > 0 {
if start >= len(processes) {
return nil
}
processes = processes[start:]
}
if limit > 0 && len(processes) > limit {
processes = processes[:limit]
}
return processes
}
func (impl *ServerImpl) RemoteProcessListCommand(ctx context.Context, data wshrpc.CommandRemoteProcessListData) (*wshrpc.ProcessListResponse, error) {
if data.KeepAlive {
if data.WidgetId != "" {
procCache.touchWidgetPidOrder(data.WidgetId)
} else {
procCache.touchLastRequest()
}
return nil, nil
}
raw, err := procCache.requestAndWait(ctx)
if err != nil {
return nil, err
}
// Pids overrides all other request fields; when set we skip sort/limit/start/textsearch
// and return only the exact pids requested.
if len(data.Pids) > 0 {
pidSet := make(map[int32]struct{}, len(data.Pids))
for _, pid := range data.Pids {
pidSet[pid] = struct{}{}
totalCount := len(raw.Processes)
// Phase 1: derive the pid order.
// Use cached order if LastPidOrder is set and a cached order exists; otherwise filter/sort and store.
var pidOrder []int32
var filteredCount int
if data.LastPidOrder {
var cachedTotal int
pidOrder, cachedTotal = procCache.getWidgetPidOrder(data.WidgetId)
if pidOrder != nil {
filteredCount = len(pidOrder)
totalCount = cachedTotal
}
processes := make([]wshrpc.ProcessInfo, 0, len(data.Pids))
for _, p := range raw.Processes {
if _, ok := pidSet[p.Pid]; ok {
processes = append(processes, p)
}
}
if pidOrder == nil {
sortBy := data.SortBy
sortDesc := data.SortDesc
if sortBy == "" {
sortBy = "cpu"
sortDesc = true
}
procs := make([]wshrpc.ProcessInfo, len(raw.Processes))
copy(procs, raw.Processes)
procs = filterProcesses(procs, data.TextSearch)
filteredCount = len(procs)
sortProcesses(procs, sortBy, sortDesc)
pidOrder = make([]int32, len(procs))
for i, p := range procs {
pidOrder[i] = p.Pid
}
if data.WidgetId != "" {
procCache.storeWidgetPidOrder(data.WidgetId, pidOrder, totalCount)
}
return &wshrpc.ProcessListResponse{
Processes: processes,
Summary: raw.Summary,
Ts: raw.Ts,
HasCPU: raw.HasCPU,
Platform: raw.Platform,
}, nil
}
sortBy := data.SortBy
if sortBy == "" {
sortBy = "cpu"
}
// Phase 2: limit and populate process info from the pid order.
limit := data.Limit
if limit <= 0 || limit > ProcViewerMaxLimit {
limit = ProcViewerMaxLimit
}
totalCount := len(raw.Processes)
// Copy processes so we can sort/limit without mutating the cache.
processes := make([]wshrpc.ProcessInfo, len(raw.Processes))
copy(processes, raw.Processes)
processes = filterProcesses(processes, data.TextSearch)
filteredCount := len(processes)
processes = sortAndLimitProcesses(processes, sortBy, data.SortDesc, data.Start, limit)
pidMap := make(map[int32]wshrpc.ProcessInfo, len(raw.Processes))
for _, p := range raw.Processes {
pidMap[p.Pid] = p
}
start := data.Start
if start >= len(pidOrder) {
start = len(pidOrder)
}
window := pidOrder[start:]
if limit > 0 && len(window) > limit {
window = window[:limit]
}
processes := make([]wshrpc.ProcessInfo, 0, len(window))
for _, pid := range window {
if p, ok := pidMap[pid]; ok {
processes = append(processes, p)
} else {
processes = append(processes, wshrpc.ProcessInfo{Pid: pid, Gone: true})
}
}
return &wshrpc.ProcessListResponse{
Processes: processes,

View file

@ -911,16 +911,20 @@ type FocusedBlockData struct {
TermLastCommand string `json:"termlastcommand,omitempty"`
}
// ProcessInfo holds per-process information for the process viewer.
// Mem, MemPct, Cpu, and NumThreads are set to -1 when the data is unavailable
// (e.g. permission denied reading another user's process on macOS).
type ProcessInfo struct {
Pid int32 `json:"pid"`
Ppid int32 `json:"ppid,omitempty"`
Command string `json:"command,omitempty"`
Status string `json:"status,omitempty"`
User string `json:"user,omitempty"`
Mem uint64 `json:"mem,omitempty"`
MemPct float64 `json:"mempct,omitempty"`
Cpu *float64 `json:"cpu,omitempty"`
NumThreads int32 `json:"numthreads,omitempty"`
Pid int32 `json:"pid"`
Ppid int32 `json:"ppid,omitempty"`
Command string `json:"command,omitempty"`
Status string `json:"status,omitempty"`
User string `json:"user,omitempty"`
Mem int64 `json:"mem"` // resident set size in bytes; -1 if unavailable
MemPct float64 `json:"mempct"` // memory percent; -1 if unavailable
Cpu float64 `json:"cpu"` // cpu percent; -1 if unavailable
NumThreads int32 `json:"numthreads"` // -1 if unavailable
Gone bool `json:"gone,omitempty"`
}
type ProcessSummary struct {
@ -946,13 +950,17 @@ type ProcessListResponse struct {
}
type CommandRemoteProcessListData struct {
WidgetId string `json:"widgetid,omitempty"`
SortBy string `json:"sortby,omitempty"`
SortDesc bool `json:"sortdesc,omitempty"`
Start int `json:"start,omitempty"`
Limit int `json:"limit,omitempty"`
TextSearch string `json:"textsearch,omitempty"`
// Pids overrides all other fields; when set, returns only the specified pids (no sort/limit/start/textsearch).
Pids []int32 `json:"pids,omitempty"`
// LastPidOrder, when set, ignores SortBy/SortDesc/TextSearch and returns processes in the order
// they were returned in the previous request for this WidgetId (with Gone=true for dead pids).
LastPidOrder bool `json:"lastpidorder,omitempty"`
// KeepAlive, when set, overrides all other fields and simply keeps the backend cache alive (returns nil).
KeepAlive bool `json:"keepalive,omitempty"`
}
type CommandRemoteProcessSignalData struct {