Commit d5f04510 authored by liming6's avatar liming6

feature: temporary commit

parent ee110e57
......@@ -8,6 +8,5 @@
- Collect data with multiple goroutines; the collection goroutines must not block the tui-related goroutines
Try fetching system information through a .so library instead of parsing a binary's output, to make collection more efficient
Increase the collection frequency
Integrate docker-related features with error isolation, so that irrelevant errors cannot affect the main logic (restarting a docker container interferes with fetching its information and returns an err); a sketch of this pattern follows below
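To make the error-isolation point concrete, here is a minimal, self-contained sketch of the intended pattern, not the project's actual code: a background goroutine refreshes container data on a ticker and logs-and-drops any docker error (such as one caused by a restarting container) instead of letting it reach the main TUI logic. The names pidSet, collectDockerPids, and refreshLoop are illustrative only.

package main

import (
    "context"
    "errors"
    "log"
    "sync"
    "time"
)

// pidSet is a stand-in for the backend's container/pid map.
type pidSet struct {
    mu   sync.RWMutex
    pids map[int32]bool
}

// collectDockerPids is a placeholder for the real docker query; a restarting
// container can make such a query fail, which is expected and non-fatal.
func collectDockerPids() (map[int32]bool, error) {
    return nil, errors.New("container restarting")
}

// refreshLoop runs in its own goroutine so the TUI goroutines never block on
// docker, and swallows errors so they cannot disturb the main logic.
func (s *pidSet) refreshLoop(ctx context.Context, every time.Duration) {
    ticker := time.NewTicker(every)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            pids, err := collectDockerPids()
            if err != nil {
                log.Printf("docker collection skipped: %v", err) // isolate the error
                continue
            }
            s.mu.Lock()
            s.pids = pids
            s.mu.Unlock()
        case <-ctx.Done():
            return
        }
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    s := &pidSet{pids: map[int32]bool{}}
    go s.refreshLoop(ctx, time.Second)
    time.Sleep(3 * time.Second) // the TUI event loop would run here instead
}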
package backend
import (
"testing"
"time"
)
func TestDocker(t *testing.T) {
m := DockerProcessMap{}
start := time.Now()
err := m.Update()
d := time.Since(start)
if err != nil {
t.Error(err)
}
t.Log(d.Nanoseconds(), "ns")
info, l := m.Get()
defer l.Unlock()
for k, v := range info {
t.Logf("%d: %+v", k, v)
}
}
func TestUpdateQuickInfo(t *testing.T) {
err := Init()
if err != nil {
t.Error(err)
}
defer Shutdown()
start := time.Now()
err = DCUSInfoMap.UpdateQuickInfo()
du := time.Since(start)
if err != nil {
t.Error(err)
}
t.Log(du.Milliseconds(), "ms")
info, lock := DCUSInfoMap.GetQuitInfo()
defer lock.Unlock()
for k, v := range info {
t.Logf("%d: %+v", k, v)
}
}
func BenchmarkUpdateQuickInfo(b *testing.B) {
err := Init()
if err != nil {
b.Error(err)
}
defer Shutdown()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err = DCUSInfoMap.UpdateQuickInfo()
if err != nil {
b.Error(err)
}
}
}
func TestUpdateSlowInfo(t *testing.T) {
err := Init()
if err != nil {
t.Error(err)
}
defer Shutdown()
start := time.Now()
err = DCUSInfoMap.UpdateSlowInfo()
du := time.Since(start)
if err != nil {
t.Error(err)
}
t.Log(du.Milliseconds(), "ms")
info, lock := DCUSInfoMap.GetSlowInfo()
defer lock.Unlock()
for k, v := range info {
t.Logf("%d: %+v", k, v)
}
}
func BenchmarkUpdateSlowInfo(b *testing.B) {
err := Init()
if err != nil {
b.Error(err)
}
defer Shutdown()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err = DCUSInfoMap.UpdateSlowInfo()
if err != nil {
b.Error(err)
}
}
}
package backend
import (
"context"
"fmt"
"get-container/docker"
"get-container/cmd/hytop/lib"
"get-container/gpu"
"get-container/utils"
"maps"
"math"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/shirou/gopsutil/v4/process"
)
/*
The backend package provides data for the tui package.
*/
var (
MapIdDCU = sync.Map{} // records DCU info
ContainerInfo *docker.ContainersInfo
DCUSInfoMap *DCUInfoMap = nil // records DCU info
DockerPidInfo *DockerProcessMap = nil
User = ""
HostName = ""
Rocmlib = lib.Rocmlib_instance
DriverVersion = ""
stopCtx context.Context
cancelFunc context.CancelFunc
)
func init() {
ContainerInfo = docker.NewContainersInfo()
ContainerInfo.Update()
i, err := docker.ContainerInfo.GetProcessIdInDocker(false)
DockerPidInfo = &DockerProcessMap{lock: sync.RWMutex{}, pids: make(map[int32]bool)}
if err == nil && i != nil {
for _, v := range i {
for _, pidInfo := range v {
DockerPidInfo.pids[pidInfo.Pid] = true
}
}
}
}
const (
ENVUser = "USER"
)
// Init initializes this package's data
func Init() error {
HostName, _ = os.Hostname()
uid := os.Getuid()
u, err := utils.GetSysUserById(uid)
if err == nil && u != nil {
User = u.Name
} else {
username, seted := os.LookupEnv(ENVUser)
if !seted {
User = strconv.Itoa(uid)
} else {
User = username
}
}
DCUSInfoMap = &DCUInfoMap{}
DCUSInfoMap.qinfo = make(map[int]*DCUQuickInfo)
DCUSInfoMap.sinfo = make(map[int]*DCUSlowInfo)
Rocmlib = lib.GetRocmlib()
_, err := Rocmlib.Init()
if err != nil {
return err
}
DriverVersion, err = Rocmlib.GetSystemDriverVersion()
if err != nil {
return err
}
_, err = Rocmlib.GetDevNumber()
DockerPidInfo = &DockerProcessMap{}
DockerPidInfo.Update()
if err == nil {
stopCtx, cancelFunc = context.WithCancel(context.Background())
go func() {
ticker := time.NewTicker(time.Second * 20)
defer ticker.Stop()
for {
select {
case <-ticker.C:
DockerPidInfo.Update()
DCUSInfoMap.UpdateSlowInfo()
case <-stopCtx.Done():
return
}
}
}()
}
return err
}
func Shutdown() {
Rocmlib.Shutdown()
cancelFunc()
}
type DCUQuickInfo struct {
lock sync.RWMutex
Id int //
Name string //
PerformanceLevel string //
Fan string //
Temp float32 //
PwrAvg float32 //
PwrCap float32 //
BusId string //
MemTotal uint64 //
MemUsed uint64 //
MemUsedPerent float32 //
DCUUTil float32 //
}
type DCUSlowInfo struct {
Id atomic.Int32
Ecc atomic.Bool
PwrMode atomic.Value // string
Mig atomic.Bool
}
type DCUInfo struct {
Id int
Name string // full
PerformanceLevel string
Fan string // full
Temp float32
PwrAvg float32
PwrCap float32
BusId string // full
MemTotal int // full
MemUsed int // full
MemUsedPerent float32
Mig bool // full
DCUUTil float32
Ecc bool // full
PwrMode string
SlowInfo DCUSlowInfo
QuickInfo DCUQuickInfo
}
// UpdateDCUInfo updates the DCU info; full indicates whether everything should be refreshed
func UpdateDCUInfo(full bool) {
wg := sync.WaitGroup{}
var smiAll map[int]*gpu.SMIAllOutput
var eccInfo, migInfo map[int]bool
var memInfo map[int]gpu.DCUMemInfo
var runInfo map[int]gpu.DCURunningInfo
var errSmiAll, errEcc, errMem, errRun error
if full {
wg.Add(5)
go func() {
smiAll, errSmiAll = gpu.GetSMIAllOutput()
wg.Done()
}()
go func() {
eccInfo, errEcc = gpu.GetEccInfo()
wg.Done()
}()
} else {
wg.Add(3)
type DCUInfoMap struct {
qinfolock sync.RWMutex
qinfo map[int]*DCUQuickInfo
sinfolock sync.RWMutex
sinfo map[int]*DCUSlowInfo
}
func (m *DCUInfoMap) UpdateQuickInfo() error {
num, err := Rocmlib.GetDevNumber()
if err != nil {
return err
}
go func() {
migInfo = gpu.GetMigInfo()
wg.Done()
}()
go func() {
memInfo, errMem = gpu.GetDCUMemInfo()
wg.Done()
}()
go func() {
runInfo, errRun = gpu.GetRunningInfo()
wg.Done()
}()
wg.Wait()
cache := make(map[int]DCUInfo)
if errSmiAll == nil && smiAll != nil {
for k, v := range smiAll {
i, have := cache[k]
if !have {
i = DCUInfo{}
i.Id = k
}
i.Name = v.CardSeries
i.PerformanceLevel = v.PerLevel
i.Fan = gpu.NA
i.PwrCap = v.MaxPwr
i.PwrAvg = v.AvgPwr
i.BusId = v.PCIBus
i.MemUsedPerent = v.HCUMemUsage
i.DCUUTil = v.HCUUsage
cache[k] = i
}
names, err := Rocmlib.GetDevName()
if err != nil {
return err
}
if errEcc == nil && eccInfo != nil {
for k, v := range eccInfo {
i, have := cache[k]
if !have {
i = DCUInfo{}
i.Id = k
}
i.Ecc = v
cache[k] = i
}
plevel, err := Rocmlib.GetPerfLevel()
if err != nil {
return err
}
fan, err := Rocmlib.GetFanSpeed()
if err != nil {
return err
}
temp, err := Rocmlib.GetTemp()
if err != nil {
return err
}
pwrAvg, err := Rocmlib.GetPowerAvg()
if err != nil {
return err
}
for k, v := range migInfo {
i, have := cache[k]
pwrCap, err := Rocmlib.GetPowerCap()
if err != nil {
return err
}
busid, err := Rocmlib.GetPCIBusId()
if err != nil {
return err
}
memTotal, err := Rocmlib.GetMemTotal()
if err != nil {
return err
}
memUsed, err := Rocmlib.GetMemUsed()
if err != nil {
return err
}
dcu, err := Rocmlib.GetBusyPercent()
if err != nil {
return err
}
set := make(map[int]bool)
m.qinfolock.Lock()
defer m.qinfolock.Unlock()
for i := range num {
qinfo, have := m.qinfo[i]
if !have {
i = DCUInfo{}
i.Id = k
qinfo = &DCUQuickInfo{}
m.qinfo[i] = qinfo
}
i.Mig = v
cache[k] = i
}
if memInfo != nil && errMem == nil {
for k, v := range memInfo {
i, have := cache[k]
if !have {
i = DCUInfo{}
i.Id = k
}
i.MemTotal = int(v.Total.Num)
i.MemUsed = int(v.Used.Num)
cache[k] = i
qinfo.lock.Lock()
qinfo.Id = i
qinfo.Name = names[i]
qinfo.PerformanceLevel = plevel[i]
if rpm, have := fan[i]; !have || rpm == 0 {
qinfo.Fan = "N/A"
} else {
qinfo.Fan = strconv.Itoa(int(rpm))
}
}
if errRun == nil && runInfo != nil {
for k, v := range runInfo {
i, have := cache[k]
if !have {
i = DCUInfo{}
i.Id = k
}
i.Temp = v.Temp
i.PwrAvg = v.AvgPower
i.PerformanceLevel = v.PerformanceLevel
i.MemUsedPerent = v.MemPerc
i.DCUUTil = v.HCUPerc
i.PwrMode = v.Mode
cache[k] = i
qinfo.Temp = float32(temp[i]) / 1000
qinfo.PwrAvg = float32(pwrAvg[i]) / 1000000
qinfo.PwrCap = float32(pwrCap[i]) / 1000000
qinfo.BusId = busid[i]
qinfo.MemTotal = memTotal[i]
qinfo.MemUsed = memUsed[i]
if qinfo.MemTotal == 0 {
qinfo.MemUsedPerent = 0
} else {
qinfo.MemUsedPerent = float32(qinfo.MemUsed) / float32(qinfo.MemTotal) * 100
}
qinfo.DCUUTil = float32(dcu[i])
qinfo.lock.Unlock()
set[i] = true
}
for k, v := range cache {
old, have := MapIdDCU.LoadOrStore(k, &v)
if !have {
for k := range m.qinfo {
a, b := set[k]
if a && b {
continue
}
d := old.(*DCUInfo)
if full {
d.Name = v.Name
d.PerformanceLevel = v.PerformanceLevel
d.Fan = v.Fan
d.PwrCap = v.PwrCap
d.BusId = v.BusId
d.Mig = v.Mig
d.Ecc = v.Ecc
}
d.Temp = v.Temp
d.PwrAvg = v.PwrAvg
d.MemTotal = v.MemTotal
d.MemUsed = v.MemUsed
d.MemUsedPerent = v.MemUsedPerent
d.DCUUTil = v.DCUUTil
d.PwrMode = v.PwrMode
delete(m.qinfo, k)
}
return nil
}
func GetDCUInfo() map[int]DCUInfo {
result := make(map[int]DCUInfo)
MapIdDCU.Range(func(key, value any) bool {
id := key.(int)
val := value.(*DCUInfo)
result[id] = *val
return true
})
return result
}
type DockerProcessMap struct {
lock sync.RWMutex
pids map[int32]bool
func (m *DCUInfoMap) UpdateSlowInfo() error {
num, err := Rocmlib.GetDevNumber()
if err != nil {
return err
}
ecc, err := gpu.GetEccInfo()
if err != nil {
return err
}
rinfo, err := gpu.GetRunningInfo()
if err != nil {
return err
}
set := make(map[int]bool)
m.sinfolock.Lock()
defer m.sinfolock.Unlock()
for i := range num {
sinfo, have := m.sinfo[i]
if !have {
sinfo = &DCUSlowInfo{}
m.sinfo[i] = sinfo
}
sinfo.Id.Store(int32(i))
sinfo.Mig.Store(false)
if r, have := rinfo[i]; have {
sinfo.PwrMode.Store(r.Mode)
} else {
sinfo.PwrMode.Store("Normal")
}
e, have := ecc[i]
if have {
sinfo.Ecc.Store(e)
} else {
sinfo.Ecc.Store(false)
}
set[i] = true
}
for k := range m.sinfo {
a, b := set[k]
if a && b {
continue
}
delete(m.sinfo, k)
}
return nil
}
func (dpm *DockerProcessMap) GetPidInfo() map[int32]bool {
rl := dpm.lock.RLocker()
// GetSlowInfo returns the slow-refresh info; the caller must release the lock once reading is done
func (m *DCUInfoMap) GetSlowInfo() (map[int]*DCUSlowInfo, sync.Locker) {
rl := m.sinfolock.RLocker()
rl.Lock()
defer rl.Unlock()
return maps.Clone(dpm.pids)
return m.sinfo, rl
}
func (dpm *DockerProcessMap) Update(dinfo *docker.ContainersInfo) (map[int32]bool, sync.Locker) {
dpm.lock.Lock()
clear(dpm.pids)
dpm.lock.Unlock()
rl := dpm.lock.RLocker()
i, err := dinfo.GetProcessIdInDocker(false)
if err != nil || i == nil {
return dpm.pids, rl
}
for _, v := range i {
for _, pidInfo := range v {
DockerPidInfo.pids[pidInfo.Pid] = true
}
}
return dpm.pids, rl
// GetQuitInfo returns the quick-refresh info; the caller must release the lock once reading is done
func (m *DCUInfoMap) GetQuitInfo() (map[int]*DCUQuickInfo, sync.Locker) {
rl := m.qinfolock.RLocker()
rl.Lock()
return m.qinfo, rl
}
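As both getter comments above stress, these methods return the internal map with its read lock already held, so the caller must unlock as soon as it has finished reading. A minimal in-package usage sketch (the helper name printQuickInfo is illustrative and not part of this commit):

// printQuickInfo assumes Init() has already been called successfully.
func printQuickInfo() {
    info, lock := DCUSInfoMap.GetQuitInfo()
    defer lock.Unlock() // holding the lock longer than needed blocks UpdateQuickInfo
    for id, q := range info {
        fmt.Printf("DCU %d: util %.1f%%, mem %d/%d\n", id, q.DCUUTil, q.MemUsed, q.MemTotal)
    }
}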
type DCUProcessInfo struct {
......@@ -248,7 +282,7 @@ type ProcessInfo struct {
Mem float32 // memory usage percentage
Time string // CPU time used
Cmd string // command line
InDocker bool // whether the process runs inside a docker container
ContInfo *ContainerInfo
}
func getProcessInfo(pids []int32) map[int32]ProcessInfo {
......@@ -256,7 +290,8 @@ func getProcessInfo(pids []int32) map[int32]ProcessInfo {
if len(pids) == 0 {
return result
}
dockerPids := DockerPidInfo.GetPidInfo()
dockerInfo, lock := DockerPidInfo.Get()
defer lock.Unlock()
for _, pid := range pids {
p, err := process.NewProcess(int32(pid))
if err != nil {
......@@ -271,33 +306,33 @@ func getProcessInfo(pids []int32) map[int32]ProcessInfo {
item.Time = durationStr(time.Duration((t.System + t.User)) * time.Second)
}
item.Cmd, _ = p.Cmdline()
a, b := dockerPids[item.Pid]
item.InDocker = a && b
d, have := dockerInfo[item.Pid]
if have {
item.ContInfo = d
}
result[p.Pid] = item
}
return result
}
// GetDCUProcessInfo returns a map keyed by DCU index
func GetDCUProcessInfo() map[int][]DCUProcessInfo {
func (m *DCUInfoMap) GetDCUProcessInfo() map[int][]DCUProcessInfo {
result := make(map[int][]DCUProcessInfo)
info, err := gpu.GetDCUPidInfo()
mem := utils.MemorySize{}
mem.Unit = utils.Byte
info, err := Rocmlib.GetProcessInfo()
if err != nil {
return result
}
pids := make([]int32, 0)
for _, v := range info {
pids = append(pids, v.Pid)
pids = append(pids, int32(v.Pid))
}
pinfo := getProcessInfo(pids)
for _, v := range info {
index := make([]int, 0)
for _, i := range v.HCUIndex {
ii, err := strconv.Atoi(i)
if err != nil {
continue
}
index = append(index, ii)
for _, i := range v.UsedGPUIndex {
index = append(index, int(i))
}
for _, i := range index {
l, have := result[i]
......@@ -306,9 +341,10 @@ func GetDCUProcessInfo() map[int][]DCUProcessInfo {
l = result[i]
}
item := DCUProcessInfo{DCU: i}
item.Info = pinfo[v.Pid]
item.DCUMem = v.VRamUsed.HumanReadStr(1)
item.SDMA = v.SDMAUsed
item.Info = pinfo[int32(v.Pid)]
mem.Num = v.VarmUsage
item.DCUMem = mem.HumanReadStr(1)
item.SDMA = int(v.SdmaUsage)
l = append(l, item)
result[i] = l
}
......
package backend
import (
"context"
"strconv"
"strings"
"sync"
"github.com/moby/moby/client"
)
type ContainerInfo struct {
Name string
Id string
}
type DockerProcessMap struct {
lock sync.RWMutex
pids map[int32]*ContainerInfo
}
func (m *DockerProcessMap) Update() error {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return err
}
defer cli.Close()
cs, err := cli.ContainerList(context.Background(), client.ContainerListOptions{All: false})
if err != nil {
return err
}
if m.pids == nil {
m.pids = make(map[int32]*ContainerInfo)
}
m.lock.Lock()
defer m.lock.Unlock()
clear(m.pids)
for _, v := range cs {
top, err := cli.ContainerTop(context.Background(), v.ID, []string{"-o", "pid"})
if err != nil {
continue
}
cinfo := &ContainerInfo{Name: strings.Trim(v.Names[0], "/"), Id: v.ID}
for _, pid := range top.Processes {
if len(pid) == 0 {
continue
}
p, err := strconv.ParseInt(pid[0], 10, 32)
if err != nil {
continue
}
m.pids[int32(p)] = cinfo
}
}
return nil
}
func (m *DockerProcessMap) Get() (map[int32]*ContainerInfo, sync.Locker) {
rl := m.lock.RLocker()
rl.Lock()
return m.pids, rl
}
package backend
import (
"testing"
"time"
)
func TestUpdateDCUInfo(t *testing.T) {
for i := 0; i < 10; i++ {
start := time.Now()
UpdateDCUInfo(false)
end := time.Now()
t.Logf("%d ms", end.Sub(start).Milliseconds())
}
}
func TestGetDCUInfo(t *testing.T) {
UpdateDCUInfo(true)
info := GetDCUInfo()
t.Logf("%+v", info)
for i := range 10 {
time.Sleep(time.Second)
start := time.Now()
UpdateDCUInfo(false)
info = GetDCUInfo()
tt := time.Since(start).Milliseconds()
t.Logf("%d|%d : %+v", i, tt, info)
}
}
......@@ -154,7 +154,7 @@ func TestRocmlib(t *testing.T) {
if err != nil {
t.Error(err)
}
t.Logf("%+v", busy)
t.Logf("busy: %+v", busy)
v, err := rlib.GetSystemDriverVersion()
if err != nil {
......
......@@ -3,12 +3,13 @@ package lib
import (
"errors"
"get-container/utils"
"slices"
"sync/atomic"
)
var (
rocmlib_flag = atomic.Int32{}
rocmlib_instance *rocmlib = nil
Rocmlib_instance *rocmlib = nil
ErrNotGetDevNum = errors.New("not get dev num yet")
ErrNotInit = errors.New("not init rocm lib yet")
......@@ -22,7 +23,7 @@ type rocmlib struct {
func GetRocmlib() *rocmlib {
if rocmlib_flag.Load() == 1 {
return rocmlib_instance
return Rocmlib_instance
} else {
result := &rocmlib{
status: atomic.Int32{},
......@@ -30,6 +31,7 @@ func GetRocmlib() *rocmlib {
}
result.status.Store(0)
rocmlib_flag.Store(1)
Rocmlib_instance = result
return result
}
}
......@@ -350,5 +352,8 @@ func (r *rocmlib) GetProcessInfo() ([]RSMIProcessInfo, error) {
}
result[i].UsedGPUIndex = indexs
}
return result, nil
s := slices.DeleteFunc(result, func(info RSMIProcessInfo) bool {
return len(info.UsedGPUIndex) == 0
})
return s, nil
}
package main
import (
"get-container/cmd/hytop/backend"
"get-container/cmd/hytop/tui"
"log"
"os"
......@@ -14,9 +15,15 @@ func main() {
if err != nil {
log.Fatalf("error get terminal size: %v", err)
}
err = backend.Init()
if err != nil {
log.Fatalf("error init data backend: %v", err)
}
model := tui.NewModelMain(w, h)
if _, err := tea.NewProgram(&model, tea.WithAltScreen()).Run(); err != nil {
log.Fatalf("error create program; %v", err)
}
backend.Shutdown()
os.Exit(0)
}
......@@ -15,11 +15,10 @@ import (
)
type ModelDCUInfo struct {
DCUNum int // number of DCUs
width, height int // terminal dimensions
info map[int]backend.DCUInfo // DCU-related info
pro progress.Model // progress bar
proWidth int // progress bar width
parent *ModelMain
info map[int]backend.DCUInfo // DCU-related info
pro progress.Model // progress bar
proWidth int // progress bar width
}
const (
......@@ -36,13 +35,20 @@ var (
ReDCUName = regexp.MustCompile(`(?i)^[A-Z0-9-_]*`)
)
func NewModelDCUInfo(m *ModelMain) *ModelDCUInfo {
return &ModelDCUInfo{
parent: m,
info: make(map[int]backend.DCUInfo),
}
}
func (m *ModelDCUInfo) Init() tea.Cmd {
if m.width < StaticWidth+ProgressMinWidth {
if m.parent.width < StaticWidth+ProgressMinWidth+OtherWidth {
return tea.Quit
}
m.proWidth = ProgressMinWidth
if m.width > StaticWidth+ProgressMinWidth+OtherWidth {
m.proWidth = m.width - OtherWidth - StaticWidth
if m.parent.width > StaticWidth+ProgressMinWidth+OtherWidth {
m.proWidth = m.parent.width - OtherWidth - StaticWidth
}
m.pro = progress.New(progress.WithColorProfile(termenv.TrueColor), progress.WithGradient("#0000ffff", "#ff0000ff"), progress.WithWidth(m.proWidth))
return nil
......
......@@ -10,12 +10,19 @@ import (
)
type ModelHeader struct {
parent *ModelMain
t time.Time
DCUTopVersion string
SMIVersion string
DriverVersion string
}
func NewModelHeader(m *ModelMain) *ModelHeader {
return &ModelHeader{
parent: m,
}
}
func (mh *ModelHeader) Init() tea.Cmd {
mh.t = time.Now()
return nil
......@@ -51,7 +58,6 @@ var (
func (mh *ModelHeader) View() string {
header := fmt.Sprintf("%s%s(Press %s for help or %s for quit)\n", mh.t.Format("2006-01-02 15:04:05"), Space, KeyH, KeyQ)
style := lipgloss.NewStyle().Padding(0, 1)
hyv := style.Width(18).Render(fmt.Sprintf("hytop: %s", mh.DCUTopVersion))
drv := style.Width(35).Render(fmt.Sprintf("Driver Version: %s", mh.DriverVersion))
smiv := style.Width(24).Render(fmt.Sprintf("SMI Version: %s", mh.SMIVersion))
......
......@@ -16,13 +16,12 @@ const (
// ModelMsg carries model state shared between the parent component and its child components
type ModelMsg struct {
t time.Time // current time
index uint64 // number of updates
Version *gpu.HYVersionInfo // GPU version info
MyVersion string
DCUInfo map[int]backend.DCUInfo // full DCU info
t time.Time // current time
Version *gpu.HYVersionInfo // GPU version info
DCUPidInfo map[int][]backend.DCUProcessInfo // info on processes using DCUs
systemInfo *utils.SysInfo // system info
MyVersion string // version of this program
DCUInfo *backend.DCUInfoMap // DCU-related info
}
type TickMsg time.Time
......@@ -34,16 +33,15 @@ type ModelMain struct {
DCUInfo *ModelDCUInfo
SysLoad *ModelSysLoad
ProcessInfo *ModelProcessInfo
index uint64 // counts the number of updates
modelMsg *ModelMsg // holds the model state
modelMsg *ModelMsg
}
func NewModelMain(width, height int) ModelMain {
result := ModelMain{}
result.width = width
result.height = height
result.Header = &ModelHeader{}
result.DCUInfo = &ModelDCUInfo{width: width, height: height, info: make(map[int]backend.DCUInfo)}
result.Header = NewModelHeader(&result)
result.DCUInfo = NewModelDCUInfo(&result)
result.SysLoad = NewModelSysLoad(width)
result.ProcessInfo = NewModelProcessInfo(width)
return result
......@@ -71,11 +69,9 @@ func (m *ModelMain) Init() tea.Cmd {
if c := m.Header.Init(); c != nil {
cmds = append(cmds, c)
}
m.DCUInfo.DCUNum = len(modelMsg.DCUInfo)
if c := m.DCUInfo.Init(); c != nil {
cmds = append(cmds, c)
}
if c := m.SysLoad.Init(); c != nil {
cmds = append(cmds, c)
}
......@@ -96,8 +92,7 @@ func (m *ModelMain) Update(inputMsg tea.Msg) (tea.Model, tea.Cmd) {
return m, tea.Quit
}
case TickMsg: // timer tick event
m.index++
updateModelInfo(m.modelMsg, m.index, time.Time(msg))
updateModelInfo(m.modelMsg, time.Time(msg))
header, _ := m.Header.Update(m.modelMsg)
dcuInfo, _ := m.DCUInfo.Update(m.modelMsg)
sysLoad, _ := m.SysLoad.Update(m.modelMsg)
......@@ -117,6 +112,9 @@ func (m *ModelMain) Update(inputMsg tea.Msg) (tea.Model, tea.Cmd) {
m.SysLoad = sysLoad.(*ModelSysLoad)
m.ProcessInfo = pidinfo.(*ModelProcessInfo)
return m, nil
case tea.WindowSizeMsg:
m.width, m.height = msg.Width, msg.Height
return m, nil
}
return m, nil
}
......@@ -142,34 +140,22 @@ var myBorder = lipgloss.Border{
}
func initModelInfo(model *ModelMsg) error {
model.MyVersion = DCUTopVersion
model.index = 0
model.t = time.Now()
ver, err := gpu.GetHYVersionInfo()
if err != nil {
model.MyVersion = DCUTopVersion
if ver, err := gpu.GetHYVersionInfo(); err != nil {
return err
}
model.Version = ver
if model.index%20 == 0 {
backend.UpdateDCUInfo(true)
} else {
backend.UpdateDCUInfo(false)
model.Version = ver
}
model.DCUInfo = backend.GetDCUInfo()
model.systemInfo, err = utils.GetSysInfo()
model.DCUPidInfo = backend.GetDCUProcessInfo()
return err
}
// updateModelInfo refreshes the model state
func updateModelInfo(modelMsg *ModelMsg, index uint64, t time.Time) {
modelMsg.index = index
func updateModelInfo(modelMsg *ModelMsg, t time.Time) {
modelMsg.t = t
if modelMsg.index%60 == 0 {
backend.UpdateDCUInfo(true)
} else {
backend.UpdateDCUInfo(false)
}
modelMsg.DCUInfo = backend.GetDCUInfo()
modelMsg.systemInfo, _ = utils.GetSysInfo()
modelMsg.DCUPidInfo = backend.GetDCUProcessInfo()
......