#include "rank_worker.hpp"

#include "../models/model_factory.hpp"

#include <iostream>
#include <spdlog/spdlog.h>
#include <sstream>
#include <stdexcept>

namespace infinilm::engine {

RankWorker::RankWorker(const std::any &model_config,
                       const distributed::RankInfo &rank_info,
                       const cache::CacheConfig &cache_config)
    : model_config_(model_config),
      rank_info_(rank_info),
      job_cmd_(Command::INIT),
      has_job_(false),
      job_done_(false),
      should_exit_(false),
      init_done_(false),
      pending_cache_config_(cache_config) {
    // start the thread
    thread_ = std::thread(&RankWorker::thread_loop, this);

    // Wait until the worker thread finishes initialization (model created)
    std::unique_lock<std::mutex> lk(mutex_);
    cv_.wait(lk, [&] { return init_done_; });
}
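
// Typical lifecycle (illustrative sketch, not part of the implementation;
// `model_cfg`, `rank` and `cache_cfg` stand for caller-provided objects):
//
//     RankWorker worker(model_cfg, rank, cache_cfg); // blocks until the worker thread has created the model
//     // ... load parameters, run forward passes ...
//     worker.close();                                // request shutdown and join the worker thread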

std::string RankWorker::info() const {
    std::stringstream ss;

    ss << "RankWorker{";

    // Rank related
    ss << rank_info_.to_string() << " ";

    // Flags
    ss << "| init_done: " << (init_done_ ? "true" : "false") << " ";
    ss << "| should_exit: " << (should_exit_ ? "true" : "false") << " ";
    ss << "| has_job: " << (has_job_ ? "true" : "false") << " ";
    ss << "| job_done: " << (job_done_ ? "true" : "false") << " ";

    ss << "}";

    return ss.str();
}

//------------------------------------------------------
// load_param -- synchronous (blocks until worker finishes loading)
//------------------------------------------------------
void RankWorker::load_param(const std::string &name,
                            const infinicore::Tensor &param) {
    {
        std::lock_guard<std::mutex> lock(mutex_);
        // If the worker is stopping, don't submit new jobs.
        if (should_exit_) {
            throw std::runtime_error("RankWorker is closing; cannot load_param");
        }

        pending_param_name_ = name;
        pending_param_ = param;

        job_cmd_ = Command::LOAD;
        has_job_ = true;
        job_done_ = false;
    }
    cv_.notify_all();

    // Wait for job completion
    std::unique_lock<std::mutex> lk(mutex_);
    cv_.wait(lk, [&] { return job_done_ || should_exit_; });

    if (should_exit_) {
        throw std::runtime_error("RankWorker stopped while loading parameter");
    }
}
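
// Callers typically drive parameter loading by iterating over a host-side weight map
// (sketch; `weights` is a hypothetical std::unordered_map<std::string, infinicore::Tensor>):
//
//     for (const auto &[name, tensor] : weights) {
//         worker.load_param(name, tensor); // each call blocks until the worker has consumed the tensor
//     }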

//------------------------------------------------------
// state_dict -- synchronous (returns the model's parameter map)
//------------------------------------------------------
std::unordered_map<std::string, infinicore::nn::Parameter> RankWorker::state_dict() {
    return this->model_->state_dict();
}

//------------------------------------------------------
// run -- asynchronous
//------------------------------------------------------
void RankWorker::run(const std::vector<std::any> &args) {
    std::lock_guard<std::mutex> lock(mutex_);

    if (should_exit_) {
        throw std::runtime_error("RankWorker is closing; cannot run");
    }

    pending_args_ = args;
    job_cmd_ = Command::RUN;
    has_job_ = true;
    job_done_ = false;

    cv_.notify_all();
}

//------------------------------------------------------
// wait -- blocks until the pending job completes (or the worker shuts down)
//------------------------------------------------------
void RankWorker::wait() {
    std::unique_lock<std::mutex> lk(mutex_);
    cv_.wait(lk, [&] { return job_done_ || should_exit_; });

    if (should_exit_) {
        throw std::runtime_error("RankWorker stopped during run");
    }
}

//------------------------------------------------------
// reset_cache -- asynchronous (enqueues a reset job; call wait() to block until it is applied)
//------------------------------------------------------
void RankWorker::reset_cache(size_t pos) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (should_exit_) {
        throw std::runtime_error("RankWorker is closing; cannot reset_cache");
    }

    pending_reset_pos_ = pos;
    job_cmd_ = Command::RESET_CACHE;
    has_job_ = true;
    job_done_ = false;
    cv_.notify_all();
}

void RankWorker::reset_cache(const cache::CacheConfig &new_config, size_t pos) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (should_exit_) {
        throw std::runtime_error("RankWorker is closing; cannot reset_cache");
    }

    // Store both the position and the new config
    pending_reset_pos_ = pos;
    pending_cache_config_ = new_config;
    job_cmd_ = Command::RESET_CACHE_WITH_CONFIG;
    has_job_ = true;
    job_done_ = false;
    cv_.notify_all();
}
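
// Both reset_cache overloads only enqueue the job; pair them with wait() when the caller
// needs the reset to have taken effect (sketch; `new_cfg` is an illustrative cache::CacheConfig):
//
//     worker.reset_cache(/*pos=*/0);          // drop cached state back to position 0
//     worker.reset_cache(new_cfg, /*pos=*/0); // same, but also swap in a new cache configuration
//     worker.wait();                          // block until the reset job has run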

//------------------------------------------------------
// close -- request shutdown and join thread
//------------------------------------------------------
void RankWorker::close() {
    {
        std::lock_guard<std::mutex> lock(mutex_);
        should_exit_ = true;
        has_job_ = false; // don't keep old jobs pending
        job_cmd_ = Command::STOP;
    }
    cv_.notify_all();

    if (thread_.joinable()) {
        thread_.join();
    }
}

//------------------------------------------------------
// get_output (thread safe)
//------------------------------------------------------
infinicore::Tensor RankWorker::get_output() {
    std::lock_guard<std::mutex> lock(mutex_);
    return output_;
}
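
// run(), wait() and get_output() together form one inference step (sketch; `input_ids`
// stands for whatever the model's forward() expects inside the std::any arguments):
//
//     worker.run({input_ids});                          // returns immediately
//     worker.wait();                                    // blocks until forward() finishes
//     infinicore::Tensor logits = worker.get_output();  // thread-safe copy of the last output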

//------------------------------------------------------
// thread_loop
//------------------------------------------------------
void RankWorker::thread_loop() {
    try {
        // Initialize device & model outside of holding the main mutex to avoid blocking callers.
        infinicore::context::setDevice(rank_info_.device);

        cache_ptr_ = std::make_shared<cache::DynamicCache>(pending_cache_config_);

        // Create model using factory (may be expensive)
        model_ = InfinilmModelFactory::createModel(model_config_, rank_info_, cache_ptr_);

        // Signal that initialization is done
        {
            std::lock_guard<std::mutex> lk(mutex_);
            init_done_ = true;
        }
        cv_.notify_all();

        // Main loop: wait for jobs or exit
        while (true) {
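            // Each iteration copies the pending job's payload into these locals while holding
            // the lock, then releases the lock before executing the job, so callers can keep
            // submitting work or blocking in wait() while the model call runs.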
            Command local_cmd = Command::INIT;
            std::string local_param_name;
            infinicore::Tensor local_param;
            std::vector<std::any> local_args;
            size_t local_reset_pos = 0;
            cache::CacheConfig local_reset_config;

            // Wait for a job or exit
            {
                std::unique_lock<std::mutex> lk(mutex_);
                cv_.wait(lk, [&] { return has_job_ || should_exit_; });

                if (should_exit_) {
                    break;
                }

                // capture job data and clear has_job_
                local_cmd = job_cmd_;
                if (local_cmd == Command::LOAD) {
                    local_param_name = pending_param_name_;
                    local_param = pending_param_;
                } else if (local_cmd == Command::RUN) {
                    local_args = pending_args_;
                } else if (local_cmd == Command::RESET_CACHE) {
                    local_reset_pos = pending_reset_pos_;
                } else if (local_cmd == Command::RESET_CACHE_WITH_CONFIG) {
                    local_reset_pos = pending_reset_pos_;
                    local_reset_config = pending_cache_config_;
                }

                // mark job as being processed
                has_job_ = false;
                job_done_ = false;
            } // unlock mutex while executing the job

            // Execute job outside the lock
            if (local_cmd == Command::LOAD) {
                try {
                    model_->load_parameter(local_param_name, local_param);
                } catch (const std::exception &e) {
                    // convert exceptions to a safe behavior: set should_exit_ and notify caller
                    std::lock_guard<std::mutex> lk(mutex_);
                    should_exit_ = true;
                    job_done_ = true;
                    cv_.notify_all();
                    // log the error and break out of the loop so the thread exits and can be joined
                    spdlog::error("[{}] exception during load_parameter: {}\n", info(), e.what());
                    break;
                }

                // signal completion
                {
                    std::lock_guard<std::mutex> lk(mutex_);
                    job_done_ = true;
                }
                cv_.notify_all();

            } else if (local_cmd == Command::RUN) {
                try {
                    auto out = model_->forward(local_args);

                    {
                        std::lock_guard<std::mutex> lk(mutex_);
                        output_ = std::move(out);
                        job_done_ = true;
                    }
                    cv_.notify_all();

                } catch (const std::exception &e) {
                    std::lock_guard<std::mutex> lk(mutex_);
                    should_exit_ = true;
                    job_done_ = true;
                    cv_.notify_all();
                    spdlog::error("[{}] exception during forward: {}\n", info(), e.what());
                    break;
                }
            } else if (local_cmd == Command::RESET_CACHE) {
                try {
                    // Option 1: Use model's reset_cache if it handles cache
                    model_->reset_cache(local_reset_pos);

                    // Option 2: Reset the cache directly if we have access
                    // (cache_ptr_ is a shared_ptr, so downcast with static_pointer_cast)
                    // if (cache_ptr_ != nullptr) {
                    //     auto dynamic_cache = std::static_pointer_cast<cache::DynamicCache>(cache_ptr_);
                    //     dynamic_cache->reset(local_reset_pos);
                    // }

                    {
                        std::lock_guard<std::mutex> lk(mutex_);
                        job_done_ = true;
                    }
                    cv_.notify_all();

                } catch (const std::exception &e) {
                    std::lock_guard<std::mutex> lk(mutex_);
                    should_exit_ = true;
                    job_done_ = true;
                    cv_.notify_all();
                    spdlog::error("[{}] exception during reset_cache: {}\n", info(), e.what());
                    break;
                }
            } else if (local_cmd == Command::RESET_CACHE_WITH_CONFIG) {
                try {
                    // Use model's reset_cache with new configuration
                    model_->reset_cache(local_reset_config, local_reset_pos);

                    {
                        std::lock_guard<std::mutex> lk(mutex_);
                        job_done_ = true;
                    }
                    cv_.notify_all();

                } catch (const std::exception &e) {
                    std::lock_guard<std::mutex> lk(mutex_);
                    should_exit_ = true;
                    job_done_ = true;
                    cv_.notify_all();
                    spdlog::error("[{}] exception during reset_cache with config: {}\n", info(), e.what());
                    break;
                }
            } else {
                // Shouldn't reach here (no-op)
            }
        } // while
    } catch (const std::exception &e) {
        // Top-level exception: ensure any waiters are woken and the thread exits cleanly.
        {
            std::lock_guard<std::mutex> lk(mutex_);
            should_exit_ = true;
            job_done_ = true;
        }
        cv_.notify_all();
        spdlog::error("[{}] fatal exception in thread_loop: {} \n", info(), e.what());
    }
}

} // namespace infinilm::engine