Commit 102953d

refactor(tx): remove rpc servers param
Signed-off-by: thxCode <thxcode0824@gmail.com>
1 parent 1ae97f8 commit 102953d

3 files changed (+4, -83 lines)

stable-diffusion.cpp

Lines changed: 0 additions & 39 deletions

@@ -233,39 +233,13 @@ class StableDiffusionGGML {
                        bool vae_on_cpu,
                        bool diffusion_flash_attn,
                        bool tae_preview_only,
-                       const std::vector<std::string>& rpc_servers,
                        const float* tensor_split) {
         use_tiny_autoencoder = taesd_path.size() > 0;
 
         ggml_log_set(ggml_log_callback_default, nullptr);
 
         std::vector<ggml_backend_dev_t> devices;
 
-        if (!rpc_servers.empty()) {
-            ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
-            if (!rpc_reg) {
-                LOG_ERROR("failed to find RPC backend");
-                return false;
-            }
-
-            typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char* endpoint);
-            ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t)ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
-            if (!ggml_backend_rpc_add_device_fn) {
-                LOG_ERROR("failed to find RPC device add function");
-                return false;
-            }
-
-            for (const std::string& server : rpc_servers) {
-                ggml_backend_dev_t dev = ggml_backend_rpc_add_device_fn(server.c_str());
-                if (dev) {
-                    devices.push_back(dev);
-                } else {
-                    LOG_ERROR("failed to add RPC device for server '%s'", server.c_str());
-                    return false;
-                }
-            }
-        }
-
         // use all available devices
         for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
             ggml_backend_dev_t dev = ggml_backend_dev_get(i);
@@ -1496,7 +1470,6 @@ sd_ctx_t* new_sd_ctx(const char* model_path_c_str,
                      bool keep_vae_on_cpu,
                      bool diffusion_flash_attn,
                      bool tae_preview_only,
-                     const char* rpc_servers,
                      const float* tensor_splits) {
     sd_ctx_t* sd_ctx = (sd_ctx_t*)malloc(sizeof(sd_ctx_t));
     if (sd_ctx == NULL) {
@@ -1514,17 +1487,6 @@ sd_ctx_t* new_sd_ctx(const char* model_path_c_str,
     std::string id_embd_path(id_embed_dir_c_str);
     std::string lora_model_dir(lora_model_dir_c_str);
     std::vector<std::string> rpc_servers_vec;
-    if (rpc_servers != nullptr && rpc_servers[0] != '\0') {
-        // split the servers set them into model->rpc_servers
-        std::string servers(rpc_servers);
-        size_t pos = 0;
-        while ((pos = servers.find(',')) != std::string::npos) {
-            std::string server = servers.substr(0, pos);
-            rpc_servers_vec.push_back(server);
-            servers.erase(0, pos + 1);
-        }
-        rpc_servers_vec.push_back(servers);
-    }
 
     sd_ctx->sd = new StableDiffusionGGML(n_threads,
                                          vae_decode_only,
@@ -1554,7 +1516,6 @@ sd_ctx_t* new_sd_ctx(const char* model_path_c_str,
                       keep_vae_on_cpu,
                       diffusion_flash_attn,
                       tae_preview_only,
-                      rpc_servers_vec,
                       tensor_splits)) {
         delete sd_ctx->sd;
         sd_ctx->sd = NULL;
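
Note: the block removed above wired remote devices in through ggml's backend registry: it looked up the "RPC" backend by name, fetched the "ggml_backend_rpc_add_device" entry point via ggml_backend_reg_get_proc_address, and appended one device per endpoint. For reference, a minimal standalone sketch of that mechanism follows, using only the ggml calls that appeared in the removed code; the helper name and the ggml-backend.h include are illustrative and are not part of this commit.

    #include <string>
    #include <vector>
    #include "ggml-backend.h"  // assumed to declare the ggml backend registry API used below

    // Hypothetical helper (not in this repo): register one RPC device per endpoint,
    // mirroring the logic removed above.
    static bool add_rpc_devices(const std::vector<std::string>& endpoints,
                                std::vector<ggml_backend_dev_t>& devices) {
        ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
        if (!rpc_reg) {
            return false;  // RPC backend not compiled into ggml
        }
        typedef ggml_backend_dev_t (*add_device_fn_t)(const char* endpoint);
        add_device_fn_t add_device =
            (add_device_fn_t)ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
        if (!add_device) {
            return false;  // entry point not exported by the RPC backend
        }
        for (const std::string& endpoint : endpoints) {
            ggml_backend_dev_t dev = add_device(endpoint.c_str());
            if (!dev) {
                return false;  // endpoint unreachable or invalid
            }
            devices.push_back(dev);
        }
        return true;
    }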

stable-diffusion.h

Lines changed: 2 additions & 4 deletions

@@ -168,8 +168,7 @@ SD_API sd_ctx_t* new_sd_ctx(const char* model_path,
                             bool keep_vae_on_cpu,
                             bool diffusion_flash_attn,
                             bool tae_preview_only,
-                            const char * rpc_servers = nullptr,
-                            const float * tensor_splits = nullptr);
+                            const float* tensor_splits = nullptr);
 
 SD_API void free_sd_ctx(sd_ctx_t* sd_ctx);
 
@@ -312,8 +311,7 @@ typedef struct upscaler_ctx_t upscaler_ctx_t;
 
 SD_API upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path,
                                         int n_threads,
-                                        const char * rpc_servers = nullptr,
-                                        const float * tensor_splits = nullptr);
+                                        const float* tensor_splits = nullptr);
 SD_API void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx);
 
 SD_API sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, sd_image_t input_image, uint32_t upscale_factor);
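
Note: with the rpc_servers parameter gone, tensor_splits now follows the preceding argument directly in both factory functions and still defaults to nullptr when the header is consumed from C++. A small usage sketch against the updated upscaler API follows; the model path, thread count, and upscale factor are illustrative values, not taken from this commit.

    #include "stable-diffusion.h"

    // Illustrative only: upscale one image with the post-change signature.
    // "esrgan.safetensors" is a placeholder path; pass a real ESRGAN model.
    sd_image_t upscale_example(sd_image_t input) {
        upscaler_ctx_t* ctx = new_upscaler_ctx("esrgan.safetensors", /*n_threads=*/4);
        // tensor_splits is omitted and falls back to its nullptr default.
        if (ctx == NULL) {
            return input;  // context creation failed; return the input unchanged
        }
        sd_image_t out = upscale(ctx, input, /*upscale_factor=*/4);
        free_upscaler_ctx(ctx);
        return out;
    }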

upscaler.cpp

Lines changed: 2 additions & 40 deletions

@@ -14,36 +14,11 @@ struct UpscalerGGML {
         : n_threads(n_threads) {
     }
 
-    bool load_from_file(const std::string& esrgan_path, const std::vector<std::string>& rpc_servers, const float* tensor_split) {
+    bool load_from_file(const std::string& esrgan_path, const float* tensor_split) {
         ggml_log_set(ggml_log_callback_default, nullptr);
 
         std::vector<ggml_backend_dev_t> devices;
 
-        if (!rpc_servers.empty()) {
-            ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
-            if (!rpc_reg) {
-                LOG_ERROR("failed to find RPC backend");
-                return false;
-            }
-
-            typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char* endpoint);
-            ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t)ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
-            if (!ggml_backend_rpc_add_device_fn) {
-                LOG_ERROR("failed to find RPC device add function");
-                return false;
-            }
-
-            for (const std::string& server : rpc_servers) {
-                ggml_backend_dev_t dev = ggml_backend_rpc_add_device_fn(server.c_str());
-                if (dev) {
-                    devices.push_back(dev);
-                } else {
-                    LOG_ERROR("failed to add RPC device for server '%s'", server.c_str());
-                    return false;
-                }
-            }
-        }
-
         // use all available devices
         for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
             ggml_backend_dev_t dev = ggml_backend_dev_get(i);
@@ -154,32 +129,19 @@ struct upscaler_ctx_t {
 
 upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str,
                                  int n_threads,
-                                 const char* rpc_servers,
                                  const float* tensor_splits) {
     upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t));
     if (upscaler_ctx == NULL) {
         return NULL;
     }
     std::string esrgan_path(esrgan_path_c_str);
-    std::vector<std::string> rpc_servers_vec;
-    if (rpc_servers != nullptr && rpc_servers[0] != '\0') {
-        // split the servers set them into model->rpc_servers
-        std::string servers(rpc_servers);
-        size_t pos = 0;
-        while ((pos = servers.find(',')) != std::string::npos) {
-            std::string server = servers.substr(0, pos);
-            rpc_servers_vec.push_back(server);
-            servers.erase(0, pos + 1);
-        }
-        rpc_servers_vec.push_back(servers);
-    }
 
     upscaler_ctx->upscaler = new UpscalerGGML(n_threads);
     if (upscaler_ctx->upscaler == NULL) {
         return NULL;
     }
 
-    if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, rpc_servers_vec, tensor_splits)) {
+    if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, tensor_splits)) {
         delete upscaler_ctx->upscaler;
         upscaler_ctx->upscaler = NULL;
         free(upscaler_ctx);
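
Note: both call sites also dropped the same inline parser that turned the comma-separated rpc_servers string into a vector of endpoints. For the record, the removed logic is equivalent to the standalone helper sketched below; the function name is illustrative and the helper is not part of this commit.

    #include <string>
    #include <vector>

    // Equivalent of the removed parsing blocks: split a comma-separated endpoint
    // list ("server1,server2,...") into individual strings; a null or empty input
    // yields an empty list.
    static std::vector<std::string> split_rpc_servers(const char* rpc_servers) {
        std::vector<std::string> endpoints;
        if (rpc_servers == nullptr || rpc_servers[0] == '\0') {
            return endpoints;
        }
        std::string servers(rpc_servers);
        size_t pos = 0;
        while ((pos = servers.find(',')) != std::string::npos) {
            endpoints.push_back(servers.substr(0, pos));
            servers.erase(0, pos + 1);
        }
        endpoints.push_back(servers);  // trailing (or only) endpoint
        return endpoints;
    }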
