Skip to content

Commit ec82d52

Browse files
committed
refactor: remove some useless code
1 parent afea457 commit ec82d52

10 files changed

+23
-126
lines changed

clip.hpp

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -939,22 +939,6 @@ struct FrozenCLIPEmbedderWithCustomWords : public GGMLModule {
939939
return "clip";
940940
}
941941

942-
size_t get_params_mem_size() {
943-
size_t params_mem_size = text_model.get_params_mem_size();
944-
if (version == VERSION_XL) {
945-
params_mem_size += text_model2.get_params_mem_size();
946-
}
947-
return params_mem_size;
948-
}
949-
950-
size_t get_params_num() {
951-
size_t params_num = text_model.get_params_num();
952-
if (version == VERSION_XL) {
953-
params_num += text_model2.get_params_num();
954-
}
955-
return params_num;
956-
}
957-
958942
void set_clip_skip(int clip_skip) {
959943
text_model.set_clip_skip(clip_skip);
960944
if (version == VERSION_XL) {
@@ -1385,14 +1369,6 @@ struct FrozenCLIPVisionEmbedder : public GGMLModule {
13851369
return "clip_vision";
13861370
}
13871371

1388-
size_t get_params_mem_size() {
1389-
return vision_model.get_params_mem_size();
1390-
}
1391-
1392-
size_t get_params_num() {
1393-
return vision_model.get_params_num();
1394-
}
1395-
13961372
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
13971373
vision_model.get_param_tensors(tensors, prefix + "transformer");
13981374
}

control.hpp

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -369,14 +369,6 @@ struct ControlNet : public GGMLModule {
369369
return "control_net";
370370
}
371371

372-
size_t get_params_mem_size() {
373-
return control_net.get_params_mem_size();
374-
}
375-
376-
size_t get_params_num() {
377-
return control_net.get_params_num();
378-
}
379-
380372
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
381373
control_net.get_param_tensors(tensors, prefix);
382374
}

esrgan.hpp

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -152,14 +152,6 @@ struct ESRGAN : public GGMLModule {
152152
return "esrgan";
153153
}
154154

155-
size_t get_params_mem_size() {
156-
return rrdb_net.get_params_mem_size();
157-
}
158-
159-
size_t get_params_num() {
160-
return rrdb_net.get_params_num();
161-
}
162-
163155
bool load_from_file(const std::string& file_path) {
164156
LOG_INFO("loading esrgan from '%s'", file_path.c_str());
165157

ggml_extend.hpp

Lines changed: 17 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -752,18 +752,15 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_timestep_embedding(
752752
return ggml_timestep_embedding(ctx, timesteps, dim, max_period);
753753
}
754754

755-
// struct GGMLComputeGraph {
756-
// virtual void init(struct ggml_context* ctx, ggml_type wtype) = 0;
757-
// virtual std::string get_desc() = 0;
758-
// virtual size_t get_params_mem_size() = 0;
759-
// virtual size_t get_params_num() = 0;
760-
// virtual struct ggml_cgraph* get_ggml_cgraph() = 0;
761-
// };
762-
763-
/*
764-
#define MAX_PARAMS_TENSOR_NUM 10240
765-
#define MAX_GRAPH_SIZE 10240
766-
*/
755+
756+
__STATIC_INLINE__ size_t ggml_tensor_num(ggml_context * ctx) {
757+
size_t num = 0;
758+
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
759+
num++;
760+
}
761+
return num;
762+
}
763+
767764
/* SDXL with LoRA requires more space */
768765
#define MAX_PARAMS_TENSOR_NUM 15360
769766
#define MAX_GRAPH_SIZE 15360
@@ -854,8 +851,6 @@ struct GGMLModule {
854851
}
855852

856853
public:
857-
virtual size_t get_params_mem_size() = 0;
858-
virtual size_t get_params_num() = 0;
859854
virtual std::string get_desc() = 0;
860855

861856
GGMLModule(ggml_backend_t backend, ggml_type wtype = GGML_TYPE_F32)
@@ -876,7 +871,7 @@ struct GGMLModule {
876871
}
877872

878873
bool alloc_params_buffer() {
879-
size_t num_tensors = get_params_num();
874+
size_t num_tensors = ggml_tensor_num(params_ctx);
880875
params_buffer = ggml_backend_alloc_ctx_tensors(params_ctx, backend);
881876
if (params_buffer == NULL) {
882877
LOG_ERROR("%s alloc params backend buffer failed", get_desc().c_str());
@@ -898,6 +893,13 @@ struct GGMLModule {
898893
}
899894
}
900895

896+
size_t get_params_buffer_size() {
897+
if (params_buffer != NULL) {
898+
return ggml_backend_buffer_get_size(params_buffer);
899+
}
900+
return 0;
901+
}
902+
901903
void free_compute_buffer() {
902904
if (compute_allocr != NULL) {
903905
ggml_gallocr_free(compute_allocr);
@@ -968,19 +970,6 @@ struct GGMLModule {
968970
};
969971

970972
class GGMLBlock {
971-
private:
972-
static char temp_buffer[1024 * 1024 * 10];
973-
ggml_context* get_temp_ctx() {
974-
struct ggml_init_params params;
975-
params.mem_size = sizeof(temp_buffer);
976-
params.mem_buffer = temp_buffer;
977-
params.no_alloc = true;
978-
979-
ggml_context* temp_ctx = ggml_init(params);
980-
GGML_ASSERT(temp_ctx != NULL);
981-
return temp_ctx;
982-
}
983-
984973
protected:
985974
typedef std::unordered_map<std::string, struct ggml_tensor*> ParameterMap;
986975
typedef std::unordered_map<std::string, std::shared_ptr<GGMLBlock>> GGMLBlockMap;
@@ -1003,14 +992,6 @@ class GGMLBlock {
1003992
init_params(ctx, wtype);
1004993
}
1005994

1006-
std::tuple<size_t, size_t> get_params_info(ggml_type wtype) {
1007-
ggml_context* temp_ctx = get_temp_ctx();
1008-
init(temp_ctx, wtype);
1009-
size_t num_tensors = get_params_num();
1010-
size_t mem_size = get_params_mem_size();
1011-
return {num_tensors, mem_size};
1012-
}
1013-
1014995
size_t get_params_num() {
1015996
size_t num_tensors = params.size();
1016997
for (auto& pair : blocks) {

lora.hpp

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -27,14 +27,6 @@ struct LoraModel : public GGMLModule {
2727
return "lora";
2828
}
2929

30-
size_t get_params_num() {
31-
return LORA_GRAPH_SIZE;
32-
}
33-
34-
size_t get_params_mem_size() {
35-
return model_loader.get_params_mem_size(NULL);
36-
}
37-
3830
bool load_from_file(bool filter_tensor = false) {
3931
LOG_INFO("loading LoRA from '%s'", file_path.c_str());
4032

pmid.hpp

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -186,16 +186,6 @@ struct PhotoMakerIDEncoder : public GGMLModule {
186186
return "pmid";
187187
}
188188

189-
size_t get_params_mem_size() {
190-
size_t params_mem_size = id_encoder.get_params_mem_size();
191-
return params_mem_size;
192-
}
193-
194-
size_t get_params_num() {
195-
size_t params_num = id_encoder.get_params_num();
196-
return params_num;
197-
}
198-
199189
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
200190
id_encoder.get_param_tensors(tensors, prefix);
201191
}

stable-diffusion.cpp

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,6 @@ const char* sampling_methods_str[] = {
4242
"LCM",
4343
};
4444

45-
char GGMLBlock::temp_buffer[1024 * 1024 * 10];
46-
4745
/*================================================== Helper Functions ================================================*/
4846

4947
void calculate_alphas_cumprod(float* alphas_cumprod,
@@ -353,27 +351,27 @@ class StableDiffusionGGML {
353351
// first_stage_model->test();
354352
// return false;
355353
} else {
356-
size_t clip_params_mem_size = cond_stage_model->get_params_mem_size();
357-
size_t unet_params_mem_size = diffusion_model->get_params_mem_size();
354+
size_t clip_params_mem_size = cond_stage_model->get_params_buffer_size();
355+
size_t unet_params_mem_size = diffusion_model->get_params_buffer_size();
358356
size_t vae_params_mem_size = 0;
359357
if (!use_tiny_autoencoder) {
360-
vae_params_mem_size = first_stage_model->get_params_mem_size();
358+
vae_params_mem_size = first_stage_model->get_params_buffer_size();
361359
} else {
362360
if (!tae_first_stage->load_from_file(taesd_path)) {
363361
return false;
364362
}
365-
vae_params_mem_size = tae_first_stage->get_params_mem_size();
363+
vae_params_mem_size = tae_first_stage->get_params_buffer_size();
366364
}
367365
size_t control_net_params_mem_size = 0;
368366
if (control_net) {
369367
if (!control_net->load_from_file(control_net_path)) {
370368
return false;
371369
}
372-
control_net_params_mem_size = control_net->get_params_mem_size();
370+
control_net_params_mem_size = control_net->get_params_buffer_size();
373371
}
374372
size_t pmid_params_mem_size = 0;
375373
if (stacked_id) {
376-
pmid_params_mem_size = pmid_model->get_params_mem_size();
374+
pmid_params_mem_size = pmid_model->get_params_buffer_size();
377375
}
378376

379377
size_t total_params_ram_size = 0;

tae.hpp

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -200,14 +200,6 @@ struct TinyAutoEncoder : public GGMLModule {
200200
return "taesd";
201201
}
202202

203-
size_t get_params_mem_size() {
204-
return taesd.get_params_mem_size();
205-
}
206-
207-
size_t get_params_num() {
208-
return taesd.get_params_num();
209-
}
210-
211203
bool load_from_file(const std::string& file_path) {
212204
LOG_INFO("loading taesd from '%s'", file_path.c_str());
213205
alloc_params_buffer();

unet.hpp

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -543,14 +543,6 @@ struct UNetModel : public GGMLModule {
543543
return "unet";
544544
}
545545

546-
size_t get_params_mem_size() {
547-
return unet.get_params_mem_size();
548-
}
549-
550-
size_t get_params_num() {
551-
return unet.get_params_num();
552-
}
553-
554546
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
555547
unet.get_param_tensors(tensors, prefix);
556548
}

vae.hpp

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -526,14 +526,6 @@ struct AutoEncoderKL : public GGMLModule {
526526
return "vae";
527527
}
528528

529-
size_t get_params_mem_size() {
530-
return ae.get_params_mem_size();
531-
}
532-
533-
size_t get_params_num() {
534-
return ae.get_params_num();
535-
}
536-
537529
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
538530
ae.get_param_tensors(tensors, prefix);
539531
}

0 commit comments

Comments
 (0)