@@ -4137,11 +4137,29 @@ struct LoraModel {
            continue;
        }
        k_tensor = k_tensor.substr(0, k_pos);
-       replace_all_chars(k_tensor, '.', '_');
-       std::string lora_up_name   = "lora." + k_tensor + ".lora_up.weight";
-       std::string lora_down_name = "lora." + k_tensor + ".lora_down.weight";
-       std::string alpha_name     = "lora." + k_tensor + ".alpha";
-       std::string scale_name     = "lora." + k_tensor + ".scale";
+       // replace_all_chars(k_tensor, '.', '_');
+       std::string lora_up_name   = "" + k_tensor + ".lora_B.weight";
+       std::string lora_down_name = "" + k_tensor + ".lora_A.weight";
+       std::string alpha_name     = "" + k_tensor + ".alpha";
+       std::string scale_name     = "" + k_tensor + ".scale";
+
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_q.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_v.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_out.0.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_k.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_q.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_v.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.2.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm2.lora_up.weight
+       // model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm3.lora_up.weight
+
+       // LOG_DEBUG("Candidate lora_up_name: %s", lora_up_name.c_str());
+       // LOG_DEBUG("Candidate lora_down_name: %s", lora_down_name.c_str());
+       // LOG_DEBUG("Candidate alpha_name: %s", alpha_name.c_str());
+       // LOG_DEBUG("Candidate scale_name: %s", scale_name.c_str());

        ggml_tensor* lora_up   = NULL;
        ggml_tensor* lora_down = NULL;
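For context on the rename: the removed lines built candidate names in the kohya-ss convention (dots in the base key flattened to underscores, a `lora.` prefix, and `lora_up`/`lora_down` projection names), while the added lines keep the dotted key as-is and look for PEFT/diffusers-style names, where `lora_A` is the down projection and `lora_B` is the up projection. The following minimal, self-contained sketch (not part of the patch) shows both schemes side by side; the `replace_all_chars` helper is re-implemented here only for illustration, and the sample key is taken from the comments above.

```cpp
#include <iostream>
#include <string>

// Stand-in for the replace_all_chars() helper in stable-diffusion.cpp,
// re-implemented here so the sketch compiles on its own.
static void replace_all_chars(std::string& s, char from, char to) {
    for (char& c : s) {
        if (c == from) {
            c = to;
        }
    }
}

int main() {
    std::string k_tensor = "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_q";

    // kohya-ss convention (old code path): dots flattened to underscores,
    // "lora." prefix, projections named lora_up / lora_down.
    std::string kohya = k_tensor;
    replace_all_chars(kohya, '.', '_');
    std::cout << "lora." << kohya << ".lora_up.weight\n";
    std::cout << "lora." << kohya << ".lora_down.weight\n";

    // PEFT/diffusers convention (new code path): dotted key kept as-is;
    // lora_B corresponds to the up projection, lora_A to the down projection.
    std::cout << k_tensor << ".lora_B.weight\n";
    std::cout << k_tensor << ".lora_A.weight\n";
    return 0;
}
```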