@@ -24,6 +24,7 @@
 #include <stdio.h>
 #include <float.h>
 #include <limits.h>
+#include <stdarg.h>
 
 #ifdef GGML_USE_METAL
 #include <unistd.h>
@@ -4734,10 +4735,19 @@ struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * nam
     return tensor;
 }
 
+struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
+    va_end(args);
+    return tensor;
+}
+
 struct ggml_tensor * ggml_view_tensor(
         struct ggml_context * ctx,
         const struct ggml_tensor * src) {
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data);
+    ggml_format_name(result, "%s (view)", src->name);
 
     result->nb[0] = src->nb[0];
     result->nb[1] = src->nb[1];
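
The new ggml_format_name() above is a printf-style counterpart to ggml_set_name(): it formats through vsnprintf() directly into the fixed-size tensor->name buffer, so overly long names are truncated rather than overflowing. A minimal usage sketch follows (not part of the diff; it assumes the matching declaration in ggml.h added elsewhere in this commit, and the layer count and tensor shapes are made up for illustration):

    #include "ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // name per-layer weights with a formatted, per-index string
        for (int il = 0; il < 4; ++il) {
            struct ggml_tensor * wq = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
            // behaves like ggml_set_name(), but printf-style; output is
            // truncated to sizeof(tensor->name)
            ggml_format_name(wq, "layers.%d.attention.wq", il);
        }

        ggml_free(ctx);
        return 0;
    }
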
@@ -5899,6 +5909,11 @@ struct ggml_tensor * ggml_cpy_impl(
 
     // make a view of the destination
     struct ggml_tensor * result = ggml_view_tensor(ctx, b);
+    if (strlen(b->name) > 0) {
+        ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
+    } else {
+        ggml_format_name(result, "%s (copy)", a->name);
+    }
 
     result->op = GGML_OP_CPY;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -5935,6 +5950,7 @@ struct ggml_tensor * ggml_cont_impl(
     }
 
     struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+    ggml_format_name(result, "%s (cont)", a->name);
 
     result->op = GGML_OP_CONT;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -5978,6 +5994,7 @@ struct ggml_tensor * ggml_reshape(
     }
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6002,6 +6019,7 @@ struct ggml_tensor * ggml_reshape_1d(
 
     const int64_t ne[1] = { ne0 };
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6027,6 +6045,7 @@ struct ggml_tensor * ggml_reshape_2d(
 
     const int64_t ne[2] = { ne0, ne1 };
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6053,6 +6072,7 @@ struct ggml_tensor * ggml_reshape_3d(
 
     const int64_t ne[3] = { ne0, ne1, ne2 };
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6081,6 +6101,7 @@ struct ggml_tensor * ggml_reshape_4d(
 
     const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6105,10 +6126,12 @@ struct ggml_tensor * ggml_view_1d(
     }
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset);
+    ggml_format_name(result, "%s (view)", a->name);
 
     ggml_scratch_save(ctx);
 
     struct ggml_tensor * offs = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
+    ggml_set_name(offs, "offset");
     memcpy(offs->data, &offset, 2*sizeof(int32_t));
 
     ggml_scratch_load(ctx);
@@ -6141,10 +6164,12 @@ struct ggml_tensor * ggml_view_2d(
     const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 };
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset);
+    ggml_format_name(result, "%s (view)", a->name);
 
     ggml_scratch_save(ctx);
 
     struct ggml_tensor * offs = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
+    ggml_set_name(offs, "offset");
     memcpy(offs->data, &offset, 2*sizeof(int32_t));
 
     ggml_scratch_load(ctx);
@@ -6183,10 +6208,12 @@ struct ggml_tensor * ggml_view_3d(
     const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, 1 };
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, (char *) a->data + offset);
+    ggml_format_name(result, "%s (view)", a->name);
 
     ggml_scratch_save(ctx);
 
     struct ggml_tensor * offs = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
+    ggml_set_name(offs, "offset");
     memcpy(offs->data, &offset, 2*sizeof(int32_t));
 
     ggml_scratch_load(ctx);
@@ -6227,10 +6254,12 @@ struct ggml_tensor * ggml_view_4d(
     const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, ne3 };
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, (char *) a->data + offset);
+    ggml_format_name(result, "%s (view)", a->name);
 
     ggml_scratch_save(ctx);
 
     struct ggml_tensor * offs = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
+    ggml_set_name(offs, "offset");
     memcpy(offs->data, &offset, 2*sizeof(int32_t));
 
     ggml_scratch_load(ctx);
@@ -6276,6 +6305,7 @@ struct ggml_tensor * ggml_permute(
     }
 
     struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+    ggml_format_name(result, "%s (permuted)", a->name);
 
     int ne[GGML_MAX_DIMS];
     int nb[GGML_MAX_DIMS];
@@ -6335,6 +6365,7 @@ struct ggml_tensor * ggml_transpose(
     }
 
     struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+    ggml_format_name(result, "%s (transposed)", a->name);
 
     result->ne[0] = a->ne[1];
     result->ne[1] = a->ne[0];
@@ -16004,7 +16035,7 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor *
         GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);
 
         if (strlen(node->name) == 0) {
-            snprintf(node->name, sizeof(node->name), "leaf_%d", cgraph->n_leafs);
+            ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
         }
 
         cgraph->leafs[cgraph->n_leafs] = node;
@@ -16013,7 +16044,7 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor *
         GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);
 
         if (strlen(node->name) == 0) {
-            snprintf(node->name, sizeof(node->name), "node_%d", cgraph->n_nodes);
+            ggml_format_name(node, "node_%d", cgraph->n_nodes);
         }
 
         cgraph->nodes[cgraph->n_nodes] = node;
@@ -17397,6 +17428,26 @@ static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgr
     return NULL;
 }
 
+static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
+    struct ggml_tensor * gparent = ggml_graph_get_parent(gb, node);
+    struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);
+    fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
+            gparent0 ? (void *) gparent0 : (void *) parent,
+            gparent0 ? "g" : "x",
+            gparent ? (void *) gparent : (void *) node,
+            gparent ? "g" : "x",
+            gparent ? "empty" : "vee",
+            gparent ? "dashed" : "solid",
+            label);
+}
+
+static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
+    fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
+            (void *) parent, "x",
+            (void *) node, "x",
+            label);
+}
+
 void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
     char color[16];
 
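The two helpers above factor out the duplicated fprintf() edge-writing blocks that are removed further down; they keep the existing arrowhead/style selection driven by ggml_graph_get_parent(). A rough sketch of calling the dumper follows (not part of the diff; passing NULL for the second graph and the "example.dot" file name are assumptions for illustration):

    #include "ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 8);
        struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 8);
        ggml_set_name(a, "a");
        ggml_set_name(b, "b");

        struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);
        ggml_format_name(c, "%s*%s", a->name, b->name);

        struct ggml_cgraph gf = ggml_build_forward(c);

        // node labels now include the tensor type, e.g. "a (f32)"; tensors left
        // unnamed fall back to the automatic leaf_%d / node_%d names
        ggml_graph_dump_dot(&gf, NULL, "example.dot");
        // render with Graphviz: dot -Tpng example.dot -o example.png

        ggml_free(ctx);
        return 0;
    }
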
@@ -17432,7 +17483,9 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph
                 (void *) node, color);
 
         if (strlen(node->name) > 0) {
-            fprintf(fp, "%s |", node->name);
+            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
+        } else {
+            fprintf(fp, "(%s)|", ggml_type_name(node->type));
         }
 
         if (node->n_dims == 2) {
@@ -17441,7 +17494,6 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph
             fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], GGML_OP_SYMBOL[node->op]);
         }
 
-
         if (node->grad) {
             fprintf(fp, " | <g>%s\"; ]\n", GGML_OP_SYMBOL[node->grad->op]);
         } else {
@@ -17460,65 +17512,70 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph
                 (void *) node, color);
 
         if (strlen(node->name) > 0) {
-            fprintf(fp, "%s | ", node->name);
+            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
+        } else {
+            fprintf(fp, "(%s)|", ggml_type_name(node->type));
         }
-        if (ggml_nelements(node) == 1) {
-            if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
-                fprintf(fp, "%d", ggml_get_i32_1d(node, 0));
-            }
-            else {
-                fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, 0));
+
+        fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
+        if (ggml_nelements(node) < 5) {
+            fprintf(fp, " | (");
+            for (int j = 0; j < ggml_nelements(node); j++) {
+                if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
+                    fprintf(fp, "%d", ggml_get_i32_1d(node, j));
+                }
+                else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
+                    fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
+                }
+                else {
+                    fprintf(fp, "#");
+                }
+                if (j < ggml_nelements(node) - 1) {
+                    fprintf(fp, ", ");
+                }
             }
-        }
-        else {
-            fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
+            fprintf(fp, ")");
         }
         fprintf(fp, "\"; ]\n");
     }
 
     for (int i = 0; i < gb->n_nodes; i++) {
         struct ggml_tensor * node = gb->nodes[i];
 
-        struct ggml_tensor * parent = ggml_graph_get_parent(gb, node);
-
         if (node->src0) {
-            struct ggml_tensor * parent0 = ggml_graph_get_parent(gb, node->src0);
-
-            fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"x\"; ]\n",
-                    parent0 ? (void *) parent0 : (void *) node->src0,
-                    parent0 ? "g" : "x",
-                    parent ? (void *) parent : (void *) node,
-                    parent ? "g" : "x",
-                    parent ? "empty" : "vee",
-                    parent ? "dashed" : "solid");
+            ggml_graph_dump_dot_node_edge(fp, gb, node, node->src0, "x");
         }
 
         if (node->src1) {
-            struct ggml_tensor * parent1 = ggml_graph_get_parent(gb, node->src1);
-
-            fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"y\"; ]\n",
-                    parent1 ? (void *) parent1 : (void *) node->src1,
-                    parent1 ? "g" : "x",
-                    parent ? (void *) parent : (void *) node,
-                    parent ? "g" : "x",
-                    parent ? "empty" : "vee",
-                    parent ? "dashed" : "solid");
+            ggml_graph_dump_dot_node_edge(fp, gb, node, node->src1, "y");
+        }
+
+        for (int j = 0; j < GGML_MAX_OPT; j++) {
+            if (node->opt[j]) {
+                char label[16];
+                snprintf(label, sizeof(label), "opt %d", j);
+                ggml_graph_dump_dot_node_edge(fp, gb, node, node->opt[j], label);
+            }
         }
     }
 
     for (int i = 0; i < gb->n_leafs; i++) {
         struct ggml_tensor * node = gb->leafs[i];
 
         if (node->src0) {
-            fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ label = \"x\"; ]\n",
-                    (void *) node->src0, "x",
-                    (void *) node, "x");
+            ggml_graph_dump_dot_leaf_edge(fp, node, node->src0, "x");
        }
 
         if (node->src1) {
-            fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ label = \"y\"; ]\n",
-                    (void *) node->src1, "x",
-                    (void *) node, "x");
+            ggml_graph_dump_dot_leaf_edge(fp, node, node->src1, "y");
+        }
+
+        for (int j = 0; j < GGML_MAX_OPT; j++) {
+            if (node->opt[j]) {
+                char label[16];
+                snprintf(label, sizeof(label), "opt %d", j);
+                ggml_graph_dump_dot_leaf_edge(fp, node, node->opt[j], label);
+            }
         }
     }
 