Skip to content

Commit 4935b22

Browse files
James Hogan authored and Mike Turquette committed
clk: move some parent related functions upwards
Move some parent related functions up in clk.c so they can be used by the modifications in the following patch which enables clock reparenting during set_rate. No other changes are made so this patch makes no functional difference in isolation. This is separate from the following patch primarily to ease readability of that patch. Signed-off-by: James Hogan <james.hogan@imgtec.com> Reviewed-by: Stephen Boyd <sboyd@codeaurora.org> Cc: Mike Turquette <mturquette@linaro.org> Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Mike Turquette <mturquette@linaro.org>
1 parent 7ef3dcc commit 4935b22

File tree

1 file changed

+104
-104
lines changed

1 file changed

+104
-104
lines changed

drivers/clk/clk.c

Lines changed: 104 additions & 104 deletions
Original file line number · Diff line number · Diff line change
@@ -1028,6 +1028,110 @@ unsigned long clk_get_rate(struct clk *clk)
10281028
}
10291029
EXPORT_SYMBOL_GPL(clk_get_rate);
10301030

1031+
static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
1032+
{
1033+
u8 i;
1034+
1035+
if (!clk->parents)
1036+
clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1037+
GFP_KERNEL);
1038+
1039+
/*
1040+
* find index of new parent clock using cached parent ptrs,
1041+
* or if not yet cached, use string name comparison and cache
1042+
* them now to avoid future calls to __clk_lookup.
1043+
*/
1044+
for (i = 0; i < clk->num_parents; i++) {
1045+
if (clk->parents && clk->parents[i] == parent)
1046+
break;
1047+
else if (!strcmp(clk->parent_names[i], parent->name)) {
1048+
if (clk->parents)
1049+
clk->parents[i] = __clk_lookup(parent->name);
1050+
break;
1051+
}
1052+
}
1053+
1054+
return i;
1055+
}
1056+
1057+
static void clk_reparent(struct clk *clk, struct clk *new_parent)
1058+
{
1059+
hlist_del(&clk->child_node);
1060+
1061+
if (new_parent)
1062+
hlist_add_head(&clk->child_node, &new_parent->children);
1063+
else
1064+
hlist_add_head(&clk->child_node, &clk_orphan_list);
1065+
1066+
clk->parent = new_parent;
1067+
}
1068+
1069+
/*
 * __clk_set_parent - switch @clk's input to @parent, with rollback
 * @clk: the mux clock being reparented
 * @parent: the new parent clock
 * @p_index: index of @parent in @clk's parent table
 *           (as returned by clk_fetch_parent_index)
 *
 * Migrates prepare/enable state onto @parent when @clk is prepared,
 * updates the tree topology under the enable lock, then programs the
 * hardware mux via ->set_parent.  If the hardware op fails, the
 * topology change and the state migration are undone and the error is
 * returned.  Returns 0 on success.
 */
static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1070+
{
1071+
unsigned long flags;
1072+
int ret = 0;
1073+
struct clk *old_parent = clk->parent;
1074+
1075+
/*
1076+
* Migrate prepare state between parents and prevent race with
1077+
* clk_enable().
1078+
*
1079+
* If the clock is not prepared, then a race with
1080+
* clk_enable/disable() is impossible since we already have the
1081+
* prepare lock (future calls to clk_enable() need to be preceded by
1082+
* a clk_prepare()).
1083+
*
1084+
* If the clock is prepared, migrate the prepared state to the new
1085+
* parent and also protect against a race with clk_enable() by
1086+
* forcing the clock and the new parent on. This ensures that all
1087+
* future calls to clk_enable() are practically NOPs with respect to
1088+
* hardware and software states.
1089+
*
1090+
* See also: Comment for clk_set_parent() below.
1091+
*/
1092+
if (clk->prepare_count) {
1093+
__clk_prepare(parent);
1094+
clk_enable(parent);
1095+
clk_enable(clk);
1096+
}
1097+
1098+
/* update the clk tree topology */
1099+
flags = clk_enable_lock();
1100+
clk_reparent(clk, parent);
1101+
clk_enable_unlock(flags);
1102+
1103+
/* change clock input source */
1104+
if (parent && clk->ops->set_parent)
1105+
ret = clk->ops->set_parent(clk->hw, p_index);
1106+
1107+
/* hw mux update failed: roll topology and migrated state back */
if (ret) {
1108+
flags = clk_enable_lock();
1109+
clk_reparent(clk, old_parent);
1110+
clk_enable_unlock(flags);
1111+
1112+
if (clk->prepare_count) {
1113+
clk_disable(clk);
1114+
clk_disable(parent);
1115+
__clk_unprepare(parent);
1116+
}
1117+
return ret;
1118+
}
1119+
1120+
/*
1121+
* Finish the migration of prepare state and undo the changes done
1122+
* for preventing a race with clk_enable().
1123+
*/
1124+
if (clk->prepare_count) {
1125+
clk_disable(clk);
1126+
clk_disable(old_parent);
1127+
__clk_unprepare(old_parent);
1128+
}
1129+
1130+
/* update debugfs with new clk tree topology */
1131+
clk_debug_reparent(clk, parent);
1132+
return 0;
1133+
}
1134+
10311135
/**
10321136
* __clk_speculate_rates
10331137
* @clk: first clk in the subtree
@@ -1335,117 +1439,13 @@ static struct clk *__clk_init_parent(struct clk *clk)
13351439
return ret;
13361440
}
13371441

1338-
/* Move @clk under @new_parent in the clk tree (orphan list when NULL). */
static void clk_reparent(struct clk *clk, struct clk *new_parent)
1339-
{
1340-
hlist_del(&clk->child_node);
1341-
1342-
if (new_parent)
1343-
hlist_add_head(&clk->child_node, &new_parent->children);
1344-
else
1345-
hlist_add_head(&clk->child_node, &clk_orphan_list);
1346-
1347-
clk->parent = new_parent;
1348-
}
1349-
13501442
void __clk_reparent(struct clk *clk, struct clk *new_parent)
13511443
{
13521444
clk_reparent(clk, new_parent);
13531445
clk_debug_reparent(clk, new_parent);
13541446
__clk_recalc_rates(clk, POST_RATE_CHANGE);
13551447
}
13561448

1357-
/*
 * Find @parent's index in @clk's parent table, caching the struct clk
 * pointer on a name match.  Returns clk->num_parents when @parent is
 * not a possible parent of @clk -- callers must check that sentinel.
 */
static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
1358-
{
1359-
u8 i;
1360-
1361-
if (!clk->parents)
1362-
clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1363-
GFP_KERNEL);
1364-
1365-
/*
1366-
* find index of new parent clock using cached parent ptrs,
1367-
* or if not yet cached, use string name comparison and cache
1368-
* them now to avoid future calls to __clk_lookup.
1369-
*/
1370-
for (i = 0; i < clk->num_parents; i++) {
1371-
if (clk->parents && clk->parents[i] == parent)
1372-
break;
1373-
else if (!strcmp(clk->parent_names[i], parent->name)) {
1374-
if (clk->parents)
1375-
clk->parents[i] = __clk_lookup(parent->name);
1376-
break;
1377-
}
1378-
}
1379-
1380-
return i;
1381-
}
1382-
1383-
/*
 * Switch @clk's input to @parent (@p_index in its parent table):
 * migrate prepare/enable state, update topology under the enable lock,
 * program the hardware mux, and roll everything back on failure.
 * Returns 0 on success or the ->set_parent error code.
 */
static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1384-
{
1385-
unsigned long flags;
1386-
int ret = 0;
1387-
struct clk *old_parent = clk->parent;
1388-
1389-
/*
1390-
* Migrate prepare state between parents and prevent race with
1391-
* clk_enable().
1392-
*
1393-
* If the clock is not prepared, then a race with
1394-
* clk_enable/disable() is impossible since we already have the
1395-
* prepare lock (future calls to clk_enable() need to be preceded by
1396-
* a clk_prepare()).
1397-
*
1398-
* If the clock is prepared, migrate the prepared state to the new
1399-
* parent and also protect against a race with clk_enable() by
1400-
* forcing the clock and the new parent on. This ensures that all
1401-
* future calls to clk_enable() are practically NOPs with respect to
1402-
* hardware and software states.
1403-
*
1404-
* See also: Comment for clk_set_parent() below.
1405-
*/
1406-
if (clk->prepare_count) {
1407-
__clk_prepare(parent);
1408-
clk_enable(parent);
1409-
clk_enable(clk);
1410-
}
1411-
1412-
/* update the clk tree topology */
1413-
flags = clk_enable_lock();
1414-
clk_reparent(clk, parent);
1415-
clk_enable_unlock(flags);
1416-
1417-
/* change clock input source */
1418-
if (parent && clk->ops->set_parent)
1419-
ret = clk->ops->set_parent(clk->hw, p_index);
1420-
1421-
if (ret) {
1422-
flags = clk_enable_lock();
1423-
clk_reparent(clk, old_parent);
1424-
clk_enable_unlock(flags);
1425-
1426-
if (clk->prepare_count) {
1427-
clk_disable(clk);
1428-
clk_disable(parent);
1429-
__clk_unprepare(parent);
1430-
}
1431-
return ret;
1432-
}
1433-
1434-
/*
1435-
* Finish the migration of prepare state and undo the changes done
1436-
* for preventing a race with clk_enable().
1437-
*/
1438-
if (clk->prepare_count) {
1439-
clk_disable(clk);
1440-
clk_disable(old_parent);
1441-
__clk_unprepare(old_parent);
1442-
}
1443-
1444-
/* update debugfs with new clk tree topology */
1445-
clk_debug_reparent(clk, parent);
1446-
return 0;
1447-
}
1448-
14491449
/**
14501450
* clk_set_parent - switch the parent of a mux clk
14511451
* @clk: the mux clk whose input we are switching

0 commit comments

Comments
 (0)