firmware: arm_scmi: Use common iterators in the perf protocol
Make SCMI perf protocol use the common iterator protocol helpers for
issuing the multi-part commands.

Link: https://lore.kernel.org/r/20220330150551.2573938-19-cristian.marussi@arm.com
Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
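The common iterator helpers this change plugs into are not part of this diff; they live in the SCMI core and are introduced earlier in the same series. As a rough mental model only, with every type and function name below invented for illustration (only the three callback roles come from the diff itself), the loop the perf protocol now delegates to looks something like this:

/*
 * Illustrative sketch only: every name here is invented for this example.
 * It models the generic multi-part flow that the common SCMI iterator
 * helpers implement in the core: prepare a request for the next chunk,
 * transfer it, read num_returned/num_remaining from the reply, hand each
 * returned item to the protocol, and repeat until nothing is remaining.
 */
struct sketch_iter_state {
        unsigned int desc_index;    /* items consumed by previous chunks   */
        unsigned int loop_idx;      /* index within the current reply      */
        unsigned int num_returned;  /* items carried by the current reply  */
        unsigned int num_remaining; /* items the platform still holds      */
};

struct sketch_iter_ops {
        void (*prepare_message)(void *msg, unsigned int desc_index,
                                const void *priv);
        int (*update_state)(struct sketch_iter_state *st, const void *resp,
                            void *priv);
        int (*process_response)(const void *ph, const void *resp,
                                struct sketch_iter_state *st, void *priv);
};

/* Hypothetical stand-in for the real transport call (ph->xops->do_xfer()). */
extern int sketch_do_xfer(const void *ph, void *msg, void *resp);

static int sketch_iter_run(const void *ph, const struct sketch_iter_ops *ops,
                           void *msg, void *resp, void *priv,
                           unsigned int max_resources)
{
        struct sketch_iter_state st = { 0 };
        int ret;

        do {
                /* e.g. fill in the domain id and the starting level index */
                ops->prepare_message(msg, st.desc_index, priv);

                ret = sketch_do_xfer(ph, msg, resp);
                if (ret)
                        return ret;

                /* protocol-specific parsing of num_returned/num_remaining */
                ret = ops->update_state(&st, resp, priv);
                if (ret)
                        return ret;

                /* let the protocol consume each item of this chunk */
                for (st.loop_idx = 0; st.loop_idx < st.num_returned;
                     st.loop_idx++) {
                        ret = ops->process_response(ph, resp, &st, priv);
                        if (ret)
                                return ret;
                }

                st.desc_index += st.num_returned;
                /* stop on buggy firmware that never reports completion */
        } while (st.num_returned && st.num_remaining &&
                 st.desc_index < max_resources);

        return 0;
}

With that loop factored out, scmi_perf_describe_levels_get() only has to say how to build each request, how to pull num_returned/num_remaining out of a reply, and what to do with each returned OPP entry, which is exactly what the three iter_perf_levels_*() callbacks in the diff below provide.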
parent d8d7e91316
commit 79d2ea9244

1 changed file with 72 additions and 51 deletions
@@ -272,66 +272,87 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
 	return t1->perf - t2->perf;
 }
 
+struct scmi_perf_ipriv {
+	u32 domain;
+	struct perf_dom_info *perf_dom;
+};
+
+static void iter_perf_levels_prepare_message(void *message,
+					     unsigned int desc_index,
+					     const void *priv)
+{
+	struct scmi_msg_perf_describe_levels *msg = message;
+	const struct scmi_perf_ipriv *p = priv;
+
+	msg->domain = cpu_to_le32(p->domain);
+	/* Set the number of OPPs to be skipped/already read */
+	msg->level_index = cpu_to_le32(desc_index);
+}
+
+static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
+					 const void *response, void *priv)
+{
+	const struct scmi_msg_resp_perf_describe_levels *r = response;
+
+	st->num_returned = le16_to_cpu(r->num_returned);
+	st->num_remaining = le16_to_cpu(r->num_remaining);
+
+	return 0;
+}
+
+static int
+iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
+				  const void *response,
+				  struct scmi_iterator_state *st, void *priv)
+{
+	struct scmi_opp *opp;
+	const struct scmi_msg_resp_perf_describe_levels *r = response;
+	struct scmi_perf_ipriv *p = priv;
+
+	opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+	opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val);
+	opp->power = le32_to_cpu(r->opp[st->loop_idx].power);
+	opp->trans_latency_us =
+		le16_to_cpu(r->opp[st->loop_idx].transition_latency_us);
+	p->perf_dom->opp_count++;
+
+	dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
+		opp->perf, opp->power, opp->trans_latency_us);
+
+	return 0;
+}
+
 static int
 scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
 			      struct perf_dom_info *perf_dom)
 {
-	int ret, cnt;
-	u32 tot_opp_cnt = 0;
-	u16 num_returned, num_remaining;
-	struct scmi_xfer *t;
-	struct scmi_opp *opp;
-	struct scmi_msg_perf_describe_levels *dom_info;
-	struct scmi_msg_resp_perf_describe_levels *level_info;
+	int ret;
+	void *iter;
+	struct scmi_msg_perf_describe_levels *msg;
+	struct scmi_iterator_ops ops = {
+		.prepare_message = iter_perf_levels_prepare_message,
+		.update_state = iter_perf_levels_update_state,
+		.process_response = iter_perf_levels_process_response,
+	};
+	struct scmi_perf_ipriv ppriv = {
+		.domain = domain,
+		.perf_dom = perf_dom,
+	};
 
-	ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS,
-				      sizeof(*dom_info), 0, &t);
+	iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
+					    PERF_DESCRIBE_LEVELS,
+					    sizeof(*msg), &ppriv);
+	if (IS_ERR(iter))
+		return PTR_ERR(iter);
+
+	ret = ph->hops->iter_response_run(iter);
 	if (ret)
 		return ret;
 
-	dom_info = t->tx.buf;
-	level_info = t->rx.buf;
+	if (perf_dom->opp_count)
+		sort(perf_dom->opp, perf_dom->opp_count,
+		     sizeof(struct scmi_opp), opp_cmp_func, NULL);
 
-	do {
-		dom_info->domain = cpu_to_le32(domain);
-		/* Set the number of OPPs to be skipped/already read */
-		dom_info->level_index = cpu_to_le32(tot_opp_cnt);
-
-		ret = ph->xops->do_xfer(ph, t);
-		if (ret)
-			break;
-
-		num_returned = le16_to_cpu(level_info->num_returned);
-		num_remaining = le16_to_cpu(level_info->num_remaining);
-		if (tot_opp_cnt + num_returned > MAX_OPPS) {
-			dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS");
-			break;
-		}
-
-		opp = &perf_dom->opp[tot_opp_cnt];
-		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
-			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
-			opp->power = le32_to_cpu(level_info->opp[cnt].power);
-			opp->trans_latency_us = le16_to_cpu
-				(level_info->opp[cnt].transition_latency_us);
-
-			dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
-				opp->perf, opp->power, opp->trans_latency_us);
-		}
-
-		tot_opp_cnt += num_returned;
-
-		ph->xops->reset_rx_to_maxsz(ph, t);
-		/*
-		 * check for both returned and remaining to avoid infinite
-		 * loop due to buggy firmware
-		 */
-	} while (num_returned && num_remaining);
-
-	perf_dom->opp_count = tot_opp_cnt;
-	ph->xops->xfer_put(ph, t);
-
-	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
 	return ret;
 }
 