[DRAFT] Initial SDCA regmap integration #5150

Open
wants to merge 21 commits into base: topic/sof-dev
Changes from all commits (21 commits)
0673651
ASoC/soundwire: remove sdw_slave_extended_id
plbossart May 17, 2024
5f2314b
ASoC: SDCA: add initial module
plbossart Apr 16, 2024
4d5c222
soundwire: slave: lookup SDCA version and functions
plbossart May 14, 2024
4ad35f8
ASoC: SDCA: add quirk function for RT712_VB match
plbossart May 15, 2024
ff286cc
ASoC: soc-acpi: introduce new 'machine check' callback
plbossart May 16, 2024
3b58ad8
ASoC: Intel: soc-acpi: add is_device_rt712_vb() helper
plbossart May 15, 2024
64d415e
ASoC: SOF: Intel: hda: use machine_check() for SoundWire
plbossart May 15, 2024
3a7b55e
ASoC: SDCA: add helpers to handle SDCA interrupt sources
plbossart May 30, 2024
75d9b0d
ASoC: SDCA: add code to parse Function information
plbossart May 22, 2024
fc6acaa
ASoC: SDCA: add function devices
plbossart May 21, 2024
e0500c0
ASoC: SDCA: Add control parsing
charleskeepax Aug 8, 2024
506f757
ASoC: SDCA: Move calls to sdca_parse_function to device level
charleskeepax Aug 19, 2024
ec260ac
ASoC: SDCA: Allow passing in a regmap for each auxiliary device
charleskeepax Aug 20, 2024
5cfdf55
soundwire: SDCA: Add additional SDCA macros
charleskeepax Jun 12, 2024
392b639
regmap: sdw-mbq: Add support for further MBQ register sizes
charleskeepax May 10, 2024
36c9e09
regmap: sdw-mbq: Add support for SDCA deferred controls
charleskeepax Jun 20, 2024
0294c5a
ASoC: SDCA: Add generic regmap sdca helpers
charleskeepax Aug 13, 2024
4001fed
ASoC: SDCA: Add disco constants/default handling
charleskeepax Aug 13, 2024
6a64950
mfd: cs42l43: Strip the driver right back
charleskeepax Apr 29, 2024
7229554
mfd: cs42l43: Properly configure the firmware
charleskeepax Apr 29, 2024
4f60798
mfd: cs42l43: Setup device to use new SDCA class functionality
charleskeepax Aug 16, 2024
210 changes: 188 additions & 22 deletions drivers/base/regmap/regmap-sdw-mbq.c
@@ -1,45 +1,180 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
#include "internal.h"

struct regmap_mbq_context {
struct device *dev;

struct regmap_sdw_mbq_cfg cfg;

int val_size;
};

static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
{
int size = ctx->val_size;

if (ctx->cfg.mbq_size) {
size = ctx->cfg.mbq_size(ctx->dev, reg);
if (!size || size > ctx->val_size)
return -EINVAL;
}

return size;
}

static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
{
if (ctx->cfg.deferrable)
return ctx->cfg.deferrable(ctx->dev, reg);

return false;
}

static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
unsigned long timeout_us, unsigned long retry_us)
{
struct device *dev = &slave->dev;
int val, ret;

dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);

reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0, SDW_SDCA_FUNCTION_STATUS, 0);

ret = read_poll_timeout(sdw_read_no_pm, val,
val < 0 || !(val & SDW_SDCA_FUNCTION_BUSY),
timeout_us, retry_us, false, slave, reg);
if (val < 0)
return val;
if (ret)
dev_err(dev, "Timed out polling function busy 0x%x: %d\n", reg, val);

return ret;
}

static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
unsigned int reg, unsigned int val,
int mbq_size, bool deferrable)
{
int shift = mbq_size * BITS_PER_BYTE;
int ret;

while (--mbq_size > 0) {
shift -= BITS_PER_BYTE;

ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
(val >> shift) & 0xff);
if (ret < 0)
return ret;
}

ret = sdw_write_no_pm(slave, reg, val & 0xff);
if (deferrable && ret == -ENODATA)
return -EAGAIN;

return ret;
}

static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
-struct device *dev = context;
struct regmap_mbq_context *ctx = context;
struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;

-ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
-if (ret < 0)
-return ret;
if (mbq_size < 0)
return mbq_size;

/*
* Technically the spec does allow a device to set itself to busy for
* internal reasons, but since it doesn't provide any information on
* how to handle timeouts in that case, for now the code will only
* process a single wait/timeout on function busy and a single retry
* of the transaction.
*/
ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
if (ret == -EAGAIN) {
ret = regmap_sdw_mbq_poll_busy(slave, reg,
ctx->cfg.timeout_us, ctx->cfg.retry_us);
if (ret)
return ret;

ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
}

return ret;
}

static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
unsigned int reg, unsigned int *val,
int mbq_size, bool deferrable)
{
int shift = BITS_PER_BYTE;
int read;

read = sdw_read_no_pm(slave, reg);
if (read < 0) {
if (deferrable && read == -ENODATA)
return -EAGAIN;

return read;
}

*val = read;

while (--mbq_size > 0) {
read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
if (read < 0)
return read;

-return sdw_write_no_pm(slave, reg, val & 0xff);
*val |= read << shift;
shift += BITS_PER_BYTE;
}

return 0;
}

static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
-struct device *dev = context;
struct regmap_mbq_context *ctx = context;
struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
-int read0;
-int read1;
bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;

-read0 = sdw_read_no_pm(slave, reg);
-if (read0 < 0)
-return read0;
if (mbq_size < 0)
return mbq_size;

-read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
-if (read1 < 0)
-return read1;
/*
* Technically the spec does allow a device to set itself to busy for
* internal reasons, but since it doesn't provide any information on
* how to handle timeouts in that case, for now the code will only
* process a single wait/timeout on function busy and a single retry
* of the transaction.
*/
ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
if (ret == -EAGAIN) {
ret = regmap_sdw_mbq_poll_busy(slave, reg,
ctx->cfg.timeout_us, ctx->cfg.retry_us);
if (ret)
return ret;

-*val = (read1 << 8) | read0;
ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
}

-return 0;
return ret;
}

static const struct regmap_bus regmap_sdw_mbq = {
@@ -51,8 +186,7 @@ static const struct regmap_bus regmap_sdw_mbq = {

static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
{
-/* MBQ-based controls are only 16-bits for now */
-if (config->val_bits != 16)
if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
return -ENOTSUPP;

/* Registers are 32 bits wide */
@@ -65,35 +199,67 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
return 0;
}

static struct regmap_mbq_context *
regmap_sdw_mbq_gen_context(struct device *dev,
const struct regmap_config *config,
const struct regmap_sdw_mbq_cfg *mbq_config)
{
struct regmap_mbq_context *ctx;

ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);

ctx->dev = dev;
ctx->val_size = config->val_bits / BITS_PER_BYTE;

if (mbq_config)
ctx->cfg = *mbq_config;

return ctx;
}

struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct regmap_mbq_context *ctx;
int ret;

ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);

-return __regmap_init(&sdw->dev, &regmap_sdw_mbq,
-&sdw->dev, config, lock_key, lock_name);
ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);

return __regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);

struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct regmap_mbq_context *ctx;
int ret;

ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);

-return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq,
-&sdw->dev, config, lock_key, lock_name);
ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);

return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
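
For reference, here is how a client codec driver might use the new interface once these patches land. This is a hypothetical sketch: the wrapper macro taking the extra mbq_config argument is not shown in this hunk, so the devm_regmap_init_sdw_mbq_cfg() call below is an assumption, and all names and values are invented for illustration.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>

/* Hypothetical SDCA codec snippet; names and numbers are illustrative only. */
static int my_codec_mbq_size(struct device *dev, unsigned int reg)
{
	/* e.g. report 2-byte MBQ controls everywhere */
	return 2;
}

static bool my_codec_deferrable(struct device *dev, unsigned int reg)
{
	/* e.g. allow the deferred-control retry path on all SDCA controls */
	return true;
}

static const struct regmap_sdw_mbq_cfg my_codec_mbq_cfg = {
	.mbq_size   = my_codec_mbq_size,
	.deferrable = my_codec_deferrable,
	.timeout_us = 10000,	/* e.g. total wait for Function busy to clear */
	.retry_us   = 100,	/* e.g. polling interval */
};

static const struct regmap_config my_codec_regmap_cfg = {
	.reg_bits = 32,		/* required by regmap_sdw_mbq_config_check() */
	.val_bits = 16,		/* sizes up to sizeof(unsigned int) * 8 now accepted */
	/* ... ranges, defaults, cache settings ... */
};

static int my_codec_probe(struct sdw_slave *sdw)
{
	struct regmap *regmap;

	/* Assumed wrapper: a three-argument variant alongside devm_regmap_init_sdw_mbq() */
	regmap = devm_regmap_init_sdw_mbq_cfg(sdw, &my_codec_regmap_cfg, &my_codec_mbq_cfg);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return 0;
}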
