Patchwork [v5,2/4] perf: add arm64 smmuv3 pmu driver

Submitter Shameerali Kolothum Thodi
Date Nov. 30, 2018, 3:47 p.m.
Message ID <20181130154751.28580-3-shameerali.kolothum.thodi@huawei.com>
Permalink /patch/669331/
State New

Comments

Shameerali Kolothum Thodi - Nov. 30, 2018, 3:47 p.m.
From: Neil Leeder <nleeder@codeaurora.org>

Adds a new driver to support the SMMUv3 PMU and adds it into the
perf events framework.

Each SMMU node may have multiple PMUs associated with it, each of
which may support different events.

SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
<phys_addr_page> is the physical page address of the SMMU PMCG
wrapped to 4K boundary. For example, the PMCG at 0xff88840000 is
named smmuv3_pmcg_ff88840

Filtering by stream id is done by specifying filtering parameters
with the event. Options are:
   filter_enable    - 0 = no filtering, 1 = filtering enabled
   filter_span      - 0 = exact match, 1 = pattern match
   filter_stream_id - pattern to filter against

Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
                       filter_span=1,filter_stream_id=0x42/ -a netperf

Applies filter pattern 0x42 to transaction events, which means events
matching stream ids 0x42 & 0x43 are counted, as only the upper StreamID
bits are required to match the given filter. Further filtering
information is available in the SMMU documentation.
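
For illustration (a sketch only, not part of the driver;
span_match_range is a hypothetical helper), the range of StreamIDs
covered by a span-match pattern can be computed like this:

	/*
	 * With filter_span=1, the number of don't-care low bits is one
	 * more than the count of trailing 1s in the pattern: 0x42
	 * covers 0x42-0x43, while 0x43 would cover 0x40-0x47.
	 * (Assumes the pattern is not all-ones.)
	 */
	static void span_match_range(u32 pattern, u32 *first, u32 *last)
	{
		unsigned int y = __builtin_ctz(~pattern) + 1;
		u32 mask = (1U << y) - 1;

		*first = pattern & ~mask;
		*last = *first | mask;
	}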

SMMU events are not attributable to a CPU, so task mode and sampling
are not supported.

Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
 drivers/perf/Kconfig          |   9 +
 drivers/perf/Makefile         |   1 +
 drivers/perf/arm_smmuv3_pmu.c | 778 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 788 insertions(+)
 create mode 100644 drivers/perf/arm_smmuv3_pmu.c
Andrew Murray - Jan. 22, 2019, 4:23 p.m.
On Fri, Nov 30, 2018 at 03:47:49PM +0000, Shameer Kolothum wrote:
> From: Neil Leeder <nleeder@codeaurora.org>
> 
> Adds a new driver to support the SMMUv3 PMU and add it into the
> perf events framework.
> 
> Each SMMU node may have multiple PMUs associated with it, each of
> which may support different events.
> 
> SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
> <phys_addr_page> is the physical page address of the SMMU PMCG
> wrapped to 4K boundary. For example, the PMCG at 0xff88840000 is
> named smmuv3_pmcg_ff88840
> 
> Filtering by stream id is done by specifying filtering parameters
> with the event. options are:
>    filter_enable    - 0 = no filtering, 1 = filtering enabled
>    filter_span      - 0 = exact match, 1 = pattern match
>    filter_stream_id - pattern to filter against
> 
> Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
>                        filter_span=1,filter_stream_id=0x42/ -a netperf
> 
> Applies filter pattern 0x42 to transaction events, which means events
> matching stream ids 0x42 & 0x43 are counted as only upper StreamID
> bits are required to match the given filter. Further filtering
> information is available in the SMMU documentation.
> 
> SMMU events are not attributable to a CPU, so task mode and sampling
> are not supported.
> 
> Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> ---
>  drivers/perf/Kconfig          |   9 +
>  drivers/perf/Makefile         |   1 +
>  drivers/perf/arm_smmuv3_pmu.c | 778 ++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 788 insertions(+)
>  create mode 100644 drivers/perf/arm_smmuv3_pmu.c
> 
> diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
> index 08ebaf7..92be6a3 100644
> --- a/drivers/perf/Kconfig
> +++ b/drivers/perf/Kconfig
> @@ -52,6 +52,15 @@ config ARM_PMU_ACPI
>  	depends on ARM_PMU && ACPI
>  	def_bool y
>  
> +config ARM_SMMU_V3_PMU
> +	 bool "ARM SMMUv3 Performance Monitors Extension"
> +	 depends on ARM64 && ACPI && ARM_SMMU_V3
> +	   help
> +	   Provides support for the SMMU version 3 performance monitor unit (PMU)
> +	   on ARM-based systems.
> +	   Adds the SMMU PMU into the perf events subsystem for
> +	   monitoring SMMU performance events.
> +
>  config ARM_DSU_PMU
>  	tristate "ARM DynamIQ Shared Unit (DSU) PMU"
>  	depends on ARM64
> diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
> index b3902bd..f10a932 100644
> --- a/drivers/perf/Makefile
> +++ b/drivers/perf/Makefile
> @@ -4,6 +4,7 @@ obj-$(CONFIG_ARM_CCN) += arm-ccn.o
>  obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
>  obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
>  obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
> +obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
>  obj-$(CONFIG_HISI_PMU) += hisilicon/
>  obj-$(CONFIG_QCOM_L2_PMU)	+= qcom_l2_pmu.o
>  obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
> diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
> new file mode 100644
> index 0000000..fb9dcd8
> --- /dev/null
> +++ b/drivers/perf/arm_smmuv3_pmu.c
> @@ -0,0 +1,778 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +/*
> + * This driver adds support for perf events to use the Performance
> + * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
> + * to monitor that node.
> + *
> + * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
> + * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
> + * to 4K boundary. For example, the PMCG at 0xff88840000 is named
> + * smmuv3_pmcg_ff88840
> + *
> + * Filtering by stream id is done by specifying filtering parameters
> + * with the event. options are:
> + *   filter_enable    - 0 = no filtering, 1 = filtering enabled
> + *   filter_span      - 0 = exact match, 1 = pattern match
> + *   filter_stream_id - pattern to filter against
> + *
> + * To match a partial StreamID where the X most-significant bits must match
> + * but the Y least-significant bits might differ, STREAMID is programmed
> + * with a value that contains:
> + *  STREAMID[Y - 1] == 0.
> + *  STREAMID[Y - 2:0] == 1 (where Y > 1).
> + * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
> + * contain a value to match from the corresponding bits of event StreamID.
> + *
> + * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
> + *                    filter_span=1,filter_stream_id=0x42/ -a netperf
> + * Applies filter pattern 0x42 to transaction events, which means events
> + * matching stream ids 0x42 and 0x43 are counted. Further filtering
> + * information is available in the SMMU documentation.
> + *
> + * SMMU events are not attributable to a CPU, so task mode and sampling
> + * are not supported.
> + */
> +
> +#include <linux/acpi.h>
> +#include <linux/bitfield.h>
> +#include <linux/bitops.h>
> +#include <linux/cpuhotplug.h>
> +#include <linux/cpumask.h>
> +#include <linux/device.h>
> +#include <linux/errno.h>
> +#include <linux/interrupt.h>
> +#include <linux/irq.h>
> +#include <linux/kernel.h>
> +#include <linux/list.h>
> +#include <linux/msi.h>
> +#include <linux/perf_event.h>
> +#include <linux/platform_device.h>
> +#include <linux/smp.h>
> +#include <linux/sysfs.h>
> +#include <linux/types.h>
> +
> +#define SMMU_PMCG_EVCNTR0               0x0
> +#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
> +#define SMMU_PMCG_EVTYPER0              0x400
> +#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
> +#define SMMU_PMCG_SID_SPAN_SHIFT        29
> +#define SMMU_PMCG_SID_SPAN_MASK         GENMASK(29, 29)
> +#define SMMU_PMCG_SMR0                  0xA00
> +#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
> +#define SMMU_PMCG_CNTENSET0             0xC00
> +#define SMMU_PMCG_CNTENCLR0             0xC20
> +#define SMMU_PMCG_INTENSET0             0xC40
> +#define SMMU_PMCG_INTENCLR0             0xC60
> +#define SMMU_PMCG_OVSCLR0               0xC80
> +#define SMMU_PMCG_OVSSET0               0xCC0
> +#define SMMU_PMCG_CFGR                  0xE00
> +#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
> +#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
> +#define SMMU_PMCG_CFGR_SIZE_MASK        GENMASK(13, 8)
> +#define SMMU_PMCG_CFGR_NCTR_MASK        GENMASK(5, 0)
> +#define SMMU_PMCG_CR                    0xE04
> +#define SMMU_PMCG_CR_ENABLE             BIT(0)
> +#define SMMU_PMCG_CEID0                 0xE20
> +#define SMMU_PMCG_CEID1                 0xE28
> +#define SMMU_PMCG_IRQ_CTRL              0xE50
> +#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
> +#define SMMU_PMCG_IRQ_CFG0              0xE58
> +
> +#define SMMU_DEFAULT_FILTER_SPAN        1
> +#define SMMU_DEFAULT_FILTER_STREAM_ID   GENMASK(31, 0)
> +
> +#define SMMU_MAX_COUNTERS               64
> +#define SMMU_ARCH_MAX_EVENT_ID          0x7F
> +#define SMMU_IMPDEF_MAX_EVENT_ID        0xFFFF
> +#define SMMU_ARCH_MAX_EVENTS            (SMMU_ARCH_MAX_EVENT_ID + 1)
> +
> +#define SMMU_PA_SHIFT                   12
> +
> +static int cpuhp_state_num;
> +
> +struct smmu_pmu {
> +	bool sid_filter_global;
> +	struct hlist_node node;
> +	struct perf_event *events[SMMU_MAX_COUNTERS];
> +	DECLARE_BITMAP(used_counters, SMMU_MAX_COUNTERS);
> +	DECLARE_BITMAP(supported_events, SMMU_ARCH_MAX_EVENTS);
> +	unsigned int irq;
> +	unsigned int on_cpu;
> +	struct pmu pmu;
> +	unsigned int num_counters;
> +	struct device *dev;
> +	void __iomem *reg_base;
> +	void __iomem *reloc_base;
> +	u64 counter_present_mask;
> +	u64 counter_mask;
> +};
> +
> +#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
> +
> +#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)        \
> +	static inline u32 get_##_name(struct perf_event *event)            \
> +	{                                                                  \
> +		return FIELD_GET(GENMASK_ULL(_end, _start),                \
> +				 event->attr._config);                     \
> +	}                                                                  \
> +
> +SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
> +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
> +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
> +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
> +
> +static inline void smmu_pmu_enable(struct pmu *pmu)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> +
> +	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
> +	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
> +	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);

Should the interrupts be enabled before enabling the PMU?
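
Something like this, perhaps (sketch of the suggested ordering):

	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);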

> +}
> +
> +static inline void smmu_pmu_disable(struct pmu *pmu)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> +
> +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
> +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
> +}
> +
> +static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
> +					      u32 idx, u64 value)
> +{
> +	if (smmu_pmu->counter_mask & BIT(32))
> +		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
> +	else
> +		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

The arm64 IO macros use __force u32 and so it's probably OK to provide a 64 bit
value to writel. But you could use something like lower_32_bits for clarity.
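
For example (sketch):

	else
		writel(lower_32_bits(value),
		       smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));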

> +}
> +
> +static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
> +{
> +	u64 value;
> +
> +	if (smmu_pmu->counter_mask & BIT(32))
> +		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
> +	else
> +		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
> +
> +	return value;
> +}
> +
> +static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
> +{
> +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
> +}
> +
> +static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
> +{
> +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
> +}
> +
> +static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
> +{
> +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
> +}
> +
> +static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
> +					      u32 idx)
> +{
> +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
> +}
> +
> +static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
> +					u32 val)
> +{
> +	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
> +}
> +
> +static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
> +{
> +	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
> +}
> +
> +static void smmu_pmu_event_update(struct perf_event *event)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	u64 delta, prev, now;
> +	u32 idx = hwc->idx;
> +
> +	do {
> +		prev = local64_read(&hwc->prev_count);
> +		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
> +	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
> +
> +	/* handle overflow. */
> +	delta = now - prev;
> +	delta &= smmu_pmu->counter_mask;
> +
> +	local64_add(delta, &event->count);
> +}
> +
> +static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
> +				struct hw_perf_event *hwc)
> +{
> +	u32 idx = hwc->idx;
> +	u64 new;
> +
> +	/*
> +	 * We limit the max period to half the max counter value of the counter
> +	 * size, so that even in the case of extreme interrupt latency the
> +	 * counter will (hopefully) not wrap past its initial value.
> +	 */
> +	new = smmu_pmu->counter_mask >> 1;
> +
> +	local64_set(&hwc->prev_count, new);
> +	smmu_pmu_counter_set_value(smmu_pmu, idx, new);
> +}
> +
> +static void smmu_pmu_get_event_filter(struct perf_event *event, u32 *span,
> +				      u32 *stream_id)
> +{
> +	bool filter_en = !!get_filter_enable(event);
> +
> +	*span = filter_en ? get_filter_span(event) : SMMU_DEFAULT_FILTER_SPAN;
> +	*stream_id = filter_en ? get_filter_stream_id(event) :
> +				 SMMU_DEFAULT_FILTER_STREAM_ID;
> +}
> +
> +static bool smmu_pmu_event_filter_valid(struct smmu_pmu *smmu_pmu,
> +				  struct perf_event *event, int idx)
> +{
> +	u32 filter_span, filter_sid;
> +	u32 curr_span, curr_sid;
> +
> +	if (!idx  || !smmu_pmu->sid_filter_global)
> +		return true;
> +
> +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> +
> +	/* Read the current global filter setting */
> +	curr_span = FIELD_GET(SMMU_PMCG_SID_SPAN_MASK,
> +			      readl(smmu_pmu->reg_base + SMMU_PMCG_EVTYPER0));
> +	curr_sid = readl(smmu_pmu->reg_base + SMMU_PMCG_SMR0);
> +
> +	if (filter_span != curr_span || filter_sid != curr_sid)
> +		return false;
> +
> +	return true;
> +}
> +
> +static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
> +				  struct perf_event *event)
> +{
> +	unsigned int idx;
> +	unsigned int num_ctrs = smmu_pmu->num_counters;
> +
> +	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
> +	if (idx == num_ctrs)
> +		/* The counters are all in use. */
> +		return -EAGAIN;
> +
> +	if (!smmu_pmu_event_filter_valid(smmu_pmu, event, idx))
> +		return -EAGAIN;
> +
> +	set_bit(idx, smmu_pmu->used_counters);
> +
> +	return idx;
> +}
> +
> +/*
> + * Implementation of abstract pmu functionality required by
> + * the core perf events code.
> + */
> +
> +static int smmu_pmu_event_init(struct perf_event *event)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	struct device *dev = smmu_pmu->dev;
> +	struct perf_event *sibling;
> +	u32 event_id;
> +
> +	if (event->attr.type != event->pmu->type)
> +		return -ENOENT;
> +
> +	if (hwc->sample_period) {
> +		dev_dbg(dev, "Sampling not supported\n");
> +		return -EOPNOTSUPP;
> +	}
> +
> +	if (event->cpu < 0) {
> +		dev_dbg(dev, "Per-task mode not supported\n");
> +		return -EOPNOTSUPP;
> +	}
> +
> +	/* We cannot filter accurately so we just don't allow it. */
> +	if (event->attr.exclude_user || event->attr.exclude_kernel ||
> +	    event->attr.exclude_hv || event->attr.exclude_idle) {
> +		dev_dbg(dev, "Can't exclude execution levels\n");
> +		return -EOPNOTSUPP;
> +	}

You should also test for attr.exclude_host and attr.exclude_guest, unless
it makes sense to stop counting these events when entering guest/host?

Also returning -EINVAL may be more desirable than -EOPNOTSUPP. If a user
makes use of the perf utility and attempts to filter one of the above
exclusion flags - then perf will retry opening the event but without some
of the unsupported flags, but it will only do this if you return -EINVAL.

(See line 1927 of tools/perf/util/evsel.c and run the perf stat tool with
the verbose flag to see this in action.)
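
With both points folded in, the check might look something like this
(sketch only):

	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle ||
	    event->attr.exclude_host || event->attr.exclude_guest) {
		dev_dbg(dev, "Can't exclude execution levels\n");
		return -EINVAL;
	}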

> +
> +	/* Verify specified event is supported on this PMU */
> +	event_id = get_event(event);
> +	if (((event_id <= SMMU_ARCH_MAX_EVENT_ID) &&
> +	    (!test_bit(event_id, smmu_pmu->supported_events))) ||
> +	    (event_id > SMMU_IMPDEF_MAX_EVENT_ID)) {
> +		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
> +		return -EINVAL;
> +	}
> +
> +	/* Don't allow groups with mixed PMUs, except for s/w events */
> +	if (event->group_leader->pmu != event->pmu &&
> +	    !is_software_event(event->group_leader)) {
> +		dev_dbg(dev, "Can't create mixed PMU group\n");
> +		return -EINVAL;
> +	}
> +
> +	list_for_each_entry(sibling, &event->group_leader->sibling_list,
> +			    sibling_list)
> +		if (sibling->pmu != event->pmu &&
> +		    !is_software_event(sibling)) {
> +			dev_dbg(dev, "Can't create mixed PMU group\n");
> +			return -EINVAL;
> +		}
> +
> +	/* Ensure all events in a group are on the same cpu */
> +	if ((event->group_leader != event) &&
> +	    (event->cpu != event->group_leader->cpu)) {
> +		dev_dbg(dev, "Can't create group on CPUs %d and %d\n",
> +			event->cpu, event->group_leader->cpu);
> +		return -EINVAL;
> +	}
> +
> +	hwc->idx = -1;
> +
> +	/*
> +	 * Ensure all events are on the same cpu so all events are in the
> +	 * same cpu context, to avoid races on pmu_enable etc.
> +	 */
> +	event->cpu = smmu_pmu->on_cpu;
> +
> +	return 0;
> +}
> +
> +static void smmu_pmu_event_start(struct perf_event *event, int flags)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx = hwc->idx;
> +	u32 filter_span, filter_sid;
> +	u32 evtyper;
> +
> +	hwc->state = 0;
> +
> +	smmu_pmu_set_period(smmu_pmu, hwc);
> +
> +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> +
> +	evtyper = get_event(event) |
> +		  filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
> +
> +	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
> +	smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
> +	smmu_pmu_interrupt_enable(smmu_pmu, idx);
> +	smmu_pmu_counter_enable(smmu_pmu, idx);
> +}
> +
> +static void smmu_pmu_event_stop(struct perf_event *event, int flags)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx = hwc->idx;
> +
> +	if (hwc->state & PERF_HES_STOPPED)
> +		return;
> +
> +	smmu_pmu_counter_disable(smmu_pmu, idx);

Is it intentional not to call smmu_pmu_interrupt_disable here? 

> +
> +	if (flags & PERF_EF_UPDATE)
> +		smmu_pmu_event_update(event);
> +	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;

Shouldn't we only set PERF_HES_UPTODATE if we've updated the count (e.g.
when PERF_EF_UPDATE was set?)

This may be a nit, but shouldn't smmu_pmu_event_update come before the
test for PERF_HES_STOPPED? I guess in the odd case where stop is called
without the PERF_EF_UPDATE flag (to stop the counter) and then called
a second time with PERF_EF_UPDATE to update the count.
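
For the first point, something like (sketch):

	if (flags & PERF_EF_UPDATE) {
		smmu_pmu_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
	hwc->state |= PERF_HES_STOPPED;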

> +}
> +
> +static int smmu_pmu_event_add(struct perf_event *event, int flags)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx;
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +
> +	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
> +	if (idx < 0)
> +		return idx;
> +
> +	hwc->idx = idx;
> +	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
> +	smmu_pmu->events[idx] = event;
> +	local64_set(&hwc->prev_count, 0);
> +
> +	if (flags & PERF_EF_START)
> +		smmu_pmu_event_start(event, flags);
> +
> +	/* Propagate changes to the userspace mapping. */
> +	perf_event_update_userpage(event);
> +
> +	return 0;
> +}
> +
> +static void smmu_pmu_event_del(struct perf_event *event, int flags)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	int idx = hwc->idx;
> +
> +	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
> +	smmu_pmu->events[idx] = NULL;
> +	clear_bit(idx, smmu_pmu->used_counters);
> +
> +	perf_event_update_userpage(event);
> +}
> +
> +static void smmu_pmu_event_read(struct perf_event *event)
> +{
> +	smmu_pmu_event_update(event);
> +}
> +
> +/* cpumask */
> +
> +static ssize_t smmu_pmu_cpumask_show(struct device *dev,
> +				     struct device_attribute *attr,
> +				     char *buf)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
> +
> +	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
> +}
> +
> +static struct device_attribute smmu_pmu_cpumask_attr =
> +		__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);
> +
> +static struct attribute *smmu_pmu_cpumask_attrs[] = {
> +	&smmu_pmu_cpumask_attr.attr,
> +	NULL
> +};
> +
> +static struct attribute_group smmu_pmu_cpumask_group = {
> +	.attrs = smmu_pmu_cpumask_attrs,
> +};
> +
> +/* Events */
> +
> +ssize_t smmu_pmu_event_show(struct device *dev,
> +			    struct device_attribute *attr, char *page)
> +{
> +	struct perf_pmu_events_attr *pmu_attr;
> +
> +	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
> +
> +	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
> +}
> +
> +#define SMMU_EVENT_ATTR(_name, _id)					  \
> +	(&((struct perf_pmu_events_attr[]) {				  \
> +		{ .attr = __ATTR(_name, 0444, smmu_pmu_event_show, NULL), \
> +		  .id = _id, }						  \
> +	})[0].attr.attr)

This is very similar to PMU_EVENT_ATTR, have you considered taking an approach
similar to the ARMV8_EVENT_ATTR macro that wraps up PMU_EVENT_ATTR. It may
be more readable if we define these attributes consistently across PMUs.
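
e.g. something along these lines (a sketch reusing PMU_EVENT_ATTR; the
smmu_event_attr_##name variable naming is only a suggestion):

	#define SMMU_EVENT_ATTR(name, config)				\
		PMU_EVENT_ATTR(name, smmu_event_attr_##name,		\
			       config, smmu_pmu_event_show)

	SMMU_EVENT_ATTR(cycles, 0);

	static struct attribute *smmu_pmu_events[] = {
		&smmu_event_attr_cycles.attr.attr,
		NULL
	};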

Thanks,

Andrew Murray

> +
> +static struct attribute *smmu_pmu_events[] = {
> +	SMMU_EVENT_ATTR(cycles, 0),
> +	SMMU_EVENT_ATTR(transaction, 1),
> +	SMMU_EVENT_ATTR(tlb_miss, 2),
> +	SMMU_EVENT_ATTR(config_cache_miss, 3),
> +	SMMU_EVENT_ATTR(trans_table_walk_access, 4),
> +	SMMU_EVENT_ATTR(config_struct_access, 5),
> +	SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
> +	SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
> +	NULL
> +};
> +
> +static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
> +					 struct attribute *attr, int unused)
> +{
> +	struct device *dev = kobj_to_dev(kobj);
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
> +	struct perf_pmu_events_attr *pmu_attr;
> +
> +	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
> +
> +	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
> +		return attr->mode;
> +
> +	return 0;
> +}
> +static struct attribute_group smmu_pmu_events_group = {
> +	.name = "events",
> +	.attrs = smmu_pmu_events,
> +	.is_visible = smmu_pmu_event_is_visible,
> +};
> +
> +/* Formats */
> +PMU_FORMAT_ATTR(event,		   "config:0-15");
> +PMU_FORMAT_ATTR(filter_stream_id,  "config1:0-31");
> +PMU_FORMAT_ATTR(filter_span,	   "config1:32");
> +PMU_FORMAT_ATTR(filter_enable,	   "config1:33");
> +
> +static struct attribute *smmu_pmu_formats[] = {
> +	&format_attr_event.attr,
> +	&format_attr_filter_stream_id.attr,
> +	&format_attr_filter_span.attr,
> +	&format_attr_filter_enable.attr,
> +	NULL
> +};
> +
> +static struct attribute_group smmu_pmu_format_group = {
> +	.name = "format",
> +	.attrs = smmu_pmu_formats,
> +};
> +
> +static const struct attribute_group *smmu_pmu_attr_grps[] = {
> +	&smmu_pmu_cpumask_group,
> +	&smmu_pmu_events_group,
> +	&smmu_pmu_format_group,
> +	NULL
> +};
> +
> +/*
> + * Generic device handlers
> + */
> +
> +static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
> +{
> +	struct smmu_pmu *smmu_pmu;
> +	unsigned int target;
> +
> +	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
> +	if (cpu != smmu_pmu->on_cpu)
> +		return 0;
> +
> +	target = cpumask_any_but(cpu_online_mask, cpu);
> +	if (target >= nr_cpu_ids)
> +		return 0;
> +
> +	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
> +	smmu_pmu->on_cpu = target;
> +	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
> +
> +	return 0;
> +}
> +
> +static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
> +{
> +	struct smmu_pmu *smmu_pmu = data;
> +	u64 ovsr;
> +	unsigned int idx;
> +
> +	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
> +	if (!ovsr)
> +		return IRQ_NONE;
> +
> +	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
> +
> +	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
> +		struct perf_event *event = smmu_pmu->events[idx];
> +		struct hw_perf_event *hwc;
> +
> +		if (WARN_ON_ONCE(!event))
> +			continue;
> +
> +		smmu_pmu_event_update(event);
> +		hwc = &event->hw;
> +
> +		smmu_pmu_set_period(smmu_pmu, hwc);
> +	}
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
> +{
> +	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
> +	int irq, ret = -ENXIO;
> +
> +	irq = pmu->irq;
> +	if (irq)
> +		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
> +				       flags, "smmuv3-pmu", pmu);
> +	return ret;
> +}
> +
> +static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
> +{
> +	smmu_pmu_disable(&smmu_pmu->pmu);
> +
> +	/* Disable counter and interrupt */
> +	writeq_relaxed(smmu_pmu->counter_present_mask,
> +		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
> +	writeq_relaxed(smmu_pmu->counter_present_mask,
> +		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
> +	writeq_relaxed(smmu_pmu->counter_present_mask,
> +		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
> +}
> +
> +static int smmu_pmu_probe(struct platform_device *pdev)
> +{
> +	struct smmu_pmu *smmu_pmu;
> +	struct resource *res_0, *res_1;
> +	u32 cfgr, reg_size;
> +	u64 ceid_64[2];
> +	int irq, err;
> +	char *name;
> +	struct device *dev = &pdev->dev;
> +
> +	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
> +	if (!smmu_pmu)
> +		return -ENOMEM;
> +
> +	smmu_pmu->dev = dev;
> +	platform_set_drvdata(pdev, smmu_pmu);
> +
> +	smmu_pmu->pmu = (struct pmu) {
> +		.task_ctx_nr    = perf_invalid_context,
> +		.pmu_enable	= smmu_pmu_enable,
> +		.pmu_disable	= smmu_pmu_disable,
> +		.event_init	= smmu_pmu_event_init,
> +		.add		= smmu_pmu_event_add,
> +		.del		= smmu_pmu_event_del,
> +		.start		= smmu_pmu_event_start,
> +		.stop		= smmu_pmu_event_stop,
> +		.read		= smmu_pmu_event_read,
> +		.attr_groups	= smmu_pmu_attr_grps,
> +	};
> +
> +	res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
> +	if (IS_ERR(smmu_pmu->reg_base))
> +		return PTR_ERR(smmu_pmu->reg_base);
> +
> +	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
> +
> +	/* Determine if page 1 is present */
> +	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
> +		res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
> +		smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
> +		if (IS_ERR(smmu_pmu->reloc_base))
> +			return PTR_ERR(smmu_pmu->reloc_base);
> +	} else {
> +		smmu_pmu->reloc_base = smmu_pmu->reg_base;
> +	}
> +
> +	irq = platform_get_irq(pdev, 0);
> +	if (irq > 0)
> +		smmu_pmu->irq = irq;
> +
> +	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
> +	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
> +	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
> +			  SMMU_ARCH_MAX_EVENTS);
> +
> +	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR_MASK, cfgr) + 1;
> +	smmu_pmu->counter_present_mask = GENMASK(smmu_pmu->num_counters - 1, 0);
> +
> +	smmu_pmu->sid_filter_global = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
> +
> +	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE_MASK, cfgr);
> +	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
> +
> +	smmu_pmu_reset(smmu_pmu);
> +
> +	err = smmu_pmu_setup_irq(smmu_pmu);
> +	if (err) {
> +		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
> +		return err;
> +	}
> +
> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
> +			      (res_0->start) >> SMMU_PA_SHIFT);
> +	if (!name) {
> +		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
> +		return -EINVAL;
> +	}
> +
> +	/* Pick one CPU to be the preferred one to use */
> +	smmu_pmu->on_cpu = get_cpu();
> +	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
> +
> +	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
> +					       &smmu_pmu->node);
> +	if (err) {
> +		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
> +			err, &res_0->start);
> +		goto out_cpuhp_err;
> +	}
> +
> +	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
> +	if (err) {
> +		dev_err(dev, "Error %d registering PMU @%pa\n",
> +			err, &res_0->start);
> +		goto out_unregister;
> +	}
> +
> +	put_cpu();
> +	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
> +		 &res_0->start, smmu_pmu->num_counters,
> +		 smmu_pmu->sid_filter_global ? "Global(Counter0)" :
> +		 "Individual");
> +
> +	return 0;
> +
> +out_unregister:
> +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
> +out_cpuhp_err:
> +	put_cpu();
> +	return err;
> +}
> +
> +static int smmu_pmu_remove(struct platform_device *pdev)
> +{
> +	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
> +
> +	perf_pmu_unregister(&smmu_pmu->pmu);
> +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
> +
> +	return 0;
> +}
> +
> +static void smmu_pmu_shutdown(struct platform_device *pdev)
> +{
> +	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
> +
> +	smmu_pmu_disable(&smmu_pmu->pmu);
> +}
> +
> +static struct platform_driver smmu_pmu_driver = {
> +	.driver = {
> +		.name = "arm-smmu-v3-pmu",
> +	},
> +	.probe = smmu_pmu_probe,
> +	.remove = smmu_pmu_remove,
> +	.shutdown = smmu_pmu_shutdown,
> +};
> +
> +static int __init arm_smmu_pmu_init(void)
> +{
> +	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
> +				      "perf/arm/pmcg:online",
> +				      NULL,
> +				      smmu_pmu_offline_cpu);
> +	if (cpuhp_state_num < 0)
> +		return cpuhp_state_num;
> +
> +	return platform_driver_register(&smmu_pmu_driver);
> +}
> +module_init(arm_smmu_pmu_init);
> +
> +static void __exit arm_smmu_pmu_exit(void)
> +{
> +	platform_driver_unregister(&smmu_pmu_driver);
> +	cpuhp_remove_multi_state(cpuhp_state_num);
> +}
> +
> +module_exit(arm_smmu_pmu_exit);
> +
> +MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
> +MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
> +MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
> +MODULE_LICENSE("GPL v2");
> -- 
> 2.7.4
> 
> 
> 
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
Shameerali Kolothum Thodi - Jan. 23, 2019, 11:02 a.m.
Hi Andrew,

Thanks for taking a look at this.

> -----Original Message-----
> From: Andrew Murray [mailto:andrew.murray@arm.com]
> Sent: 22 January 2019 16:24
> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> Cc: lorenzo.pieralisi@arm.com; robin.murphy@arm.com;
> mark.rutland@arm.com; vkilari@codeaurora.org; neil.m.leeder@gmail.com;
> jean-philippe.brucker@arm.com; pabba@codeaurora.org; John Garry
> <john.garry@huawei.com>; will.deacon@arm.com; rruigrok@codeaurora.org;
> Linuxarm <linuxarm@huawei.com>; linux-kernel@vger.kernel.org;
> linux-acpi@vger.kernel.org; Guohanjun (Hanjun Guo)
> <guohanjun@huawei.com>; linux-arm-kernel@lists.infradead.org
> Subject: Re: [PATCH v5 2/4] perf: add arm64 smmuv3 pmu driver
> 
> On Fri, Nov 30, 2018 at 03:47:49PM +0000, Shameer Kolothum wrote:
> > From: Neil Leeder <nleeder@codeaurora.org>
> >
> > Adds a new driver to support the SMMUv3 PMU and add it into the
> > perf events framework.
> >
> > Each SMMU node may have multiple PMUs associated with it, each of
> > which may support different events.
> >
> > SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
> > <phys_addr_page> is the physical page address of the SMMU PMCG
> > wrapped to 4K boundary. For example, the PMCG at 0xff88840000 is
> > named smmuv3_pmcg_ff88840
> >
> > Filtering by stream id is done by specifying filtering parameters
> > with the event. options are:
> >    filter_enable    - 0 = no filtering, 1 = filtering enabled
> >    filter_span      - 0 = exact match, 1 = pattern match
> >    filter_stream_id - pattern to filter against
> >
> > Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
> >                        filter_span=1,filter_stream_id=0x42/ -a netperf
> >
> > Applies filter pattern 0x42 to transaction events, which means events
> > matching stream ids 0x42 & 0x43 are counted as only upper StreamID
> > bits are required to match the given filter. Further filtering
> > information is available in the SMMU documentation.
> >
> > SMMU events are not attributable to a CPU, so task mode and sampling
> > are not supported.
> >
> > Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
> > Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> > ---
> >  drivers/perf/Kconfig          |   9 +
> >  drivers/perf/Makefile         |   1 +
> >  drivers/perf/arm_smmuv3_pmu.c | 778 ++++++++++++++++++++++++++++++++++++++++++
> >  3 files changed, 788 insertions(+)
> >  create mode 100644 drivers/perf/arm_smmuv3_pmu.c
> >
> > diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
> > index 08ebaf7..92be6a3 100644
> > --- a/drivers/perf/Kconfig
> > +++ b/drivers/perf/Kconfig
> > @@ -52,6 +52,15 @@ config ARM_PMU_ACPI
> >  	depends on ARM_PMU && ACPI
> >  	def_bool y
> >
> > +config ARM_SMMU_V3_PMU
> > +	 bool "ARM SMMUv3 Performance Monitors Extension"
> > +	 depends on ARM64 && ACPI && ARM_SMMU_V3
> > +	   help
> > +	   Provides support for the SMMU version 3 performance monitor unit (PMU)
> > +	   on ARM-based systems.
> > +	   Adds the SMMU PMU into the perf events subsystem for
> > +	   monitoring SMMU performance events.
> > +
> >  config ARM_DSU_PMU
> >  	tristate "ARM DynamIQ Shared Unit (DSU) PMU"
> >  	depends on ARM64
> > diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
> > index b3902bd..f10a932 100644
> > --- a/drivers/perf/Makefile
> > +++ b/drivers/perf/Makefile
> > @@ -4,6 +4,7 @@ obj-$(CONFIG_ARM_CCN) += arm-ccn.o
> >  obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
> >  obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
> >  obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
> > +obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
> >  obj-$(CONFIG_HISI_PMU) += hisilicon/
> >  obj-$(CONFIG_QCOM_L2_PMU)	+= qcom_l2_pmu.o
> >  obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
> > diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
> > new file mode 100644
> > index 0000000..fb9dcd8
> > --- /dev/null
> > +++ b/drivers/perf/arm_smmuv3_pmu.c
> > @@ -0,0 +1,778 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +
> > +/*
> > + * This driver adds support for perf events to use the Performance
> > + * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
> > + * to monitor that node.
> > + *
> > + * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
> > + * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
> > + * to 4K boundary. For example, the PMCG at 0xff88840000 is named
> > + * smmuv3_pmcg_ff88840
> > + *
> > + * Filtering by stream id is done by specifying filtering parameters
> > + * with the event. options are:
> > + *   filter_enable    - 0 = no filtering, 1 = filtering enabled
> > + *   filter_span      - 0 = exact match, 1 = pattern match
> > + *   filter_stream_id - pattern to filter against
> > + *
> > + * To match a partial StreamID where the X most-significant bits must match
> > + * but the Y least-significant bits might differ, STREAMID is programmed
> > + * with a value that contains:
> > + *  STREAMID[Y - 1] == 0.
> > + *  STREAMID[Y - 2:0] == 1 (where Y > 1).
> > + * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
> > + * contain a value to match from the corresponding bits of event StreamID.
> > + *
> > + * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
> > + *                    filter_span=1,filter_stream_id=0x42/ -a netperf
> > + * Applies filter pattern 0x42 to transaction events, which means events
> > + * matching stream ids 0x42 and 0x43 are counted. Further filtering
> > + * information is available in the SMMU documentation.
> > + *
> > + * SMMU events are not attributable to a CPU, so task mode and sampling
> > + * are not supported.
> > + */
> > +
> > +#include <linux/acpi.h>
> > +#include <linux/bitfield.h>
> > +#include <linux/bitops.h>
> > +#include <linux/cpuhotplug.h>
> > +#include <linux/cpumask.h>
> > +#include <linux/device.h>
> > +#include <linux/errno.h>
> > +#include <linux/interrupt.h>
> > +#include <linux/irq.h>
> > +#include <linux/kernel.h>
> > +#include <linux/list.h>
> > +#include <linux/msi.h>
> > +#include <linux/perf_event.h>
> > +#include <linux/platform_device.h>
> > +#include <linux/smp.h>
> > +#include <linux/sysfs.h>
> > +#include <linux/types.h>
> > +
> > +#define SMMU_PMCG_EVCNTR0               0x0
> > +#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
> > +#define SMMU_PMCG_EVTYPER0              0x400
> > +#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
> > +#define SMMU_PMCG_SID_SPAN_SHIFT        29
> > +#define SMMU_PMCG_SID_SPAN_MASK         GENMASK(29, 29)
> > +#define SMMU_PMCG_SMR0                  0xA00
> > +#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
> > +#define SMMU_PMCG_CNTENSET0             0xC00
> > +#define SMMU_PMCG_CNTENCLR0             0xC20
> > +#define SMMU_PMCG_INTENSET0             0xC40
> > +#define SMMU_PMCG_INTENCLR0             0xC60
> > +#define SMMU_PMCG_OVSCLR0               0xC80
> > +#define SMMU_PMCG_OVSSET0               0xCC0
> > +#define SMMU_PMCG_CFGR                  0xE00
> > +#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
> > +#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
> > +#define SMMU_PMCG_CFGR_SIZE_MASK        GENMASK(13, 8)
> > +#define SMMU_PMCG_CFGR_NCTR_MASK        GENMASK(5, 0)
> > +#define SMMU_PMCG_CR                    0xE04
> > +#define SMMU_PMCG_CR_ENABLE             BIT(0)
> > +#define SMMU_PMCG_CEID0                 0xE20
> > +#define SMMU_PMCG_CEID1                 0xE28
> > +#define SMMU_PMCG_IRQ_CTRL              0xE50
> > +#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
> > +#define SMMU_PMCG_IRQ_CFG0              0xE58
> > +
> > +#define SMMU_DEFAULT_FILTER_SPAN        1
> > +#define SMMU_DEFAULT_FILTER_STREAM_ID   GENMASK(31, 0)
> > +
> > +#define SMMU_MAX_COUNTERS               64
> > +#define SMMU_ARCH_MAX_EVENT_ID          0x7F
> > +#define SMMU_IMPDEF_MAX_EVENT_ID        0xFFFF
> > +#define SMMU_ARCH_MAX_EVENTS            (SMMU_ARCH_MAX_EVENT_ID + 1)
> > +
> > +#define SMMU_PA_SHIFT                   12
> > +
> > +static int cpuhp_state_num;
> > +
> > +struct smmu_pmu {
> > +	bool sid_filter_global;
> > +	struct hlist_node node;
> > +	struct perf_event *events[SMMU_MAX_COUNTERS];
> > +	DECLARE_BITMAP(used_counters, SMMU_MAX_COUNTERS);
> > +	DECLARE_BITMAP(supported_events, SMMU_ARCH_MAX_EVENTS);
> > +	unsigned int irq;
> > +	unsigned int on_cpu;
> > +	struct pmu pmu;
> > +	unsigned int num_counters;
> > +	struct device *dev;
> > +	void __iomem *reg_base;
> > +	void __iomem *reloc_base;
> > +	u64 counter_present_mask;
> > +	u64 counter_mask;
> > +};
> > +
> > +#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
> > +
> > +#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)        \
> > +	static inline u32 get_##_name(struct perf_event *event)            \
> > +	{                                                                  \
> > +		return FIELD_GET(GENMASK_ULL(_end, _start),                \
> > +				 event->attr._config);                     \
> > +	}                                                                  \
> > +
> > +SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
> > +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
> > +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
> > +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
> > +
> > +static inline void smmu_pmu_enable(struct pmu *pmu)
> > +{
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> > +
> > +	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
> > +	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
> > +	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
> 
> Should the interrupts be enabled before enabling the PMU?

Yes, it makes sense to do so.

> > +}
> > +
> > +static inline void smmu_pmu_disable(struct pmu *pmu)
> > +{
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> > +
> > +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
> > +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
> > +}
> > +
> > +static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
> > +					      u32 idx, u64 value)
> > +{
> > +	if (smmu_pmu->counter_mask & BIT(32))
> > +		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
> > +	else
> > +		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
> 
> The arm64 IO macros use __force u32 and so it's probably OK to provide a 64 bit
> value to writel. But you could use something like lower_32_bits for clarity.

Yes, the macro uses __force u32. I will change it to make it clearer though.

> > +}
> > +
> > +static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
> > +{
> > +	u64 value;
> > +
> > +	if (smmu_pmu->counter_mask & BIT(32))
> > +		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
> > +	else
> > +		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
> > +
> > +	return value;
> > +}
> > +
> > +static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
> > +{
> > +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
> > +}
> > +
> > +static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
> > +{
> > +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
> > +}
> > +
> > +static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
> > +{
> > +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
> > +}
> > +
> > +static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
> > +					      u32 idx)
> > +{
> > +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
> > +}
> > +
> > +static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
> > +					u32 val)
> > +{
> > +	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
> > +}
> > +
> > +static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
> > +{
> > +	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
> > +}
> > +
> > +static void smmu_pmu_event_update(struct perf_event *event)
> > +{
> > +	struct hw_perf_event *hwc = &event->hw;
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > +	u64 delta, prev, now;
> > +	u32 idx = hwc->idx;
> > +
> > +	do {
> > +		prev = local64_read(&hwc->prev_count);
> > +		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
> > +	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
> > +
> > +	/* handle overflow. */
> > +	delta = now - prev;
> > +	delta &= smmu_pmu->counter_mask;
> > +
> > +	local64_add(delta, &event->count);
> > +}
> > +
> > +static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
> > +				struct hw_perf_event *hwc)
> > +{
> > +	u32 idx = hwc->idx;
> > +	u64 new;
> > +
> > +	/*
> > +	 * We limit the max period to half the max counter value of the counter
> > +	 * size, so that even in the case of extreme interrupt latency the
> > +	 * counter will (hopefully) not wrap past its initial value.
> > +	 */
> > +	new = smmu_pmu->counter_mask >> 1;
> > +
> > +	local64_set(&hwc->prev_count, new);
> > +	smmu_pmu_counter_set_value(smmu_pmu, idx, new);
> > +}
> > +
> > +static void smmu_pmu_get_event_filter(struct perf_event *event, u32 *span,
> > +				      u32 *stream_id)
> > +{
> > +	bool filter_en = !!get_filter_enable(event);
> > +
> > +	*span = filter_en ? get_filter_span(event) : SMMU_DEFAULT_FILTER_SPAN;
> > +	*stream_id = filter_en ? get_filter_stream_id(event) :
> > +				 SMMU_DEFAULT_FILTER_STREAM_ID;
> > +}
> > +
> > +static bool smmu_pmu_event_filter_valid(struct smmu_pmu *smmu_pmu,
> > +				  struct perf_event *event, int idx)
> > +{
> > +	u32 filter_span, filter_sid;
> > +	u32 curr_span, curr_sid;
> > +
> > +	if (!idx  || !smmu_pmu->sid_filter_global)
> > +		return true;
> > +
> > +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> > +
> > +	/* Read the current global filter setting */
> > +	curr_span = FIELD_GET(SMMU_PMCG_SID_SPAN_MASK,
> > +			      readl(smmu_pmu->reg_base + SMMU_PMCG_EVTYPER0));
> > +	curr_sid = readl(smmu_pmu->reg_base + SMMU_PMCG_SMR0);
> > +
> > +	if (filter_span != curr_span || filter_sid != curr_sid)
> > +		return false;
> > +
> > +	return true;
> > +}
> > +
> > +static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
> > +				  struct perf_event *event)
> > +{
> > +	unsigned int idx;
> > +	unsigned int num_ctrs = smmu_pmu->num_counters;
> > +
> > +	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
> > +	if (idx == num_ctrs)
> > +		/* The counters are all in use. */
> > +		return -EAGAIN;
> > +
> > +	if (!smmu_pmu_event_filter_valid(smmu_pmu, event, idx))
> > +		return -EAGAIN;
> > +
> > +	set_bit(idx, smmu_pmu->used_counters);
> > +
> > +	return idx;
> > +}
> > +
> > +/*
> > + * Implementation of abstract pmu functionality required by
> > + * the core perf events code.
> > + */
> > +
> > +static int smmu_pmu_event_init(struct perf_event *event)
> > +{
> > +	struct hw_perf_event *hwc = &event->hw;
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > +	struct device *dev = smmu_pmu->dev;
> > +	struct perf_event *sibling;
> > +	u32 event_id;
> > +
> > +	if (event->attr.type != event->pmu->type)
> > +		return -ENOENT;
> > +
> > +	if (hwc->sample_period) {
> > +		dev_dbg(dev, "Sampling not supported\n");
> > +		return -EOPNOTSUPP;
> > +	}
> > +
> > +	if (event->cpu < 0) {
> > +		dev_dbg(dev, "Per-task mode not supported\n");
> > +		return -EOPNOTSUPP;
> > +	}
> > +
> > +	/* We cannot filter accurately so we just don't allow it. */
> > +	if (event->attr.exclude_user || event->attr.exclude_kernel ||
> > +	    event->attr.exclude_hv || event->attr.exclude_idle) {
> > +		dev_dbg(dev, "Can't exclude execution levels\n");
> > +		return -EOPNOTSUPP;
> > +	}
> 
> You should also test for attr.exclude_host and attr.exclude_guest, unless
> it makes sense to stop counting these events when entering guest/host?

TBH, I haven't considered that scenario yet and don't think we have to stop the 
counters. I will check that.

> 
> Also returning -EINVAL may be more desirable than -EOPNOTSUPP. If a user
> makes use of the perf utility and attempts to filter one of the above
> exclusion flags - then perf will retry opening the event but without some
> of the unsupported flags, but it will only do this if you return -EINVAL.
> 
> (See line 1927 of tools/perf/util/evsel.c and run the perf stat tool with
> the verbose flag to see this in action.)

Ok. I will take a look.

> > +
> > +	/* Verify specified event is supported on this PMU */
> > +	event_id = get_event(event);
> > +	if (((event_id <= SMMU_ARCH_MAX_EVENT_ID) &&
> > +	    (!test_bit(event_id, smmu_pmu->supported_events))) ||
> > +	    (event_id > SMMU_IMPDEF_MAX_EVENT_ID)) {
> > +		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* Don't allow groups with mixed PMUs, except for s/w events */
> > +	if (event->group_leader->pmu != event->pmu &&
> > +	    !is_software_event(event->group_leader)) {
> > +		dev_dbg(dev, "Can't create mixed PMU group\n");
> > +		return -EINVAL;
> > +	}
> > +
> > +	list_for_each_entry(sibling, &event->group_leader->sibling_list,
> > +			    sibling_list)
> > +		if (sibling->pmu != event->pmu &&
> > +		    !is_software_event(sibling)) {
> > +			dev_dbg(dev, "Can't create mixed PMU group\n");
> > +			return -EINVAL;
> > +		}
> > +
> > +	/* Ensure all events in a group are on the same cpu */
> > +	if ((event->group_leader != event) &&
> > +	    (event->cpu != event->group_leader->cpu)) {
> > +		dev_dbg(dev, "Can't create group on CPUs %d and %d\n",
> > +			event->cpu, event->group_leader->cpu);
> > +		return -EINVAL;
> > +	}
> > +
> > +	hwc->idx = -1;
> > +
> > +	/*
> > +	 * Ensure all events are on the same cpu so all events are in the
> > +	 * same cpu context, to avoid races on pmu_enable etc.
> > +	 */
> > +	event->cpu = smmu_pmu->on_cpu;
> > +
> > +	return 0;
> > +}
> > +
> > +static void smmu_pmu_event_start(struct perf_event *event, int flags)
> > +{
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > +	struct hw_perf_event *hwc = &event->hw;
> > +	int idx = hwc->idx;
> > +	u32 filter_span, filter_sid;
> > +	u32 evtyper;
> > +
> > +	hwc->state = 0;
> > +
> > +	smmu_pmu_set_period(smmu_pmu, hwc);
> > +
> > +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> > +
> > +	evtyper = get_event(event) |
> > +		  filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
> > +
> > +	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
> > +	smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
> > +	smmu_pmu_interrupt_enable(smmu_pmu, idx);
> > +	smmu_pmu_counter_enable(smmu_pmu, idx);
> > +}
> > +
> > +static void smmu_pmu_event_stop(struct perf_event *event, int flags)
> > +{
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > +	struct hw_perf_event *hwc = &event->hw;
> > +	int idx = hwc->idx;
> > +
> > +	if (hwc->state & PERF_HES_STOPPED)
> > +		return;
> > +
> > +	smmu_pmu_counter_disable(smmu_pmu, idx);
> 
> Is it intentional not to call smmu_pmu_interrupt_disable here?

Yes, it is. An earlier patch had the interrupt toggling, and Robin pointed out
that it is not really needed as the counters are stopped anyway, and explicitly
disabling may not solve the spurious interrupt case either.

> > +
> > +	if (flags & PERF_EF_UPDATE)
> > +		smmu_pmu_event_update(event);
> > +	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
> 
> Shouldn't we only set PERF_HES_UPTODATE if we've updated the count (e.g.
> when PERF_EF_UPDATE was set?)

Hmm.. I had a look at a couple of other drivers (arm-cci.c/arm_dsu_pmu.c etc.)
and those always call update, ignoring PERF_EF_UPDATE, and set
PERF_HES_UPTODATE. I think the reason is that the counter is always
reprogrammed on start. Do you think changing to that logic is a better way?
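
i.e. something like this in smmu_pmu_event_stop() (sketch of that
pattern):

	if (hwc->state & PERF_HES_STOPPED)
		return;

	smmu_pmu_counter_disable(smmu_pmu, idx);
	/* Always fold in the final count; start() reprograms the counter */
	smmu_pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;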

> This may be a nit, but shouldn't smmu_pmu_event_update come before the
> test for PERF_HES_STOPPED? I guess in the odd case where stop is called
> without the PERF_EF_UPDATE flag (to stop the counter) and then called
> a second time with PERF_EF_UPDATE to update the count.

Not sure such a scenario exists. Most of the ones I checked have the logic:
it's already stopped, nothing to do.

> > +}
> > +
> > +static int smmu_pmu_event_add(struct perf_event *event, int flags)
> > +{
> > +	struct hw_perf_event *hwc = &event->hw;
> > +	int idx;
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > +
> > +	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
> > +	if (idx < 0)
> > +		return idx;
> > +
> > +	hwc->idx = idx;
> > +	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
> > +	smmu_pmu->events[idx] = event;
> > +	local64_set(&hwc->prev_count, 0);
> > +
> > +	if (flags & PERF_EF_START)
> > +		smmu_pmu_event_start(event, flags);
> > +
> > +	/* Propagate changes to the userspace mapping. */
> > +	perf_event_update_userpage(event);
> > +
> > +	return 0;
> > +}
> > +
> > +static void smmu_pmu_event_del(struct perf_event *event, int flags)
> > +{
> > +	struct hw_perf_event *hwc = &event->hw;
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > +	int idx = hwc->idx;
> > +
> > +	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
> > +	smmu_pmu->events[idx] = NULL;
> > +	clear_bit(idx, smmu_pmu->used_counters);
> > +
> > +	perf_event_update_userpage(event);
> > +}
> > +
> > +static void smmu_pmu_event_read(struct perf_event *event)
> > +{
> > +	smmu_pmu_event_update(event);
> > +}
> > +
> > +/* cpumask */
> > +
> > +static ssize_t smmu_pmu_cpumask_show(struct device *dev,
> > +				     struct device_attribute *attr,
> > +				     char *buf)
> > +{
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
> > +
> > +	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
> > +}
> > +
> > +static struct device_attribute smmu_pmu_cpumask_attr =
> > +		__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);
> > +
> > +static struct attribute *smmu_pmu_cpumask_attrs[] = {
> > +	&smmu_pmu_cpumask_attr.attr,
> > +	NULL
> > +};
> > +
> > +static struct attribute_group smmu_pmu_cpumask_group = {
> > +	.attrs = smmu_pmu_cpumask_attrs,
> > +};
> > +
> > +/* Events */
> > +
> > +ssize_t smmu_pmu_event_show(struct device *dev,
> > +			    struct device_attribute *attr, char *page)
> > +{
> > +	struct perf_pmu_events_attr *pmu_attr;
> > +
> > +	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
> > +
> > +	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
> > +}
> > +
> > +#define SMMU_EVENT_ATTR(_name, _id)					  \
> > +	(&((struct perf_pmu_events_attr[]) {				  \
> > +		{ .attr = __ATTR(_name, 0444, smmu_pmu_event_show, NULL), \
> > +		  .id = _id, }						  \
> > +	})[0].attr.attr)
> 
> This is very similar to PMU_EVENT_ATTR, have you considered taking an approach
> similar to the ARMV8_EVENT_ATTR macro that wraps up PMU_EVENT_ATTR. It may
> be more readable if we define these attributes consistently across PMUs.

Ok. I will take a look at that.

Thanks,
Shameer

> Thanks,
> 
> Andrew Murray
> 
> > +
> > +static struct attribute *smmu_pmu_events[] = {
> > +	SMMU_EVENT_ATTR(cycles, 0),
> > +	SMMU_EVENT_ATTR(transaction, 1),
> > +	SMMU_EVENT_ATTR(tlb_miss, 2),
> > +	SMMU_EVENT_ATTR(config_cache_miss, 3),
> > +	SMMU_EVENT_ATTR(trans_table_walk_access, 4),
> > +	SMMU_EVENT_ATTR(config_struct_access, 5),
> > +	SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
> > +	SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
> > +	NULL
> > +};
> > +
> > +static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
> > +					 struct attribute *attr, int unused)
> > +{
> > +	struct device *dev = kobj_to_dev(kobj);
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
> > +	struct perf_pmu_events_attr *pmu_attr;
> > +
> > +	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
> > +
> > +	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
> > +		return attr->mode;
> > +
> > +	return 0;
> > +}
> > +static struct attribute_group smmu_pmu_events_group = {
> > +	.name = "events",
> > +	.attrs = smmu_pmu_events,
> > +	.is_visible = smmu_pmu_event_is_visible,
> > +};
> > +
> > +/* Formats */
> > +PMU_FORMAT_ATTR(event,		   "config:0-15");
> > +PMU_FORMAT_ATTR(filter_stream_id,  "config1:0-31");
> > +PMU_FORMAT_ATTR(filter_span,	   "config1:32");
> > +PMU_FORMAT_ATTR(filter_enable,	   "config1:33");
> > +
> > +static struct attribute *smmu_pmu_formats[] = {
> > +	&format_attr_event.attr,
> > +	&format_attr_filter_stream_id.attr,
> > +	&format_attr_filter_span.attr,
> > +	&format_attr_filter_enable.attr,
> > +	NULL
> > +};
> > +
> > +static struct attribute_group smmu_pmu_format_group = {
> > +	.name = "format",
> > +	.attrs = smmu_pmu_formats,
> > +};
> > +
> > +static const struct attribute_group *smmu_pmu_attr_grps[] = {
> > +	&smmu_pmu_cpumask_group,
> > +	&smmu_pmu_events_group,
> > +	&smmu_pmu_format_group,
> > +	NULL
> > +};
> > +
> > +/*
> > + * Generic device handlers
> > + */
> > +
> > +static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
> > +{
> > +	struct smmu_pmu *smmu_pmu;
> > +	unsigned int target;
> > +
> > +	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
> > +	if (cpu != smmu_pmu->on_cpu)
> > +		return 0;
> > +
> > +	target = cpumask_any_but(cpu_online_mask, cpu);
> > +	if (target >= nr_cpu_ids)
> > +		return 0;
> > +
> > +	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
> > +	smmu_pmu->on_cpu = target;
> > +	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
> > +
> > +	return 0;
> > +}
> > +
> > +static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
> > +{
> > +	struct smmu_pmu *smmu_pmu = data;
> > +	u64 ovsr;
> > +	unsigned int idx;
> > +
> > +	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
> > +	if (!ovsr)
> > +		return IRQ_NONE;
> > +
> > +	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
> > +
> > +	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
> > +		struct perf_event *event = smmu_pmu->events[idx];
> > +		struct hw_perf_event *hwc;
> > +
> > +		if (WARN_ON_ONCE(!event))
> > +			continue;
> > +
> > +		smmu_pmu_event_update(event);
> > +		hwc = &event->hw;
> > +
> > +		smmu_pmu_set_period(smmu_pmu, hwc);
> > +	}
> > +
> > +	return IRQ_HANDLED;
> > +}
> > +
> > +static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
> > +{
> > +	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
> > +	int irq, ret = -ENXIO;
> > +
> > +	irq = pmu->irq;
> > +	if (irq)
> > +		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
> > +				       flags, "smmuv3-pmu", pmu);
> > +	return ret;
> > +}
> > +
> > +static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
> > +{
> > +	smmu_pmu_disable(&smmu_pmu->pmu);
> > +
> > +	/* Disable counter and interrupt */
> > +	writeq_relaxed(smmu_pmu->counter_present_mask,
> > +		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
> > +	writeq_relaxed(smmu_pmu->counter_present_mask,
> > +		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
> > +	writeq_relaxed(smmu_pmu->counter_present_mask,
> > +		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
> > +}
> > +
> > +static int smmu_pmu_probe(struct platform_device *pdev)
> > +{
> > +	struct smmu_pmu *smmu_pmu;
> > +	struct resource *res_0, *res_1;
> > +	u32 cfgr, reg_size;
> > +	u64 ceid_64[2];
> > +	int irq, err;
> > +	char *name;
> > +	struct device *dev = &pdev->dev;
> > +
> > +	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
> > +	if (!smmu_pmu)
> > +		return -ENOMEM;
> > +
> > +	smmu_pmu->dev = dev;
> > +	platform_set_drvdata(pdev, smmu_pmu);
> > +
> > +	smmu_pmu->pmu = (struct pmu) {
> > +		.task_ctx_nr    = perf_invalid_context,
> > +		.pmu_enable	= smmu_pmu_enable,
> > +		.pmu_disable	= smmu_pmu_disable,
> > +		.event_init	= smmu_pmu_event_init,
> > +		.add		= smmu_pmu_event_add,
> > +		.del		= smmu_pmu_event_del,
> > +		.start		= smmu_pmu_event_start,
> > +		.stop		= smmu_pmu_event_stop,
> > +		.read		= smmu_pmu_event_read,
> > +		.attr_groups	= smmu_pmu_attr_grps,
> > +	};
> > +
> > +	res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> > +	smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
> > +	if (IS_ERR(smmu_pmu->reg_base))
> > +		return PTR_ERR(smmu_pmu->reg_base);
> > +
> > +	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
> > +
> > +	/* Determine if page 1 is present */
> > +	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
> > +		res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
> > +		smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
> > +		if (IS_ERR(smmu_pmu->reloc_base))
> > +			return PTR_ERR(smmu_pmu->reloc_base);
> > +	} else {
> > +		smmu_pmu->reloc_base = smmu_pmu->reg_base;
> > +	}
> > +
> > +	irq = platform_get_irq(pdev, 0);
> > +	if (irq > 0)
> > +		smmu_pmu->irq = irq;
> > +
> > +	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
> > +	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
> > +	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
> > +			  SMMU_ARCH_MAX_EVENTS);
> > +
> > +	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR_MASK, cfgr) + 1;
> > +	smmu_pmu->counter_present_mask = GENMASK(smmu_pmu->num_counters - 1, 0);
> > +
> > +	smmu_pmu->sid_filter_global = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
> > +
> > +	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE_MASK, cfgr);
> > +	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
> > +
> > +	smmu_pmu_reset(smmu_pmu);
> > +
> > +	err = smmu_pmu_setup_irq(smmu_pmu);
> > +	if (err) {
> > +		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
> > +		return err;
> > +	}
> > +
> > +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
> > +			      (res_0->start) >> SMMU_PA_SHIFT);
> > +	if (!name) {
> > +		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* Pick one CPU to be the preferred one to use */
> > +	smmu_pmu->on_cpu = get_cpu();
> > +	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
> > +
> > +	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
> > +					       &smmu_pmu->node);
> > +	if (err) {
> > +		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
> > +			err, &res_0->start);
> > +		goto out_cpuhp_err;
> > +	}
> > +
> > +	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
> > +	if (err) {
> > +		dev_err(dev, "Error %d registering PMU @%pa\n",
> > +			err, &res_0->start);
> > +		goto out_unregister;
> > +	}
> > +
> > +	put_cpu();
> > +	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
> > +		 &res_0->start, smmu_pmu->num_counters,
> > +		 smmu_pmu->sid_filter_global ? "Global(Counter0)" :
> > +		 "Individual");
> > +
> > +	return 0;
> > +
> > +out_unregister:
> > +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
> > +out_cpuhp_err:
> > +	put_cpu();
> > +	return err;
> > +}
> > +
> > +static int smmu_pmu_remove(struct platform_device *pdev)
> > +{
> > +	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
> > +
> > +	perf_pmu_unregister(&smmu_pmu->pmu);
> > +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
> > +
> > +	return 0;
> > +}
> > +
> > +static void smmu_pmu_shutdown(struct platform_device *pdev)
> > +{
> > +	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
> > +
> > +	smmu_pmu_disable(&smmu_pmu->pmu);
> > +}
> > +
> > +static struct platform_driver smmu_pmu_driver = {
> > +	.driver = {
> > +		.name = "arm-smmu-v3-pmu",
> > +	},
> > +	.probe = smmu_pmu_probe,
> > +	.remove = smmu_pmu_remove,
> > +	.shutdown = smmu_pmu_shutdown,
> > +};
> > +
> > +static int __init arm_smmu_pmu_init(void)
> > +{
> > +	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
> > +				      "perf/arm/pmcg:online",
> > +				      NULL,
> > +				      smmu_pmu_offline_cpu);
> > +	if (cpuhp_state_num < 0)
> > +		return cpuhp_state_num;
> > +
> > +	return platform_driver_register(&smmu_pmu_driver);
> > +}
> > +module_init(arm_smmu_pmu_init);
> > +
> > +static void __exit arm_smmu_pmu_exit(void)
> > +{
> > +	platform_driver_unregister(&smmu_pmu_driver);
> > +	cpuhp_remove_multi_state(cpuhp_state_num);
> > +}
> > +
> > +module_exit(arm_smmu_pmu_exit);
> > +
> > +MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
> > +MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
> > +MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
> > +MODULE_LICENSE("GPL v2");
> > --
> > 2.7.4
> >
> >
> >
Andrew Murray - Jan. 23, 2019, 12:14 p.m.
On Wed, Jan 23, 2019 at 11:02:48AM +0000, Shameerali Kolothum Thodi wrote:
> Hi Andrew,
> 
> Thanks for taking a look at this.
> 
> > > From: Neil Leeder <nleeder@codeaurora.org>
> > >
> > > Adds a new driver to support the SMMUv3 PMU and add it into the
> > > perf events framework.
> > >
> > > Each SMMU node may have multiple PMUs associated with it, each of
> > > which may support different events.
> > >
> > > SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page>
> > where
> > > <phys_addr_page> is the physical page address of the SMMU PMCG
> > > wrapped to 4K boundary. For example, the PMCG at 0xff88840000 is
> > > named smmuv3_pmcg_ff88840
> > >
> > > Filtering by stream id is done by specifying filtering parameters
> > > with the event. options are:
> > >    filter_enable    - 0 = no filtering, 1 = filtering enabled
> > >    filter_span      - 0 = exact match, 1 = pattern match
> > >    filter_stream_id - pattern to filter against
> > >
> > > Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
> > >                        filter_span=1,filter_stream_id=0x42/ -a netperf
> > >
> > > Applies filter pattern 0x42 to transaction events, which means events
> > > matching stream ids 0x42 & 0x43 are counted as only upper StreamID
> > > bits are required to match the given filter. Further filtering
> > > information is available in the SMMU documentation.
> > >
> > > SMMU events are not attributable to a CPU, so task mode and sampling
> > > are not supported.
> > >
> > > Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
> > > Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> > > ---
> > >  drivers/perf/Kconfig          |   9 +
> > >  drivers/perf/Makefile         |   1 +
> > >  drivers/perf/arm_smmuv3_pmu.c | 778
> > ++++++++++++++++++++++++++++++++++++++++++
> > >  3 files changed, 788 insertions(+)
> > >  create mode 100644 drivers/perf/arm_smmuv3_pmu.c
> > >

> > > +
> > > +static inline void smmu_pmu_enable(struct pmu *pmu)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> > > +
> > > +	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
> > > +	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
> > > +	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
> > 
> > Should the interrupts be enabled before enabling the PMU?
> 
> Yes, it makes sense to do so.
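
i.e. just swapping the order of the two writes (sketch):

	static inline void smmu_pmu_enable(struct pmu *pmu)
	{
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

		/* Enable the interrupt before the counters start counting */
		writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
		       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
		writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
	}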
> 
> > > +}
> > > +
> > > +static inline void smmu_pmu_disable(struct pmu *pmu)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> > > +
> > > +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
> > > +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
> > > +}
> > > +
> > > +static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
> > > +					      u32 idx, u64 value)
> > > +{
> > > +	if (smmu_pmu->counter_mask & BIT(32))
> > > +		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
> > > +	else
> > > +		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
> > 
> > The arm64 IO macros use __force u32 and so it's probably OK to provide
> > a 64 bit value to writel. But you could use something like
> > lower_32_bits for clarity.
> 
> Yes, the macro uses __force u32. I will change it to make it clearer though.
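
e.g. (sketch):

	writel(lower_32_bits(value),
	       smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));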
> 

> > > +
> > > +/*
> > > + * Implementation of abstract pmu functionality required by
> > > + * the core perf events code.
> > > + */
> > > +
> > > +static int smmu_pmu_event_init(struct perf_event *event)
> > > +{
> > > +	struct hw_perf_event *hwc = &event->hw;
> > > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > > +	struct device *dev = smmu_pmu->dev;
> > > +	struct perf_event *sibling;
> > > +	u32 event_id;
> > > +
> > > +	if (event->attr.type != event->pmu->type)
> > > +		return -ENOENT;
> > > +
> > > +	if (hwc->sample_period) {
> > > +		dev_dbg(dev, "Sampling not supported\n");
> > > +		return -EOPNOTSUPP;
> > > +	}
> > > +
> > > +	if (event->cpu < 0) {
> > > +		dev_dbg(dev, "Per-task mode not supported\n");
> > > +		return -EOPNOTSUPP;
> > > +	}
> > > +
> > > +	/* We cannot filter accurately so we just don't allow it. */
> > > +	if (event->attr.exclude_user || event->attr.exclude_kernel ||
> > > +	    event->attr.exclude_hv || event->attr.exclude_idle) {
> > > +		dev_dbg(dev, "Can't exclude execution levels\n");
> > > +		return -EOPNOTSUPP;
> > > +	}
> > 
> > You should also test for attr.exclude_host and attr.exclude_guest, unless
> > it makes sense to stop counting these events when entering guest/host?
> 
> TBH, I haven't considered that scenario yet and don't think we have to stop the 
> counters. I will check that.

I would suggest returning an error if the exclude_guest / exclude_host flags
are set then - this is quite common across PMU drivers.

> 
> > 
> > Also, returning -EINVAL may be more desirable than -EOPNOTSUPP. If a user
> > makes use of the perf utility and attempts to set one of the above
> > exclusion flags, perf will retry opening the event without some of the
> > unsupported flags, but it will only do this if you return -EINVAL.
> > 
> > (See line 1927 of tools/perf/util/evsel.c and run the perf stat tool with
> > the verbose flag to see this in action.)
> 
> Ok. I will take a look.
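
For example, the check might end up looking like this (untested sketch,
combining the exclude_host/exclude_guest suggestion above with the -EINVAL
return):

	/* We cannot filter accurately so we just don't allow it. */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle ||
	    event->attr.exclude_host || event->attr.exclude_guest) {
		dev_dbg(dev, "Can't exclude execution levels\n");
		return -EINVAL;
	}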
> 

> > > +
> > > +static void smmu_pmu_event_start(struct perf_event *event, int flags)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > > +	struct hw_perf_event *hwc = &event->hw;
> > > +	int idx = hwc->idx;
> > > +	u32 filter_span, filter_sid;
> > > +	u32 evtyper;
> > > +
> > > +	hwc->state = 0;
> > > +
> > > +	smmu_pmu_set_period(smmu_pmu, hwc);
> > > +
> > > +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> > > +
> > > +	evtyper = get_event(event) |
> > > +		  filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
> > > +
> > > +	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
> > > +	smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
> > > +	smmu_pmu_interrupt_enable(smmu_pmu, idx);
> > > +	smmu_pmu_counter_enable(smmu_pmu, idx);
> > > +}
> > > +
> > > +static void smmu_pmu_event_stop(struct perf_event *event, int flags)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > > +	struct hw_perf_event *hwc = &event->hw;
> > > +	int idx = hwc->idx;
> > > +
> > > +	if (hwc->state & PERF_HES_STOPPED)
> > > +		return;
> > > +
> > > +	smmu_pmu_counter_disable(smmu_pmu, idx);
> > 
> > Is it intentional not to call smmu_pmu_interrupt_disable here?
> 
> Yes, it is. An earlier patch had the interrupt toggling, and Robin pointed
> out that it is not really needed as the counters are stopped anyway, and
> explicitly disabling may not solve the spurious interrupt case either.
> 

Ah apologies for not seeing that in earlier reviews.

> > > +
> > > +	if (flags & PERF_EF_UPDATE)
> > > +		smmu_pmu_event_update(event);
> > > +	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
> > 
> > Shouldn't we only set PERF_HES_UPTODATE if we've updated the count (e.g.
> > when PERF_EF_UPDATE was set?)
> 
> Hmm.. I had a look at a couple of other drivers (arm-cci.c/arm_dsu_pmu.c etc.)
> and those always call update ignoring PERF_EF_UPDATE and set
> PERF_HES_UPTODATE. I think the reason is that the counter is always
> reprogrammed on start. Do you think changing to that logic is a better way?

If I recall correctly, it was not possible to stop some counters on earlier
ARM PMUs; as a result, when _stop is called it calls _update (otherwise
you lose the value as the counter continues counting). I wonder if other
PMUs have copied this pattern despite not having that limitation?

As you write the counter value in your _start, I guess you probably
should call _update in your _stop. Perhaps you can add a comment to explain.
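
i.e. something along these lines (untested sketch):

	static void smmu_pmu_event_stop(struct perf_event *event, int flags)
	{
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
		struct hw_perf_event *hwc = &event->hw;
		int idx = hwc->idx;

		if (hwc->state & PERF_HES_STOPPED)
			return;

		smmu_pmu_counter_disable(smmu_pmu, idx);
		/*
		 * As _start always reprograms the counter, fold the count in
		 * here regardless of PERF_EF_UPDATE so it is never lost.
		 */
		smmu_pmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}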

> 
> > This may be a nit, but shouldn't smmu_pmu_event_update come before the
> > test for PERF_HES_STOPPED? I guess in the odd case where stop is called
> > without the PERF_EF_UPDATE flag (to stop the counter) and then called
> > a second time with PERF_EF_UPDATE to update the count.
> 
> Not sure such a scenario exists. I think most of the ones I checked have the
> logic: it's already stopped, nothing to do.
> 
> > > +}
> > > +
> > > +static int smmu_pmu_event_add(struct perf_event *event, int flags)
> > > +{
> > > +	struct hw_perf_event *hwc = &event->hw;
> > > +	int idx;
> > > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > > +
> > > +	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
> > > +	if (idx < 0)
> > > +		return idx;
> > > +
> > > +	hwc->idx = idx;
> > > +	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
> > > +	smmu_pmu->events[idx] = event;
> > > +	local64_set(&hwc->prev_count, 0);
> > > +
> > > +	if (flags & PERF_EF_START)
> > > +		smmu_pmu_event_start(event, flags);
> > > +
> > > +	/* Propagate changes to the userspace mapping. */
> > > +	perf_event_update_userpage(event);
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +static void smmu_pmu_event_del(struct perf_event *event, int flags)
> > > +{
> > > +	struct hw_perf_event *hwc = &event->hw;
> > > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > > +	int idx = hwc->idx;
> > > +
> > > +	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
> > > +	smmu_pmu->events[idx] = NULL;
> > > +	clear_bit(idx, smmu_pmu->used_counters);
> > > +
> > > +	perf_event_update_userpage(event);
> > > +}
> > > +
> > > +static void smmu_pmu_event_read(struct perf_event *event)
> > > +{
> > > +	smmu_pmu_event_update(event);
> > > +}
> > > +
> > > +/* cpumask */
> > > +
> > > +static ssize_t smmu_pmu_cpumask_show(struct device *dev,
> > > +				     struct device_attribute *attr,
> > > +				     char *buf)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
> > > +
> > > +	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
> > > +}
> > > +
> > > +static struct device_attribute smmu_pmu_cpumask_attr =
> > > +		__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);
> > > +
> > > +static struct attribute *smmu_pmu_cpumask_attrs[] = {
> > > +	&smmu_pmu_cpumask_attr.attr,
> > > +	NULL
> > > +};
> > > +
> > > +static struct attribute_group smmu_pmu_cpumask_group = {
> > > +	.attrs = smmu_pmu_cpumask_attrs,
> > > +};
> > > +
> > > +/* Events */
> > > +
> > > +ssize_t smmu_pmu_event_show(struct device *dev,
> > > +			    struct device_attribute *attr, char *page)
> > > +{
> > > +	struct perf_pmu_events_attr *pmu_attr;
> > > +
> > > +	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
> > > +
> > > +	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
> > > +}
> > > +
> > > +#define SMMU_EVENT_ATTR(_name, _id)					  \
> > > +	(&((struct perf_pmu_events_attr[]) {				  \
> > > +		{ .attr = __ATTR(_name, 0444, smmu_pmu_event_show, NULL), \
> > > +		  .id = _id, }						  \
> > > +	})[0].attr.attr)
> > 
> > This is very similar to PMU_EVENT_ATTR, have you considered taking an
> > approach similar to the ARMV8_EVENT_ATTR macro that wraps up
> > PMU_EVENT_ATTR? It may be more readable if we define these attributes
> > consistently across PMUs.
> 
> Ok. I will take a look at that.

Thanks,

Andrew Murray

> 
> Thanks,
> Shameer
> 
> > Thanks,
> > 
> > Andrew Murray
> > 
> > > +
> > > +static struct attribute *smmu_pmu_events[] = {
> > > +	SMMU_EVENT_ATTR(cycles, 0),
> > > +	SMMU_EVENT_ATTR(transaction, 1),
> > > +	SMMU_EVENT_ATTR(tlb_miss, 2),
> > > +	SMMU_EVENT_ATTR(config_cache_miss, 3),
> > > +	SMMU_EVENT_ATTR(trans_table_walk_access, 4),
> > > +	SMMU_EVENT_ATTR(config_struct_access, 5),
> > > +	SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
> > > +	SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
> > > +	NULL
> > > +};
> > > +
> > > +static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
> > > +					 struct attribute *attr, int unused)
> > > +{
> > > +	struct device *dev = kobj_to_dev(kobj);
> > > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
> > > +	struct perf_pmu_events_attr *pmu_attr;
> > > +
> > > +	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
> > > +
> > > +	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
> > > +		return attr->mode;
> > > +
> > > +	return 0;
> > > +}
> > > +static struct attribute_group smmu_pmu_events_group = {
> > > +	.name = "events",
> > > +	.attrs = smmu_pmu_events,
> > > +	.is_visible = smmu_pmu_event_is_visible,
> > > +};
> > > +
> > > +/* Formats */
> > > +PMU_FORMAT_ATTR(event,		   "config:0-15");
> > > +PMU_FORMAT_ATTR(filter_stream_id,  "config1:0-31");
> > > +PMU_FORMAT_ATTR(filter_span,	   "config1:32");
> > > +PMU_FORMAT_ATTR(filter_enable,	   "config1:33");
> > > +
> > > +static struct attribute *smmu_pmu_formats[] = {
> > > +	&format_attr_event.attr,
> > > +	&format_attr_filter_stream_id.attr,
> > > +	&format_attr_filter_span.attr,
> > > +	&format_attr_filter_enable.attr,
> > > +	NULL
> > > +};
> > > +
> > > +static struct attribute_group smmu_pmu_format_group = {
> > > +	.name = "format",
> > > +	.attrs = smmu_pmu_formats,
> > > +};
> > > +
> > > +static const struct attribute_group *smmu_pmu_attr_grps[] = {
> > > +	&smmu_pmu_cpumask_group,
> > > +	&smmu_pmu_events_group,
> > > +	&smmu_pmu_format_group,
> > > +	NULL
> > > +};
> > > +
> > > +/*
> > > + * Generic device handlers
> > > + */
> > > +
> > > +static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu;
> > > +	unsigned int target;
> > > +
> > > +	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
> > > +	if (cpu != smmu_pmu->on_cpu)
> > > +		return 0;
> > > +
> > > +	target = cpumask_any_but(cpu_online_mask, cpu);
> > > +	if (target >= nr_cpu_ids)
> > > +		return 0;
> > > +
> > > +	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
> > > +	smmu_pmu->on_cpu = target;
> > > +	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu = data;
> > > +	u64 ovsr;
> > > +	unsigned int idx;
> > > +
> > > +	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
> > > +	if (!ovsr)
> > > +		return IRQ_NONE;
> > > +
> > > +	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
> > > +
> > > +	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
> > > +		struct perf_event *event = smmu_pmu->events[idx];
> > > +		struct hw_perf_event *hwc;
> > > +
> > > +		if (WARN_ON_ONCE(!event))
> > > +			continue;
> > > +
> > > +		smmu_pmu_event_update(event);
> > > +		hwc = &event->hw;
> > > +
> > > +		smmu_pmu_set_period(smmu_pmu, hwc);
> > > +	}
> > > +
> > > +	return IRQ_HANDLED;
> > > +}
> > > +
> > > +static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
> > > +{
> > > +	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
> > > +	int irq, ret = -ENXIO;
> > > +
> > > +	irq = pmu->irq;
> > > +	if (irq)
> > > +		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
> > > +				       flags, "smmuv3-pmu", pmu);
> > > +	return ret;
> > > +}
> > > +
> > > +static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
> > > +{
> > > +	smmu_pmu_disable(&smmu_pmu->pmu);
> > > +
> > > +	/* Disable counter and interrupt */
> > > +	writeq_relaxed(smmu_pmu->counter_present_mask,
> > > +		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
> > > +	writeq_relaxed(smmu_pmu->counter_present_mask,
> > > +		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
> > > +	writeq_relaxed(smmu_pmu->counter_present_mask,
> > > +		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
> > > +}
> > > +
> > > +static int smmu_pmu_probe(struct platform_device *pdev)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu;
> > > +	struct resource *res_0, *res_1;
> > > +	u32 cfgr, reg_size;
> > > +	u64 ceid_64[2];
> > > +	int irq, err;
> > > +	char *name;
> > > +	struct device *dev = &pdev->dev;
> > > +
> > > +	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
> > > +	if (!smmu_pmu)
> > > +		return -ENOMEM;
> > > +
> > > +	smmu_pmu->dev = dev;
> > > +	platform_set_drvdata(pdev, smmu_pmu);
> > > +
> > > +	smmu_pmu->pmu = (struct pmu) {
> > > +		.task_ctx_nr    = perf_invalid_context,
> > > +		.pmu_enable	= smmu_pmu_enable,
> > > +		.pmu_disable	= smmu_pmu_disable,
> > > +		.event_init	= smmu_pmu_event_init,
> > > +		.add		= smmu_pmu_event_add,
> > > +		.del		= smmu_pmu_event_del,
> > > +		.start		= smmu_pmu_event_start,
> > > +		.stop		= smmu_pmu_event_stop,
> > > +		.read		= smmu_pmu_event_read,
> > > +		.attr_groups	= smmu_pmu_attr_grps,
> > > +	};
> > > +
> > > +	res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> > > +	smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
> > > +	if (IS_ERR(smmu_pmu->reg_base))
> > > +		return PTR_ERR(smmu_pmu->reg_base);
> > > +
> > > +	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
> > > +
> > > +	/* Determine if page 1 is present */
> > > +	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
> > > +		res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
> > > +		smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
> > > +		if (IS_ERR(smmu_pmu->reloc_base))
> > > +			return PTR_ERR(smmu_pmu->reloc_base);
> > > +	} else {
> > > +		smmu_pmu->reloc_base = smmu_pmu->reg_base;
> > > +	}
> > > +
> > > +	irq = platform_get_irq(pdev, 0);
> > > +	if (irq > 0)
> > > +		smmu_pmu->irq = irq;
> > > +
> > > +	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
> > > +	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
> > > +	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
> > > +			  SMMU_ARCH_MAX_EVENTS);
> > > +
> > > +	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR_MASK, cfgr) + 1;
> > > +	smmu_pmu->counter_present_mask = GENMASK(smmu_pmu->num_counters - 1, 0);
> > > +
> > > +	smmu_pmu->sid_filter_global = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
> > > +
> > > +	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE_MASK, cfgr);
> > > +	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
> > > +
> > > +	smmu_pmu_reset(smmu_pmu);
> > > +
> > > +	err = smmu_pmu_setup_irq(smmu_pmu);
> > > +	if (err) {
> > > +		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
> > > +		return err;
> > > +	}
> > > +
> > > +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
> > > +			      (res_0->start) >> SMMU_PA_SHIFT);
> > > +	if (!name) {
> > > +		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	/* Pick one CPU to be the preferred one to use */
> > > +	smmu_pmu->on_cpu = get_cpu();
> > > +	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
> > > +
> > > +	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
> > > +					       &smmu_pmu->node);
> > > +	if (err) {
> > > +		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
> > > +			err, &res_0->start);
> > > +		goto out_cpuhp_err;
> > > +	}
> > > +
> > > +	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
> > > +	if (err) {
> > > +		dev_err(dev, "Error %d registering PMU @%pa\n",
> > > +			err, &res_0->start);
> > > +		goto out_unregister;
> > > +	}
> > > +
> > > +	put_cpu();
> > > +	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
> > > +		 &res_0->start, smmu_pmu->num_counters,
> > > +		 smmu_pmu->sid_filter_global ? "Global(Counter0)" :
> > > +		 "Individual");
> > > +
> > > +	return 0;
> > > +
> > > +out_unregister:
> > > +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
> > > +out_cpuhp_err:
> > > +	put_cpu();
> > > +	return err;
> > > +}
> > > +
> > > +static int smmu_pmu_remove(struct platform_device *pdev)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
> > > +
> > > +	perf_pmu_unregister(&smmu_pmu->pmu);
> > > +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +static void smmu_pmu_shutdown(struct platform_device *pdev)
> > > +{
> > > +	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
> > > +
> > > +	smmu_pmu_disable(&smmu_pmu->pmu);
> > > +}
> > > +
> > > +static struct platform_driver smmu_pmu_driver = {
> > > +	.driver = {
> > > +		.name = "arm-smmu-v3-pmu",
> > > +	},
> > > +	.probe = smmu_pmu_probe,
> > > +	.remove = smmu_pmu_remove,
> > > +	.shutdown = smmu_pmu_shutdown,
> > > +};
> > > +
> > > +static int __init arm_smmu_pmu_init(void)
> > > +{
> > > +	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
> > > +				      "perf/arm/pmcg:online",
> > > +				      NULL,
> > > +				      smmu_pmu_offline_cpu);
> > > +	if (cpuhp_state_num < 0)
> > > +		return cpuhp_state_num;
> > > +
> > > +	return platform_driver_register(&smmu_pmu_driver);
> > > +}
> > > +module_init(arm_smmu_pmu_init);
> > > +
> > > +static void __exit arm_smmu_pmu_exit(void)
> > > +{
> > > +	platform_driver_unregister(&smmu_pmu_driver);
> > > +	cpuhp_remove_multi_state(cpuhp_state_num);
> > > +}
> > > +
> > > +module_exit(arm_smmu_pmu_exit);
> > > +
> > > +MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
> > > +MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
> > > +MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
> > > +MODULE_LICENSE("GPL v2");
> > > --
> > > 2.7.4
> > >
> > >
> > >
Robin Murphy - Jan. 24, 2019, 6:25 p.m.
On 23/01/2019 12:14, Andrew Murray wrote:
[...]
>>>> +static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
>>>> +					      u32 idx, u64 value)
>>>> +{
>>>> +	if (smmu_pmu->counter_mask & BIT(32))
>>>> +		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
>>>> +	else
>>>> +		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
>>>
>>> The arm64 IO macros use __force u32 and so it's probably OK to provide
>>> a 64 bit value to writel. But you could use something like
>>> lower_32_bits for clarity.
>>
>> Yes, macro uses __force u32. I will change it to make it more clear though.

To be pedantic, the first cast which the value actually undergoes is to 
(__u32) ;)

Ultimately, __raw_writel() takes a u32, so in that sense it's never a 
problem to pass a u64, since unsigned truncation is well-defined in the 
C standard. The casting involved in the I/O accessors is mostly about 
going to an endian-specific type and back again.

[...]
>>>> +static void smmu_pmu_event_start(struct perf_event *event, int flags)
>>>> +{
>>>> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
>>>> +	struct hw_perf_event *hwc = &event->hw;
>>>> +	int idx = hwc->idx;
>>>> +	u32 filter_span, filter_sid;
>>>> +	u32 evtyper;
>>>> +
>>>> +	hwc->state = 0;
>>>> +
>>>> +	smmu_pmu_set_period(smmu_pmu, hwc);
>>>> +
>>>> +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
>>>> +
>>>> +	evtyper = get_event(event) |
>>>> +		  filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
>>>> +
>>>> +	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
>>>> +	smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
>>>> +	smmu_pmu_interrupt_enable(smmu_pmu, idx);
>>>> +	smmu_pmu_counter_enable(smmu_pmu, idx);
>>>> +}
>>>> +
>>>> +static void smmu_pmu_event_stop(struct perf_event *event, int flags)
>>>> +{
>>>> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
>>>> +	struct hw_perf_event *hwc = &event->hw;
>>>> +	int idx = hwc->idx;
>>>> +
>>>> +	if (hwc->state & PERF_HES_STOPPED)
>>>> +		return;
>>>> +
>>>> +	smmu_pmu_counter_disable(smmu_pmu, idx);
>>>
>>> Is it intentional not to call smmu_pmu_interrupt_disable here?
>>
>> Yes, it is. An earlier patch had the interrupt toggling, and Robin pointed
>> out that it is not really needed as the counters are stopped anyway, and
>> explicitly disabling may not solve the spurious interrupt case either.
>>
> 
> Ah apologies for not seeing that in earlier reviews.

Hmm, I didn't exactly mean "keep enabling it every time an event is 
restarted and never disable it anywhere", though. I was thinking more 
along the lines of enabling in event_add() and disabling in event_del() 
(i.e. effectively tying it to allocation and deallocation of the counter).

Robin.
Shameerali Kolothum Thodi - Jan. 25, 2019, 9:22 a.m.
> -----Original Message-----
> From: Robin Murphy [mailto:robin.murphy@arm.com]
> Sent: 24 January 2019 18:25
> To: Andrew Murray <andrew.murray@arm.com>; Shameerali Kolothum Thodi
> <shameerali.kolothum.thodi@huawei.com>
> Cc: lorenzo.pieralisi@arm.com; mark.rutland@arm.com;
> vkilari@codeaurora.org; neil.m.leeder@gmail.com;
> jean-philippe.brucker@arm.com; pabba@codeaurora.org; John Garry
> <john.garry@huawei.com>; will.deacon@arm.com; rruigrok@codeaurora.org;
> Linuxarm <linuxarm@huawei.com>; linux-kernel@vger.kernel.org;
> linux-acpi@vger.kernel.org; Guohanjun (Hanjun Guo)
> <guohanjun@huawei.com>; linux-arm-kernel@lists.infradead.org
> Subject: Re: [PATCH v5 2/4] perf: add arm64 smmuv3 pmu driver
>
> On 23/01/2019 12:14, Andrew Murray wrote:
> [...]
> >>>> +static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
> >>>> +					      u32 idx, u64 value)
> >>>> +{
> >>>> +	if (smmu_pmu->counter_mask & BIT(32))
> >>>> +		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
> >>>> +	else
> >>>> +		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
> >>>
> >>> The arm64 IO macros use __force u32 and so it's probably OK to provide
> >>> a 64 bit value to writel. But you could use something like
> >>> lower_32_bits for clarity.
> >>
> >> Yes, the macro uses __force u32. I will change it to make it clearer though.
>
> To be pedantic, the first cast which the value actually undergoes is to
> (__u32) ;)
>
> Ultimately, __raw_writel() takes a u32, so in that sense it's never a
> problem to pass a u64, since unsigned truncation is well-defined in the
> C standard. The casting involved in the I/O accessors is mostly about
> going to an endian-specific type and back again.
>
> [...]
> >>>> +static void smmu_pmu_event_start(struct perf_event *event, int flags)
> >>>> +{
> >>>> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> >>>> +	struct hw_perf_event *hwc = &event->hw;
> >>>> +	int idx = hwc->idx;
> >>>> +	u32 filter_span, filter_sid;
> >>>> +	u32 evtyper;
> >>>> +
> >>>> +	hwc->state = 0;
> >>>> +
> >>>> +	smmu_pmu_set_period(smmu_pmu, hwc);
> >>>> +
> >>>> +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> >>>> +
> >>>> +	evtyper = get_event(event) |
> >>>> +		  filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
> >>>> +
> >>>> +	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
> >>>> +	smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
> >>>> +	smmu_pmu_interrupt_enable(smmu_pmu, idx);
> >>>> +	smmu_pmu_counter_enable(smmu_pmu, idx);
> >>>> +}
> >>>> +
> >>>> +static void smmu_pmu_event_stop(struct perf_event *event, int flags)
> >>>> +{
> >>>> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> >>>> +	struct hw_perf_event *hwc = &event->hw;
> >>>> +	int idx = hwc->idx;
> >>>> +
> >>>> +	if (hwc->state & PERF_HES_STOPPED)
> >>>> +		return;
> >>>> +
> >>>> +	smmu_pmu_counter_disable(smmu_pmu, idx);
> >>>
> >>> Is it intentional not to call smmu_pmu_interrupt_disable here?
> >>
> >> Yes, it is. An earlier patch had the interrupt toggling, and Robin pointed
> >> out that it is not really needed as the counters are stopped anyway, and
> >> explicitly disabling may not solve the spurious interrupt case either.
> >>
> >
> > Ah apologies for not seeing that in earlier reviews.
>
> Hmm, I didn't exactly mean "keep enabling it every time an event is
> restarted and never disable it anywhere", though. I was thinking more
> along the lines of enabling in event_add() and disabling in event_del()
> (i.e. effectively tying it to allocation and deallocation of the counter).
>

Right. I missed the point that it was not disabled at all! I will rearrange it
into the _add/_del path.
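
i.e. roughly (a sketch of what I have in mind for v6):

	static int smmu_pmu_event_add(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
		int idx;

		idx = smmu_pmu_get_event_idx(smmu_pmu, event);
		if (idx < 0)
			return idx;

		hwc->idx = idx;
		hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		smmu_pmu->events[idx] = event;
		local64_set(&hwc->prev_count, 0);

		/* Interrupt is now tied to counter allocation... */
		smmu_pmu_interrupt_enable(smmu_pmu, idx);

		if (flags & PERF_EF_START)
			smmu_pmu_event_start(event, flags);

		/* Propagate changes to the userspace mapping. */
		perf_event_update_userpage(event);

		return 0;
	}

	static void smmu_pmu_event_del(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
		int idx = hwc->idx;

		smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
		/* ...and to counter deallocation */
		smmu_pmu_interrupt_disable(smmu_pmu, idx);
		smmu_pmu->events[idx] = NULL;
		clear_bit(idx, smmu_pmu->used_counters);

		perf_event_update_userpage(event);
	}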

Thanks for all the comments. I am planning to send out a v6 soon.
By the way, did you get a chance to take a look at patch #4 (the HiSilicon
erratum one)? I'd appreciate it if you could take a look and let me know
before v6.

Thanks,
Shameer
Robin Murphy - Jan. 25, 2019, 3:13 p.m.
On 30/11/2018 15:47, Shameer Kolothum wrote:
> From: Neil Leeder <nleeder@codeaurora.org>
> 
> Adds a new driver to support the SMMUv3 PMU and add it into the
> perf events framework.
> 
> Each SMMU node may have multiple PMUs associated with it, each of
> which may support different events.
> 
> SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
> <phys_addr_page> is the physical page address of the SMMU PMCG
> wrapped to 4K boundary. For example, the PMCG at 0xff88840000 is
> named smmuv3_pmcg_ff88840
> 
> Filtering by stream id is done by specifying filtering parameters
> with the event. options are:
>     filter_enable    - 0 = no filtering, 1 = filtering enabled
>     filter_span      - 0 = exact match, 1 = pattern match
>     filter_stream_id - pattern to filter against
> 
> Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
>                         filter_span=1,filter_stream_id=0x42/ -a netperf
> 
> Applies filter pattern 0x42 to transaction events, which means events
> matching stream ids 0x42 & 0x43 are counted as only upper StreamID
> bits are required to match the given filter. Further filtering
> information is available in the SMMU documentation.
> 
> SMMU events are not attributable to a CPU, so task mode and sampling
> are not supported.
> 
> Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> ---
>   drivers/perf/Kconfig          |   9 +
>   drivers/perf/Makefile         |   1 +
>   drivers/perf/arm_smmuv3_pmu.c | 778 ++++++++++++++++++++++++++++++++++++++++++
>   3 files changed, 788 insertions(+)
>   create mode 100644 drivers/perf/arm_smmuv3_pmu.c
> 
> diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
> index 08ebaf7..92be6a3 100644
> --- a/drivers/perf/Kconfig
> +++ b/drivers/perf/Kconfig
> @@ -52,6 +52,15 @@ config ARM_PMU_ACPI
>   	depends on ARM_PMU && ACPI
>   	def_bool y
>   
> +config ARM_SMMU_V3_PMU
> +	 bool "ARM SMMUv3 Performance Monitors Extension"
> +	 depends on ARM64 && ACPI && ARM_SMMU_V3
> +	   help
> +	   Provides support for the SMMU version 3 performance monitor unit (PMU)
> +	   on ARM-based systems.
> +	   Adds the SMMU PMU into the perf events subsystem for
> +	   monitoring SMMU performance events.
> +
>   config ARM_DSU_PMU
>   	tristate "ARM DynamIQ Shared Unit (DSU) PMU"
>   	depends on ARM64
> diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
> index b3902bd..f10a932 100644
> --- a/drivers/perf/Makefile
> +++ b/drivers/perf/Makefile
> @@ -4,6 +4,7 @@ obj-$(CONFIG_ARM_CCN) += arm-ccn.o
>   obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
>   obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
>   obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
> +obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
>   obj-$(CONFIG_HISI_PMU) += hisilicon/
>   obj-$(CONFIG_QCOM_L2_PMU)	+= qcom_l2_pmu.o
>   obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
> diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
> new file mode 100644
> index 0000000..fb9dcd8
> --- /dev/null
> +++ b/drivers/perf/arm_smmuv3_pmu.c
> @@ -0,0 +1,778 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +/*
> + * This driver adds support for perf events to use the Performance
> + * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
> + * to monitor that node.
> + *
> + * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
> + * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
> + * to 4K boundary. For example, the PMCG at 0xff88840000 is named
> + * smmuv3_pmcg_ff88840
> + *
> + * Filtering by stream id is done by specifying filtering parameters
> + * with the event. options are:
> + *   filter_enable    - 0 = no filtering, 1 = filtering enabled
> + *   filter_span      - 0 = exact match, 1 = pattern match
> + *   filter_stream_id - pattern to filter against
> + *
> + * To match a partial StreamID where the X most-significant bits must match
> + * but the Y least-significant bits might differ, STREAMID is programmed
> + * with a value that contains:
> + *  STREAMID[Y - 1] == 0.
> + *  STREAMID[Y - 2:0] == 1 (where Y > 1).
> + * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
> + * contain a value to match from the corresponding bits of event StreamID.
> + *
> + * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
> + *                    filter_span=1,filter_stream_id=0x42/ -a netperf
> + * Applies filter pattern 0x42 to transaction events, which means events
> + * matching stream ids 0x42 and 0x43 are counted. Further filtering
> + * information is available in the SMMU documentation.
> + *
> + * SMMU events are not attributable to a CPU, so task mode and sampling
> + * are not supported.
> + */
> +
> +#include <linux/acpi.h>
> +#include <linux/bitfield.h>
> +#include <linux/bitops.h>
> +#include <linux/cpuhotplug.h>
> +#include <linux/cpumask.h>
> +#include <linux/device.h>
> +#include <linux/errno.h>
> +#include <linux/interrupt.h>
> +#include <linux/irq.h>
> +#include <linux/kernel.h>
> +#include <linux/list.h>
> +#include <linux/msi.h>
> +#include <linux/perf_event.h>
> +#include <linux/platform_device.h>
> +#include <linux/smp.h>
> +#include <linux/sysfs.h>
> +#include <linux/types.h>
> +
> +#define SMMU_PMCG_EVCNTR0               0x0
> +#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
> +#define SMMU_PMCG_EVTYPER0              0x400
> +#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
> +#define SMMU_PMCG_SID_SPAN_SHIFT        29
> +#define SMMU_PMCG_SID_SPAN_MASK         GENMASK(29, 29)

Surely just:

#define SMMU_PMCG_SID_SPAN	BIT(29)

(or maybe even SMMU_PMCG_EVTYPER_SID_SPAN for clarity)?

> +#define SMMU_PMCG_SMR0                  0xA00
> +#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
> +#define SMMU_PMCG_CNTENSET0             0xC00
> +#define SMMU_PMCG_CNTENCLR0             0xC20
> +#define SMMU_PMCG_INTENSET0             0xC40
> +#define SMMU_PMCG_INTENCLR0             0xC60
> +#define SMMU_PMCG_OVSCLR0               0xC80
> +#define SMMU_PMCG_OVSSET0               0xCC0
> +#define SMMU_PMCG_CFGR                  0xE00
> +#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
> +#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
> +#define SMMU_PMCG_CFGR_SIZE_MASK        GENMASK(13, 8)
> +#define SMMU_PMCG_CFGR_NCTR_MASK        GENMASK(5, 0)

Nit: as long as we use the bitfield accessors consistently, these _MASK 
suffixes are a bit of a waste of space IMO.

> +#define SMMU_PMCG_CR                    0xE04
> +#define SMMU_PMCG_CR_ENABLE             BIT(0)
> +#define SMMU_PMCG_CEID0                 0xE20
> +#define SMMU_PMCG_CEID1                 0xE28
> +#define SMMU_PMCG_IRQ_CTRL              0xE50
> +#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
> +#define SMMU_PMCG_IRQ_CFG0              0xE58
> +
> +#define SMMU_DEFAULT_FILTER_SPAN        1
> +#define SMMU_DEFAULT_FILTER_STREAM_ID   GENMASK(31, 0)
> +
> +#define SMMU_MAX_COUNTERS               64
> +#define SMMU_ARCH_MAX_EVENT_ID          0x7F
> +#define SMMU_IMPDEF_MAX_EVENT_ID        0xFFFF
> +#define SMMU_ARCH_MAX_EVENTS            (SMMU_ARCH_MAX_EVENT_ID + 1)

Do we really need two definitions of effectively the same thing?

> +
> +#define SMMU_PA_SHIFT                   12
> +
> +static int cpuhp_state_num;
> +
> +struct smmu_pmu {
> +	bool sid_filter_global;
> +	struct hlist_node node;
> +	struct perf_event *events[SMMU_MAX_COUNTERS];
> +	DECLARE_BITMAP(used_counters, SMMU_MAX_COUNTERS);
> +	DECLARE_BITMAP(supported_events, SMMU_ARCH_MAX_EVENTS);
> +	unsigned int irq;
> +	unsigned int on_cpu;
> +	struct pmu pmu;
> +	unsigned int num_counters;
> +	struct device *dev;
> +	void __iomem *reg_base;
> +	void __iomem *reloc_base;
> +	u64 counter_present_mask;
> +	u64 counter_mask;
> +};
> +
> +#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
> +
> +#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)        \
> +	static inline u32 get_##_name(struct perf_event *event)            \
> +	{                                                                  \
> +		return FIELD_GET(GENMASK_ULL(_end, _start),                \
> +				 event->attr._config);                     \
> +	}                                                                  \
> +
> +SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
> +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
> +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
> +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
> +
> +static inline void smmu_pmu_enable(struct pmu *pmu)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> +
> +	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
> +	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
> +	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
> +}
> +
> +static inline void smmu_pmu_disable(struct pmu *pmu)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> +
> +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
> +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
> +}
> +
> +static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
> +					      u32 idx, u64 value)
> +{
> +	if (smmu_pmu->counter_mask & BIT(32))
> +		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
> +	else
> +		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
> +}
> +
> +static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
> +{
> +	u64 value;
> +
> +	if (smmu_pmu->counter_mask & BIT(32))
> +		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
> +	else
> +		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
> +
> +	return value;
> +}
> +
> +static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
> +{
> +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
> +}
> +
> +static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
> +{
> +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
> +}
> +
> +static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
> +{
> +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
> +}
> +
> +static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
> +					      u32 idx)
> +{
> +	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
> +}
> +
> +static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
> +					u32 val)
> +{
> +	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
> +}
> +
> +static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
> +{
> +	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
> +}
> +
> +static void smmu_pmu_event_update(struct perf_event *event)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	u64 delta, prev, now;
> +	u32 idx = hwc->idx;
> +
> +	do {
> +		prev = local64_read(&hwc->prev_count);
> +		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
> +	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
> +
> +	/* handle overflow. */
> +	delta = now - prev;
> +	delta &= smmu_pmu->counter_mask;
> +
> +	local64_add(delta, &event->count);
> +}
> +
> +static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
> +				struct hw_perf_event *hwc)
> +{
> +	u32 idx = hwc->idx;
> +	u64 new;
> +
> +	/*
> +	 * We limit the max period to half the max counter value of the counter
> +	 * size, so that even in the case of extreme interrupt latency the
> +	 * counter will (hopefully) not wrap past its initial value.
> +	 */
> +	new = smmu_pmu->counter_mask >> 1;
> +
> +	local64_set(&hwc->prev_count, new);
> +	smmu_pmu_counter_set_value(smmu_pmu, idx, new);
> +}
> +
> +static void smmu_pmu_get_event_filter(struct perf_event *event, u32 *span,
> +				      u32 *stream_id)

It might just be me, but those u32s seem a little non-obvious given that 
they're both actually communicating boolean values.

> +{
> +	bool filter_en = !!get_filter_enable(event);
> +
> +	*span = filter_en ? get_filter_span(event) : SMMU_DEFAULT_FILTER_SPAN;
> +	*stream_id = filter_en ? get_filter_stream_id(event) :
> +				 SMMU_DEFAULT_FILTER_STREAM_ID;
> +}
> +
> +static bool smmu_pmu_event_filter_valid(struct smmu_pmu *smmu_pmu,
> +				  struct perf_event *event, int idx)
> +{
> +	u32 filter_span, filter_sid;
> +	u32 curr_span, curr_sid;
> +
> +	if (!idx  || !smmu_pmu->sid_filter_global)
> +		return true;

Consider this sequence on an initially-empty PMCG with global filtering:

  - add event A with filter X
  - add event B with filter X
  - delete event A
  - add event C with filter Y

event B is suddenly filtered by Y. Oops.

> +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> +
> +	/* Read the current global filter setting */
> +	curr_span = FIELD_GET(SMMU_PMCG_SID_SPAN_MASK,
> +			      readl(smmu_pmu->reg_base + SMMU_PMCG_EVTYPER0));
> +	curr_sid = readl(smmu_pmu->reg_base + SMMU_PMCG_SMR0);

The solution to the above problem will likely help avoid this slightly 
yucky reading back of register state, too.
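
One way to address both (a rough sketch; global_filter_span and
global_filter_sid would be new fields in struct smmu_pmu) would be to let
the first allocated counter define the global filter and validate against
the cached values thereafter:

	static bool smmu_pmu_check_global_filter(struct smmu_pmu *smmu_pmu,
						 struct perf_event *event)
	{
		bool first = bitmap_empty(smmu_pmu->used_counters,
					  smmu_pmu->num_counters);
		u32 span, sid;

		smmu_pmu_get_event_filter(event, &span, &sid);
		if (first) {
			/* The first active event defines the global filter */
			smmu_pmu->global_filter_span = span;
			smmu_pmu->global_filter_sid = sid;
			return true;
		}

		return span == smmu_pmu->global_filter_span &&
		       sid == smmu_pmu->global_filter_sid;
	}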

> +
> +	if (filter_span != curr_span || filter_sid != curr_sid)
> +		return false;
> +
> +	return true;
> +}
> +
> +static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
> +				  struct perf_event *event)
> +{
> +	unsigned int idx;
> +	unsigned int num_ctrs = smmu_pmu->num_counters;
> +
> +	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
> +	if (idx == num_ctrs)
> +		/* The counters are all in use. */
> +		return -EAGAIN;
> +
> +	if (!smmu_pmu_event_filter_valid(smmu_pmu, event, idx))
> +		return -EAGAIN;
> +
> +	set_bit(idx, smmu_pmu->used_counters);
> +
> +	return idx;
> +}
> +
> +/*
> + * Implementation of abstract pmu functionality required by
> + * the core perf events code.
> + */
> +
> +static int smmu_pmu_event_init(struct perf_event *event)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	struct device *dev = smmu_pmu->dev;
> +	struct perf_event *sibling;
> +	u32 event_id;
> +
> +	if (event->attr.type != event->pmu->type)
> +		return -ENOENT;
> +
> +	if (hwc->sample_period) {
> +		dev_dbg(dev, "Sampling not supported\n");
> +		return -EOPNOTSUPP;
> +	}
> +
> +	if (event->cpu < 0) {
> +		dev_dbg(dev, "Per-task mode not supported\n");
> +		return -EOPNOTSUPP;
> +	}
> +
> +	/* We cannot filter accurately so we just don't allow it. */
> +	if (event->attr.exclude_user || event->attr.exclude_kernel ||
> +	    event->attr.exclude_hv || event->attr.exclude_idle) {
> +		dev_dbg(dev, "Can't exclude execution levels\n");
> +		return -EOPNOTSUPP;
> +	}
> +
> +	/* Verify specified event is supported on this PMU */
> +	event_id = get_event(event);

Given what this does...

> +	if (((event_id <= SMMU_ARCH_MAX_EVENT_ID) &&
> +	    (!test_bit(event_id, smmu_pmu->supported_events))) ||
> +	    (event_id > SMMU_IMPDEF_MAX_EVENT_ID)) {

...how can this last condition ever be true? On which note, event_id 
could also be a u16 here, which might aid clarity even more.

Really, both MAX_EVENT_ID definitions seem redundant.
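
i.e. something like (sketch; IMPDEF events above the architected range would
simply pass through, as they do now):

	u16 event_id = get_event(event);

	/* Verify specified event is supported on this PMU */
	if (event_id < SMMU_ARCH_MAX_EVENTS &&
	    !test_bit(event_id, smmu_pmu->supported_events)) {
		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
		return -EINVAL;
	}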

> +		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
> +		return -EINVAL;
> +	}
> +
> +	/* Don't allow groups with mixed PMUs, except for s/w events */
> +	if (event->group_leader->pmu != event->pmu &&
> +	    !is_software_event(event->group_leader)) {
> +		dev_dbg(dev, "Can't create mixed PMU group\n");
> +		return -EINVAL;
> +	}
> +
> +	list_for_each_entry(sibling, &event->group_leader->sibling_list,
> +			    sibling_list)

Maybe consider the for_each_sibling_event() helper?
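
i.e. (untested, but should be equivalent):

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling)) {
			dev_dbg(dev, "Can't create mixed PMU group\n");
			return -EINVAL;
		}
	}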

> +		if (sibling->pmu != event->pmu &&
> +		    !is_software_event(sibling)) {
> +			dev_dbg(dev, "Can't create mixed PMU group\n");
> +			return -EINVAL;
> +		}
> +
> +	/* Ensure all events in a group are on the same cpu */
> +	if ((event->group_leader != event) &&
> +	    (event->cpu != event->group_leader->cpu)) {
> +		dev_dbg(dev, "Can't create group on CPUs %d and %d\n",
> +			event->cpu, event->group_leader->cpu);
> +		return -EINVAL;
> +	}

Is this really necessary? At this point, event->cpu might not have the 
value that we're just about to force it to, and AFAICS perf_event_open() 
will eventually bail out if they don't end up matching anyway.

> +
> +	hwc->idx = -1;
> +
> +	/*
> +	 * Ensure all events are on the same cpu so all events are in the
> +	 * same cpu context, to avoid races on pmu_enable etc.
> +	 */
> +	event->cpu = smmu_pmu->on_cpu;
> +
> +	return 0;
> +}
> +
> +static void smmu_pmu_event_start(struct perf_event *event, int flags)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx = hwc->idx;
> +	u32 filter_span, filter_sid;
> +	u32 evtyper;
> +
> +	hwc->state = 0;
> +
> +	smmu_pmu_set_period(smmu_pmu, hwc);
> +
> +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> +
> +	evtyper = get_event(event) |
> +		  filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
> +
> +	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
> +	smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);

Careful, SMRn and EVTYPERn.FILTER_SID_SPAN are RES0 for n > 0 with 
global filtering, so we shouldn't *always* be writing to them.
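
i.e. something like (sketch):

	evtyper = get_event(event);
	if (!smmu_pmu->sid_filter_global || idx == 0) {
		evtyper |= filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
		smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
	}
	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);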

> +	smmu_pmu_interrupt_enable(smmu_pmu, idx);
> +	smmu_pmu_counter_enable(smmu_pmu, idx);
> +}
> +
> +static void smmu_pmu_event_stop(struct perf_event *event, int flags)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx = hwc->idx;
> +
> +	if (hwc->state & PERF_HES_STOPPED)
> +		return;
> +
> +	smmu_pmu_counter_disable(smmu_pmu, idx);
> +
> +	if (flags & PERF_EF_UPDATE)
> +		smmu_pmu_event_update(event);
> +	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
> +}
> +
> +static int smmu_pmu_event_add(struct perf_event *event, int flags)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx;
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +
> +	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
> +	if (idx < 0)
> +		return idx;
> +
> +	hwc->idx = idx;
> +	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
> +	smmu_pmu->events[idx] = event;
> +	local64_set(&hwc->prev_count, 0);
> +
> +	if (flags & PERF_EF_START)
> +		smmu_pmu_event_start(event, flags);
> +
> +	/* Propagate changes to the userspace mapping. */
> +	perf_event_update_userpage(event);
> +
> +	return 0;
> +}
> +
> +static void smmu_pmu_event_del(struct perf_event *event, int flags)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> +	int idx = hwc->idx;
> +
> +	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
> +	smmu_pmu->events[idx] = NULL;
> +	clear_bit(idx, smmu_pmu->used_counters);
> +
> +	perf_event_update_userpage(event);
> +}
> +
> +static void smmu_pmu_event_read(struct perf_event *event)
> +{
> +	smmu_pmu_event_update(event);
> +}
> +
> +/* cpumask */
> +
> +static ssize_t smmu_pmu_cpumask_show(struct device *dev,
> +				     struct device_attribute *attr,
> +				     char *buf)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
> +
> +	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
> +}
> +
> +static struct device_attribute smmu_pmu_cpumask_attr =
> +		__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);
> +
> +static struct attribute *smmu_pmu_cpumask_attrs[] = {
> +	&smmu_pmu_cpumask_attr.attr,
> +	NULL
> +};
> +
> +static struct attribute_group smmu_pmu_cpumask_group = {
> +	.attrs = smmu_pmu_cpumask_attrs,
> +};
> +
> +/* Events */
> +
> +ssize_t smmu_pmu_event_show(struct device *dev,
> +			    struct device_attribute *attr, char *page)
> +{
> +	struct perf_pmu_events_attr *pmu_attr;
> +
> +	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
> +
> +	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
> +}
> +
> +#define SMMU_EVENT_ATTR(_name, _id)					  \
> +	(&((struct perf_pmu_events_attr[]) {				  \
> +		{ .attr = __ATTR(_name, 0444, smmu_pmu_event_show, NULL), \
> +		  .id = _id, }						  \
> +	})[0].attr.attr)
> +
> +static struct attribute *smmu_pmu_events[] = {
> +	SMMU_EVENT_ATTR(cycles, 0),
> +	SMMU_EVENT_ATTR(transaction, 1),
> +	SMMU_EVENT_ATTR(tlb_miss, 2),
> +	SMMU_EVENT_ATTR(config_cache_miss, 3),
> +	SMMU_EVENT_ATTR(trans_table_walk_access, 4),
> +	SMMU_EVENT_ATTR(config_struct_access, 5),
> +	SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
> +	SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
> +	NULL
> +};
> +
> +static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
> +					 struct attribute *attr, int unused)
> +{
> +	struct device *dev = kobj_to_dev(kobj);
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
> +	struct perf_pmu_events_attr *pmu_attr;
> +
> +	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
> +
> +	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
> +		return attr->mode;
> +
> +	return 0;
> +}
> +static struct attribute_group smmu_pmu_events_group = {
> +	.name = "events",
> +	.attrs = smmu_pmu_events,
> +	.is_visible = smmu_pmu_event_is_visible,
> +};
> +
> +/* Formats */
> +PMU_FORMAT_ATTR(event,		   "config:0-15");
> +PMU_FORMAT_ATTR(filter_stream_id,  "config1:0-31");
> +PMU_FORMAT_ATTR(filter_span,	   "config1:32");
> +PMU_FORMAT_ATTR(filter_enable,	   "config1:33");
> +
> +static struct attribute *smmu_pmu_formats[] = {
> +	&format_attr_event.attr,
> +	&format_attr_filter_stream_id.attr,
> +	&format_attr_filter_span.attr,
> +	&format_attr_filter_enable.attr,
> +	NULL
> +};
> +
> +static struct attribute_group smmu_pmu_format_group = {
> +	.name = "format",
> +	.attrs = smmu_pmu_formats,
> +};
> +
> +static const struct attribute_group *smmu_pmu_attr_grps[] = {
> +	&smmu_pmu_cpumask_group,
> +	&smmu_pmu_events_group,
> +	&smmu_pmu_format_group,
> +	NULL
> +};
> +
> +/*
> + * Generic device handlers
> + */
> +
> +static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
> +{
> +	struct smmu_pmu *smmu_pmu;
> +	unsigned int target;
> +
> +	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
> +	if (cpu != smmu_pmu->on_cpu)
> +		return 0;
> +
> +	target = cpumask_any_but(cpu_online_mask, cpu);
> +	if (target >= nr_cpu_ids)
> +		return 0;
> +
> +	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
> +	smmu_pmu->on_cpu = target;
> +	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
> +
> +	return 0;
> +}
> +
> +static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
> +{
> +	struct smmu_pmu *smmu_pmu = data;
> +	u64 ovsr;
> +	unsigned int idx;
> +
> +	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
> +	if (!ovsr)
> +		return IRQ_NONE;
> +
> +	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
> +
> +	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
> +		struct perf_event *event = smmu_pmu->events[idx];
> +		struct hw_perf_event *hwc;
> +
> +		if (WARN_ON_ONCE(!event))
> +			continue;
> +
> +		smmu_pmu_event_update(event);
> +		hwc = &event->hw;
> +
> +		smmu_pmu_set_period(smmu_pmu, hwc);
> +	}
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
> +{
> +	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
> +	int irq, ret = -ENXIO;
> +
> +	irq = pmu->irq;
> +	if (irq)
> +		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
> +				       flags, "smmuv3-pmu", pmu);
> +	return ret;
> +}
> +
> +static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
> +{
> +	smmu_pmu_disable(&smmu_pmu->pmu);
> +
> +	/* Disable counter and interrupt */
> +	writeq_relaxed(smmu_pmu->counter_present_mask,
> +		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
> +	writeq_relaxed(smmu_pmu->counter_present_mask,
> +		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);

I suppose if we really wanted to save on register-poking in 
event_add/event_del, we *could* permanently enable the per-counter 
interrupts here and just rely on enable/disable/event_start/event_stop 
naturally preventing anything unexpected. That's a little non-obvious, 
though, so unless there proves to be some concrete benefit I'm inclined 
to stick with the logically-straightforward approach suggested yesterday.

> +	writeq_relaxed(smmu_pmu->counter_present_mask,
> +		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
> +}
> +
> +static int smmu_pmu_probe(struct platform_device *pdev)
> +{
> +	struct smmu_pmu *smmu_pmu;
> +	struct resource *res_0, *res_1;
> +	u32 cfgr, reg_size;
> +	u64 ceid_64[2];
> +	int irq, err;
> +	char *name;
> +	struct device *dev = &pdev->dev;
> +
> +	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
> +	if (!smmu_pmu)
> +		return -ENOMEM;
> +
> +	smmu_pmu->dev = dev;
> +	platform_set_drvdata(pdev, smmu_pmu);
> +
> +	smmu_pmu->pmu = (struct pmu) {
> +		.task_ctx_nr    = perf_invalid_context,
> +		.pmu_enable	= smmu_pmu_enable,
> +		.pmu_disable	= smmu_pmu_disable,
> +		.event_init	= smmu_pmu_event_init,
> +		.add		= smmu_pmu_event_add,
> +		.del		= smmu_pmu_event_del,
> +		.start		= smmu_pmu_event_start,
> +		.stop		= smmu_pmu_event_stop,
> +		.read		= smmu_pmu_event_read,
> +		.attr_groups	= smmu_pmu_attr_grps,
> +	};
> +
> +	res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
> +	if (IS_ERR(smmu_pmu->reg_base))
> +		return PTR_ERR(smmu_pmu->reg_base);
> +
> +	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
> +
> +	/* Determine if page 1 is present */
> +	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
> +		res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
> +		smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
> +		if (IS_ERR(smmu_pmu->reloc_base))
> +			return PTR_ERR(smmu_pmu->reloc_base);
> +	} else {
> +		smmu_pmu->reloc_base = smmu_pmu->reg_base;
> +	}
> +
> +	irq = platform_get_irq(pdev, 0);
> +	if (irq > 0)
> +		smmu_pmu->irq = irq;
> +
> +	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
> +	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
> +	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
> +			  SMMU_ARCH_MAX_EVENTS);
> +
> +	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR_MASK, cfgr) + 1;
> +	smmu_pmu->counter_present_mask = GENMASK(smmu_pmu->num_counters - 1, 0);

GENMASK_ULL() for consistency?

Although it looks like we now only ever use counter_present_mask in the 
reset routine anyway, so maybe we could just dynamically generate it 
there instead of storing it.

Robin.

> +
> +	smmu_pmu->sid_filter_global = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
> +
> +	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE_MASK, cfgr);
> +	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
> +
> +	smmu_pmu_reset(smmu_pmu);
> +
> +	err = smmu_pmu_setup_irq(smmu_pmu);
> +	if (err) {
> +		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
> +		return err;
> +	}
> +
> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
> +			      (res_0->start) >> SMMU_PA_SHIFT);
> +	if (!name) {
> +		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
> +		return -EINVAL;
> +	}
> +
> +	/* Pick one CPU to be the preferred one to use */
> +	smmu_pmu->on_cpu = get_cpu();
> +	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
> +
> +	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
> +					       &smmu_pmu->node);
> +	if (err) {
> +		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
> +			err, &res_0->start);
> +		goto out_cpuhp_err;
> +	}
> +
> +	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
> +	if (err) {
> +		dev_err(dev, "Error %d registering PMU @%pa\n",
> +			err, &res_0->start);
> +		goto out_unregister;
> +	}
> +
> +	put_cpu();
> +	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
> +		 &res_0->start, smmu_pmu->num_counters,
> +		 smmu_pmu->sid_filter_global ? "Global(Counter0)" :
> +		 "Individual");
> +
> +	return 0;
> +
> +out_unregister:
> +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
> +out_cpuhp_err:
> +	put_cpu();
> +	return err;
> +}
> +
> +static int smmu_pmu_remove(struct platform_device *pdev)
> +{
> +	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
> +
> +	perf_pmu_unregister(&smmu_pmu->pmu);
> +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
> +
> +	return 0;
> +}
> +
> +static void smmu_pmu_shutdown(struct platform_device *pdev)
> +{
> +	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
> +
> +	smmu_pmu_disable(&smmu_pmu->pmu);
> +}
> +
> +static struct platform_driver smmu_pmu_driver = {
> +	.driver = {
> +		.name = "arm-smmu-v3-pmu",
> +	},
> +	.probe = smmu_pmu_probe,
> +	.remove = smmu_pmu_remove,
> +	.shutdown = smmu_pmu_shutdown,
> +};
> +
> +static int __init arm_smmu_pmu_init(void)
> +{
> +	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
> +				      "perf/arm/pmcg:online",
> +				      NULL,
> +				      smmu_pmu_offline_cpu);
> +	if (cpuhp_state_num < 0)
> +		return cpuhp_state_num;
> +
> +	return platform_driver_register(&smmu_pmu_driver);
> +}
> +module_init(arm_smmu_pmu_init);
> +
> +static void __exit arm_smmu_pmu_exit(void)
> +{
> +	platform_driver_unregister(&smmu_pmu_driver);
> +	cpuhp_remove_multi_state(cpuhp_state_num);
> +}
> +
> +module_exit(arm_smmu_pmu_exit);
> +
> +MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
> +MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
> +MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
> +MODULE_LICENSE("GPL v2");
>
Shameerali Kolothum Thodi - Jan. 28, 2019, 9:10 a.m.
> -----Original Message-----
> From: Robin Murphy [mailto:robin.murphy@arm.com]
> Sent: 25 January 2019 15:14
> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>;
> lorenzo.pieralisi@arm.com
> Cc: jean-philippe.brucker@arm.com; will.deacon@arm.com;
> mark.rutland@arm.com; Guohanjun (Hanjun Guo) <guohanjun@huawei.com>;
> John Garry <john.garry@huawei.com>; pabba@codeaurora.org;
> vkilari@codeaurora.org; rruigrok@codeaurora.org; linux-acpi@vger.kernel.org;
> linux-kernel@vger.kernel.org; linux-arm-kernel@lists.infradead.org; Linuxarm
> <linuxarm@huawei.com>; neil.m.leeder@gmail.com
> Subject: Re: [PATCH v5 2/4] perf: add arm64 smmuv3 pmu driver
> 
> On 30/11/2018 15:47, Shameer Kolothum wrote:

[...]

> > +/*
> > + * This driver adds support for perf events to use the Performance
> > + * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
> > + * to monitor that node.
> > + *
> > + * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
> > + * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
> > + * to 4K boundary. For example, the PMCG at 0xff88840000 is named
> > + * smmuv3_pmcg_ff88840
> > + *
> > + * Filtering by stream id is done by specifying filtering parameters
> > + * with the event. options are:
> > + *   filter_enable    - 0 = no filtering, 1 = filtering enabled
> > + *   filter_span      - 0 = exact match, 1 = pattern match
> > + *   filter_stream_id - pattern to filter against
> > + *
> > + * To match a partial StreamID where the X most-significant bits must match
> > + * but the Y least-significant bits might differ, STREAMID is programmed
> > + * with a value that contains:
> > + *  STREAMID[Y - 1] == 0.
> > + *  STREAMID[Y - 2:0] == 1 (where Y > 1).
> > + * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
> > + * contain a value to match from the corresponding bits of event StreamID.
> > + *
> > + * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
> > + *                    filter_span=1,filter_stream_id=0x42/ -a netperf
> > + * Applies filter pattern 0x42 to transaction events, which means events
> > + * matching stream ids 0x42 and 0x43 are counted. Further filtering
> > + * information is available in the SMMU documentation.
> > + *
> > + * SMMU events are not attributable to a CPU, so task mode and sampling
> > + * are not supported.
> > + */
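
For a concrete reading of the span encoding described above, here is a
sketch of the arithmetic (the helper below is purely illustrative and not
part of the patch):

	/*
	 * Illustrative only: encode a span filter that matches the 2^y
	 * consecutive StreamIDs starting at base (base aligned to 2^y).
	 * As described above, bit (y - 1) is 0, bits (y - 2):0 are 1,
	 * and the match value occupies the bits from y upwards.
	 */
	static u32 smmu_pmcg_encode_span(u32 base, unsigned int y)
	{
		u32 low_bits = (y > 1) ? GENMASK(y - 2, 0) : 0;

		return base | low_bits;	/* bit (y - 1) stays 0 */
	}

	/* smmu_pmcg_encode_span(0x42, 1) == 0x42: matches 0x42-0x43 */
	/* smmu_pmcg_encode_span(0x40, 3) == 0x43: matches 0x40-0x47 */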

[...]

> > +#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
> > +#define SMMU_PMCG_SID_SPAN_SHIFT        29
> > +#define SMMU_PMCG_SID_SPAN_MASK         GENMASK(29, 29)
> 
> Surely just:
> 
> #define SMMU_PMCG_SID_SPAN	BIT(29)
> 
> (or maybe even SMMU_PMCG_EVTYPER_SID_SPAN for clarity)?

Ok.
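
For illustration, with the BIT(29) definition above the evtyper
programming in event_start() could become (a sketch only):

	evtyper = get_event(event) |
		  FIELD_PREP(SMMU_PMCG_SID_SPAN, filter_span);

which keeps the shift implicit in the mask and lets the separate _SHIFT
define go away.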

[...]

> > +#define SMMU_PMCG_CFGR                  0xE00
> > +#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
> > +#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
> > +#define SMMU_PMCG_CFGR_SIZE_MASK        GENMASK(13, 8)
> > +#define SMMU_PMCG_CFGR_NCTR_MASK        GENMASK(5, 0)
> 
> Nit: as long as we use the bitfield accessors consistently, these _MASK
> suffixes are a bit of a waste of space IMO.

Ok.

[...]

> > +#define SMMU_MAX_COUNTERS               64
> > +#define SMMU_ARCH_MAX_EVENT_ID          0x7F
> > +#define SMMU_IMPDEF_MAX_EVENT_ID        0xFFFF
> > +#define SMMU_ARCH_MAX_EVENTS            (SMMU_ARCH_MAX_EVENT_ID + 1)
> 
> Do we really need two definitions of effectively the same thing?

I see your comment below and looks like we can get rid of these.

[...]

> > +static void smmu_pmu_get_event_filter(struct perf_event *event, u32 *span,
> > +				      u32 *stream_id)
> 
> It might just be me, but those u32s seem a little non-obvious given that
> they're both actually communicating boolean values.

Ok. I will try to rewrite this.
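
A sketch of what that rework could look like, with the boolean nature
made explicit (illustrative, not the final code):

	static void smmu_pmu_get_event_filter(struct perf_event *event,
					      bool *span, u32 *stream_id)
	{
		if (get_filter_enable(event)) {
			*span = get_filter_span(event);
			*stream_id = get_filter_stream_id(event);
		} else {
			*span = SMMU_DEFAULT_FILTER_SPAN;
			*stream_id = SMMU_DEFAULT_FILTER_STREAM_ID;
		}
	}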

[...]

> > +static bool smmu_pmu_event_filter_valid(struct smmu_pmu *smmu_pmu,
> > +				  struct perf_event *event, int idx)
> > +{
> > +	u32 filter_span, filter_sid;
> > +	u32 curr_span, curr_sid;
> > +
> > +	if (!idx  || !smmu_pmu->sid_filter_global)
> > +		return true;
> 
> Consider this sequence on an initially-empty PMCG with global filtering:
> 
>   - add event A with filter X
>   - add event B with filter X
>   - delete event A
>   - add event C with filter Y
> 
> event B is suddenly filtered by Y. Oops.

Ahh..Thanks for this. I missed the possibility that a delete event like the above
will make the counter 0 to be reused again.

> > +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> > +
> > +	/* Read the current global filter setting */
> > +	curr_span = FIELD_GET(SMMU_PMCG_SID_SPAN_MASK,
> > +			      readl(smmu_pmu->reg_base + SMMU_PMCG_EVTYPER0));
> > +	curr_sid = readl(smmu_pmu->reg_base + SMMU_PMCG_SMR0);
> 
> The solution to the above problem will likely help avoid this slightly
> yucky reading back of register state, too.

Right. I will change this.
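
One possible shape for the fix, sketched on the assumption that struct
smmu_pmu grows hypothetical global_filter_span/global_filter_sid fields
which are only (re)initialised while no counters are claimed:

	static bool smmu_pmu_check_global_filter(struct smmu_pmu *smmu_pmu,
						 struct perf_event *event)
	{
		u32 span, sid;

		smmu_pmu_get_event_filter(event, &span, &sid);

		/* No counters in use yet: adopt this event's filter */
		if (bitmap_empty(smmu_pmu->used_counters,
				 smmu_pmu->num_counters)) {
			smmu_pmu->global_filter_span = span;
			smmu_pmu->global_filter_sid = sid;
			return true;
		}

		/* Otherwise every new event must match the tracked filter */
		return span == smmu_pmu->global_filter_span &&
		       sid == smmu_pmu->global_filter_sid;
	}

Comparing against software-tracked state instead of counter 0's registers
means freeing counter 0 can no longer let a conflicting filter in while
other counters are still running, and the EVTYPER0/SMR0 read-back goes
away with it.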

> > +

> > +	if (filter_span != curr_span || filter_sid != curr_sid)

> > +		return false;

> > +

> > +	return true;

> > +}

> > +

> > +static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,

> > +				  struct perf_event *event)

> > +{

> > +	unsigned int idx;

> > +	unsigned int num_ctrs = smmu_pmu->num_counters;

> > +

> > +	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);

> > +	if (idx == num_ctrs)

> > +		/* The counters are all in use. */

> > +		return -EAGAIN;

> > +

> > +	if (!smmu_pmu_event_filter_valid(smmu_pmu, event, idx))

> > +		return -EAGAIN;

> > +

> > +	set_bit(idx, smmu_pmu->used_counters);

> > +

> > +	return idx;

> > +}

> > +

> > +/*

> > + * Implementation of abstract pmu functionality required by

> > + * the core perf events code.

> > + */

> > +

> > +static int smmu_pmu_event_init(struct perf_event *event)

> > +{

> > +	struct hw_perf_event *hwc = &event->hw;

> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

> > +	struct device *dev = smmu_pmu->dev;

> > +	struct perf_event *sibling;

> > +	u32 event_id;

> > +

> > +	if (event->attr.type != event->pmu->type)

> > +		return -ENOENT;

> > +

> > +	if (hwc->sample_period) {

> > +		dev_dbg(dev, "Sampling not supported\n");

> > +		return -EOPNOTSUPP;

> > +	}

> > +

> > +	if (event->cpu < 0) {

> > +		dev_dbg(dev, "Per-task mode not supported\n");

> > +		return -EOPNOTSUPP;

> > +	}

> > +

> > +	/* We cannot filter accurately so we just don't allow it. */

> > +	if (event->attr.exclude_user || event->attr.exclude_kernel ||

> > +	    event->attr.exclude_hv || event->attr.exclude_idle) {

> > +		dev_dbg(dev, "Can't exclude execution levels\n");

> > +		return -EOPNOTSUPP;

> > +	}

> > +

> > +	/* Verify specified event is supported on this PMU */

> > +	event_id = get_event(event);

> 

> Given what this does...

> 

> > +	if (((event_id <= SMMU_ARCH_MAX_EVENT_ID) &&

> > +	    (!test_bit(event_id, smmu_pmu->supported_events))) ||

> > +	    (event_id > SMMU_IMPDEF_MAX_EVENT_ID)) {

> 

> ...how can this last condition ever be true? On which note, event_id

> could also be a u16 here, which might aid clarity even more.


True. TBH, I also got carried away by event_id being u32!. I will change this. 
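
A sketch of the tightened check, assuming event_id becomes a u16 and the
IMPDEF bound is dropped (a 16-bit config field can never exceed 0xFFFF,
so the last condition above is indeed dead code):

	u16 event_id = get_event(event);

	/*
	 * IMPDEF events (above the architected ID range) always pass;
	 * architected events must be advertised in the CEID bitmap.
	 */
	if (event_id < SMMU_ARCH_MAX_EVENTS &&
	    !test_bit(event_id, smmu_pmu->supported_events)) {
		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
		return -EINVAL;
	}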

> Really, both MAX_EVENT_ID definitions seem redundant.
> 
> > +		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* Don't allow groups with mixed PMUs, except for s/w events */
> > +	if (event->group_leader->pmu != event->pmu &&
> > +	    !is_software_event(event->group_leader)) {
> > +		dev_dbg(dev, "Can't create mixed PMU group\n");
> > +		return -EINVAL;
> > +	}
> > +
> > +	list_for_each_entry(sibling, &event->group_leader->sibling_list,
> > +			    sibling_list)
> 
> Maybe consider the for_each_sibling_event() helper?

Ok.
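
For reference, the same loop with the helper (for_each_sibling_event()
from <linux/perf_event.h> walks the leader's sibling_list in the same
way):

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling)) {
			dev_dbg(dev, "Can't create mixed PMU group\n");
			return -EINVAL;
		}
	}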

> > +		if (sibling->pmu != event->pmu &&
> > +		    !is_software_event(sibling)) {
> > +			dev_dbg(dev, "Can't create mixed PMU group\n");
> > +			return -EINVAL;
> > +		}
> > +
> > +	/* Ensure all events in a group are on the same cpu */
> > +	if ((event->group_leader != event) &&
> > +	    (event->cpu != event->group_leader->cpu)) {
> > +		dev_dbg(dev, "Can't create group on CPUs %d and %d\n",
> > +			event->cpu, event->group_leader->cpu);
> > +		return -EINVAL;
> > +	}
> 
> Is this really necessary? At this point, event->cpu might not have the
> value that we're just about to force it to, and AFAICS perf_event_open()
> will eventually bail out if they don't end up matching anyway.

Ok. I will double check this
 
[...]

> > +static void smmu_pmu_event_start(struct perf_event *event, int flags)
> > +{
> > +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
> > +	struct hw_perf_event *hwc = &event->hw;
> > +	int idx = hwc->idx;
> > +	u32 filter_span, filter_sid;
> > +	u32 evtyper;
> > +
> > +	hwc->state = 0;
> > +
> > +	smmu_pmu_set_period(smmu_pmu, hwc);
> > +
> > +	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
> > +
> > +	evtyper = get_event(event) |
> > +		  filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
> > +
> > +	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
> > +	smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
> 
> Careful, SMRn and EVTYPERn.FILTER_SID_SPAN are RES0 for n > 0 with
> global filtering, so we shouldn't *always* be writing to them.

Ok.
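
A sketch of guarding those writes, assuming counter 0 alone carries the
filter when sid_filter_global is set (and building on the tracked-filter
idea above):

	evtyper = get_event(event);

	if (!smmu_pmu->sid_filter_global || idx == 0) {
		evtyper |= filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
		smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
	}

	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);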

[...]

> > +static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
> > +{
> > +	smmu_pmu_disable(&smmu_pmu->pmu);
> > +
> > +	/* Disable counter and interrupt */
> > +	writeq_relaxed(smmu_pmu->counter_present_mask,
> > +		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
> > +	writeq_relaxed(smmu_pmu->counter_present_mask,
> > +		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
> 
> I suppose if we really wanted to save on register-poking in
> event_add/event_del, we *could* permanently enable the per-counter
> interrupts here and just rely on enable/disable/event_start/event_stop
> naturally preventing anything unexpected. That's a little non-obvious,
> though, so unless there proves to be some concrete benefit I'm inclined
> to stick with the logically-straightforward approach suggested yesterday.

Sure. I will move the enable/disable to the _add/_del path as discussed.
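
For illustration, a sketch of add/del with the per-counter interrupt
management moved there (start/stop would then only toggle the counter
itself via CNTENSET0/CNTENCLR0):

	static int smmu_pmu_event_add(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
		int idx;

		idx = smmu_pmu_get_event_idx(smmu_pmu, event);
		if (idx < 0)
			return idx;

		hwc->idx = idx;
		hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		smmu_pmu->events[idx] = event;
		local64_set(&hwc->prev_count, 0);

		/* Interrupt stays armed for the counter's whole lifetime */
		smmu_pmu_interrupt_enable(smmu_pmu, idx);

		if (flags & PERF_EF_START)
			smmu_pmu_event_start(event, flags);

		perf_event_update_userpage(event);

		return 0;
	}

	static void smmu_pmu_event_del(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;
		struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
		int idx = hwc->idx;

		smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
		smmu_pmu_interrupt_disable(smmu_pmu, idx);
		smmu_pmu->events[idx] = NULL;
		clear_bit(idx, smmu_pmu->used_counters);

		perf_event_update_userpage(event);
	}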
 
> > +	writeq_relaxed(smmu_pmu->counter_present_mask,
> > +		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
> > +}

[...]

> > +	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR_MASK, cfgr) + 1;
> > +	smmu_pmu->counter_present_mask = GENMASK(smmu_pmu->num_counters - 1, 0);
> 
> GENMASK_ULL() for consistency?

Ok.

> Although it looks like we now only ever use counter_present_mask in the
> reset routine anyway, so maybe we could just dynamically generate it
> there instead of storing it.

Ok.
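
A sketch of the reset routine with the mask generated locally, at which
point counter_present_mask can drop out of struct smmu_pmu entirely:

	static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
	{
		u64 counter_present_mask =
			GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

		smmu_pmu_disable(&smmu_pmu->pmu);

		/* Disable counter and interrupt */
		writeq_relaxed(counter_present_mask,
			       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
		writeq_relaxed(counter_present_mask,
			       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
		writeq_relaxed(counter_present_mask,
			       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
	}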

Thanks,
Shameer

> Robin.
> 

[...]

Patch

diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 08ebaf7..92be6a3 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -52,6 +52,15 @@  config ARM_PMU_ACPI
 	depends on ARM_PMU && ACPI
 	def_bool y
 
+config ARM_SMMU_V3_PMU
+	 bool "ARM SMMUv3 Performance Monitors Extension"
+	 depends on ARM64 && ACPI && ARM_SMMU_V3
+	   help
+	   Provides support for the SMMU version 3 performance monitor unit (PMU)
+	   on ARM-based systems.
+	   Adds the SMMU PMU into the perf events subsystem for
+	   monitoring SMMU performance events.
+
 config ARM_DSU_PMU
 	tristate "ARM DynamIQ Shared Unit (DSU) PMU"
 	depends on ARM64
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index b3902bd..f10a932 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -4,6 +4,7 @@  obj-$(CONFIG_ARM_CCN) += arm-ccn.o
 obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
 obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
 obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
+obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
 obj-$(CONFIG_HISI_PMU) += hisilicon/
 obj-$(CONFIG_QCOM_L2_PMU)	+= qcom_l2_pmu.o
 obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
new file mode 100644
index 0000000..fb9dcd8
--- /dev/null
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -0,0 +1,778 @@ 
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This driver adds support for perf events to use the Performance
+ * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
+ * to monitor that node.
+ *
+ * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
+ * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
+ * to 4K boundary. For example, the PMCG at 0xff88840000 is named
+ * smmuv3_pmcg_ff88840
+ *
+ * Filtering by stream id is done by specifying filtering parameters
+ * with the event. options are:
+ *   filter_enable    - 0 = no filtering, 1 = filtering enabled
+ *   filter_span      - 0 = exact match, 1 = pattern match
+ *   filter_stream_id - pattern to filter against
+ *
+ * To match a partial StreamID where the X most-significant bits must match
+ * but the Y least-significant bits might differ, STREAMID is programmed
+ * with a value that contains:
+ *  STREAMID[Y - 1] == 0.
+ *  STREAMID[Y - 2:0] == 1 (where Y > 1).
+ * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
+ * contain a value to match from the corresponding bits of event StreamID.
+ *
+ * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
+ *                    filter_span=1,filter_stream_id=0x42/ -a netperf
+ * Applies filter pattern 0x42 to transaction events, which means events
+ * matching stream ids 0x42 and 0x43 are counted. Further filtering
+ * information is available in the SMMU documentation.
+ *
+ * SMMU events are not attributable to a CPU, so task mode and sampling
+ * are not supported.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/msi.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define SMMU_PMCG_EVCNTR0               0x0
+#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
+#define SMMU_PMCG_EVTYPER0              0x400
+#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
+#define SMMU_PMCG_SID_SPAN_SHIFT        29
+#define SMMU_PMCG_SID_SPAN_MASK         GENMASK(29, 29)
+#define SMMU_PMCG_SMR0                  0xA00
+#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
+#define SMMU_PMCG_CNTENSET0             0xC00
+#define SMMU_PMCG_CNTENCLR0             0xC20
+#define SMMU_PMCG_INTENSET0             0xC40
+#define SMMU_PMCG_INTENCLR0             0xC60
+#define SMMU_PMCG_OVSCLR0               0xC80
+#define SMMU_PMCG_OVSSET0               0xCC0
+#define SMMU_PMCG_CFGR                  0xE00
+#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
+#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
+#define SMMU_PMCG_CFGR_SIZE_MASK        GENMASK(13, 8)
+#define SMMU_PMCG_CFGR_NCTR_MASK        GENMASK(5, 0)
+#define SMMU_PMCG_CR                    0xE04
+#define SMMU_PMCG_CR_ENABLE             BIT(0)
+#define SMMU_PMCG_CEID0                 0xE20
+#define SMMU_PMCG_CEID1                 0xE28
+#define SMMU_PMCG_IRQ_CTRL              0xE50
+#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
+#define SMMU_PMCG_IRQ_CFG0              0xE58
+
+#define SMMU_DEFAULT_FILTER_SPAN        1
+#define SMMU_DEFAULT_FILTER_STREAM_ID   GENMASK(31, 0)
+
+#define SMMU_MAX_COUNTERS               64
+#define SMMU_ARCH_MAX_EVENT_ID          0x7F
+#define SMMU_IMPDEF_MAX_EVENT_ID        0xFFFF
+#define SMMU_ARCH_MAX_EVENTS            (SMMU_ARCH_MAX_EVENT_ID + 1)
+
+#define SMMU_PA_SHIFT                   12
+
+static int cpuhp_state_num;
+
+struct smmu_pmu {
+	bool sid_filter_global;
+	struct hlist_node node;
+	struct perf_event *events[SMMU_MAX_COUNTERS];
+	DECLARE_BITMAP(used_counters, SMMU_MAX_COUNTERS);
+	DECLARE_BITMAP(supported_events, SMMU_ARCH_MAX_EVENTS);
+	unsigned int irq;
+	unsigned int on_cpu;
+	struct pmu pmu;
+	unsigned int num_counters;
+	struct device *dev;
+	void __iomem *reg_base;
+	void __iomem *reloc_base;
+	u64 counter_present_mask;
+	u64 counter_mask;
+};
+
+#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
+
+#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)        \
+	static inline u32 get_##_name(struct perf_event *event)            \
+	{                                                                  \
+		return FIELD_GET(GENMASK_ULL(_end, _start),                \
+				 event->attr._config);                     \
+	}                                                                  \
+
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
+
+static inline void smmu_pmu_enable(struct pmu *pmu)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+
+	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
+	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
+	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+}
+
+static inline void smmu_pmu_disable(struct pmu *pmu)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+
+	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
+	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+}
+
+static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
+					      u32 idx, u64 value)
+{
+	if (smmu_pmu->counter_mask & BIT(32))
+		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
+	else
+		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
+}
+
+static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+	u64 value;
+
+	if (smmu_pmu->counter_mask & BIT(32))
+		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
+	else
+		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
+
+	return value;
+}
+
+static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
+}
+
+static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
+}
+
+static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
+}
+
+static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
+					      u32 idx)
+{
+	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
+}
+
+static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
+					u32 val)
+{
+	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
+}
+
+static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
+{
+	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
+}
+
+static void smmu_pmu_event_update(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+	u64 delta, prev, now;
+	u32 idx = hwc->idx;
+
+	do {
+		prev = local64_read(&hwc->prev_count);
+		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
+	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
+
+	/* handle overflow. */
+	delta = now - prev;
+	delta &= smmu_pmu->counter_mask;
+
+	local64_add(delta, &event->count);
+}
+
+static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
+				struct hw_perf_event *hwc)
+{
+	u32 idx = hwc->idx;
+	u64 new;
+
+	/*
+	 * We limit the max period to half the max counter value of the counter
+	 * size, so that even in the case of extreme interrupt latency the
+	 * counter will (hopefully) not wrap past its initial value.
+	 */
+	new = smmu_pmu->counter_mask >> 1;
+
+	local64_set(&hwc->prev_count, new);
+	smmu_pmu_counter_set_value(smmu_pmu, idx, new);
+}
+
+static void smmu_pmu_get_event_filter(struct perf_event *event, u32 *span,
+				      u32 *stream_id)
+{
+	bool filter_en = !!get_filter_enable(event);
+
+	*span = filter_en ? get_filter_span(event) : SMMU_DEFAULT_FILTER_SPAN;
+	*stream_id = filter_en ? get_filter_stream_id(event) :
+				 SMMU_DEFAULT_FILTER_STREAM_ID;
+}
+
+static bool smmu_pmu_event_filter_valid(struct smmu_pmu *smmu_pmu,
+				  struct perf_event *event, int idx)
+{
+	u32 filter_span, filter_sid;
+	u32 curr_span, curr_sid;
+
+	if (!idx  || !smmu_pmu->sid_filter_global)
+		return true;
+
+	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
+
+	/* Read the current global filter setting */
+	curr_span = FIELD_GET(SMMU_PMCG_SID_SPAN_MASK,
+			      readl(smmu_pmu->reg_base + SMMU_PMCG_EVTYPER0));
+	curr_sid = readl(smmu_pmu->reg_base + SMMU_PMCG_SMR0);
+
+	if (filter_span != curr_span || filter_sid != curr_sid)
+		return false;
+
+	return true;
+}
+
+static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
+				  struct perf_event *event)
+{
+	unsigned int idx;
+	unsigned int num_ctrs = smmu_pmu->num_counters;
+
+	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
+	if (idx == num_ctrs)
+		/* The counters are all in use. */
+		return -EAGAIN;
+
+	if (!smmu_pmu_event_filter_valid(smmu_pmu, event, idx))
+		return -EAGAIN;
+
+	set_bit(idx, smmu_pmu->used_counters);
+
+	return idx;
+}
+
+/*
+ * Implementation of abstract pmu functionality required by
+ * the core perf events code.
+ */
+
+static int smmu_pmu_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+	struct device *dev = smmu_pmu->dev;
+	struct perf_event *sibling;
+	u32 event_id;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	if (hwc->sample_period) {
+		dev_dbg(dev, "Sampling not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (event->cpu < 0) {
+		dev_dbg(dev, "Per-task mode not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* We cannot filter accurately so we just don't allow it. */
+	if (event->attr.exclude_user || event->attr.exclude_kernel ||
+	    event->attr.exclude_hv || event->attr.exclude_idle) {
+		dev_dbg(dev, "Can't exclude execution levels\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* Verify specified event is supported on this PMU */
+	event_id = get_event(event);
+	if (((event_id <= SMMU_ARCH_MAX_EVENT_ID) &&
+	    (!test_bit(event_id, smmu_pmu->supported_events))) ||
+	    (event_id > SMMU_IMPDEF_MAX_EVENT_ID)) {
+		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
+		return -EINVAL;
+	}
+
+	/* Don't allow groups with mixed PMUs, except for s/w events */
+	if (event->group_leader->pmu != event->pmu &&
+	    !is_software_event(event->group_leader)) {
+		dev_dbg(dev, "Can't create mixed PMU group\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(sibling, &event->group_leader->sibling_list,
+			    sibling_list)
+		if (sibling->pmu != event->pmu &&
+		    !is_software_event(sibling)) {
+			dev_dbg(dev, "Can't create mixed PMU group\n");
+			return -EINVAL;
+		}
+
+	/* Ensure all events in a group are on the same cpu */
+	if ((event->group_leader != event) &&
+	    (event->cpu != event->group_leader->cpu)) {
+		dev_dbg(dev, "Can't create group on CPUs %d and %d\n",
+			event->cpu, event->group_leader->cpu);
+		return -EINVAL;
+	}
+
+	hwc->idx = -1;
+
+	/*
+	 * Ensure all events are on the same cpu so all events are in the
+	 * same cpu context, to avoid races on pmu_enable etc.
+	 */
+	event->cpu = smmu_pmu->on_cpu;
+
+	return 0;
+}
+
+static void smmu_pmu_event_start(struct perf_event *event, int flags)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	u32 filter_span, filter_sid;
+	u32 evtyper;
+
+	hwc->state = 0;
+
+	smmu_pmu_set_period(smmu_pmu, hwc);
+
+	smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
+
+	evtyper = get_event(event) |
+		  filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
+
+	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
+	smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
+	smmu_pmu_interrupt_enable(smmu_pmu, idx);
+	smmu_pmu_counter_enable(smmu_pmu, idx);
+}
+
+static void smmu_pmu_event_stop(struct perf_event *event, int flags)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (hwc->state & PERF_HES_STOPPED)
+		return;
+
+	smmu_pmu_counter_disable(smmu_pmu, idx);
+
+	if (flags & PERF_EF_UPDATE)
+		smmu_pmu_event_update(event);
+	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int smmu_pmu_event_add(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+
+	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
+	if (idx < 0)
+		return idx;
+
+	hwc->idx = idx;
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	smmu_pmu->events[idx] = event;
+	local64_set(&hwc->prev_count, 0);
+
+	if (flags & PERF_EF_START)
+		smmu_pmu_event_start(event, flags);
+
+	/* Propagate changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+	return 0;
+}
+
+static void smmu_pmu_event_del(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+	int idx = hwc->idx;
+
+	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
+	smmu_pmu->events[idx] = NULL;
+	clear_bit(idx, smmu_pmu->used_counters);
+
+	perf_event_update_userpage(event);
+}
+
+static void smmu_pmu_event_read(struct perf_event *event)
+{
+	smmu_pmu_event_update(event);
+}
+
+/* cpumask */
+
+static ssize_t smmu_pmu_cpumask_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
+
+	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
+}
+
+static struct device_attribute smmu_pmu_cpumask_attr =
+		__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);
+
+static struct attribute *smmu_pmu_cpumask_attrs[] = {
+	&smmu_pmu_cpumask_attr.attr,
+	NULL
+};
+
+static struct attribute_group smmu_pmu_cpumask_group = {
+	.attrs = smmu_pmu_cpumask_attrs,
+};
+
+/* Events */
+
+static ssize_t smmu_pmu_event_show(struct device *dev,
+				   struct device_attribute *attr, char *page)
+{
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
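+/*
+ * Wrap an anonymous perf_pmu_events_attr so that each entry in
+ * smmu_pmu_events[] carries its architected event ID.
+ */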
+#define SMMU_EVENT_ATTR(_name, _id)					  \
+	(&((struct perf_pmu_events_attr[]) {				  \
+		{ .attr = __ATTR(_name, 0444, smmu_pmu_event_show, NULL), \
+		  .id = _id, }						  \
+	})[0].attr.attr)
+
+static struct attribute *smmu_pmu_events[] = {
+	SMMU_EVENT_ATTR(cycles, 0),
+	SMMU_EVENT_ATTR(transaction, 1),
+	SMMU_EVENT_ATTR(tlb_miss, 2),
+	SMMU_EVENT_ATTR(config_cache_miss, 3),
+	SMMU_EVENT_ATTR(trans_table_walk_access, 4),
+	SMMU_EVENT_ATTR(config_struct_access, 5),
+	SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
+	SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
+	NULL
+};
+
+static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
+					 struct attribute *attr, int unused)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
+
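+	/* Only expose events the hardware advertises via SMMU_PMCG_CEIDn */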
+	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
+		return attr->mode;
+
+	return 0;
+}
+
+static struct attribute_group smmu_pmu_events_group = {
+	.name = "events",
+	.attrs = smmu_pmu_events,
+	.is_visible = smmu_pmu_event_is_visible,
+};
+
+/* Formats */
+PMU_FORMAT_ATTR(event,		   "config:0-15");
+PMU_FORMAT_ATTR(filter_stream_id,  "config1:0-31");
+PMU_FORMAT_ATTR(filter_span,	   "config1:32");
+PMU_FORMAT_ATTR(filter_enable,	   "config1:33");
+
+static struct attribute *smmu_pmu_formats[] = {
+	&format_attr_event.attr,
+	&format_attr_filter_stream_id.attr,
+	&format_attr_filter_span.attr,
+	&format_attr_filter_enable.attr,
+	NULL
+};
+
+static struct attribute_group smmu_pmu_format_group = {
+	.name = "format",
+	.attrs = smmu_pmu_formats,
+};
+
+static const struct attribute_group *smmu_pmu_attr_grps[] = {
+	&smmu_pmu_cpumask_group,
+	&smmu_pmu_events_group,
+	&smmu_pmu_format_group,
+	NULL
+};
+
+/*
+ * Generic device handlers
+ */
+
+static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct smmu_pmu *smmu_pmu;
+	unsigned int target;
+
+	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
+	if (cpu != smmu_pmu->on_cpu)
+		return 0;
+
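+	/* Migrate the perf context and IRQ affinity to another online CPU */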
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+
+	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
+	smmu_pmu->on_cpu = target;
+	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
+
+	return 0;
+}
+
+static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
+{
+	struct smmu_pmu *smmu_pmu = data;
+	u64 ovsr;
+	unsigned int idx;
+
+	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
+	if (!ovsr)
+		return IRQ_NONE;
+
+	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
+
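+	/* Update and restart each counter that overflowed */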
+	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
+		struct perf_event *event = smmu_pmu->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (WARN_ON_ONCE(!event))
+			continue;
+
+		smmu_pmu_event_update(event);
+		hwc = &event->hw;
+
+		smmu_pmu_set_period(smmu_pmu, hwc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
+{
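+	/* The driver manages IRQ affinity itself, hence IRQF_NOBALANCING */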
+	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
+	int irq, ret = -ENXIO;
+
+	irq = pmu->irq;
+	if (irq)
+		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
+				       flags, "smmuv3-pmu", pmu);
+	return ret;
+}
+
+static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
+{
+	smmu_pmu_disable(&smmu_pmu->pmu);
+
+	/* Disable counter and interrupt */
+	writeq_relaxed(smmu_pmu->counter_present_mask,
+		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
+	writeq_relaxed(smmu_pmu->counter_present_mask,
+		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
+	writeq_relaxed(smmu_pmu->counter_present_mask,
+		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
+}
+
+static int smmu_pmu_probe(struct platform_device *pdev)
+{
+	struct smmu_pmu *smmu_pmu;
+	struct resource *res_0, *res_1;
+	u32 cfgr, reg_size;
+	u64 ceid_64[2];
+	int irq, err;
+	char *name;
+	struct device *dev = &pdev->dev;
+
+	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
+	if (!smmu_pmu)
+		return -ENOMEM;
+
+	smmu_pmu->dev = dev;
+	platform_set_drvdata(pdev, smmu_pmu);
+
+	smmu_pmu->pmu = (struct pmu) {
+		.task_ctx_nr    = perf_invalid_context,
+		.pmu_enable	= smmu_pmu_enable,
+		.pmu_disable	= smmu_pmu_disable,
+		.event_init	= smmu_pmu_event_init,
+		.add		= smmu_pmu_event_add,
+		.del		= smmu_pmu_event_del,
+		.start		= smmu_pmu_event_start,
+		.stop		= smmu_pmu_event_stop,
+		.read		= smmu_pmu_event_read,
+		.attr_groups	= smmu_pmu_attr_grps,
+	};
+
+	res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
+	if (IS_ERR(smmu_pmu->reg_base))
+		return PTR_ERR(smmu_pmu->reg_base);
+
+	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
+
+	/* Determine if page 1 is present */
+	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
+		res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
+		if (IS_ERR(smmu_pmu->reloc_base))
+			return PTR_ERR(smmu_pmu->reloc_base);
+	} else {
+		smmu_pmu->reloc_base = smmu_pmu->reg_base;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq > 0)
+		smmu_pmu->irq = irq;
+
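+	/* CEID0/1 advertise which architected events this PMCG implements */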
+	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
+	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
+	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
+			  SMMU_ARCH_MAX_EVENTS);
+
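+	/* CFGR.NCTR encodes the number of counters minus one */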
+	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR_MASK, cfgr) + 1;
+	smmu_pmu->counter_present_mask = GENMASK(smmu_pmu->num_counters - 1, 0);
+
+	smmu_pmu->sid_filter_global = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
+
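+	/* CFGR.SIZE encodes the counter width in bits minus one */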
+	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE_MASK, cfgr);
+	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
+
+	smmu_pmu_reset(smmu_pmu);
+
+	err = smmu_pmu_setup_irq(smmu_pmu);
+	if (err) {
+		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
+		return err;
+	}
+
+	name = devm_kasprintf(dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
+			      (res_0->start) >> SMMU_PA_SHIFT);
+	if (!name) {
+		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
+		return -ENOMEM;
+	}
+
+	/* Pick one CPU to be the preferred one to use */
+	smmu_pmu->on_cpu = raw_smp_processor_id();
+	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
+
+	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
+					       &smmu_pmu->node);
+	if (err) {
+		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
+			err, &res_0->start);
+		return err;
+	}
+
+	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
+	if (err) {
+		dev_err(dev, "Error %d registering PMU @%pa\n",
+			err, &res_0->start);
+		goto out_unregister;
+	}
+
+	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
+		 &res_0->start, smmu_pmu->num_counters,
+		 smmu_pmu->sid_filter_global ? "Global(Counter0)" :
+		 "Individual");
+
+	return 0;
+
+out_unregister:
+	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
+	return err;
+}
+
+static int smmu_pmu_remove(struct platform_device *pdev)
+{
+	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
+
+	perf_pmu_unregister(&smmu_pmu->pmu);
+	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
+
+	return 0;
+}
+
+static void smmu_pmu_shutdown(struct platform_device *pdev)
+{
+	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
+
+	smmu_pmu_disable(&smmu_pmu->pmu);
+}
+
+static struct platform_driver smmu_pmu_driver = {
+	.driver = {
+		.name = "arm-smmu-v3-pmu",
+	},
+	.probe = smmu_pmu_probe,
+	.remove = smmu_pmu_remove,
+	.shutdown = smmu_pmu_shutdown,
+};
+
+static int __init arm_smmu_pmu_init(void)
+{
+	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+				      "perf/arm/pmcg:online",
+				      NULL,
+				      smmu_pmu_offline_cpu);
+	if (cpuhp_state_num < 0)
+		return cpuhp_state_num;
+
+	return platform_driver_register(&smmu_pmu_driver);
+}
+module_init(arm_smmu_pmu_init);
+
+static void __exit arm_smmu_pmu_exit(void)
+{
+	platform_driver_unregister(&smmu_pmu_driver);
+	cpuhp_remove_multi_state(cpuhp_state_num);
+}
+
+module_exit(arm_smmu_pmu_exit);
+
+MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
+MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
+MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
+MODULE_LICENSE("GPL v2");