Patchwork [v7,20/27] arm64/sve: In-kernel vector length availability query interface

login
register
mail settings
Submitter Dave Martin
Date March 29, 2019, 1 p.m.
Message ID <1553864452-15080-21-git-send-email-Dave.Martin@arm.com>
Download mbox | patch
Permalink /patch/761179/
State New
Headers show

Comments

Dave Martin - March 29, 2019, 1 p.m.
KVM will need to interrogate the set of SVE vector lengths
available on the system.

This patch exposes the relevant bits to the kernel, along with a
sve_vq_available() helper to check whether a particular vector
length is supported.

__vq_to_bit() and __bit_to_vq() are not intended for use outside
these functions: now that these are exposed outside fpsimd.c, they
are prefixed with __ in order to provide an extra hint that they
are not intended for general-purpose use.

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>
---
 arch/arm64/include/asm/fpsimd.h | 29 +++++++++++++++++++++++++++++
 arch/arm64/kernel/fpsimd.c      | 35 ++++++++---------------------------
 2 files changed, 37 insertions(+), 27 deletions(-)
Andrew Jones - April 4, 2019, 2:20 p.m.
On Fri, Mar 29, 2019 at 01:00:45PM +0000, Dave Martin wrote:
> KVM will need to interrogate the set of SVE vector lengths
> available on the system.
> 
> This patch exposes the relevant bits to the kernel, along with a
> sve_vq_available() helper to check whether a particular vector
> length is supported.
> 
> __vq_to_bit() and __bit_to_vq() are not intended for use outside
> these functions: now that these are exposed outside fpsimd.c, they
> are prefixed with __ in order to provide an extra hint that they
> are not intended for general-purpose use.
> 
> Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>
> ---
>  arch/arm64/include/asm/fpsimd.h | 29 +++++++++++++++++++++++++++++
>  arch/arm64/kernel/fpsimd.c      | 35 ++++++++---------------------------
>  2 files changed, 37 insertions(+), 27 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
> index df7a143..ad6d2e4 100644
> --- a/arch/arm64/include/asm/fpsimd.h
> +++ b/arch/arm64/include/asm/fpsimd.h
> @@ -24,10 +24,13 @@
>  
>  #ifndef __ASSEMBLY__
>  
> +#include <linux/bitmap.h>
>  #include <linux/build_bug.h>
> +#include <linux/bug.h>
>  #include <linux/cache.h>
>  #include <linux/init.h>
>  #include <linux/stddef.h>
> +#include <linux/types.h>
>  
>  #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
>  /* Masks for extracting the FPSR and FPCR from the FPSCR */
> @@ -89,6 +92,32 @@ extern u64 read_zcr_features(void);
>  
>  extern int __ro_after_init sve_max_vl;
>  extern int __ro_after_init sve_max_virtualisable_vl;
> +/* Set of available vector lengths, as vq_to_bit(vq): */

s/as/for use with/ ?
s/vq_to_bit/__vq_to_bit/

> +extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
> +
> +/*
> + * Helpers to translate bit indices in sve_vq_map to VQ values (and
> + * vice versa).  This allows find_next_bit() to be used to find the
> + * _maximum_ VQ not exceeding a certain value.
> + */
> +static inline unsigned int __vq_to_bit(unsigned int vq)
> +{

Why not have the same WARN_ON and clamping here as we do
in __bit_to_vq. Here a vq > SVE_VQ_MAX will wrap around
to a super high bit.

> +	return SVE_VQ_MAX - vq;
> +}
> +
> +static inline unsigned int __bit_to_vq(unsigned int bit)
> +{
> +	if (WARN_ON(bit >= SVE_VQ_MAX))
> +		bit = SVE_VQ_MAX - 1;
> +
> +	return SVE_VQ_MAX - bit;
> +}
> +
> +/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */

Are we avoiding putting these tests and WARN_ONs in this function to
keep it fast?

> +static inline bool sve_vq_available(unsigned int vq)
> +{
> +	return test_bit(__vq_to_bit(vq), sve_vq_map);
> +}
>  
>  #ifdef CONFIG_ARM64_SVE
>  
> diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
> index 8a93afa..577296b 100644
> --- a/arch/arm64/kernel/fpsimd.c
> +++ b/arch/arm64/kernel/fpsimd.c
> @@ -136,7 +136,7 @@ static int sve_default_vl = -1;
>  int __ro_after_init sve_max_vl = SVE_VL_MIN;
>  int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
>  /* Set of available vector lengths, as vq_to_bit(vq): */
> -static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
> +__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
>  /* Set of vector lengths present on at least one cpu: */
>  static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
>  static void __percpu *efi_sve_state;
> @@ -270,25 +270,6 @@ void fpsimd_save(void)
>  }
>  
>  /*
> - * Helpers to translate bit indices in sve_vq_map to VQ values (and
> - * vice versa).  This allows find_next_bit() to be used to find the
> - * _maximum_ VQ not exceeding a certain value.
> - */
> -
> -static unsigned int vq_to_bit(unsigned int vq)
> -{
> -	return SVE_VQ_MAX - vq;
> -}
> -
> -static unsigned int bit_to_vq(unsigned int bit)
> -{
> -	if (WARN_ON(bit >= SVE_VQ_MAX))
> -		bit = SVE_VQ_MAX - 1;
> -
> -	return SVE_VQ_MAX - bit;
> -}
> -
> -/*
>   * All vector length selection from userspace comes through here.
>   * We're on a slow path, so some sanity-checks are included.
>   * If things go wrong there's a bug somewhere, but try to fall back to a
> @@ -309,8 +290,8 @@ static unsigned int find_supported_vector_length(unsigned int vl)
>  		vl = max_vl;
>  
>  	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
> -			    vq_to_bit(sve_vq_from_vl(vl)));
> -	return sve_vl_from_vq(bit_to_vq(bit));
> +			    __vq_to_bit(sve_vq_from_vl(vl)));
> +	return sve_vl_from_vq(__bit_to_vq(bit));
>  }
>  
>  #ifdef CONFIG_SYSCTL
> @@ -648,7 +629,7 @@ static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
>  		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
>  		vl = sve_get_vl();
>  		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
> -		set_bit(vq_to_bit(vq), map);
> +		set_bit(__vq_to_bit(vq), map);
>  	}
>  }
>  
> @@ -717,7 +698,7 @@ int sve_verify_vq_map(void)
>  	 * Mismatches above sve_max_virtualisable_vl are fine, since
>  	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
>  	 */
> -	if (sve_vl_from_vq(bit_to_vq(b)) <= sve_max_virtualisable_vl) {
> +	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
>  		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
>  			smp_processor_id());
>  		return -EINVAL;
> @@ -801,8 +782,8 @@ void __init sve_setup(void)
>  	 * so sve_vq_map must have at least SVE_VQ_MIN set.
>  	 * If something went wrong, at least try to patch it up:
>  	 */
> -	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
> -		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
> +	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
> +		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
>  
>  	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
>  	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
> @@ -831,7 +812,7 @@ void __init sve_setup(void)
>  		/* No virtualisable VLs?  This is architecturally forbidden. */
>  		sve_max_virtualisable_vl = SVE_VQ_MIN;
>  	else /* b + 1 < SVE_VQ_MAX */
> -		sve_max_virtualisable_vl = sve_vl_from_vq(bit_to_vq(b + 1));
> +		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
>  
>  	if (sve_max_virtualisable_vl > sve_max_vl)
>  		sve_max_virtualisable_vl = sve_max_vl;
> -- 
> 2.1.4

Thanks,
drew
Dave Martin - April 5, 2019, 9:35 a.m.
On Thu, Apr 04, 2019 at 04:20:34PM +0200, Andrew Jones wrote:
> On Fri, Mar 29, 2019 at 01:00:45PM +0000, Dave Martin wrote:
> > KVM will need to interrogate the set of SVE vector lengths
> > available on the system.
> > 
> > This patch exposes the relevant bits to the kernel, along with a
> > sve_vq_available() helper to check whether a particular vector
> > length is supported.
> > 
> > __vq_to_bit() and __bit_to_vq() are not intended for use outside
> > these functions: now that these are exposed outside fpsimd.c, they
> > are prefixed with __ in order to provide an extra hint that they
> > are not intended for general-purpose use.
> > 
> > Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> > Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> > Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>
> > ---
> >  arch/arm64/include/asm/fpsimd.h | 29 +++++++++++++++++++++++++++++
> >  arch/arm64/kernel/fpsimd.c      | 35 ++++++++---------------------------
> >  2 files changed, 37 insertions(+), 27 deletions(-)
> > 
> > diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
> > index df7a143..ad6d2e4 100644
> > --- a/arch/arm64/include/asm/fpsimd.h
> > +++ b/arch/arm64/include/asm/fpsimd.h
> > @@ -24,10 +24,13 @@
> >  
> >  #ifndef __ASSEMBLY__
> >  
> > +#include <linux/bitmap.h>
> >  #include <linux/build_bug.h>
> > +#include <linux/bug.h>
> >  #include <linux/cache.h>
> >  #include <linux/init.h>
> >  #include <linux/stddef.h>
> > +#include <linux/types.h>
> >  
> >  #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
> >  /* Masks for extracting the FPSR and FPCR from the FPSCR */
> > @@ -89,6 +92,32 @@ extern u64 read_zcr_features(void);
> >  
> >  extern int __ro_after_init sve_max_vl;
> >  extern int __ro_after_init sve_max_virtualisable_vl;
> > +/* Set of available vector lengths, as vq_to_bit(vq): */
> 
> s/as/for use with/ ?

Not exactly.  Does the following work for you:

/*
 * Set of available vector lengths
 * Vector length vq is encoded as bit __vq_to_bit(vq):
 */

> s/vq_to_bit/__vq_to_bit/

Ack: that got renamed when I moved it to fpsimd.h, but I clearly didn't
update the comment as I pasted it across.

> 
> > +extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
> > +
> > +/*
> > + * Helpers to translate bit indices in sve_vq_map to VQ values (and
> > + * vice versa).  This allows find_next_bit() to be used to find the
> > + * _maximum_ VQ not exceeding a certain value.
> > + */
> > +static inline unsigned int __vq_to_bit(unsigned int vq)
> > +{
> 
> Why not have the same WARN_ON and clamping here as we do
> in __bit_to_vq. Here a vq > SVE_VQ_MAX will wrap around
> to a super high bit.
> 
> > +	return SVE_VQ_MAX - vq;
> > +}
> > +
> > +static inline unsigned int __bit_to_vq(unsigned int bit)
> > +{
> > +	if (WARN_ON(bit >= SVE_VQ_MAX))
> > +		bit = SVE_VQ_MAX - 1;
> > +
> > +	return SVE_VQ_MAX - bit;
> > +}
> > +
> > +/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
> 
> Are we avoiding putting these tests and WARN_ONs in this function to
> keep it fast?

These are intended as backend for use only by fpsimd.c and this header,
so peppering them with WARN_ON() felt excessive.  I don't expect a lot
of new calls to these (or any, probably).

I don't recall why I kept the WARN_ON() just in __bit_to_vq(), except
that the way that gets called is a bit more complex in some places.

Are you happy to replace these with comments?  e.g.:

/* Only valid when vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX */
__vq_to_bit()

/* Only valid when bit < SVE_VQ_MAX */
__bit_to_vq()


OTOH, these are not used on fast paths, so maybe having both as
WARN_ON() would be better.  Part of the problem is knowing what to clamp
to: these are generally used in conjunction with looping or bitmap find
operations, so the caller may be making assumptions about the return
value that may be wrong when the value is clamped.

Alternatively, these could be BUG() -- but that seems heavy.

What do you think?

[...]

Cheers
---Dave
Andrew Jones - April 5, 2019, 9:54 a.m.
On Fri, Apr 05, 2019 at 10:35:55AM +0100, Dave Martin wrote:
> On Thu, Apr 04, 2019 at 04:20:34PM +0200, Andrew Jones wrote:
> > On Fri, Mar 29, 2019 at 01:00:45PM +0000, Dave Martin wrote:
> > > KVM will need to interrogate the set of SVE vector lengths
> > > available on the system.
> > > 
> > > This patch exposes the relevant bits to the kernel, along with a
> > > sve_vq_available() helper to check whether a particular vector
> > > length is supported.
> > > 
> > > __vq_to_bit() and __bit_to_vq() are not intended for use outside
> > > these functions: now that these are exposed outside fpsimd.c, they
> > > are prefixed with __ in order to provide an extra hint that they
> > > are not intended for general-purpose use.
> > > 
> > > Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> > > Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> > > Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>
> > > ---
> > >  arch/arm64/include/asm/fpsimd.h | 29 +++++++++++++++++++++++++++++
> > >  arch/arm64/kernel/fpsimd.c      | 35 ++++++++---------------------------
> > >  2 files changed, 37 insertions(+), 27 deletions(-)
> > > 
> > > diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
> > > index df7a143..ad6d2e4 100644
> > > --- a/arch/arm64/include/asm/fpsimd.h
> > > +++ b/arch/arm64/include/asm/fpsimd.h
> > > @@ -24,10 +24,13 @@
> > >  
> > >  #ifndef __ASSEMBLY__
> > >  
> > > +#include <linux/bitmap.h>
> > >  #include <linux/build_bug.h>
> > > +#include <linux/bug.h>
> > >  #include <linux/cache.h>
> > >  #include <linux/init.h>
> > >  #include <linux/stddef.h>
> > > +#include <linux/types.h>
> > >  
> > >  #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
> > >  /* Masks for extracting the FPSR and FPCR from the FPSCR */
> > > @@ -89,6 +92,32 @@ extern u64 read_zcr_features(void);
> > >  
> > >  extern int __ro_after_init sve_max_vl;
> > >  extern int __ro_after_init sve_max_virtualisable_vl;
> > > +/* Set of available vector lengths, as vq_to_bit(vq): */
> > 
> > s/as/for use with/ ?
> 
> Not exactly.  Does the following work for you:
> 
> /*
>  * Set of available vector lengths
>  * Vector length vq is encoded as bit __vq_to_bit(vq):
>  */

Yes. That reads much better.

> 
> > s/vq_to_bit/__vq_to_bit/
> 
> Ack: that got renamed when I moved it to fpsimd.h, but I clearly didn't
> update the comment as I pasted it across.
> 
> > 
> > > +extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
> > > +
> > > +/*
> > > + * Helpers to translate bit indices in sve_vq_map to VQ values (and
> > > + * vice versa).  This allows find_next_bit() to be used to find the
> > > + * _maximum_ VQ not exceeding a certain value.
> > > + */
> > > +static inline unsigned int __vq_to_bit(unsigned int vq)
> > > +{
> > 
> > Why not have the same WARN_ON and clamping here as we do
> > in __bit_to_vq. Here a vq > SVE_VQ_MAX will wrap around
> > to a super high bit.
> > 
> > > +	return SVE_VQ_MAX - vq;
> > > +}
> > > +
> > > +static inline unsigned int __bit_to_vq(unsigned int bit)
> > > +{
> > > +	if (WARN_ON(bit >= SVE_VQ_MAX))
> > > +		bit = SVE_VQ_MAX - 1;
> > > +
> > > +	return SVE_VQ_MAX - bit;
> > > +}
> > > +
> > > +/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
> > 
> > Are we avoiding putting these tests and WARN_ONs in this function to
> > keep it fast?
> 
> These are intended as backend for use only by fpsimd.c and this header,
> so peppering them with WARN_ON() felt excessive.  I don't expect a lot
> of new calls to these (or any, probably).
> 
> I don't recall why I kept the WARN_ON() just in __bit_to_vq(), except
> that the way that gets called is a bit more complex in some places.
> 
> Are you happy to replace these with comments?  e.g.:
> 
> /* Only valid when vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX */
> __vq_to_bit()
> 
> /* Only valid when bit < SVE_VQ_MAX */
> __bit_to_vq()
> 
> 
> OTOH, these are not used on fast paths, so maybe having both as
> WARN_ON() would be better.  Part of the problem is knowing what to clamp
> to: these are generally used in conjunction with looping or bitmap find
> operations, so the caller may be making assumptions about the return
> value that may be wrong when the value is clamped.
> 
> Alternatively, these could be BUG() -- but that seems heavy.
> 
> What do you think?

I like the idea of having WARN_ON's to enforce the constraints. I
wouldn't be completely opposed to not having anything other than
the comments, though, as there is a limit to how defensive we should
be. I'll abstain from this vote.

Thanks,
drew
Dave Martin - April 5, 2019, 11:13 a.m.
On Fri, Apr 05, 2019 at 11:54:07AM +0200, Andrew Jones wrote:
> On Fri, Apr 05, 2019 at 10:35:55AM +0100, Dave Martin wrote:
> > On Thu, Apr 04, 2019 at 04:20:34PM +0200, Andrew Jones wrote:
> > > On Fri, Mar 29, 2019 at 01:00:45PM +0000, Dave Martin wrote:
> > > > KVM will need to interrogate the set of SVE vector lengths
> > > > available on the system.
> > > > 
> > > > This patch exposes the relevant bits to the kernel, along with a
> > > > sve_vq_available() helper to check whether a particular vector
> > > > length is supported.
> > > > 
> > > > __vq_to_bit() and __bit_to_vq() are not intended for use outside
> > > > these functions: now that these are exposed outside fpsimd.c, they
> > > > are prefixed with __ in order to provide an extra hint that they
> > > > are not intended for general-purpose use.
> > > > 
> > > > Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> > > > Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> > > > Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>
> > > > ---
> > > >  arch/arm64/include/asm/fpsimd.h | 29 +++++++++++++++++++++++++++++
> > > >  arch/arm64/kernel/fpsimd.c      | 35 ++++++++---------------------------
> > > >  2 files changed, 37 insertions(+), 27 deletions(-)
> > > > 
> > > > diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h

[...]

> > > > +/* Set of available vector lengths, as vq_to_bit(vq): */
> > > 
> > > s/as/for use with/ ?
> > 
> > Not exactly.  Does the following work for you:
> > 
> > /*
> >  * Set of available vector lengths
> >  * Vector length vq is encoded as bit __vq_to_bit(vq):
> >  */
> 
> Yes. That reads much better.

OK

> > > s/vq_to_bit/__vq_to_bit/
> > 
> > Ack: that got renamed when I moved it to fpsimd.h, but I clearly didn't
> > update the comment as I pasted it across.
> > 
> > > 
> > > > +extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
> > > > +
> > > > +/*
> > > > + * Helpers to translate bit indices in sve_vq_map to VQ values (and
> > > > + * vice versa).  This allows find_next_bit() to be used to find the
> > > > + * _maximum_ VQ not exceeding a certain value.
> > > > + */
> > > > +static inline unsigned int __vq_to_bit(unsigned int vq)
> > > > +{
> > > 
> > > Why not have the same WARN_ON and clamping here as we do
> > > in __bit_to_vq. Here a vq > SVE_VQ_MAX will wrap around
> > > to a super high bit.
> > > 
> > > > +	return SVE_VQ_MAX - vq;
> > > > +}
> > > > +
> > > > +static inline unsigned int __bit_to_vq(unsigned int bit)
> > > > +{
> > > > +	if (WARN_ON(bit >= SVE_VQ_MAX))
> > > > +		bit = SVE_VQ_MAX - 1;
> > > > +
> > > > +	return SVE_VQ_MAX - bit;
> > > > +}
> > > > +
> > > > +/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
> > > 
> > > Are we avoiding putting these tests and WARN_ONs in this function to
> > > keep it fast?
> > 
> > These are intended as backend for use only by fpsimd.c and this header,
> > so peppering them with WARN_ON() felt excessive.  I don't expect a lot
> > of new calls to these (or any, probably).
> > 
> > I don't recall why I kept the WARN_ON() just in __bit_to_vq(), except
> > that the way that gets called is a bit more complex in some places.
> > 
> > Are you happy to replace these with comments?  e.g.:
> > 
> > /* Only valid when vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX */
> > __vq_to_bit()
> > 
> > /* Only valid when bit < SVE_VQ_MAX */
> > __bit_to_vq()
> > 
> > 
> > OTOH, these are not used on fast paths, so maybe having both as
> > WARN_ON() would be better.  Part of the problem is knowing what to clamp
> > to: these are generally used in conjunction with looping or bitmap find
> > operations, so the caller may be making assumptions about the return
> > value that may be wrong when the value is clamped.
> > 
> > Alternatively, these could be BUG() -- but that seems heavy.
> > 
> > What do you think?
> 
> I like the idea of having WARN_ON's to enforce the constraints. I
> wouldn't be completely opposed to not having anything other than
> the comments, though, as there is a limit to how defensive we should
> be. I'll abstain from this vote.

I'll have a think about whether there's anything non-toxic that we can
return in the error cases.  If not, I may demote these to comments:
returning an actual error code for this sort of things feels like a
step too far.

Otherwise we can have WARNs.

Cheers
---Dave

Patch

diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index df7a143..ad6d2e4 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -24,10 +24,13 @@ 
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bitmap.h>
 #include <linux/build_bug.h>
+#include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/stddef.h>
+#include <linux/types.h>
 
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /* Masks for extracting the FPSR and FPCR from the FPSCR */
@@ -89,6 +92,32 @@  extern u64 read_zcr_features(void);
 
 extern int __ro_after_init sve_max_vl;
 extern int __ro_after_init sve_max_virtualisable_vl;
+/* Set of available vector lengths, as vq_to_bit(vq): */
+extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+
+/*
+ * Helpers to translate bit indices in sve_vq_map to VQ values (and
+ * vice versa).  This allows find_next_bit() to be used to find the
+ * _maximum_ VQ not exceeding a certain value.
+ */
+static inline unsigned int __vq_to_bit(unsigned int vq)
+{
+	return SVE_VQ_MAX - vq;
+}
+
+static inline unsigned int __bit_to_vq(unsigned int bit)
+{
+	if (WARN_ON(bit >= SVE_VQ_MAX))
+		bit = SVE_VQ_MAX - 1;
+
+	return SVE_VQ_MAX - bit;
+}
+
+/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
+static inline bool sve_vq_available(unsigned int vq)
+{
+	return test_bit(__vq_to_bit(vq), sve_vq_map);
+}
 
 #ifdef CONFIG_ARM64_SVE
 
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 8a93afa..577296b 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -136,7 +136,7 @@  static int sve_default_vl = -1;
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
 int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
 /* Set of available vector lengths, as vq_to_bit(vq): */
-static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
 /* Set of vector lengths present on at least one cpu: */
 static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
 static void __percpu *efi_sve_state;
@@ -270,25 +270,6 @@  void fpsimd_save(void)
 }
 
 /*
- * Helpers to translate bit indices in sve_vq_map to VQ values (and
- * vice versa).  This allows find_next_bit() to be used to find the
- * _maximum_ VQ not exceeding a certain value.
- */
-
-static unsigned int vq_to_bit(unsigned int vq)
-{
-	return SVE_VQ_MAX - vq;
-}
-
-static unsigned int bit_to_vq(unsigned int bit)
-{
-	if (WARN_ON(bit >= SVE_VQ_MAX))
-		bit = SVE_VQ_MAX - 1;
-
-	return SVE_VQ_MAX - bit;
-}
-
-/*
  * All vector length selection from userspace comes through here.
  * We're on a slow path, so some sanity-checks are included.
  * If things go wrong there's a bug somewhere, but try to fall back to a
@@ -309,8 +290,8 @@  static unsigned int find_supported_vector_length(unsigned int vl)
 		vl = max_vl;
 
 	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
-			    vq_to_bit(sve_vq_from_vl(vl)));
-	return sve_vl_from_vq(bit_to_vq(bit));
+			    __vq_to_bit(sve_vq_from_vl(vl)));
+	return sve_vl_from_vq(__bit_to_vq(bit));
 }
 
 #ifdef CONFIG_SYSCTL
@@ -648,7 +629,7 @@  static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
 		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
 		vl = sve_get_vl();
 		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
-		set_bit(vq_to_bit(vq), map);
+		set_bit(__vq_to_bit(vq), map);
 	}
 }
 
@@ -717,7 +698,7 @@  int sve_verify_vq_map(void)
 	 * Mismatches above sve_max_virtualisable_vl are fine, since
 	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
 	 */
-	if (sve_vl_from_vq(bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
 		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
 			smp_processor_id());
 		return -EINVAL;
@@ -801,8 +782,8 @@  void __init sve_setup(void)
 	 * so sve_vq_map must have at least SVE_VQ_MIN set.
 	 * If something went wrong, at least try to patch it up:
 	 */
-	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
-		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
 
 	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
 	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
@@ -831,7 +812,7 @@  void __init sve_setup(void)
 		/* No virtualisable VLs?  This is architecturally forbidden. */
 		sve_max_virtualisable_vl = SVE_VQ_MIN;
 	else /* b + 1 < SVE_VQ_MAX */
-		sve_max_virtualisable_vl = sve_vl_from_vq(bit_to_vq(b + 1));
+		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
 
 	if (sve_max_virtualisable_vl > sve_max_vl)
 		sve_max_virtualisable_vl = sve_max_vl;