From 5d2dbe8e437247ccb99ce3bc9daa6240943d036a Mon Sep 17 00:00:00 2001
Message-Id: <5d2dbe8e437247ccb99ce3bc9daa6240943d036a.1522795098.git.Jim.Somerville@windriver.com>
In-Reply-To: <21c11de06542297206c798b405b54a3ec9052aa4.1522795097.git.Jim.Somerville@windriver.com>
References: <21c11de06542297206c798b405b54a3ec9052aa4.1522795097.git.Jim.Somerville@windriver.com>
From: Alex Kozyrev <alex.kozyrev@windriver.com>
Date: Wed, 19 Jul 2017 02:21:59 -0500
Subject: [PATCH 20/33] Porting Cacheinfo from Kernel 4.10.17

Original source code is taken from tag v4.10.17 in the Linux stable tree
for intel_cacheinfo.c, cacheinfo.c and cacheinfo.h.

The main commit we are interested in is 246246cbde5e840012f853e27630ebb59f409486:

This patch adds initial support for providing processor cache information
to userspace through the sysfs interface. This is based on the already
existing implementations (x86, ia64, s390 and powerpc), and hence the
interface is intended to be fully compatible.

The main purpose of this generic support is to avoid further code
duplication when supporting new architectures, and to unify all the
existing, differing implementations.

This implementation maintains the hierarchy of cache objects, which
reflects the system's cache topology. Cache devices are instantiated as
needed as CPUs come online. The cache information is replicated per CPU
even when it is shared; the per-cpu array of cache information is
maintained mainly for sysfs-related bookkeeping.
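An illustrative sketch (not part of this patch) of how kernel code could
walk the per-CPU cacheinfo added here, using only helpers and fields that
appear in this change:

  /* Illustration only: print the cache leaves reported for one CPU. */
  #include <linux/cacheinfo.h>
  #include <linux/printk.h>

  static void dump_cacheinfo(unsigned int cpu)
  {
          struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
          unsigned int i;

          if (!ci->info_list)     /* nothing detected for this CPU yet */
                  return;

          for (i = 0; i < ci->num_leaves; i++) {
                  struct cacheinfo *leaf = ci->info_list + i;

                  pr_info("cpu%u index%u: level %u, %uK, %u ways\n",
                          cpu, i, leaf->level, leaf->size >> 10,
                          leaf->ways_of_associativity);
          }
  }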
It also implements the shared_cpu_map attribute, which is essential for
enabling both kernel and user space to discover the system's overall
cache topology.

This patch also adds the missing ABI documentation for the cacheinfo
sysfs interface, which is already well defined and widely used.
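For reference, the attributes documented by the ABI entry added below all
live under the per-CPU cache directories (index numbering depends on the
CPU's cache topology):

  /sys/devices/system/cpu/cpu*/cache/index*/
      id, level, type, size, coherency_line_size, number_of_sets,
      ways_of_associativity, physical_line_partition, shared_cpu_map,
      shared_cpu_list, allocation_policy, write_policy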
sysfs-devices-system-cpu was modified by taking commit 1d78dc59f5ab6f467e49882518453adc7e4caa44:
Add an ABI document entry for /sys/devices/system/cpu/cpu*/cache/index*/id.

cpu.h and cpu.c were enhanced with commit 3d52943b3a51497a777e6d7d840a38596a92cee9:
This patch adds a new function to create per-cpu devices.
This helps in:
1. reusing the device infrastructure to create any cpu related
   attributes and corresponding sysfs instead of creating and
   dealing with raw kobjects directly
2. retaining the legacy path (/sys/devices/system/cpu/..) to support
   the existing sysfs ABI
3. avoiding the creation of links in the bus directory pointing to the
   device, as there would be a per-cpu instance of these devices with
   the same name, since dev->bus is not populated to cpu_sysbus on
   purpose

Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
---
 Documentation/ABI/testing/sysfs-devices-system-cpu |  65 ++
 arch/x86/kernel/cpu/intel_cacheinfo.c              | 832 +++++++--------------
 drivers/base/Makefile                              |   2 +-
 drivers/base/cacheinfo.c                           | 662 ++++++++++++++++
 drivers/base/cpu.c                                 |  54 ++
 include/linux/cacheinfo.h                          | 104 +++
 include/linux/cpu.h                                |   3 +
 7 files changed, 1148 insertions(+), 574 deletions(-)
 create mode 100644 drivers/base/cacheinfo.c
 create mode 100644 include/linux/cacheinfo.h

diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
|
|
index 7b467f3..145ae37 100644
|
|
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
|
|
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
|
|
@@ -201,6 +201,71 @@ Description: address and size of the percpu note.
|
|
|
|
crash_notes_size: size of the note of cpu#.
|
|
|
|
+
|
|
+What: /sys/devices/system/cpu/cpu*/cache/index*/<set_of_attributes_mentioned_below>
|
|
+Date: July 2014(documented, existed before August 2008)
|
|
+Contact: Sudeep Holla <sudeep.holla@arm.com>
|
|
+ Linux kernel mailing list <linux-kernel@vger.kernel.org>
|
|
+Description: Parameters for the CPU cache attributes
|
|
+
|
|
+ allocation_policy:
|
|
+ - WriteAllocate: allocate a memory location to a cache line
|
|
+ on a cache miss because of a write
|
|
+ - ReadAllocate: allocate a memory location to a cache line
|
|
+ on a cache miss because of a read
|
|
+ - ReadWriteAllocate: both writeallocate and readallocate
|
|
+
|
|
+ attributes: LEGACY used only on IA64 and is same as write_policy
|
|
+
|
|
+ coherency_line_size: the minimum amount of data in bytes that gets
|
|
+ transferred from memory to cache
|
|
+
|
|
+ level: the cache hierarchy in the multi-level cache configuration
|
|
+
|
|
+ number_of_sets: total number of sets in the cache, a set is a
|
|
+ collection of cache lines with the same cache index
|
|
+
|
|
+ physical_line_partition: number of physical cache line per cache tag
|
|
+
|
|
+ shared_cpu_list: the list of logical cpus sharing the cache
|
|
+
|
|
+ shared_cpu_map: logical cpu mask containing the list of cpus sharing
|
|
+ the cache
|
|
+
|
|
+ size: the total cache size in kB
|
|
+
|
|
+ type:
|
|
+ - Instruction: cache that only holds instructions
|
|
+ - Data: cache that only caches data
|
|
+ - Unified: cache that holds both data and instructions
|
|
+
|
|
+ ways_of_associativity: degree of freedom in placing a particular block
|
|
+ of memory in the cache
|
|
+
|
|
+ write_policy:
|
|
+ - WriteThrough: data is written to both the cache line
|
|
+ and to the block in the lower-level memory
|
|
+ - WriteBack: data is written only to the cache line and
|
|
+ the modified cache line is written to main
|
|
+ memory only when it is replaced
|
|
+
|
|
+
|
|
+What: /sys/devices/system/cpu/cpu*/cache/index*/id
|
|
+Date: September 2016
|
|
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
|
|
+Description: Cache id
|
|
+
|
|
+ The id provides a unique number for a specific instance of
|
|
+ a cache of a particular type. E.g. there may be a level
|
|
+ 3 unified cache on each socket in a server and we may
|
|
+ assign them ids 0, 1, 2, ...
|
|
+
|
|
+ Note that id value can be non-contiguous. E.g. level 1
|
|
+ caches typically exist per core, but there may not be a
|
|
+ power of two cores on a socket, so these caches may be
|
|
+ numbered 0, 1, 2, 3, 4, 5, 8, 9, 10, ...
|
|
+
|
|
+
|
|
What: /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats
|
|
/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/turbo_stat
|
|
/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/sub_turbo_stat
|
|
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
|
|
index a77da35..bf23bd2 100644
|
|
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
|
|
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
|
|
@@ -1,5 +1,5 @@
|
|
/*
|
|
- * Routines to indentify caches on Intel CPU.
|
|
+ * Routines to identify caches on Intel CPU.
|
|
*
|
|
* Changes:
|
|
* Venkatesh Pallipadi : Adding cache identification through cpuid(4)
|
|
@@ -7,16 +7,14 @@
|
|
* Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
|
|
*/
|
|
|
|
-#include <linux/init.h>
|
|
#include <linux/slab.h>
|
|
-#include <linux/device.h>
|
|
-#include <linux/compiler.h>
|
|
+#include <linux/cacheinfo.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/sched.h>
|
|
+#include <linux/sysfs.h>
|
|
#include <linux/pci.h>
|
|
|
|
-#include <asm/processor.h>
|
|
-#include <linux/smp.h>
|
|
+#include <asm/cpufeature.h>
|
|
#include <asm/amd_nb.h>
|
|
#include <asm/smp.h>
|
|
|
|
@@ -116,10 +114,10 @@ static const struct _cache_table cache_table[] =
|
|
|
|
|
|
enum _cache_type {
|
|
- CACHE_TYPE_NULL = 0,
|
|
- CACHE_TYPE_DATA = 1,
|
|
- CACHE_TYPE_INST = 2,
|
|
- CACHE_TYPE_UNIFIED = 3
|
|
+ CTYPE_NULL = 0,
|
|
+ CTYPE_DATA = 1,
|
|
+ CTYPE_INST = 2,
|
|
+ CTYPE_UNIFIED = 3
|
|
};
|
|
|
|
union _cpuid4_leaf_eax {
|
|
@@ -160,12 +158,7 @@ struct _cpuid4_info_regs {
|
|
struct amd_northbridge *nb;
|
|
};
|
|
|
|
-struct _cpuid4_info {
|
|
- struct _cpuid4_info_regs base;
|
|
- DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
|
|
-};
|
|
-
|
|
-unsigned short num_cache_leaves;
|
|
+static unsigned short num_cache_leaves;
|
|
|
|
/* AMD doesn't have CPUID4. Emulate it here to report the same
|
|
information to the user. This makes some assumptions about the machine:
|
|
@@ -221,6 +214,13 @@ static const unsigned short assocs[] = {
|
|
static const unsigned char levels[] = { 1, 1, 2, 3 };
|
|
static const unsigned char types[] = { 1, 2, 3, 3 };
|
|
|
|
+static const enum cache_type cache_type_map[] = {
|
|
+ [CTYPE_NULL] = CACHE_TYPE_NOCACHE,
|
|
+ [CTYPE_DATA] = CACHE_TYPE_DATA,
|
|
+ [CTYPE_INST] = CACHE_TYPE_INST,
|
|
+ [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
|
|
+};
|
|
+
|
|
static void
|
|
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
|
|
union _cpuid4_leaf_ebx *ebx,
|
|
@@ -292,14 +292,8 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
|
|
(ebx->split.ways_of_associativity + 1) - 1;
|
|
}
|
|
|
|
-struct _cache_attr {
|
|
- struct attribute attr;
|
|
- ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
|
|
- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
|
|
- unsigned int);
|
|
-};
|
|
-
|
|
#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
|
|
+
|
|
/*
|
|
* L3 cache descriptors
|
|
*/
|
|
@@ -326,20 +320,6 @@ static void amd_calc_l3_indices(struct amd_northbridge *nb)
|
|
l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
|
|
}
|
|
|
|
-static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
|
|
-{
|
|
- int node;
|
|
-
|
|
- /* only for L3, and not in virtualized environments */
|
|
- if (index < 3)
|
|
- return;
|
|
-
|
|
- node = amd_get_nb_id(smp_processor_id());
|
|
- this_leaf->nb = node_to_amd_nb(node);
|
|
- if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
|
|
- amd_calc_l3_indices(this_leaf->nb);
|
|
-}
|
|
-
|
|
/*
|
|
* check whether a slot used for disabling an L3 index is occupied.
|
|
* @l3: L3 cache descriptor
|
|
@@ -347,7 +327,7 @@ static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
|
|
*
|
|
* @returns: the disabled index if used or negative value if slot free.
|
|
*/
|
|
-int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
|
|
+static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
|
|
{
|
|
unsigned int reg = 0;
|
|
|
|
@@ -360,15 +340,13 @@ int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
|
|
return -1;
|
|
}
|
|
|
|
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
|
|
+static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
|
|
unsigned int slot)
|
|
{
|
|
int index;
|
|
+ struct amd_northbridge *nb = this_leaf->priv;
|
|
|
|
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
|
|
- return -EINVAL;
|
|
-
|
|
- index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
|
|
+ index = amd_get_l3_disable_slot(nb, slot);
|
|
if (index >= 0)
|
|
return sprintf(buf, "%d\n", index);
|
|
|
|
@@ -377,9 +355,10 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
|
|
|
|
#define SHOW_CACHE_DISABLE(slot) \
|
|
static ssize_t \
|
|
-show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
|
|
- unsigned int cpu) \
|
|
+cache_disable_##slot##_show(struct device *dev, \
|
|
+ struct device_attribute *attr, char *buf) \
|
|
{ \
|
|
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
|
|
return show_cache_disable(this_leaf, buf, slot); \
|
|
}
|
|
SHOW_CACHE_DISABLE(0)
|
|
@@ -425,8 +404,8 @@ static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
|
|
*
|
|
* @return: 0 on success, error status on failure
|
|
*/
|
|
-int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
|
|
- unsigned long index)
|
|
+static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
|
|
+ unsigned slot, unsigned long index)
|
|
{
|
|
int ret = 0;
|
|
|
|
@@ -447,28 +426,26 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
|
|
return 0;
|
|
}
|
|
|
|
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
|
|
- const char *buf, size_t count,
|
|
- unsigned int slot)
|
|
+static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
|
|
+ const char *buf, size_t count,
|
|
+ unsigned int slot)
|
|
{
|
|
unsigned long val = 0;
|
|
int cpu, err = 0;
|
|
+ struct amd_northbridge *nb = this_leaf->priv;
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
return -EPERM;
|
|
|
|
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
|
|
- return -EINVAL;
|
|
-
|
|
- cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
|
|
+ cpu = cpumask_first(&this_leaf->shared_cpu_map);
|
|
|
|
if (kstrtoul(buf, 10, &val) < 0)
|
|
return -EINVAL;
|
|
|
|
- err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
|
|
+ err = amd_set_l3_disable_slot(nb, cpu, slot, val);
|
|
if (err) {
|
|
if (err == -EEXIST)
|
|
- pr_warning("L3 slot %d in use/index already disabled!\n",
|
|
+ pr_warn("L3 slot %d in use/index already disabled!\n",
|
|
slot);
|
|
return err;
|
|
}
|
|
@@ -477,41 +454,36 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
|
|
|
|
#define STORE_CACHE_DISABLE(slot) \
|
|
static ssize_t \
|
|
-store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
|
|
- const char *buf, size_t count, \
|
|
- unsigned int cpu) \
|
|
+cache_disable_##slot##_store(struct device *dev, \
|
|
+ struct device_attribute *attr, \
|
|
+ const char *buf, size_t count) \
|
|
{ \
|
|
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
|
|
return store_cache_disable(this_leaf, buf, count, slot); \
|
|
}
|
|
STORE_CACHE_DISABLE(0)
|
|
STORE_CACHE_DISABLE(1)
|
|
|
|
-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
|
|
- show_cache_disable_0, store_cache_disable_0);
|
|
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
|
|
- show_cache_disable_1, store_cache_disable_1);
|
|
-
|
|
-static ssize_t
|
|
-show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
|
|
+static ssize_t subcaches_show(struct device *dev,
|
|
+ struct device_attribute *attr, char *buf)
|
|
{
|
|
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
|
|
- return -EINVAL;
|
|
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
|
|
+ int cpu = cpumask_first(&this_leaf->shared_cpu_map);
|
|
|
|
return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
|
|
}
|
|
|
|
-static ssize_t
|
|
-store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
|
|
- unsigned int cpu)
|
|
+static ssize_t subcaches_store(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
{
|
|
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
|
|
+ int cpu = cpumask_first(&this_leaf->shared_cpu_map);
|
|
unsigned long val;
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
return -EPERM;
|
|
|
|
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
|
|
- return -EINVAL;
|
|
-
|
|
if (kstrtoul(buf, 16, &val) < 0)
|
|
return -EINVAL;
|
|
|
|
@@ -521,9 +493,92 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
|
|
return count;
|
|
}
|
|
|
|
-static struct _cache_attr subcaches =
|
|
- __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
|
|
+static DEVICE_ATTR_RW(cache_disable_0);
|
|
+static DEVICE_ATTR_RW(cache_disable_1);
|
|
+static DEVICE_ATTR_RW(subcaches);
|
|
+
|
|
+static umode_t
|
|
+cache_private_attrs_is_visible(struct kobject *kobj,
|
|
+ struct attribute *attr, int unused)
|
|
+{
|
|
+ struct device *dev = kobj_to_dev(kobj);
|
|
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
|
|
+ umode_t mode = attr->mode;
|
|
+
|
|
+ if (!this_leaf->priv)
|
|
+ return 0;
|
|
+
|
|
+ if ((attr == &dev_attr_subcaches.attr) &&
|
|
+ amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
|
|
+ return mode;
|
|
+
|
|
+ if ((attr == &dev_attr_cache_disable_0.attr ||
|
|
+ attr == &dev_attr_cache_disable_1.attr) &&
|
|
+ amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
|
|
+ return mode;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct attribute_group cache_private_group = {
|
|
+ .is_visible = cache_private_attrs_is_visible,
|
|
+};
|
|
+
|
|
+static void init_amd_l3_attrs(void)
|
|
+{
|
|
+ int n = 1;
|
|
+ static struct attribute **amd_l3_attrs;
|
|
+
|
|
+ if (amd_l3_attrs) /* already initialized */
|
|
+ return;
|
|
+
|
|
+ if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
|
|
+ n += 2;
|
|
+ if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
|
|
+ n += 1;
|
|
+
|
|
+ amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
|
|
+ if (!amd_l3_attrs)
|
|
+ return;
|
|
+
|
|
+ n = 0;
|
|
+ if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
|
|
+ amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
|
|
+ amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
|
|
+ }
|
|
+ if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
|
|
+ amd_l3_attrs[n++] = &dev_attr_subcaches.attr;
|
|
+
|
|
+ cache_private_group.attrs = amd_l3_attrs;
|
|
+}
|
|
+
|
|
+const struct attribute_group *
|
|
+cache_get_priv_group(struct cacheinfo *this_leaf)
|
|
+{
|
|
+ struct amd_northbridge *nb = this_leaf->priv;
|
|
+
|
|
+ if (this_leaf->level < 3 || !nb)
|
|
+ return NULL;
|
|
+
|
|
+ if (nb && nb->l3_cache.indices)
|
|
+ init_amd_l3_attrs();
|
|
+
|
|
+ return &cache_private_group;
|
|
+}
|
|
+
|
|
+static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
|
|
+{
|
|
+ int node;
|
|
+
|
|
+ /* only for L3, and not in virtualized environments */
|
|
+ if (index < 3)
|
|
+ return;
|
|
|
|
+ node = amd_get_nb_id(smp_processor_id());
|
|
+ this_leaf->nb = node_to_amd_nb(node);
|
|
+ if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
|
|
+ amd_calc_l3_indices(this_leaf->nb);
|
|
+}
|
|
#else
|
|
#define amd_init_l3_cache(x, y)
|
|
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
|
|
@@ -537,7 +592,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
|
|
unsigned edx;
|
|
|
|
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
|
|
- if (cpu_has_topoext)
|
|
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT))
|
|
cpuid_count(0x8000001d, index, &eax.full,
|
|
&ebx.full, &ecx.full, &edx);
|
|
else
|
|
@@ -547,7 +602,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
|
|
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
|
|
}
|
|
|
|
- if (eax.split.type == CACHE_TYPE_NULL)
|
|
+ if (eax.split.type == CTYPE_NULL)
|
|
return -EIO; /* better error ? */
|
|
|
|
this_leaf->eax = eax;
|
|
@@ -576,14 +631,14 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
|
|
/* Do cpuid(op) loop to find out num_cache_leaves */
|
|
cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
|
|
cache_eax.full = eax;
|
|
- } while (cache_eax.split.type != CACHE_TYPE_NULL);
|
|
+ } while (cache_eax.split.type != CTYPE_NULL);
|
|
return i;
|
|
}
|
|
|
|
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
|
|
{
|
|
|
|
- if (cpu_has_topoext) {
|
|
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
|
|
num_cache_leaves = find_num_cache_leaves(c);
|
|
} else if (c->extended_cpuid_level >= 0x80000006) {
|
|
if (cpuid_edx(0x80000006) & 0xf000)
|
|
@@ -600,7 +655,7 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
|
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
|
|
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
|
|
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
|
|
-#ifdef CONFIG_X86_HT
|
|
+#ifdef CONFIG_SMP
|
|
unsigned int cpu = c->cpu_index;
|
|
#endif
|
|
|
|
@@ -618,36 +673,34 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
|
* parameters cpuid leaf to find the cache details
|
|
*/
|
|
for (i = 0; i < num_cache_leaves; i++) {
|
|
- struct _cpuid4_info_regs this_leaf;
|
|
+ struct _cpuid4_info_regs this_leaf = {};
|
|
int retval;
|
|
|
|
retval = cpuid4_cache_lookup_regs(i, &this_leaf);
|
|
- if (retval >= 0) {
|
|
- switch (this_leaf.eax.split.level) {
|
|
- case 1:
|
|
- if (this_leaf.eax.split.type ==
|
|
- CACHE_TYPE_DATA)
|
|
- new_l1d = this_leaf.size/1024;
|
|
- else if (this_leaf.eax.split.type ==
|
|
- CACHE_TYPE_INST)
|
|
- new_l1i = this_leaf.size/1024;
|
|
- break;
|
|
- case 2:
|
|
- new_l2 = this_leaf.size/1024;
|
|
- num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
|
|
- index_msb = get_count_order(num_threads_sharing);
|
|
- l2_id = c->apicid & ~((1 << index_msb) - 1);
|
|
- break;
|
|
- case 3:
|
|
- new_l3 = this_leaf.size/1024;
|
|
- num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
|
|
- index_msb = get_count_order(
|
|
- num_threads_sharing);
|
|
- l3_id = c->apicid & ~((1 << index_msb) - 1);
|
|
- break;
|
|
- default:
|
|
- break;
|
|
- }
|
|
+ if (retval < 0)
|
|
+ continue;
|
|
+
|
|
+ switch (this_leaf.eax.split.level) {
|
|
+ case 1:
|
|
+ if (this_leaf.eax.split.type == CTYPE_DATA)
|
|
+ new_l1d = this_leaf.size/1024;
|
|
+ else if (this_leaf.eax.split.type == CTYPE_INST)
|
|
+ new_l1i = this_leaf.size/1024;
|
|
+ break;
|
|
+ case 2:
|
|
+ new_l2 = this_leaf.size/1024;
|
|
+ num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
|
|
+ index_msb = get_count_order(num_threads_sharing);
|
|
+ l2_id = c->apicid & ~((1 << index_msb) - 1);
|
|
+ break;
|
|
+ case 3:
|
|
+ new_l3 = this_leaf.size/1024;
|
|
+ num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
|
|
+ index_msb = get_count_order(num_threads_sharing);
|
|
+ l3_id = c->apicid & ~((1 << index_msb) - 1);
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
}
|
|
}
|
|
}
|
|
@@ -721,34 +774,40 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
|
|
|
if (new_l2) {
|
|
l2 = new_l2;
|
|
-#ifdef CONFIG_X86_HT
|
|
+#ifdef CONFIG_SMP
|
|
per_cpu(cpu_llc_id, cpu) = l2_id;
|
|
#endif
|
|
}
|
|
|
|
if (new_l3) {
|
|
l3 = new_l3;
|
|
-#ifdef CONFIG_X86_HT
|
|
+#ifdef CONFIG_SMP
|
|
per_cpu(cpu_llc_id, cpu) = l3_id;
|
|
#endif
|
|
}
|
|
|
|
+#ifdef CONFIG_SMP
|
|
+ /*
|
|
+ * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
|
|
+ * turns means that the only possibility is SMT (as indicated in
|
|
+ * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
|
|
+ * that SMT shares all caches, we can unconditionally set cpu_llc_id to
|
|
+ * c->phys_proc_id.
|
|
+ */
|
|
+ if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
|
|
+ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
|
|
+#endif
|
|
+
|
|
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
|
|
|
|
return l2;
|
|
}
|
|
|
|
-#ifdef CONFIG_SYSFS
|
|
-
|
|
-/* pointer to _cpuid4_info array (for each cache leaf) */
|
|
-static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
|
|
-#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
|
|
-
|
|
-#ifdef CONFIG_SMP
|
|
-
|
|
-static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
|
|
+static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
|
|
+ struct _cpuid4_info_regs *base)
|
|
{
|
|
- struct _cpuid4_info *this_leaf;
|
|
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
+ struct cacheinfo *this_leaf;
|
|
int i, sibling;
|
|
|
|
/*
|
|
@@ -757,40 +816,43 @@ static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
|
|
*/
|
|
if (index == 3) {
|
|
for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
|
|
- if (!per_cpu(ici_cpuid4_info, i))
|
|
+ this_cpu_ci = get_cpu_cacheinfo(i);
|
|
+ if (!this_cpu_ci->info_list)
|
|
continue;
|
|
- this_leaf = CPUID4_INFO_IDX(i, index);
|
|
+ this_leaf = this_cpu_ci->info_list + index;
|
|
for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
|
|
if (!cpu_online(sibling))
|
|
continue;
|
|
- set_bit(sibling, this_leaf->shared_cpu_map);
|
|
+ cpumask_set_cpu(sibling,
|
|
+ &this_leaf->shared_cpu_map);
|
|
}
|
|
}
|
|
- } else if (cpu_has_topoext) {
|
|
+ } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
|
|
unsigned int apicid, nshared, first, last;
|
|
|
|
- if (!per_cpu(ici_cpuid4_info, cpu))
|
|
- return 0;
|
|
-
|
|
- this_leaf = CPUID4_INFO_IDX(cpu, index);
|
|
- nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
|
|
+ this_leaf = this_cpu_ci->info_list + index;
|
|
+ nshared = base->eax.split.num_threads_sharing + 1;
|
|
apicid = cpu_data(cpu).apicid;
|
|
first = apicid - (apicid % nshared);
|
|
last = first + nshared - 1;
|
|
|
|
for_each_online_cpu(i) {
|
|
+ this_cpu_ci = get_cpu_cacheinfo(i);
|
|
+ if (!this_cpu_ci->info_list)
|
|
+ continue;
|
|
+
|
|
apicid = cpu_data(i).apicid;
|
|
if ((apicid < first) || (apicid > last))
|
|
continue;
|
|
- if (!per_cpu(ici_cpuid4_info, i))
|
|
- continue;
|
|
- this_leaf = CPUID4_INFO_IDX(i, index);
|
|
+
|
|
+ this_leaf = this_cpu_ci->info_list + index;
|
|
|
|
for_each_online_cpu(sibling) {
|
|
apicid = cpu_data(sibling).apicid;
|
|
if ((apicid < first) || (apicid > last))
|
|
continue;
|
|
- set_bit(sibling, this_leaf->shared_cpu_map);
|
|
+ cpumask_set_cpu(sibling,
|
|
+ &this_leaf->shared_cpu_map);
|
|
}
|
|
}
|
|
} else
|
|
@@ -799,72 +861,70 @@ static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
|
|
return 1;
|
|
}
|
|
|
|
-static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
|
|
+static void __cache_cpumap_setup(unsigned int cpu, int index,
|
|
+ struct _cpuid4_info_regs *base)
|
|
{
|
|
- struct _cpuid4_info *this_leaf, *sibling_leaf;
|
|
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
+ struct cacheinfo *this_leaf, *sibling_leaf;
|
|
unsigned long num_threads_sharing;
|
|
int index_msb, i;
|
|
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
|
|
|
if (c->x86_vendor == X86_VENDOR_AMD) {
|
|
- if (cache_shared_amd_cpu_map_setup(cpu, index))
|
|
+ if (__cache_amd_cpumap_setup(cpu, index, base))
|
|
return;
|
|
}
|
|
|
|
- this_leaf = CPUID4_INFO_IDX(cpu, index);
|
|
- num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
|
|
+ this_leaf = this_cpu_ci->info_list + index;
|
|
+ num_threads_sharing = 1 + base->eax.split.num_threads_sharing;
|
|
|
|
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
|
|
if (num_threads_sharing == 1)
|
|
- cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
|
|
- else {
|
|
- index_msb = get_count_order(num_threads_sharing);
|
|
+ return;
|
|
|
|
- for_each_online_cpu(i) {
|
|
- if (cpu_data(i).apicid >> index_msb ==
|
|
- c->apicid >> index_msb) {
|
|
- cpumask_set_cpu(i,
|
|
- to_cpumask(this_leaf->shared_cpu_map));
|
|
- if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
|
|
- sibling_leaf =
|
|
- CPUID4_INFO_IDX(i, index);
|
|
- cpumask_set_cpu(cpu, to_cpumask(
|
|
- sibling_leaf->shared_cpu_map));
|
|
- }
|
|
- }
|
|
+ index_msb = get_count_order(num_threads_sharing);
|
|
+
|
|
+ for_each_online_cpu(i)
|
|
+ if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
|
|
+ struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
|
|
+
|
|
+ if (i == cpu || !sib_cpu_ci->info_list)
|
|
+ continue;/* skip if itself or no cacheinfo */
|
|
+ sibling_leaf = sib_cpu_ci->info_list + index;
|
|
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
|
|
+ cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
|
|
}
|
|
- }
|
|
-}
|
|
-static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
|
|
-{
|
|
- struct _cpuid4_info *this_leaf, *sibling_leaf;
|
|
- int sibling;
|
|
-
|
|
- this_leaf = CPUID4_INFO_IDX(cpu, index);
|
|
- for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
|
|
- sibling_leaf = CPUID4_INFO_IDX(sibling, index);
|
|
- cpumask_clear_cpu(cpu,
|
|
- to_cpumask(sibling_leaf->shared_cpu_map));
|
|
- }
|
|
-}
|
|
-#else
|
|
-static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
|
|
-{
|
|
}
|
|
|
|
-static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
|
|
+static void ci_leaf_init(struct cacheinfo *this_leaf,
|
|
+ struct _cpuid4_info_regs *base)
|
|
{
|
|
+ this_leaf->id = base->id;
|
|
+ this_leaf->attributes = CACHE_ID;
|
|
+ this_leaf->level = base->eax.split.level;
|
|
+ this_leaf->type = cache_type_map[base->eax.split.type];
|
|
+ this_leaf->coherency_line_size =
|
|
+ base->ebx.split.coherency_line_size + 1;
|
|
+ this_leaf->ways_of_associativity =
|
|
+ base->ebx.split.ways_of_associativity + 1;
|
|
+ this_leaf->size = base->size;
|
|
+ this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
|
|
+ this_leaf->physical_line_partition =
|
|
+ base->ebx.split.physical_line_partition + 1;
|
|
+ this_leaf->priv = base->nb;
|
|
}
|
|
-#endif
|
|
|
|
-static void free_cache_attributes(unsigned int cpu)
|
|
+static int __init_cache_level(unsigned int cpu)
|
|
{
|
|
- int i;
|
|
-
|
|
- for (i = 0; i < num_cache_leaves; i++)
|
|
- cache_remove_shared_cpu_map(cpu, i);
|
|
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
|
|
- kfree(per_cpu(ici_cpuid4_info, cpu));
|
|
- per_cpu(ici_cpuid4_info, cpu) = NULL;
|
|
+ if (!num_cache_leaves)
|
|
+ return -ENOENT;
|
|
+ if (!this_cpu_ci)
|
|
+ return -EINVAL;
|
|
+ this_cpu_ci->num_levels = 3;
|
|
+ this_cpu_ci->num_leaves = num_cache_leaves;
|
|
+ return 0;
|
|
}
|
|
|
|
/*
|
|
@@ -886,411 +946,37 @@ static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
|
|
int get_cpu_cache_id(int cpu, int level)
|
|
{
|
|
int i;
|
|
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
|
|
- for (i = 0; i < num_cache_leaves; i++) {
|
|
- struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, i);
|
|
+ for (i = 0; i < this_cpu_ci->num_leaves; i++) {
|
|
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list + i;
|
|
|
|
- if (this_leaf->base.eax.split.level == level)
|
|
- return this_leaf->base.id;
|
|
+ if (this_leaf->level == level)
|
|
+ return this_leaf->id;
|
|
}
|
|
|
|
return -1;
|
|
}
|
|
|
|
-static void get_cpu_leaves(void *_retval)
|
|
-{
|
|
- int j, *retval = _retval, cpu = smp_processor_id();
|
|
-
|
|
- /* Do cpuid and store the results */
|
|
- for (j = 0; j < num_cache_leaves; j++) {
|
|
- struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
|
|
-
|
|
- *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
|
|
- if (unlikely(*retval < 0)) {
|
|
- int i;
|
|
-
|
|
- for (i = 0; i < j; i++)
|
|
- cache_remove_shared_cpu_map(cpu, i);
|
|
- break;
|
|
- }
|
|
- cache_shared_cpu_map_setup(cpu, j);
|
|
- get_cache_id(cpu, &this_leaf->base);
|
|
- }
|
|
-}
|
|
-
|
|
-static int detect_cache_attributes(unsigned int cpu)
|
|
-{
|
|
- int retval;
|
|
-
|
|
- if (num_cache_leaves == 0)
|
|
- return -ENOENT;
|
|
-
|
|
- per_cpu(ici_cpuid4_info, cpu) = kzalloc(
|
|
- sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
|
|
- if (per_cpu(ici_cpuid4_info, cpu) == NULL)
|
|
- return -ENOMEM;
|
|
-
|
|
- smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
|
|
- if (retval) {
|
|
- kfree(per_cpu(ici_cpuid4_info, cpu));
|
|
- per_cpu(ici_cpuid4_info, cpu) = NULL;
|
|
- }
|
|
-
|
|
- return retval;
|
|
-}
|
|
-
|
|
-#include <linux/kobject.h>
|
|
-#include <linux/sysfs.h>
|
|
-#include <linux/cpu.h>
|
|
-
|
|
-/* pointer to kobject for cpuX/cache */
|
|
-static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
|
|
-
|
|
-struct _index_kobject {
|
|
- struct kobject kobj;
|
|
- unsigned int cpu;
|
|
- unsigned short index;
|
|
-};
|
|
-
|
|
-/* pointer to array of kobjects for cpuX/cache/indexY */
|
|
-static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
|
|
-#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
|
|
-
|
|
-#define show_one_plus(file_name, object, val) \
|
|
-static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
|
|
- unsigned int cpu) \
|
|
-{ \
|
|
- return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
|
|
-}
|
|
-
|
|
-show_one_plus(level, base.eax.split.level, 0);
|
|
-show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
|
|
-show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
|
|
-show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
|
|
-show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
|
|
-
|
|
-static ssize_t show_id(struct _cpuid4_info *this_leaf, char *buf,
|
|
- unsigned int cpu)
|
|
-{
|
|
- return sprintf(buf, "%u\n", this_leaf->base.id);
|
|
-}
|
|
-
|
|
-static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
|
|
- unsigned int cpu)
|
|
-{
|
|
- return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
|
|
-}
|
|
-
|
|
-static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
|
|
- int type, char *buf)
|
|
-{
|
|
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
|
|
- int n = 0;
|
|
-
|
|
- if (len > 1) {
|
|
- const struct cpumask *mask;
|
|
-
|
|
- mask = to_cpumask(this_leaf->shared_cpu_map);
|
|
- n = type ?
|
|
- cpulist_scnprintf(buf, len-2, mask) :
|
|
- cpumask_scnprintf(buf, len-2, mask);
|
|
- buf[n++] = '\n';
|
|
- buf[n] = '\0';
|
|
- }
|
|
- return n;
|
|
-}
|
|
-
|
|
-static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
|
|
- unsigned int cpu)
|
|
-{
|
|
- return show_shared_cpu_map_func(leaf, 0, buf);
|
|
-}
|
|
-
|
|
-static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
|
|
- unsigned int cpu)
|
|
-{
|
|
- return show_shared_cpu_map_func(leaf, 1, buf);
|
|
-}
|
|
-
|
|
-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
|
|
- unsigned int cpu)
|
|
-{
|
|
- switch (this_leaf->base.eax.split.type) {
|
|
- case CACHE_TYPE_DATA:
|
|
- return sprintf(buf, "Data\n");
|
|
- case CACHE_TYPE_INST:
|
|
- return sprintf(buf, "Instruction\n");
|
|
- case CACHE_TYPE_UNIFIED:
|
|
- return sprintf(buf, "Unified\n");
|
|
- default:
|
|
- return sprintf(buf, "Unknown\n");
|
|
- }
|
|
-}
|
|
-
|
|
-#define to_object(k) container_of(k, struct _index_kobject, kobj)
|
|
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
|
|
-
|
|
-#define define_one_ro(_name) \
|
|
-static struct _cache_attr _name = \
|
|
- __ATTR(_name, 0444, show_##_name, NULL)
|
|
-
|
|
-define_one_ro(id);
|
|
-define_one_ro(level);
|
|
-define_one_ro(type);
|
|
-define_one_ro(coherency_line_size);
|
|
-define_one_ro(physical_line_partition);
|
|
-define_one_ro(ways_of_associativity);
|
|
-define_one_ro(number_of_sets);
|
|
-define_one_ro(size);
|
|
-define_one_ro(shared_cpu_map);
|
|
-define_one_ro(shared_cpu_list);
|
|
-
|
|
-static struct attribute *default_attrs[] = {
|
|
- &id.attr,
|
|
- &type.attr,
|
|
- &level.attr,
|
|
- &coherency_line_size.attr,
|
|
- &physical_line_partition.attr,
|
|
- &ways_of_associativity.attr,
|
|
- &number_of_sets.attr,
|
|
- &size.attr,
|
|
- &shared_cpu_map.attr,
|
|
- &shared_cpu_list.attr,
|
|
- NULL
|
|
-};
|
|
-
|
|
-#ifdef CONFIG_AMD_NB
|
|
-static struct attribute **amd_l3_attrs(void)
|
|
-{
|
|
- static struct attribute **attrs;
|
|
- int n;
|
|
-
|
|
- if (attrs)
|
|
- return attrs;
|
|
-
|
|
- n = ARRAY_SIZE(default_attrs);
|
|
-
|
|
- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
|
|
- n += 2;
|
|
-
|
|
- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
|
|
- n += 1;
|
|
-
|
|
- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
|
|
- if (attrs == NULL)
|
|
- return attrs = default_attrs;
|
|
-
|
|
- for (n = 0; default_attrs[n]; n++)
|
|
- attrs[n] = default_attrs[n];
|
|
-
|
|
- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
|
|
- attrs[n++] = &cache_disable_0.attr;
|
|
- attrs[n++] = &cache_disable_1.attr;
|
|
- }
|
|
-
|
|
- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
|
|
- attrs[n++] = &subcaches.attr;
|
|
-
|
|
- return attrs;
|
|
-}
|
|
-#endif
|
|
-
|
|
-static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
|
|
-{
|
|
- struct _cache_attr *fattr = to_attr(attr);
|
|
- struct _index_kobject *this_leaf = to_object(kobj);
|
|
- ssize_t ret;
|
|
-
|
|
- ret = fattr->show ?
|
|
- fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
|
|
- buf, this_leaf->cpu) :
|
|
- 0;
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static ssize_t store(struct kobject *kobj, struct attribute *attr,
|
|
- const char *buf, size_t count)
|
|
-{
|
|
- struct _cache_attr *fattr = to_attr(attr);
|
|
- struct _index_kobject *this_leaf = to_object(kobj);
|
|
- ssize_t ret;
|
|
-
|
|
- ret = fattr->store ?
|
|
- fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
|
|
- buf, count, this_leaf->cpu) :
|
|
- 0;
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static const struct sysfs_ops sysfs_ops = {
|
|
- .show = show,
|
|
- .store = store,
|
|
-};
|
|
-
|
|
-static struct kobj_type ktype_cache = {
|
|
- .sysfs_ops = &sysfs_ops,
|
|
- .default_attrs = default_attrs,
|
|
-};
|
|
-
|
|
-static struct kobj_type ktype_percpu_entry = {
|
|
- .sysfs_ops = &sysfs_ops,
|
|
-};
|
|
-
|
|
-static void cpuid4_cache_sysfs_exit(unsigned int cpu)
|
|
-{
|
|
- kfree(per_cpu(ici_cache_kobject, cpu));
|
|
- kfree(per_cpu(ici_index_kobject, cpu));
|
|
- per_cpu(ici_cache_kobject, cpu) = NULL;
|
|
- per_cpu(ici_index_kobject, cpu) = NULL;
|
|
- free_cache_attributes(cpu);
|
|
-}
|
|
-
|
|
-static int cpuid4_cache_sysfs_init(unsigned int cpu)
|
|
-{
|
|
- int err;
|
|
-
|
|
- if (num_cache_leaves == 0)
|
|
- return -ENOENT;
|
|
-
|
|
- err = detect_cache_attributes(cpu);
|
|
- if (err)
|
|
- return err;
|
|
-
|
|
- /* Allocate all required memory */
|
|
- per_cpu(ici_cache_kobject, cpu) =
|
|
- kzalloc(sizeof(struct kobject), GFP_KERNEL);
|
|
- if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
|
|
- goto err_out;
|
|
-
|
|
- per_cpu(ici_index_kobject, cpu) = kzalloc(
|
|
- sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
|
|
- if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
|
|
- goto err_out;
|
|
-
|
|
- return 0;
|
|
-
|
|
-err_out:
|
|
- cpuid4_cache_sysfs_exit(cpu);
|
|
- return -ENOMEM;
|
|
-}
|
|
-
|
|
-static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
|
|
-
|
|
-/* Add/Remove cache interface for CPU device */
|
|
-static int cache_add_dev(struct device *dev)
|
|
+static int __populate_cache_leaves(unsigned int cpu)
|
|
{
|
|
- unsigned int cpu = dev->id;
|
|
- unsigned long i, j;
|
|
- struct _index_kobject *this_object;
|
|
- struct _cpuid4_info *this_leaf;
|
|
- int retval;
|
|
-
|
|
- retval = cpuid4_cache_sysfs_init(cpu);
|
|
- if (unlikely(retval < 0))
|
|
- return retval;
|
|
-
|
|
- retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
|
|
- &ktype_percpu_entry,
|
|
- &dev->kobj, "%s", "cache");
|
|
- if (retval < 0) {
|
|
- cpuid4_cache_sysfs_exit(cpu);
|
|
- return retval;
|
|
- }
|
|
-
|
|
- for (i = 0; i < num_cache_leaves; i++) {
|
|
- this_object = INDEX_KOBJECT_PTR(cpu, i);
|
|
- this_object->cpu = cpu;
|
|
- this_object->index = i;
|
|
+ unsigned int idx, ret;
|
|
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
|
|
+ struct _cpuid4_info_regs id4_regs = {};
|
|
|
|
- this_leaf = CPUID4_INFO_IDX(cpu, i);
|
|
-
|
|
- ktype_cache.default_attrs = default_attrs;
|
|
-#ifdef CONFIG_AMD_NB
|
|
- if (this_leaf->base.nb)
|
|
- ktype_cache.default_attrs = amd_l3_attrs();
|
|
-#endif
|
|
- retval = kobject_init_and_add(&(this_object->kobj),
|
|
- &ktype_cache,
|
|
- per_cpu(ici_cache_kobject, cpu),
|
|
- "index%1lu", i);
|
|
- if (unlikely(retval)) {
|
|
- for (j = 0; j < i; j++)
|
|
- kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
|
|
- kobject_put(per_cpu(ici_cache_kobject, cpu));
|
|
- cpuid4_cache_sysfs_exit(cpu);
|
|
- return retval;
|
|
- }
|
|
- kobject_uevent(&(this_object->kobj), KOBJ_ADD);
|
|
+ for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
|
|
+ ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ get_cache_id(cpu, &id4_regs);
|
|
+ ci_leaf_init(this_leaf++, &id4_regs);
|
|
+ __cache_cpumap_setup(cpu, idx, &id4_regs);
|
|
}
|
|
- cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
|
|
+ this_cpu_ci->cpu_map_populated = true;
|
|
|
|
- kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
|
|
return 0;
|
|
}
|
|
|
|
-static void cache_remove_dev(struct device *dev)
|
|
-{
|
|
- unsigned int cpu = dev->id;
|
|
- unsigned long i;
|
|
-
|
|
- if (per_cpu(ici_cpuid4_info, cpu) == NULL)
|
|
- return;
|
|
- if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
|
|
- return;
|
|
- cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
|
|
-
|
|
- for (i = 0; i < num_cache_leaves; i++)
|
|
- kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
|
|
- kobject_put(per_cpu(ici_cache_kobject, cpu));
|
|
- cpuid4_cache_sysfs_exit(cpu);
|
|
-}
|
|
-
|
|
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
|
|
- unsigned long action, void *hcpu)
|
|
-{
|
|
- unsigned int cpu = (unsigned long)hcpu;
|
|
- struct device *dev;
|
|
-
|
|
- dev = get_cpu_device(cpu);
|
|
- switch (action) {
|
|
- case CPU_ONLINE:
|
|
- case CPU_ONLINE_FROZEN:
|
|
- cache_add_dev(dev);
|
|
- break;
|
|
- case CPU_DEAD:
|
|
- case CPU_DEAD_FROZEN:
|
|
- cache_remove_dev(dev);
|
|
- break;
|
|
- }
|
|
- return NOTIFY_OK;
|
|
-}
|
|
-
|
|
-static struct notifier_block cacheinfo_cpu_notifier = {
|
|
- .notifier_call = cacheinfo_cpu_callback,
|
|
-};
|
|
-
|
|
-static int __init cache_sysfs_init(void)
|
|
-{
|
|
- int i, err = 0;
|
|
-
|
|
- if (num_cache_leaves == 0)
|
|
- return 0;
|
|
-
|
|
- cpu_notifier_register_begin();
|
|
- for_each_online_cpu(i) {
|
|
- struct device *dev = get_cpu_device(i);
|
|
-
|
|
- err = cache_add_dev(dev);
|
|
- if (err)
|
|
- goto out;
|
|
- }
|
|
- __register_hotcpu_notifier(&cacheinfo_cpu_notifier);
|
|
-
|
|
-out:
|
|
- cpu_notifier_register_done();
|
|
- return err;
|
|
-}
|
|
-
|
|
-device_initcall(cache_sysfs_init);
|
|
-
|
|
-#endif
|
|
+DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
|
|
+DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
|
|
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
|
|
index 53c3fe1..527d291 100644
|
|
--- a/drivers/base/Makefile
|
|
+++ b/drivers/base/Makefile
|
|
@@ -4,7 +4,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
|
|
driver.o class.o platform.o \
|
|
cpu.o firmware.o init.o map.o devres.o \
|
|
attribute_container.o transport_class.o \
|
|
- topology.o container.o property.o
|
|
+ topology.o container.o property.o cacheinfo.o
|
|
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
|
|
obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
|
|
obj-y += power/
|
|
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
|
|
new file mode 100644
|
|
index 0000000..eb3af27
|
|
--- /dev/null
|
|
+++ b/drivers/base/cacheinfo.c
|
|
@@ -0,0 +1,662 @@
|
|
+/*
|
|
+ * cacheinfo support - processor cache information via sysfs
|
|
+ *
|
|
+ * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
|
|
+ * Author: Sudeep Holla <sudeep.holla@arm.com>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
|
+ * kind, whether express or implied; without even the implied warranty
|
|
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
+
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/bitops.h>
|
|
+#include <linux/cacheinfo.h>
|
|
+#include <linux/compiler.h>
|
|
+#include <linux/cpu.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/smp.h>
|
|
+#include <linux/sysfs.h>
|
|
+
|
|
+/* pointer to per cpu cacheinfo */
|
|
+static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
|
|
+#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
|
|
+#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
|
|
+#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
|
|
+
|
|
+struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
|
|
+{
|
|
+ return ci_cacheinfo(cpu);
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_OF
|
|
+static int cache_setup_of_node(unsigned int cpu)
|
|
+{
|
|
+ struct device_node *np;
|
|
+ struct cacheinfo *this_leaf;
|
|
+ struct device *cpu_dev = get_cpu_device(cpu);
|
|
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
+ unsigned int index = 0;
|
|
+
|
|
+ /* skip if of_node is already populated */
|
|
+ if (this_cpu_ci->info_list->of_node)
|
|
+ return 0;
|
|
+
|
|
+ if (!cpu_dev) {
|
|
+ pr_err("No cpu device for CPU %d\n", cpu);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ np = cpu_dev->of_node;
|
|
+ if (!np) {
|
|
+ pr_err("Failed to find cpu%d device node\n", cpu);
|
|
+ return -ENOENT;
|
|
+ }
|
|
+
|
|
+ while (index < cache_leaves(cpu)) {
|
|
+ this_leaf = this_cpu_ci->info_list + index;
|
|
+ if (this_leaf->level != 1)
|
|
+ np = of_find_next_cache_node(np);
|
|
+ else
|
|
+ np = of_node_get(np);/* cpu node itself */
|
|
+ if (!np)
|
|
+ break;
|
|
+ this_leaf->of_node = np;
|
|
+ index++;
|
|
+ }
|
|
+
|
|
+ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
|
|
+ return -ENOENT;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
|
|
+ struct cacheinfo *sib_leaf)
|
|
+{
|
|
+ return sib_leaf->of_node == this_leaf->of_node;
|
|
+}
|
|
+
|
|
+/* OF properties to query for a given cache type */
|
|
+struct cache_type_info {
|
|
+ const char *size_prop;
|
|
+ const char *line_size_props[2];
|
|
+ const char *nr_sets_prop;
|
|
+};
|
|
+
|
|
+static const struct cache_type_info cache_type_info[] = {
|
|
+ {
|
|
+ .size_prop = "cache-size",
|
|
+ .line_size_props = { "cache-line-size",
|
|
+ "cache-block-size", },
|
|
+ .nr_sets_prop = "cache-sets",
|
|
+ }, {
|
|
+ .size_prop = "i-cache-size",
|
|
+ .line_size_props = { "i-cache-line-size",
|
|
+ "i-cache-block-size", },
|
|
+ .nr_sets_prop = "i-cache-sets",
|
|
+ }, {
|
|
+ .size_prop = "d-cache-size",
|
|
+ .line_size_props = { "d-cache-line-size",
|
|
+ "d-cache-block-size", },
|
|
+ .nr_sets_prop = "d-cache-sets",
|
|
+ },
|
|
+};
|
|
+
|
|
+static inline int get_cacheinfo_idx(enum cache_type type)
|
|
+{
|
|
+ if (type == CACHE_TYPE_UNIFIED)
|
|
+ return 0;
|
|
+ return type;
|
|
+}
|
|
+
|
|
+static void cache_size(struct cacheinfo *this_leaf)
|
|
+{
|
|
+ const char *propname;
|
|
+ const __be32 *cache_size;
|
|
+ int ct_idx;
|
|
+
|
|
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
|
|
+ propname = cache_type_info[ct_idx].size_prop;
|
|
+
|
|
+ cache_size = of_get_property(this_leaf->of_node, propname, NULL);
|
|
+ if (cache_size)
|
|
+ this_leaf->size = of_read_number(cache_size, 1);
|
|
+}
|
|
+
|
|
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
|
|
+static void cache_get_line_size(struct cacheinfo *this_leaf)
|
|
+{
|
|
+ const __be32 *line_size;
|
|
+ int i, lim, ct_idx;
|
|
+
|
|
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
|
|
+ lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
|
|
+
|
|
+ for (i = 0; i < lim; i++) {
|
|
+ const char *propname;
|
|
+
|
|
+ propname = cache_type_info[ct_idx].line_size_props[i];
|
|
+ line_size = of_get_property(this_leaf->of_node, propname, NULL);
|
|
+ if (line_size)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (line_size)
|
|
+ this_leaf->coherency_line_size = of_read_number(line_size, 1);
|
|
+}
|
|
+
|
|
+static void cache_nr_sets(struct cacheinfo *this_leaf)
|
|
+{
|
|
+ const char *propname;
|
|
+ const __be32 *nr_sets;
|
|
+ int ct_idx;
|
|
+
|
|
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
|
|
+ propname = cache_type_info[ct_idx].nr_sets_prop;
|
|
+
|
|
+ nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
|
|
+ if (nr_sets)
|
|
+ this_leaf->number_of_sets = of_read_number(nr_sets, 1);
|
|
+}
|
|
+
|
|
+static void cache_associativity(struct cacheinfo *this_leaf)
|
|
+{
|
|
+ unsigned int line_size = this_leaf->coherency_line_size;
|
|
+ unsigned int nr_sets = this_leaf->number_of_sets;
|
|
+ unsigned int size = this_leaf->size;
|
|
+
|
|
+ /*
|
|
+ * If the cache is fully associative, there is no need to
|
|
+ * check the other properties.
|
|
+ */
|
|
+ if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
|
|
+ this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
|
|
+}
|
|
+
|
|
+static void cache_of_override_properties(unsigned int cpu)
|
|
+{
|
|
+ int index;
|
|
+ struct cacheinfo *this_leaf;
|
|
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
+
|
|
+ for (index = 0; index < cache_leaves(cpu); index++) {
|
|
+ this_leaf = this_cpu_ci->info_list + index;
|
|
+ cache_size(this_leaf);
|
|
+ cache_get_line_size(this_leaf);
|
|
+ cache_nr_sets(this_leaf);
|
|
+ cache_associativity(this_leaf);
|
|
+ }
|
|
+}
|
|
+#else
|
|
+static void cache_of_override_properties(unsigned int cpu) { }
|
|
+static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
|
|
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
|
|
+ struct cacheinfo *sib_leaf)
|
|
+{
|
|
+ /*
|
|
+ * For non-DT systems, assume unique level 1 cache, system-wide
|
|
+ * shared caches for all other levels. This will be used only if
|
|
+ * arch specific code has not populated shared_cpu_map
|
|
+ */
|
|
+ return !(this_leaf->level == 1);
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int cache_shared_cpu_map_setup(unsigned int cpu)
|
|
+{
|
|
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
+ struct cacheinfo *this_leaf, *sib_leaf;
|
|
+ unsigned int index;
|
|
+ int ret = 0;
|
|
+
|
|
+ if (this_cpu_ci->cpu_map_populated)
|
|
+ return 0;
|
|
+
|
|
+ if (of_have_populated_dt())
|
|
+ ret = cache_setup_of_node(cpu);
|
|
+ else if (!acpi_disabled)
|
|
+ /* No cache property/hierarchy support yet in ACPI */
|
|
+ ret = -ENOTSUPP;
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ for (index = 0; index < cache_leaves(cpu); index++) {
|
|
+ unsigned int i;
|
|
+
|
|
+ this_leaf = this_cpu_ci->info_list + index;
|
|
+ /* skip if shared_cpu_map is already populated */
|
|
+ if (!cpumask_empty(&this_leaf->shared_cpu_map))
|
|
+ continue;
|
|
+
|
|
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
|
|
+ for_each_online_cpu(i) {
|
|
+ struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
|
|
+
|
|
+ if (i == cpu || !sib_cpu_ci->info_list)
|
|
+ continue;/* skip if itself or no cacheinfo */
|
|
+ sib_leaf = sib_cpu_ci->info_list + index;
|
|
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
|
|
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
|
|
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void cache_shared_cpu_map_remove(unsigned int cpu)
|
|
+{
|
|
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
+ struct cacheinfo *this_leaf, *sib_leaf;
|
|
+ unsigned int sibling, index;
|
|
+
|
|
+ for (index = 0; index < cache_leaves(cpu); index++) {
|
|
+ this_leaf = this_cpu_ci->info_list + index;
|
|
+ for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
|
|
+ struct cpu_cacheinfo *sib_cpu_ci;
|
|
+
|
|
+ if (sibling == cpu) /* skip itself */
|
|
+ continue;
|
|
+
|
|
+ sib_cpu_ci = get_cpu_cacheinfo(sibling);
|
|
+ if (!sib_cpu_ci->info_list)
|
|
+ continue;
|
|
+
|
|
+ sib_leaf = sib_cpu_ci->info_list + index;
|
|
+ cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
|
|
+ cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
|
|
+ }
|
|
+ of_node_put(this_leaf->of_node);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void cache_override_properties(unsigned int cpu)
|
|
+{
|
|
+ if (of_have_populated_dt())
|
|
+ return cache_of_override_properties(cpu);
|
|
+}
|
|
+
|
|
+static void free_cache_attributes(unsigned int cpu)
|
|
+{
|
|
+ if (!per_cpu_cacheinfo(cpu))
|
|
+ return;
|
|
+
|
|
+ cache_shared_cpu_map_remove(cpu);
|
|
+
|
|
+ kfree(per_cpu_cacheinfo(cpu));
|
|
+ per_cpu_cacheinfo(cpu) = NULL;
|
|
+}
|
|
+
|
|
+int __weak init_cache_level(unsigned int cpu)
|
|
+{
|
|
+ return -ENOENT;
|
|
+}
|
|
+
|
|
+int __weak populate_cache_leaves(unsigned int cpu)
|
|
+{
|
|
+ return -ENOENT;
|
|
+}
|
|
+
|
|
+static int detect_cache_attributes(unsigned int cpu)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (init_cache_level(cpu) || !cache_leaves(cpu))
|
|
+ return -ENOENT;
|
|
+
|
|
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
|
|
+ sizeof(struct cacheinfo), GFP_KERNEL);
|
|
+ if (per_cpu_cacheinfo(cpu) == NULL)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ ret = populate_cache_leaves(cpu);
|
|
+ if (ret)
|
|
+ goto free_ci;
|
|
+ /*
|
|
+ * For systems using DT for cache hierarchy, of_node and shared_cpu_map
|
|
+ * will be set up here only if they are not populated already
|
|
+ */
|
|
+ ret = cache_shared_cpu_map_setup(cpu);
|
|
+ if (ret) {
|
|
+ pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
|
|
+ goto free_ci;
|
|
+ }
|
|
+
|
|
+ cache_override_properties(cpu);
|
|
+ return 0;
|
|
+
|
|
+free_ci:
|
|
+ free_cache_attributes(cpu);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/* pointer to cpuX/cache device */
|
|
+static DEFINE_PER_CPU(struct device *, ci_cache_dev);
|
|
+#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
|
|
+
|
|
+static cpumask_t cache_dev_map;
|
|
+
|
|
+/* pointer to array of devices for cpuX/cache/indexY */
|
|
+static DEFINE_PER_CPU(struct device **, ci_index_dev);
|
|
+#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
|
|
+#define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])
|
|
+
|
|
+#define show_one(file_name, object) \
|
|
+static ssize_t file_name##_show(struct device *dev, \
|
|
+ struct device_attribute *attr, char *buf) \
|
|
+{ \
|
|
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
|
|
+ return sprintf(buf, "%u\n", this_leaf->object); \
|
|
+}
|
|
+
|
|
+show_one(id, id);
|
|
+show_one(level, level);
|
|
+show_one(coherency_line_size, coherency_line_size);
|
|
+show_one(number_of_sets, number_of_sets);
|
|
+show_one(physical_line_partition, physical_line_partition);
|
|
+show_one(ways_of_associativity, ways_of_associativity);
|
|
+
+static ssize_t size_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
+}
+
+static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+	const struct cpumask *mask = &this_leaf->shared_cpu_map;
+
+	return cpumap_print_to_pagebuf(list, buf, mask);
+}
+
+static ssize_t shared_cpu_map_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return shared_cpumap_show_func(dev, false, buf);
+}
+
+static ssize_t shared_cpu_list_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return shared_cpumap_show_func(dev, true, buf);
+}
+
+static ssize_t type_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+
+	switch (this_leaf->type) {
+	case CACHE_TYPE_DATA:
+		return sprintf(buf, "Data\n");
+	case CACHE_TYPE_INST:
+		return sprintf(buf, "Instruction\n");
+	case CACHE_TYPE_UNIFIED:
+		return sprintf(buf, "Unified\n");
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t allocation_policy_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+	unsigned int ci_attr = this_leaf->attributes;
+	int n = 0;
+
+	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
+		n = sprintf(buf, "ReadWriteAllocate\n");
+	else if (ci_attr & CACHE_READ_ALLOCATE)
+		n = sprintf(buf, "ReadAllocate\n");
+	else if (ci_attr & CACHE_WRITE_ALLOCATE)
+		n = sprintf(buf, "WriteAllocate\n");
+	return n;
+}
+
+static ssize_t write_policy_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+	unsigned int ci_attr = this_leaf->attributes;
+	int n = 0;
+
+	if (ci_attr & CACHE_WRITE_THROUGH)
+		n = sprintf(buf, "WriteThrough\n");
+	else if (ci_attr & CACHE_WRITE_BACK)
+		n = sprintf(buf, "WriteBack\n");
+	return n;
+}
+
+static DEVICE_ATTR_RO(id);
+static DEVICE_ATTR_RO(level);
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(coherency_line_size);
+static DEVICE_ATTR_RO(ways_of_associativity);
+static DEVICE_ATTR_RO(number_of_sets);
+static DEVICE_ATTR_RO(size);
+static DEVICE_ATTR_RO(allocation_policy);
+static DEVICE_ATTR_RO(write_policy);
+static DEVICE_ATTR_RO(shared_cpu_map);
+static DEVICE_ATTR_RO(shared_cpu_list);
+static DEVICE_ATTR_RO(physical_line_partition);
+
+static struct attribute *cache_default_attrs[] = {
+	&dev_attr_id.attr,
+	&dev_attr_type.attr,
+	&dev_attr_level.attr,
+	&dev_attr_shared_cpu_map.attr,
+	&dev_attr_shared_cpu_list.attr,
+	&dev_attr_coherency_line_size.attr,
+	&dev_attr_ways_of_associativity.attr,
+	&dev_attr_number_of_sets.attr,
+	&dev_attr_size.attr,
+	&dev_attr_allocation_policy.attr,
+	&dev_attr_write_policy.attr,
+	&dev_attr_physical_line_partition.attr,
+	NULL
+};
+
+static umode_t
+cache_default_attrs_is_visible(struct kobject *kobj,
+			       struct attribute *attr, int unused)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+	const struct cpumask *mask = &this_leaf->shared_cpu_map;
+	umode_t mode = attr->mode;
+
+	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+		return mode;
+	if ((attr == &dev_attr_type.attr) && this_leaf->type)
+		return mode;
+	if ((attr == &dev_attr_level.attr) && this_leaf->level)
+		return mode;
+	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
+		return mode;
+	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
+		return mode;
+	if ((attr == &dev_attr_coherency_line_size.attr) &&
+	    this_leaf->coherency_line_size)
+		return mode;
+	if ((attr == &dev_attr_ways_of_associativity.attr) &&
+	    this_leaf->size) /* allow 0 = full associativity */
+		return mode;
+	if ((attr == &dev_attr_number_of_sets.attr) &&
+	    this_leaf->number_of_sets)
+		return mode;
+	if ((attr == &dev_attr_size.attr) && this_leaf->size)
+		return mode;
+	if ((attr == &dev_attr_write_policy.attr) &&
+	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
+		return mode;
+	if ((attr == &dev_attr_allocation_policy.attr) &&
+	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
+		return mode;
+	if ((attr == &dev_attr_physical_line_partition.attr) &&
+	    this_leaf->physical_line_partition)
+		return mode;
+
+	return 0;
+}
+
+static const struct attribute_group cache_default_group = {
+	.attrs = cache_default_attrs,
+	.is_visible = cache_default_attrs_is_visible,
+};
+
+static const struct attribute_group *cache_default_groups[] = {
+	&cache_default_group,
+	NULL,
+};
+
+static const struct attribute_group *cache_private_groups[] = {
+	&cache_default_group,
+	NULL, /* Place holder for private group */
+	NULL,
+};
+
+const struct attribute_group *
+__weak cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+	return NULL;
+}
+
+static const struct attribute_group **
+cache_get_attribute_groups(struct cacheinfo *this_leaf)
+{
+	const struct attribute_group *priv_group =
+			cache_get_priv_group(this_leaf);
+
+	if (!priv_group)
+		return cache_default_groups;
+
+	if (!cache_private_groups[1])
+		cache_private_groups[1] = priv_group;
+
+	return cache_private_groups;
+}
+
+/* Add/Remove cache interface for CPU device */
+static void cpu_cache_sysfs_exit(unsigned int cpu)
+{
+	int i;
+	struct device *ci_dev;
+
+	if (per_cpu_index_dev(cpu)) {
+		for (i = 0; i < cache_leaves(cpu); i++) {
+			ci_dev = per_cache_index_dev(cpu, i);
+			if (!ci_dev)
+				continue;
+			device_unregister(ci_dev);
+		}
+		kfree(per_cpu_index_dev(cpu));
+		per_cpu_index_dev(cpu) = NULL;
+	}
+	device_unregister(per_cpu_cache_dev(cpu));
+	per_cpu_cache_dev(cpu) = NULL;
+}
+
+static int cpu_cache_sysfs_init(unsigned int cpu)
+{
+	struct device *dev = get_cpu_device(cpu);
+
+	if (per_cpu_cacheinfo(cpu) == NULL)
+		return -ENOENT;
+
+	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
+	if (IS_ERR(per_cpu_cache_dev(cpu)))
+		return PTR_ERR(per_cpu_cache_dev(cpu));
+
+	/* Allocate all required memory */
+	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
+					 sizeof(struct device *), GFP_KERNEL);
+	if (unlikely(per_cpu_index_dev(cpu) == NULL))
+		goto err_out;
+
+	return 0;
+
+err_out:
+	cpu_cache_sysfs_exit(cpu);
+	return -ENOMEM;
+}
+
+static int cache_add_dev(unsigned int cpu)
+{
+	unsigned int i;
+	int rc;
+	struct device *ci_dev, *parent;
+	struct cacheinfo *this_leaf;
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	const struct attribute_group **cache_groups;
+
+	rc = cpu_cache_sysfs_init(cpu);
+	if (unlikely(rc < 0))
+		return rc;
+
+	parent = per_cpu_cache_dev(cpu);
+	for (i = 0; i < cache_leaves(cpu); i++) {
+		this_leaf = this_cpu_ci->info_list + i;
+		if (this_leaf->disable_sysfs)
+			continue;
+		cache_groups = cache_get_attribute_groups(this_leaf);
+		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
+					   "index%1u", i);
+		if (IS_ERR(ci_dev)) {
+			rc = PTR_ERR(ci_dev);
+			goto err;
+		}
+		per_cache_index_dev(cpu, i) = ci_dev;
+	}
+	cpumask_set_cpu(cpu, &cache_dev_map);
+
+	return 0;
+err:
+	cpu_cache_sysfs_exit(cpu);
+	return rc;
+}
+
+static int cacheinfo_cpu_online(unsigned int cpu)
+{
+	int rc = detect_cache_attributes(cpu);
+
+	if (rc)
+		return rc;
+	rc = cache_add_dev(cpu);
+	if (rc)
+		free_cache_attributes(cpu);
+	return rc;
+}
+
+static int cacheinfo_cpu_pre_down(unsigned int cpu)
+{
+	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
+		cpu_cache_sysfs_exit(cpu);
+
+	free_cache_attributes(cpu);
+	return 0;
+}
+
+static int __init cacheinfo_sysfs_init(void)
+{
+	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
+				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
+}
+device_initcall(cacheinfo_sysfs_init);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4263273..9cc92a2 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -338,6 +338,60 @@ struct device *get_cpu_device(unsigned cpu)
 }
 EXPORT_SYMBOL_GPL(get_cpu_device);
 
+static void device_create_release(struct device *dev)
+{
+	kfree(dev);
+}
+
+static struct device *
+__cpu_device_create(struct device *parent, void *drvdata,
+		    const struct attribute_group **groups,
+		    const char *fmt, va_list args)
+{
+	struct device *dev = NULL;
+	int retval = -ENODEV;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	device_initialize(dev);
+	dev->parent = parent;
+	dev->groups = groups;
+	dev->release = device_create_release;
+	dev_set_drvdata(dev, drvdata);
+
+	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
+	if (retval)
+		goto error;
+
+	retval = device_add(dev);
+	if (retval)
+		goto error;
+
+	return dev;
+
+error:
+	put_device(dev);
+	return ERR_PTR(retval);
+}
+
+struct device *cpu_device_create(struct device *parent, void *drvdata,
+				 const struct attribute_group **groups,
+				 const char *fmt, ...)
+{
+	va_list vargs;
+	struct device *dev;
+
+	va_start(vargs, fmt);
+	dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
+	va_end(vargs);
+	return dev;
+}
+EXPORT_SYMBOL_GPL(cpu_device_create);
+
 #ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
 static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
 #endif
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
new file mode 100644
index 0000000..6a524bf
--- /dev/null
+++ b/include/linux/cacheinfo.h
@@ -0,0 +1,104 @@
+#ifndef _LINUX_CACHEINFO_H
+#define _LINUX_CACHEINFO_H
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+
+struct device_node;
+struct attribute;
+
+enum cache_type {
+	CACHE_TYPE_NOCACHE = 0,
+	CACHE_TYPE_INST = BIT(0),
+	CACHE_TYPE_DATA = BIT(1),
+	CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA,
+	CACHE_TYPE_UNIFIED = BIT(2),
+};
+
+/**
+ * struct cacheinfo - represent a cache leaf node
+ * @id: This cache's id. It is unique among caches with the same (type, level).
+ * @type: type of the cache - data, inst or unified
+ * @level: represents the hierarchy in the multi-level cache
+ * @coherency_line_size: size of each cache line usually representing
+ *	the minimum amount of data that gets transferred from memory
+ * @number_of_sets: total number of sets, a set is a collection of cache
+ *	lines sharing the same index
+ * @ways_of_associativity: number of ways in which a particular memory
+ *	block can be placed in the cache
+ * @physical_line_partition: number of physical cache lines sharing the
+ *	same cachetag
+ * @size: Total size of the cache
+ * @shared_cpu_map: logical cpumask representing all the cpus sharing
+ *	this cache node
+ * @attributes: bitfield representing various cache attributes
+ * @of_node: if devicetree is used, this represents either the cpu node in
+ *	case there's no explicit cache node or the cache node itself in the
+ *	device tree
+ * @disable_sysfs: indicates whether this node is visible to the user via
+ *	sysfs or not
+ * @priv: pointer to any private data structure specific to particular
+ *	cache design
+ *
+ * While @of_node, @disable_sysfs and @priv are used for internal book
+ * keeping, the remaining members form the core properties of the cache
+ */
+struct cacheinfo {
+	unsigned int id;
+	enum cache_type type;
+	unsigned int level;
+	unsigned int coherency_line_size;
+	unsigned int number_of_sets;
+	unsigned int ways_of_associativity;
+	unsigned int physical_line_partition;
+	unsigned int size;
+	cpumask_t shared_cpu_map;
+	unsigned int attributes;
+#define CACHE_WRITE_THROUGH	BIT(0)
+#define CACHE_WRITE_BACK	BIT(1)
+#define CACHE_WRITE_POLICY_MASK		\
+	(CACHE_WRITE_THROUGH | CACHE_WRITE_BACK)
+#define CACHE_READ_ALLOCATE	BIT(2)
+#define CACHE_WRITE_ALLOCATE	BIT(3)
+#define CACHE_ALLOCATE_POLICY_MASK	\
+	(CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+#define CACHE_ID		BIT(4)
+
+	struct device_node *of_node;
+	bool disable_sysfs;
+	void *priv;
+};
+
+struct cpu_cacheinfo {
+	struct cacheinfo *info_list;
+	unsigned int num_levels;
+	unsigned int num_leaves;
+	bool cpu_map_populated;
+};
+
+/*
+ * Helpers to make sure "func" is executed on the cpu whose cache
+ * attributes are being detected
+ */
+#define DEFINE_SMP_CALL_CACHE_FUNCTION(func)			\
+static inline void _##func(void *ret)				\
+{								\
+	int cpu = smp_processor_id();				\
+	*(int *)ret = __##func(cpu);				\
+}								\
+								\
+int func(unsigned int cpu)					\
+{								\
+	int ret;						\
+	smp_call_function_single(cpu, _##func, &ret, true);	\
+	return ret;						\
+}
+
+struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
+int init_cache_level(unsigned int cpu);
+int populate_cache_leaves(unsigned int cpu);
+
+const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
+
+#endif /* _LINUX_CACHEINFO_H */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 0523265..e328af8 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -43,6 +43,9 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
 extern ssize_t cpu_show_spectre_v2(struct device *dev,
 				   struct device_attribute *attr, char *buf);
 
+extern struct device *cpu_device_create(struct device *parent, void *drvdata,
+					 const struct attribute_group **groups,
+					 const char *fmt, ...);
 #ifdef CONFIG_HOTPLUG_CPU
 extern void unregister_cpu(struct cpu *cpu);
 extern ssize_t arch_cpu_probe(const char *, size_t);
--
1.8.3.1
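
For reference, below is a minimal userspace sketch of how the cache topology exposed by this series can be consumed. It relies only on attribute names the patch itself registers (level, type, size, shared_cpu_list) under /sys/devices/system/cpu/cpuX/cache/indexY/; the read_attr() helper, the fixed buffer sizes and the choice of cpu0 are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <string.h>

/* Read one sysfs attribute into buf, stripping the trailing newline. */
static int read_attr(const char *dir, const char *attr, char *buf, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f)) {
		fclose(f);
		return -1;
	}
	buf[strcspn(buf, "\n")] = '\0';
	fclose(f);
	return 0;
}

int main(void)
{
	char dir[128], level[16], type[32], size[32], cpus[128];
	int idx;

	/* cpu0 is assumed present; a real tool would iterate over all CPUs. */
	for (idx = 0; ; idx++) {
		snprintf(dir, sizeof(dir),
			 "/sys/devices/system/cpu/cpu0/cache/index%d", idx);
		if (read_attr(dir, "level", level, sizeof(level)))
			break;	/* no more cache leaves */
		read_attr(dir, "type", type, sizeof(type));
		read_attr(dir, "size", size, sizeof(size));
		read_attr(dir, "shared_cpu_list", cpus, sizeof(cpus));
		printf("index%d: L%s %s, %s, shared with CPUs %s\n",
		       idx, level, type, size, cpus);
	}
	return 0;
}

On a typical x86 host this prints one line per leaf (L1 Data, L1 Instruction, L2 Unified, L3 Unified), with shared_cpu_list identifying the sibling CPUs that share each cache.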