x86-64, intel, performancecounter, memory-barriers, intel-pmu

PMC to count if software prefetch hit L1 cache


I am trying to find a PMC (Performance Monitoring Counter) that will count the number of times a prefetcht0 instruction hits (or misses) the L1 dcache.

icelake-client: Intel(R) Core(TM) i7-1065G7 CPU @ 1.30GHz

I am trying to make this fine-grained, i.e. something like the following (note: this should really have lfence around the prefetcht0):

    xorl %ecx, %ecx
    rdpmc
    movl %eax, %edi
    prefetcht0 (%rsi)
    rdpmc
    testl %eax, %edi
    // jump depending on if it was a miss or not

The goal is to check whether a prefetch hit L1. If it didn't, execute some other code that is ready to run; otherwise proceed.

It seems that it will have to be a miss event just based on what is available.

I have tried a few events from libpfm4 and intel manual with no luck:

L1-DCACHE-LOAD-MISSES, emask=0x00, umask=0x10000
L1D.REPLACEMENT, emask=0x51, umask=0x1 
L2_RQSTS.SWPF_HIT, emask=0x24, umask=0xc8
L2_RQSTS.SWPF_MISS, emask=0x24, umask=0x28
LOAD_HIT_PREFETCH.SWPF, emask=0x01, umask=0x4c  (misleadingly, this counts demand loads that hit an LFB allocated for a software prefetch, not software prefetch hits)

L1D.REPLACEMENT and L1-DCACHE-LOAD-MISSES kind of work: they behave as expected if I delay the second rdpmc, but if the two rdpmc are back to back the results seem unreliable at best. The other events are complete busts.
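
For reference, here is a minimal sketch of how an event/umask pair maps onto the raw perf_event_attr.config values used in the test code below (the helper name is made up; the layout is the usual raw Intel encoding with the event select in the low byte and the unit mask in the next byte):

    #include <cstdint>

    // Pack an Intel event select / unit mask pair into a PERF_TYPE_RAW config
    // value: event select in the low byte, unit mask in the next byte.
    static constexpr uint64_t
    raw_config(uint8_t event, uint8_t umask) {
        return (uint64_t(umask) << 8) | uint64_t(event);
    }

    static_assert(raw_config(0xd1, 0x01) == 0x01d1, "MEM_LOAD_RETIRED.L1_HIT");
    static_assert(raw_config(0x24, 0xc8) == 0xc824, "L2_RQSTS.SWPF_HIT");
    static_assert(raw_config(0x24, 0x28) == 0x2824, "L2_RQSTS.SWPF_MISS");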

Questions:

  1. Should any of these work for detecting whether a prefetch hit the L1 dcache? (i.e., is my testing just bad?)
  2. If not, what events could be used to detect whether a prefetch hit the L1 dcache?

Edit: MEM_LOAD_RETIRED.L1_HIT does not appear to work for software prefetch.

Here is the code I am using to test this:

#include <asm/unistd.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>


#define HIT  0
#define MISS 1

#define TODO MISS


#define PAGE_SIZE 4096

// to force hit make TSIZE low
#define TSIZE     10000

#define err_assert(cond)                                                       \
    if (__builtin_expect(!(cond), 0)) {                                        \
        fprintf(stderr, "%d:%d: %s\n", __LINE__, errno, strerror(errno));      \
        exit(-1);                                                              \
    }


uint64_t
get_addr() {
    uint8_t * addr =
        (uint8_t *)mmap(NULL, TSIZE * PAGE_SIZE, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    err_assert(addr != MAP_FAILED); /* mmap returns MAP_FAILED, not NULL, on error */


    for (uint32_t i = 0; i < TSIZE; ++i) {
        addr[i * PAGE_SIZE + (PAGE_SIZE - 1)] = 0;
        #if TODO == HIT
        addr[i * PAGE_SIZE] = 0;
        #endif
    }

    return uint64_t(addr);
}

int
perf_event_open(struct perf_event_attr * hw_event,
                pid_t                    pid,
                int                      cpu,
                int                      group_fd,
                unsigned long            flags) {
    int ret;

    ret = syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
    return ret;
}

void
init_perf_event_struct(struct perf_event_attr * pe,
                       const uint32_t           type,
                       const uint64_t           ev_config,
                       int                      lead) {
    __builtin_memset(pe, 0, sizeof(struct perf_event_attr));

    pe->type           = type;
    pe->size           = sizeof(struct perf_event_attr);
    pe->config         = ev_config;
    pe->disabled       = !!lead;
    pe->exclude_kernel = 1;
    pe->exclude_hv     = 1;
}


/* Fixed counters (rdpmc index = (1 << 30) + n) */
static constexpr uint32_t core_instruction_ev  = 0x00c0; /* INST_RETIRED.ANY_P */
static constexpr uint32_t core_instruction_idx = (1 << 30) + 0;

static constexpr uint32_t core_cycles_ev  = 0x003c; /* CPU_CLK_UNHALTED.THREAD_P */
static constexpr uint32_t core_cycles_idx = (1 << 30) + 1;

static constexpr uint32_t ref_cycles_ev  = 0x0300; /* pseudo-encoding for the fixed REF_TSC counter */
static constexpr uint32_t ref_cycles_idx = (1 << 30) + 2;

/* programmable counters */
static constexpr uint32_t mem_load_retired_l1_hit  = 0x01d1; /* event 0xd1, umask 0x01 */
static constexpr uint32_t mem_load_retired_l1_miss = 0x08d1; /* event 0xd1, umask 0x08 */


int
init_perf_tracking() {
    struct perf_event_attr pe;

    init_perf_event_struct(&pe, PERF_TYPE_RAW, core_instruction_ev, 1);
    int leadfd = perf_event_open(&pe, 0, -1, -1, 0);
    err_assert(leadfd >= 0);

    init_perf_event_struct(&pe, PERF_TYPE_RAW, core_cycles_ev, 0);
    err_assert(perf_event_open(&pe, 0, -1, leadfd, 0) >= 0);

    init_perf_event_struct(&pe, PERF_TYPE_RAW, ref_cycles_ev, 0);
    err_assert(perf_event_open(&pe, 0, -1, leadfd, 0) >= 0);


    init_perf_event_struct(&pe, PERF_TYPE_RAW, mem_load_retired_l1_hit, 0);
    err_assert(perf_event_open(&pe, 0, -1, leadfd, 0) >= 0);

    return leadfd;
}

void
start_perf_tracking(int leadfd) {
    ioctl(leadfd, PERF_EVENT_IOC_RESET, 0);
    ioctl(leadfd, PERF_EVENT_IOC_ENABLE, 0);
}

#define _V_TO_STR(X) #X
#define V_TO_STR(X)  _V_TO_STR(X)

//#define DO_PREFETCH
#ifdef DO_PREFETCH
#define DO_MEMORY_OP(addr) "prefetcht0 (%[" V_TO_STR(addr) "])\n\t"
#else
#define DO_MEMORY_OP(addr) "movl (%[" V_TO_STR(addr) "]), %%eax\n\t"
#endif


int
main() {
    int fd = init_perf_tracking();
    start_perf_tracking(fd);

    uint64_t addr = get_addr();

    uint32_t prefetch_miss, cycles_to_detect;
    asm volatile(
        "lfence\n\t"
        "movl %[core_cycles_idx], %%ecx\n\t"
        "rdpmc\n\t"
        "movl %%eax, %[cycles_to_detect]\n\t"
        "xorl %%ecx, %%ecx\n\t"
        "rdpmc\n\t"
        "movl %%eax, %[prefetch_miss]\n\t"
        "lfence\n\t"
        DO_MEMORY_OP(prefetch_addr)
        "lfence\n\t"
        "xorl %%ecx, %%ecx\n\t"
        "rdpmc\n\t"
        "subl %[prefetch_miss], %%eax\n\t"
        "movl %%eax, %[prefetch_miss]\n\t"
        "movl %[core_cycles_idx], %%ecx\n\t"
        "rdpmc\n\t"
        "subl %[cycles_to_detect], %%eax\n\t"
        "movl %%eax, %[cycles_to_detect]\n\t"
        "lfence\n\t"
        : [ prefetch_miss ] "=&r"(prefetch_miss),
          [ cycles_to_detect ] "=&r"(cycles_to_detect)
        : [ prefetch_addr ] "r"(addr), [ core_cycles_idx ] "i"(core_cycles_idx)
        : "eax", "edx", "ecx");

    fprintf(stderr, "Hit    : %d\n", prefetch_miss);
    fprintf(stderr, "Cycles : %d\n", cycles_to_detect);
}

If I define DO_PREFETCH, the result for MEM_LOAD_RETIRED.L1_HIT is always 1 (it always appears to get a hit). If I comment out DO_PREFETCH, the results correspond to what I would expect (when the address is clearly not in cache it reports a miss; when it clearly is, it reports a hit).

With DO_PREFETCH:

g++ -DDO_PREFETCH -O3 -march=native -mtune=native prefetch_hits.cc -o prefetch_hits
$> ./prefetch_hits
Hit    : 1
Cycles : 554

and without DO_PREFETCH

g++ -O3 -march=native -mtune=native prefetch_hits.cc -o prefetch_hits
$> ./prefetch_hits
Hit    : 0
Cycles : 888

With L2_RQSTS.SWPF_HIT and L2_RQSTS.SWPF_MISS I was able to get it to work. Big thanks to Hadi Brais. Worth noting that the reason L1D_PEND_MISS.PENDING didn't work might be related to Icelake; Hadi Brais reported getting it to work for detecting L1D cache misses on Haswell.
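
A sketch of the resulting check (assuming swpf_hit_delta and swpf_miss_delta are the rdpmc deltas measured immediately around the prefetcht0, e.g. with both L2_RQSTS events programmed in the harness below):

    #include <cstdint>

    // If neither L2_RQSTS.SWPF_HIT nor L2_RQSTS.SWPF_MISS fired, no request
    // reached the L2, so the prefetcht0 either hit in the L1D (or an LFB) or
    // was ignored; if either fired, it missed the L1D.
    static inline bool
    prefetch_missed_l1d(uint32_t swpf_hit_delta, uint32_t swpf_miss_delta) {
        return (swpf_hit_delta + swpf_miss_delta) != 0;
    }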

In the interest of trying to determine why L1D_PEND_MISS.PENDING and MEM_LOAD_RETIRED.L1_HIT do not work, here is the exact code I'm using to test them:

#include <asm/unistd.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>


#define HIT  0
#define MISS 1

#define TODO MISS


#define PAGE_SIZE 4096

#define TSIZE 1000

#define err_assert(cond)                                                       \
    if (__builtin_expect(!(cond), 0)) {                                        \
        fprintf(stderr, "%d:%d: %s\n", __LINE__, errno, strerror(errno));      \
        exit(-1);                                                              \
    }


uint64_t
get_addr() {
    uint8_t * addr =
        (uint8_t *)mmap(NULL, TSIZE * PAGE_SIZE, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    err_assert(addr != MAP_FAILED); /* mmap returns MAP_FAILED, not NULL, on error */
    __builtin_memset(addr, -1, TSIZE * PAGE_SIZE);
    return uint64_t(addr);
}

int
perf_event_open(struct perf_event_attr * hw_event,
                pid_t                    pid,
                int                      cpu,
                int                      group_fd,
                unsigned long            flags) {
    int ret;

    ret = syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
    return ret;
}

void
init_perf_event_struct(struct perf_event_attr * pe,
                       const uint32_t           type,
                       const uint64_t           ev_config,
                       int                      lead) {
    __builtin_memset(pe, 0, sizeof(struct perf_event_attr));

    pe->type           = type;
    pe->size           = sizeof(struct perf_event_attr);
    pe->config         = ev_config;
    pe->disabled       = !!lead;
    pe->exclude_kernel = 1;
    pe->exclude_hv     = 1;
}


/* Fixed counters (rdpmc index = (1 << 30) + n) */
static constexpr uint32_t core_instruction_ev  = 0x00c0; /* INST_RETIRED.ANY_P */
static constexpr uint32_t core_instruction_idx = (1 << 30) + 0;

static constexpr uint32_t core_cycles_ev  = 0x003c; /* CPU_CLK_UNHALTED.THREAD_P */
static constexpr uint32_t core_cycles_idx = (1 << 30) + 1;

static constexpr uint32_t ref_cycles_ev  = 0x0300; /* pseudo-encoding for the fixed REF_TSC counter */
static constexpr uint32_t ref_cycles_idx = (1 << 30) + 2;

/* programmable counters */
static constexpr uint32_t mem_load_retired_l1_hit  = 0x01d1; /* event 0xd1, umask 0x01 */
static constexpr uint32_t mem_load_retired_l1_miss = 0x08d1; /* event 0xd1, umask 0x08 */
static constexpr uint32_t l1d_pending              = 0x0148; /* L1D_PEND_MISS.PENDING */
static constexpr uint32_t swpf_hit                 = 0xc824; /* L2_RQSTS.SWPF_HIT */
static constexpr uint32_t swpf_miss                = 0x2824; /* L2_RQSTS.SWPF_MISS */
static constexpr uint32_t ev0                      = l1d_pending;

#define NEVENTS 1
#if NEVENTS > 1
static constexpr uint32_t ev1 = swpf_miss;
#endif

int
init_perf_tracking() {
    struct perf_event_attr pe;

    init_perf_event_struct(&pe, PERF_TYPE_RAW, core_instruction_ev, 1);
    int leadfd = perf_event_open(&pe, 0, -1, -1, 0);
    err_assert(leadfd >= 0);

    init_perf_event_struct(&pe, PERF_TYPE_RAW, core_cycles_ev, 0);
    err_assert(perf_event_open(&pe, 0, -1, leadfd, 0) >= 0);

    init_perf_event_struct(&pe, PERF_TYPE_RAW, ref_cycles_ev, 0);
    err_assert(perf_event_open(&pe, 0, -1, leadfd, 0) >= 0);

    init_perf_event_struct(&pe, PERF_TYPE_RAW, ev0, 0);
    err_assert(perf_event_open(&pe, 0, -1, leadfd, 0) >= 0);

#if NEVENTS > 1
    init_perf_event_struct(&pe, PERF_TYPE_RAW, ev1, 0);
    err_assert(perf_event_open(&pe, 0, -1, leadfd, 0) >= 0);
#endif

    return leadfd;
}

void
start_perf_tracking(int leadfd) {
    ioctl(leadfd, PERF_EVENT_IOC_RESET, 0);
    ioctl(leadfd, PERF_EVENT_IOC_ENABLE, 0);
}

#define _V_TO_STR(X) #X
#define V_TO_STR(X)  _V_TO_STR(X)

//#define LFENCE
#ifdef LFENCE
#define SERIALIZER() "lfence\n\t"
#else
#define SERIALIZER()                                                           \
    "xorl %%ecx, %%ecx\n\t"                                                    \
    "xorl %%eax, %%eax\n\t"                                                    \
    "cpuid\n\t"

#endif

#define DO_PREFETCH

#ifdef DO_PREFETCH
#define DO_MEMORY_OP(addr) "prefetcht0 (%[" V_TO_STR(addr) "])\n\t"
#else
#define DO_MEMORY_OP(addr) "movl (%[" V_TO_STR(addr) "]), %%eax\n\t"
#endif


int
main() {
    int fd = init_perf_tracking();
    start_perf_tracking(fd);

    uint64_t addr = get_addr();

    // to ensure page in TLB
    *((volatile uint64_t *)(addr + (PAGE_SIZE - 8))) = 0;
    
#if TODO == HIT
    // loading from 0 offset to check cache miss / hit
    *((volatile uint64_t *)addr) = 0;
#endif

    uint32_t ecount0 = 0, ecount1 = 0, cycles_to_detect = 0;
    asm volatile(
        SERIALIZER()
        "movl %[core_cycles_idx], %%ecx\n\t"
        "rdpmc\n\t"
        "movl %%eax, %[cycles_to_detect]\n\t"
        "xorl %%ecx, %%ecx\n\t"
        "rdpmc\n\t"
        "movl %%eax, %[ecount0]\n\t"
#if NEVENTS > 1
        "movl $1, %%ecx\n\t"
        "rdpmc\n\t"
        "movl %%eax, %[ecount1]\n\t"
#endif
        SERIALIZER()
        DO_MEMORY_OP(prefetch_addr)
        SERIALIZER()
        "xorl %%ecx, %%ecx\n\t"
        "rdpmc\n\t"
        "subl %[ecount0], %%eax\n\t"
        "movl %%eax, %[ecount0]\n\t"
#if NEVENTS > 1
        "movl $1, %%ecx\n\t"
        "rdpmc\n\t"
        "subl %[ecount1], %%eax\n\t"
        "movl %%eax, %[ecount1]\n\t"
#endif
        "movl %[core_cycles_idx], %%ecx\n\t"
        "rdpmc\n\t"
        "subl %[cycles_to_detect], %%eax\n\t"
        "movl %%eax, %[cycles_to_detect]\n\t"
        SERIALIZER()
        : [ ecount0 ] "=&r"(ecount0),
#if NEVENTS > 1
          [ ecount1 ] "=&r"(ecount1),
#endif
          [ cycles_to_detect ] "=&r"(cycles_to_detect)
        : [ prefetch_addr ] "r"(addr), [ core_cycles_idx ] "i"(core_cycles_idx)
        : "eax", "edx", "ecx");

    fprintf(stderr, "E0     : %d\n", ecount0);
    fprintf(stderr, "E1     : %d\n", ecount1);
    fprintf(stderr, "Cycles : %d\n", cycles_to_detect);
}

Solution

  • The rdpmc instruction is not ordered with respect to events that may occur before it or after it in program order. A fully serializing instruction, such as cpuid, is required to obtain the desired ordering guarantees with respect to prefetcht0. The code should be as follows:

    xor  %eax, %eax         # CPUID leaf eax=0 should be fast.  Doing this before each CPUID might be a good idea, but omitted for clarity
    cpuid
    xorl %ecx, %ecx
    rdpmc
    movl %eax, %edi         # save RDPMC result before CPUID overwrites EAX..EDX
    cpuid
    prefetcht0 (%rsi)
    cpuid
    xorl %ecx, %ecx
    rdpmc
    testl %eax, %edi        # CPUID doesn't affect FLAGS
    cpuid
    

    Each of the rdpmc instructions is sandwiched between cpuid instructions. This ensures that all events that occur between the two rdpmc instructions, and only those events, are counted.

    The prefetch operation of the prefetcht0 instruction may either be ignored or performed. If it was performed, it may either hit in a cache line that is in a valid state in the L1D or not. These are the cases that have to be considered.

    The sum of L2_RQSTS.SWPF_HIT and L2_RQSTS.SWPF_MISS cannot be used to count or derive the number of prefetcht0 hits in the L1D, but their sum can be subtracted from SW_PREFETCH_ACCESS.T0 to get an upper bound on the number of prefetcht0 hits in the L1D. With the properly serialized sequence shown above, I think the only case where a non-ignored prefetcht0 doesn't hit in the L1D and is not counted by the sum SWPF_HIT+SWPF_MISS is if the software prefetch operation hits in an LFB allocated for a hardware prefetch.
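
    For concreteness, a minimal sketch of that upper-bound arithmetic (assuming the rdpmc deltas of SW_PREFETCH_ACCESS.T0, L2_RQSTS.SWPF_HIT and L2_RQSTS.SWPF_MISS have already been measured around the region of interest; the function name is made up):

    #include <cstdint>

    // Every executed prefetcht0 is counted by SW_PREFETCH_ACCESS.T0; the ones
    // that reach the L2 are counted by SWPF_HIT or SWPF_MISS.  The remainder
    // covers "hit in the L1D, hit in an LFB, or ignored", so it is an upper
    // bound on the number of L1D hits, not an exact count.
    static inline uint64_t
    l1d_hit_upper_bound(uint64_t swpf_t0, uint64_t swpf_hit, uint64_t swpf_miss) {
        return swpf_t0 - (swpf_hit + swpf_miss);
    }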

    L1-DCACHE-LOAD-MISSES is just another name for L1D.REPLACEMENT. The event code and umask you've shown for L1-DCACHE-LOAD-MISSES are incorrect. The L1D.REPLACEMENT event only occurs if the prefetch operation misses in the L1D (which causes a request to be sent to the L2) and causes a valid line in the L1D to be replaced. Usually most fills cause a replacement, but the event still cannot be used to distinguish between a prefetcht0 that hits in the L1D, a prefetcht0 that hits in an LFB allocated for a hardware prefetch, and an ignored prefetcht0.

    The event LOAD_HIT_PREFETCH.SWPF occurs when a demand load hits in an LFB allocated for a software prefetch. This is obviously not useful here.

    The event L1D_PEND_MISS.PENDING (event=0x48, umask=0x01) should work. According to the documentation, this event increments the counter by the number of pending L1D misses every cycle. I think it works for demand loads and prefetches. This is really an approximation, so it may count even if there are zero pending L1D misses. But I think it can still be used to determine with very high confidence whether a single prefetcht0 missed in the L1D by following these steps (a sketch of the calibration loop follows the list):

    • First, add the line uint64_t value = *(volatile uint64_t*)addr; just before the inline assembly. This is to increase the probability to near 100% that the line to be prefetched is in the L1D.
    • Second, measure the minimum increment of L1D_PEND_MISS.PENDING for a prefetcht0 that is very highly likely to hit in the L1D.
    • Run the experiment many times to build high confidence that the minimum increment is highly stable, to the extent that the exact same value is observed in almost every run.
    • Comment out the line added in the first step so that the prefetcht0 misses and check that the event count change is always or almost always larger than the minimum increment measured previously.
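
    A rough sketch of that calibration loop (assuming a hypothetical measure_pending_delta() helper that returns the L1D_PEND_MISS.PENDING rdpmc delta around a single prefetcht0 to addr, serialized with cpuid as in the code above):

    #include <algorithm>
    #include <cstdint>

    // Hypothetical helper: two rdpmc reads of L1D_PEND_MISS.PENDING around a
    // single prefetcht0 to addr, fenced with cpuid as shown above.
    uint32_t measure_pending_delta(void * addr);

    // Steps 1-3: prefetch a line that is almost certainly already in the L1D
    // many times and record the smallest PENDING delta observed (the "hit"
    // baseline, which should be stable across runs).
    uint32_t
    calibrate_hit_baseline(void * addr, int iters) {
        uint32_t min_delta = UINT32_MAX;
        for (int i = 0; i < iters; ++i) {
            (void)*(volatile uint64_t *)addr;  // bring the line into the L1D
            min_delta = std::min(min_delta, measure_pending_delta(addr));
        }
        return min_delta;
    }

    // Step 4: without warming the line, a delta noticeably above the baseline
    // indicates that the prefetcht0 missed in the L1D.
    bool
    prefetch_probably_missed(void * addr, uint32_t hit_baseline) {
        return measure_pending_delta(addr) > hit_baseline;
    }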

    So far, I've only been concerned with making a distinction between a prefetch that hits in the L1D and a non-ignored prefetch that misses in both the L1D and the LFBs. Now I'll consider the rest of the cases:

    • If the prefetch results in a page fault or if the memory type of the target cache line is WC or UC, the prefetch is ignored. I don't know whether the L1D_PEND_MISS.PENDING event can be used to distinguish between a hit and this case. You can run an experiment where the target address of the prefetch instruction is in a virtual page with no valid mapping, or one mapped to a kernel page. Check whether the change in the event count is unique with high probability.
    • If no LFBs are available, the prefetch is ignored. This case can be eliminated by switching off the sibling logical core and using cpuid instead of lfence before the first rdpmc.
    • If the prefetch hits in an LFB allocated for an RFO, ItoM, or a hardware prefetch request, then the prefetch is effectively redundant. For all of these types of requests, the change in the L1D_PEND_MISS.PENDING count may or may not be distinguishable from a hit in the L1D. This case can be eliminated by using cpuid instead of lfence before the first rdpmc and turning off the two L1D hardware prefetchers (see the sketch after this list).
    • I don't think a prefetch to a prefetchable memory type can hit in a WCB because changing the memory type of a location is a fully serializing operation, so this case is not a problem.
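
    A sketch of how to switch off the sibling logical core and the two L1D hardware prefetchers (assuming the documented prefetcher-control MSR 0x1A4 and the wrmsr utility from msr-tools; verify the bit layout for your part before relying on this):

    # offline the sibling hyperthread of the CPU the test is pinned to
    $> cat /sys/devices/system/cpu/cpu0/topology/thread_siblings_list   # e.g. prints 0,4
    $> echo 0 | sudo tee /sys/devices/system/cpu/cpu4/online            # then, if it printed 0,4

    # bit 2 = DCU (L1D streamer) prefetcher disable, bit 3 = DCU IP prefetcher disable
    $> sudo modprobe msr
    $> sudo wrmsr -a 0x1a4 0xc
    # ... run the experiment, then restore ...
    $> sudo wrmsr -a 0x1a4 0x0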

    One obvious advantage of using L1D_PEND_MISS.PENDING instead of the sum SWPF_HIT+SWPF_MISS is the smaller number of events. Another advantage is that L1D_PEND_MISS.PENDING is supported on some of the earlier microarchitectures. Also, as discussed above, it can be more powerful. It works on my Haswell with a threshold of 69-70 cycles.

    If the L1D_PEND_MISS.PENDING event changes in the different cases are not distinguishable, then the sum SWPF_HIT+SWPF_MISS can be used. These two events occur at the L2, so they only tell you whether the prefetch missed in the L1D and a request was sent to and accepted by the L2. If the request is rejected or hits in the L2's SQ, neither of the two events may occur. In addition, none of the aforementioned cases will be distinguishable from an L1D hit.

    For normal demand loads, you can use MEM_LOAD_RETIRED.L1_HIT. If the load hits in the L1D, a single L1_HIT occurs. Otherwise, in any other case, no L1_HIT events occur, assuming that no other instruction between the two rdpmcs, such as cpuid, can generate L1_HIT events. You'll have to verify that cpuid doesn't generate L1_HIT events. Don't forget to count only user-mode events because an interrupt can occur between any two instructions and the interrupt handler may generate one or more L1_HIT events in kernel mode. While it's very unlikely, if you want to be 100% sure, check also whether the occurrence of an interrupt itself generates L1_HIT events.
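
    A quick way to do that check (a sketch, assuming a hypothetical measure_l1_hit_delta() helper built like the rdpmc harness above, but with no memory operation between the two rdpmc reads of MEM_LOAD_RETIRED.L1_HIT, only the cpuid fences):

    #include <cstdint>

    // If cpuid itself generated MEM_LOAD_RETIRED.L1_HIT events, an "empty"
    // measurement region would report a non-zero delta.
    uint32_t measure_l1_hit_delta();  // hypothetical: cpuid; rdpmc; cpuid; rdpmc; cpuid

    bool
    cpuid_is_l1_hit_free(int iters) {
        for (int i = 0; i < iters; ++i)
            if (measure_l1_hit_delta() != 0)
                return false;
        return true;
    }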