c++, assembly, optimization, x86-64, sse

Most insanely fast way to convert 9 char digits into an int or unsigned int


#include <stdio.h>
#include <iostream>
#include <string>
#include <chrono>
#include <memory>
#include <cstdlib>
#include <cstdint>
#include <cstring>
#include <immintrin.h>
using namespace std;

const int p[9] =   {1, 10, 100, 
                    1000, 10000, 100000, 
                    1000000, 10000000, 100000000};
                    
class MyTimer {
 private:
  std::chrono::time_point<std::chrono::steady_clock> starter;

 public:
  void startCounter() {
    starter = std::chrono::steady_clock::now();
  }

  int64_t getCounterNs() {    
    return std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now() - starter).count();
  }
};
                    
int convert1(const char *a) {
    int res = 0;
    for (int i=0; i<9; i++) res = res * 10 + a[i] - 48;
    return res;
}

int convert2(const char *a) {
    return (a[0] - 48) * p[8] + (a[1] - 48) * p[7] + (a[2] - 48) * p[6]
            + (a[3] - 48) * p[5] + (a[4] - 48) * p[4] + (a[5] - 48) * p[3]
            + (a[6] - 48) * p[2] + (a[7] - 48) * p[1] + (a[8] - 48) * p[0];
}

int convert3(const char *a) {
    return (a[0] - 48) * p[8] + a[1] * p[7] + a[2] * p[6] + a[3] * p[5]
            + a[4] * p[4] + a[5] * p[3] + a[6] * p[2] + a[7] * p[1] + a[8]
            - 533333328;
}

const unsigned pu[9] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
    100000000};

int convert4u(const char *aa) {
  const unsigned char *a = (const unsigned char*) aa;
  return a[0] * pu[8] + a[1] * pu[7] + a[2] * pu[6] + a[3] * pu[5] + a[4] * pu[4]
      + a[5] * pu[3] + a[6] * pu[2] + a[7] * pu[1] + a[8] - (unsigned) 5333333328u;
}

int convert5(const char* a) {
    int val = 0;
    for(size_t k =0;k <9;++k) {
        val = (val << 3) + (val << 1) + (a[k]-'0');
    }
    return val;
}

const unsigned pu2[9] = {100000000, 10000000, 1000000, 100000, 10000, 1000, 100, 10, 1};

int convert6u(const char *a) {
  return a[0]*pu2[0] + a[1]*pu2[1] + a[2]*pu2[2] + a[3] * pu2[3] + a[4] * pu2[4] + a[5] * pu2[5] + a[6] * pu2[6] + a[7] * pu2[7] + a[8] - (unsigned) 5333333328u;
}

constexpr std::uint64_t zeros(char z) {
    std::uint64_t result = 0;
    for (int i = 0; i < sizeof(result); ++i) {
        result = result*256 + z;
    }
    return result;
}

int convertX(const char *a) {
    constexpr std::uint64_t offset = zeros('0');
    constexpr std::uint64_t o1 = 0xFF00FF00FF00FF00;
    constexpr std::uint64_t o2 = 0xFFFF0000FFFF0000;
    constexpr std::uint64_t o3 = 0xFFFFFFFF00000000;

    std::uint64_t buffer;
    std::memcpy(&buffer, a, sizeof(buffer));
    const auto bytes = buffer - offset;              // ASCII codes -> digit values, one per byte
    const auto b1 = (bytes & o1) >> 8;               // the less-significant digit of each pair
    const auto words = 10*(bytes & ~o1) + b1;        // pairs of digits -> 2-digit values in 16-bit lanes
    const auto w1 = (words & o2) >> 16;
    const auto dwords = 100*(words & ~o2) + w1;      // -> 4-digit values in 32-bit lanes
    const auto d1 = (dwords & o3) >> 32;
    const auto qwords = 10000*(dwords & ~o3) + d1;   // -> the 8-digit value from a[0..7]

    const auto final = 10*static_cast<unsigned>(qwords) + (a[8] - '0');   // append the 9th digit
    return static_cast<int>(final);
}

//########################  ACCEPTED ANSWER
//########################
//########################
typedef struct {             // for output into memory
    alignas(16) unsigned hours;
    unsigned minutes, seconds, nanos;
} hmsn;

void str2hmsn(hmsn *out, const char str[15])  // HHMMSSXXXXXXXXX  15 total, with 9-digit nanoseconds.
{    // 15 not including the terminating 0 (if any) which we don't read
    //hmsn retval;
    __m128i digs = _mm_loadu_si128((const __m128i*)str);
    digs = _mm_sub_epi8( digs, _mm_set1_epi8('0') );
    __m128i hms_x_words = _mm_maddubs_epi16( digs, _mm_set1_epi16( 10U + (1U<<8) ));   // SSSE3  pairs of digits => 10s, 1s places.

    __m128i hms_unpacked = _mm_cvtepu16_epi32(hms_x_words);                           // SSE4.1  hours, minutes, seconds unpack from uint16_t to uint32
    //_mm_storeu_si128((__m128i*)&retval, hms_unpacked);                                  // store first 3 struct members; last to be written separately
    _mm_storeu_si128((__m128i*)out, hms_unpacked);
    // or scalar extract with _mm_cvtsi128_si64 (movq) and shift / movzx

    __m128i xwords = _mm_bsrli_si128(hms_x_words, 6);  // would like to schedule this sooner, so oldest-uop-first starts this critical path shuffle ahead of pmovzx
    // 8 bytes of data, lined up in low 2 dwords, rather than split across high 3
    // could have got here with an 8-byte load that starts here, if we didn't want to get the H,M,S integers cheaply.

    __m128i xdwords = _mm_madd_epi16(xwords, _mm_setr_epi16(100, 1, 100, 1,  0,0,0,0));   // low/high uint32 chunks, discard the 9th x digit.
    uint64_t pair32 = _mm_cvtsi128_si64(xdwords);
    uint32_t msd = 100*100 * (uint32_t)pair32;     // most significant dword was at lower address (in printing order), so low half on little-endian x86.  encourage compilers to use 32-bit operand-size for imul
    uint32_t first8_x = msd + (uint32_t)(pair32 >> 32);
    uint32_t nanos = first8_x * 10 + ((unsigned char)str[14] - '0');   // total*10 + lowest digit
    out->nanos = nanos;
    //retval.nanos = nanos;
    //return retval;

  // returning the struct by value encourages compilers in the wrong direction
  // into not doing separate stores, even when inlining into a function that assigns the whole struct to a pointed-to output
}
hmsn mystruct;

int convertSIMD(const char* a)
{
    str2hmsn(&mystruct, a);
    return mystruct.nanos;
}


//########################
//########################
using ConvertFunc = int(const char*);

volatile int result = 0; // do something with the result of function to prevent unexpected optimization
void benchmark(ConvertFunc converter, string name, int numTest=1000) {
    MyTimer timer;
    const int N = 100000;
    char *a = new char[9*N + 17];
    int64_t runtime = 0;    

    for (int t=1; t<=numTest; t++) {        
        // change something to prevent unexpected optimization
        for (int i=0; i<9*N; i++) a[i] = rand() % 10 + '0'; 

        timer.startCounter();
        for (int i=0; i<9*N; i+= 9) result = converter(a+i);
        runtime += timer.getCounterNs();
    }
    cout << name << ": " << (runtime / (double(numTest) * N)) << "ns average\n";
    delete[] a;
}

int main() {
    benchmark(convert1, "slow");
    benchmark(convert2, "normal");    
    benchmark(convert3, "fast");
    benchmark(convert4u, "unsigned");
    benchmark(convert5, "shifting");
    benchmark(convert6u, "reverse");
    benchmark(convertX, "swar64");
    benchmark(convertSIMD, "manualSIMD");

    return 0;
}

I want to find the fastest way to turn char a[9] into an int. The full problem is to convert a char a[15] timestamp of the form HHMMSSxxxxxxxxx to nanoseconds, where ~50 bytes after the x digits are allocated and can be safely read (but not written). We only care about the last 9 digits in this question.

Version 1 is basic; versions 2 and 3 try to save some computation. I compile with the -O3 flag, and storing the powers of 10 in an array is fine because it is optimized away (checked using Godbolt).

How can I make this faster? Yes, I know this sounds like premature optimization, but let's assume I need that final 2-3% boost.
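A quick correctness check like the following (my addition, separate from the timing loop) confirms the scalar and SWAR converters agree on a sample input:

#include <cassert>

// Not part of the benchmark: verify the converters give the same 9-digit result.
void sanityCheck() {
    const char sample[16] = "123456789";   // 9 digits; the trailing zero bytes are safe to read
    assert(convert1(sample) == 123456789);
    assert(convert2(sample) == 123456789);
    assert(convert3(sample) == 123456789);
    assert(convert4u(sample) == 123456789);
    assert(convert5(sample) == 123456789);
    assert(convert6u(sample) == 123456789);
    assert(convertX(sample) == 123456789);
    // convertSIMD expects the full 15-char HHMMSSxxxxxxxxx layout, so it isn't checked here.
}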

**Big edit:** I've replaced the code to reduce the effect of std::chrono on the measured time. The results are very different: 2700ms, 810ms, 670ms. On my laptop with an i7 8750H, gcc 9.3.0 with the -O3 flag, the results are 355, 387, 320ms.

Version 3 is decidedly faster, while version 2 is slower due to code size. But can we do better than version 3? (This turned out to be an invalid benchmark; see Edit 3.)

Edit 2: the function can return unsigned int instead of int, i.e.

unsigned convert1(char *a);

Edit 3: I noticed that the new code is an invalid benchmark, since convert(a) is only executed once. Using the original code, the difference is only ~1%.

Edit 4: New benchmark. Using unsigned (convert4u, convert6u) is consistently 3-5% faster than using int. I will run a long (10+ min) benchmark to see if there's a winner. I've edited the code above to use the new benchmark: it generates a large amount of data, then runs the converter functions.

Edit 5: results: 4.19, 4.51, 3.82, 3.59, 7.64, 3.72 seconds. The unsigned version is the fastest. Is it possible to use SIMD on just 9 bytes? If not, then I guess this is the best solution. I still hope there's a crazier solution, though.

Edit 6: benchmark results on an AMD Ryzen 4350G, gcc 10.3, compile command gcc -o main main.cpp -std=c++17 -O3 -mavx -mavx2 -march=native:

slow: 4.17794ns average
normal: 2.59945ns average
fast: 2.27917ns average
unsigned: 2.43814ns average
shifting: 4.72233ns average
reverse: 2.2274ns average
swar64: 2.17179ns average
manualSIMD: 1.55203ns average

The accepted answer does even more than the question requires, computing HH/MM/SS as well as nanoseconds, so it's even better than this benchmark shows.


Solution

  • Yes, SIMD is possible, as mentioned in comments. You can take advantage of it to parse the HH, MM, and SS parts of the string at the same time.

    Since you have a 100% fixed format with leading 0s where necessary, this is easier than How to implement atoi using SIMD? - Place-values are fixed and we don't need any compare / bit-scan or pcmpistri to look up a shuffle control mask or scale-factor. Also SIMD string to unsigned int parsing in C# performance improvement has some good ideas, like tweaking the place-value multipliers to avoid a step at the end (see the BMI2 version at the end of this answer which also uses that trick.)

    9 decimal digits is two dwords of input and one leftover byte that's probably best to grab separately.
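    For example, "123456789" splits into the dword chunks "1234" and "5678" plus the leftover '9', and recombines as (1234 * 10000 + 5678) * 10 + 9 = 123456789.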

    Assuming you care about throughput (ability to overlap this with surrounding code, or do this in a loop on independent elements) moreso than critical path latency in cycles from input pointer and data in memory being ready to nanoseconds integer being ready, SSSE3 SIMD should be very good on modern x86. (With SSE4.1 being useful if you want to unpack your hours, minutes, seconds into contiguous uint32_t elements e.g. in a struct). It might be competitive on latency, too, vs. scalar.

    Fun fact: clang auto-vectorizes your convert2 / convert3 functions, widening to 8x dword in a YMM register for vpmulld (2 uops), then a chain of shuffle/add.

    The strategy is to use pmaddubsw and pmaddwd to multiply-and-add pairs horizontally, in a way that gets each digit multiplied by its place value. e.g. 10 and 1 pairs, then 100 and 1 for pairs of integers that come from double-digits. Then extract to scalar for the last pair: multiply the most-significant part by 100 * 100, and add to the least-significant part. I'm pretty sure overflow is impossible at any step for inputs that are actually '0'..'9'. This runs and compiles to the asm I expected, but I didn't verify the numeric results.

    // See also an updated version using RORX as discussed in comments
    #include <immintrin.h>
    
    typedef struct {             // for output into memory
        alignas(16) unsigned hours;
        unsigned minutes, seconds, nanos;
    } hmsn;
    
    void str2hmsn(hmsn *out, const char str[15])  // HHMMSSXXXXXXXXX  15 total, with 9-digit nanoseconds.
    {    // 15 not including the terminating 0 (if any) which we don't read
        //hmsn retval;
        __m128i digs = _mm_loadu_si128((const __m128i*)str);
        digs = _mm_sub_epi8( digs, _mm_set1_epi8('0') );
        __m128i hms_x_words = _mm_maddubs_epi16( digs, _mm_set1_epi16( 10U + (1U<<8) ));   // SSSE3  pairs of digits => 10s, 1s places.
    
        __m128i hms_unpacked = _mm_cvtepu16_epi32(hms_x_words);                           // SSE4.1  hours, minutes, seconds unpack from uint16_t to uint32
        //_mm_storeu_si128((__m128i*)&retval, hms_unpacked);                                  // store first 3 struct members; last to be written separately
        _mm_storeu_si128((__m128i*)out, hms_unpacked);
        // or scalar extract with _mm_cvtsi128_si64 (movq) and shift / movzx
    
        __m128i xwords = _mm_bsrli_si128(hms_x_words, 6);  // would like to schedule this sooner, so oldest-uop-first starts this critical path shuffle ahead of pmovzx
        // 8 bytes of data, lined up in low 2 dwords, rather than split across high 3
        // could have got here with an 8-byte load that starts here, if we didn't want to get the H,M,S integers cheaply.
    
        __m128i xdwords = _mm_madd_epi16(xwords, _mm_setr_epi16(100, 1, 100, 1,  0,0,0,0));   // low/high uint32 chunks, discard the 9th x digit.
        uint64_t pair32 = _mm_cvtsi128_si64(xdwords);
        uint32_t msd = 100*100 * (uint32_t)pair32;     // most significant dword was at lower address (in printing order), so low half on little-endian x86.  encourage compilers to use 32-bit operand-size for imul
        uint32_t first8_x = msd + (uint32_t)(pair32 >> 32);
        uint32_t nanos = first8_x * 10 + ((unsigned char)str[14] - '0');   // total*10 + lowest digit
        out->nanos = nanos;
        //retval.nanos = nanos;
        //return retval;
    
      // returning the struct by value encourages compilers in the wrong direction
      // into not doing separate stores, even when inlining into a function that assigns the whole struct to a pointed-to output
    }
    

    On Godbolt with a test loop that uses asm("" ::"m"(sink): "memory" ) to make the compiler redo the work in a loop. Or a std::atomic_thread_fence(acq_rel) hack that gets MSVC to not optimize away the loop either. On my i7-6700k with GCC 11.1, x86-64 GNU/Linux, energy_performance_preference = performance, I got this to run at one iteration per 5 cycles.

    IDK why it doesn't run at one per 4c; I tweaked GCC options to avoid the JCC erratum slowdown without padding, and to have the loop in hopefully 4 uop cache lines. (6 uops, 1 uop ended by a 32B boundary, 6 uops, 2 uops ended by the dec/jnz). Perf counters say the front-end was "ok", and uops_dispatched_port shows all 4 ALU ports at less than 4 uops per iteration, highest being port0 at 3.34. Manually padding the early instructions gets it down to 3 total lines, of 3, 6, 6 uops but still no improvement from 5c per iter, so I guess the front-end really is ok.

    LLVM-MCA seems very ambitious in projecting 3c per iter, apparently based on a wrong model of Skylake with a "dispatch" (front-end rename I think) width of 6. Even with -mcpu=haswell with a proper 4-wide model it projects 4.5c. (I used asm("# LLVM-MCA-BEGIN") etc. macros on Godbolt and included an LLVM-MCA output window for the test loop.) It doesn't have fully accurate uop->port mapping, apparently not knowing about slow-LEA running only on port 1, but IDK if that's significant.

    Throughput may be limited by the ability to find instruction-level parallelism and overlap across several iterations, as in Understanding the impact of lfence on a loop with two long dependency chains, for increasing lengths.

    The test loop is:

    #include <stdlib.h>
    #ifndef __cplusplus
    #include <stdalign.h>
    #endif
    #include <stdint.h>
    
    #if 1 && defined(__GNUC__)
    #define LLVM_MCA_BEGIN  asm("# LLVM-MCA-BEGIN")
    #define LLVM_MCA_END  asm("# LLVM-MCA-END")
    #else
    #define LLVM_MCA_BEGIN
    #define LLVM_MCA_END
    #endif
    
    
    #if defined(__cplusplus)
        #include <atomic>
        using std::atomic_thread_fence, std::memory_order_acq_rel;
    #else
        #include <stdatomic.h>
    #endif
    
    unsigned testloop(const char str[15]){
        hmsn sink;
        for (int i=0 ; i<1000000000 ; i++){
            LLVM_MCA_BEGIN;
            str2hmsn(&sink, str);
            // compiler memory barrier 
            // force materializing the result, and forget about the input string being the same
    #ifdef __GNUC__
            asm volatile("" ::"m"(sink): "memory");
    #else
      //#warning happens to be enough with current MSVC
            atomic_thread_fence(memory_order_acq_rel); // strongest barrier that doesn't require any asm instructions on x86; MSVC defeats signal_fence.
    #endif
        }
        LLVM_MCA_END;
        volatile unsigned dummy = sink.hours + sink.nanos;  // make sure both halves are really used, else MSVC optimizes.
        return dummy;
    }
    
    
    
    int main(int argc, char *argv[])
    {
        // performance isn't data-dependent, so just use a handy string.
        // alignas(16) static char str[] = "235959123456789";
        uintptr_t p = (uintptr_t)argv[0];
        p &= -16;
        return testloop((char*)p);   // argv[0] apparently has a cache-line split within 16 bytes on my system, worsening from 5c throughput to 6.12c
    }
    

    I compiled as follows, to squeeze the loop in so it ends before the 32-byte boundary it's almost hitting. Note that -march=haswell allows it to use AVX encodings, saving an instruction or two.

    $ g++ -fno-omit-frame-pointer -fno-stack-protector -falign-loops=16 -O3 -march=haswell foo.c -masm=intel
    $ objdump -drwC -Mintel a.out | less
    
    ...
    0000000000001190 <testloop(char const*)>:
      1190:   55                    push   rbp
      1191:   b9 00 ca 9a 3b        mov    ecx,0x3b9aca00
      1196:   48 89 e5              mov    rbp,rsp
      1199:   c5 f9 6f 25 6f 0e 00 00    vmovdqa xmm4,[rip+0xe6f]        # 2010
      11a1:   c5 f9 6f 15 77 0e 00 00    vmovdqa xmm2, [rip+0xe77]        # 2020  # vector constants hoisted
      11a9:   c5 f9 6f 0d 7f 0e 00 00    vmovdqa xmm1, [rip+0xe7f]        # 2030
      11b1:   66 66 2e 0f 1f 84 00 00 00 00 00      data16 cs nop WORD PTR [rax+rax*1+0x0]
      11bc:   0f 1f 40 00        nop    DWORD PTR [rax+0x0]
    ### Top of loop is 16-byte aligned here, instead of ending up with 8 byte default
      11c0:   c5 d9 fc 07        vpaddb xmm0,xmm4, [rdi]
      11c4:   c4 e2 79 04 c2     vpmaddubsw xmm0,xmm0,xmm2
      11c9:   c4 e2 79 33 d8     vpmovzxwd xmm3,xmm0
      11ce:   c5 f9 73 d8 06     vpsrldq xmm0,xmm0,0x6
      11d3:   c5 f9 f5 c1        vpmaddwd xmm0,xmm0,xmm1
      11d7:   c5 f9 7f 5d f0     vmovdqa [rbp-0x10],xmm3
      11dc:   c4 e1 f9 7e c0     vmovq  rax,xmm0
      11e1:   69 d0 10 27 00 00  imul   edx,eax,0x2710
      11e7:   48 c1 e8 20        shr    rax,0x20
      11eb:   01 d0              add    eax,edx
      11ed:   8d 14 80           lea    edx,[rax+rax*4]
      11f0:   0f b6 47 0e        movzx  eax,BYTE PTR [rdi+0xe]
      11f4:   8d 44 50 d0        lea    eax,[rax+rdx*2-0x30]
      11f8:   89 45 fc           mov    DWORD PTR [rbp-0x4],eax
      11fb:   ff c9              dec    ecx
      11fd:   75 c1              jne    11c0 <testloop(char const*)+0x30>
      # loop ends 1 byte before it would be a problem for the JCC erratum workaround
      11ff:   8b 45 fc              mov    eax,DWORD PTR [rbp-0x4]
    

    So GCC made the asm I had planned by hand before writing the intrinsics this way, using as few instructions as possible to optimize for throughput. (Clang favours latency in this loop, using a separate add instead of a 3-component LEA).

    This is faster than any of the scalar versions that just parse X, and it's parsing HH, MM, and SS as well. Clang's auto-vectorization of convert3 may give this a run for its money in that department, although it strangely doesn't do that when inlining.

    GCC's scalar convert3 takes 8 cycles per iteration. clang's scalar convert3 in a loop takes 7, running at 4.0 fused-domain uops/clock, maxing out the front-end bandwidth and saturating port 1 with one imul uop per cycle. (This is reloading each byte with movzx and storing the scalar result to a stack local every iteration. But not touching the HHMMSS bytes.)

    $ taskset -c 3 perf stat --all-user -etask-clock,context-switches,cpu-migrations,page-faults,cycles,instructions,uops_issued.any,uops_executed.thread,idq.mite_uops,idq_uops_not_delivered.cycles_fe_was_ok -r1 ./a.out
    
     Performance counter stats for './a.out':
    
              1,221.82 msec task-clock                #    1.000 CPUs utilized          
                     0      context-switches          #    0.000 /sec                   
                     0      cpu-migrations            #    0.000 /sec                   
                   105      page-faults               #   85.937 /sec                   
         5,079,784,301      cycles                    #    4.158 GHz                    
        16,002,910,115      instructions              #    3.15  insn per cycle         
        15,004,354,053      uops_issued.any           #   12.280 G/sec                  
        18,003,922,693      uops_executed.thread      #   14.735 G/sec                  
             1,484,567      idq.mite_uops             #    1.215 M/sec                  
         5,079,431,697      idq_uops_not_delivered.cycles_fe_was_ok #    4.157 G/sec                  
    
           1.222107519 seconds time elapsed
    
           1.221794000 seconds user
           0.000000000 seconds sys
    

    Note that this is for 1G iterations, so 5.08G cycles means 5.08 cycles per iteration average throughput.

    Removing the extra work to produce the HHMMSS part of the output (vpsrldq, vpmovzxwd, and the vmovdqa store) and producing just the 9-digit integer part, it runs at 4.0 cycles per iteration on Skylake, or 3.5 without the scalar store at the end. (I edited GCC's asm output to comment out that instruction, so I know it's still doing all the work.)
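    In source form, a nanos-only variant along those lines might look something like the sketch below (the 4.0c / 3.5c numbers above came from editing the generated asm, not from benchmarking this exact function, and convert_nanos_simd is just a name for this sketch). It skips the vpmovzxwd unpack and the vector store, and uses an 8-byte load of the digit field directly so no byte-shift is needed:

    // needs <immintrin.h> and <stdint.h>, as above
    unsigned convert_nanos_simd(const char *a)   // a points at the 9 digits
    {
        __m128i digs = _mm_loadl_epi64((const __m128i*)a);          // movq load of the first 8 digits
        digs = _mm_sub_epi8(digs, _mm_set1_epi8('0'));
        __m128i pairs  = _mm_maddubs_epi16(digs, _mm_set1_epi16(10 + (1<<8)));               // SSSE3: 4x 2-digit values
        __m128i dwords = _mm_madd_epi16(pairs, _mm_setr_epi16(100, 1, 100, 1, 0, 0, 0, 0));  // 2x 4-digit values
        uint64_t pair32 = _mm_cvtsi128_si64(dwords);
        uint32_t first8 = 10000 * (uint32_t)pair32 + (uint32_t)(pair32 >> 32);   // digits 0..7
        return first8 * 10 + ((unsigned char)a[8] - '0');                        // append the 9th digit
    }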

    The fact that there's some kind of back-end bottleneck here (rather than front-end) is probably a good thing for overlapping this with independent work.


    Alternate version using BMI2 rorx

    @aqrit's answer on SIMD string to unsigned int parsing in C# performance improvement inspired a version that allows the remaining high * 2 part to be done as part of an LEA instead of a scalar ADD, using that movq strategy instead of pshufd/paddd. After coaxing GCC into emitting RORX to copy-and-extract instead of a braindead 2x vmovq r64, xmm0, that gets us down to 14 front-end uops (from 16), and from 18 unfused-domain uops down to 17. (Clang de-optimizes to mov+shr.) Godbolt

    // BMI2 version, compiles to efficient asm with GCC11
    void str2hmsn_rorx(hmsn *out, const char str[15])  // HHMMSSXXXXXXXXX  15 total, with 9-digit nanoseconds.
    {    // 15 not including the terminating 0 (if any) which we don't read
        __m128i digs = _mm_loadu_si128((const __m128i*)str);
        digs = _mm_sub_epi8( digs, _mm_set1_epi8('0') );
        const __m128i mul1 = _mm_set_epi16(0, 0x010A, 0x0A64, 0x14C8, 0x14C8 /* nanos 7 .. 0 */, 0x010A, 0x010A, 0x010A /* SS, MM, HH */);
        const __m128i mul2 = _mm_set_epi32(0, 0, 0x0001000A, 0x00FA61A8);  // extra scaling for the more-significant half baked in to save an imul
    
        //__m128i hms_x_words = _mm_maddubs_epi16( digs, _mm_set1_epi16( 10U + (1U<<8) ));   // SSSE3  pairs of digits => 10s, 1s places in printing order.
        __m128i hms_x_words = _mm_maddubs_epi16(mul1, digs);    // mul1 as the unsigned operand (first)
    
        // or scalar extract with _mm_cvtsi128_si64 (movq) instead of unpack, and shift / movzx
        __m128i hms_unpacked = _mm_cvtepu16_epi32(hms_x_words);             // SSE4.1 pmovxzwd   hours, minutes, seconds unpack from u16 to u32
        _mm_storeu_si128((__m128i*)out, hms_unpacked);
    
        __m128i xwords = _mm_bsrli_si128(hms_x_words, 6);  // would like to schedule this sooner, so oldest-uop-first starts this critical path shuffle ahead of pmovzx
        // 8 bytes of data, lined up in low 2 dwords, rather than split across high 3
        // could have got here with an 8-byte load that starts here, if we didn't want to get the H,M,S integers cheaply.
    
    //  __m128i xdwords = _mm_madd_epi16(xwords, _mm_setr_epi16(100, 1, 100, 1,  0,0,0,0));   // low/high uint32 chunks, discard the 9th x digit.
        __m128i xdwords = _mm_madd_epi16(xwords, mul2);   // low/high uint32 chunks, without the 9th x digit.
        uint64_t pair32 = _mm_cvtsi128_si64(xdwords);
    //  uint32_t msd = 100*100 * (uint32_t)pair32;     // most significant dword was at lower address (in printing order), so low half on little-endian x86.  encourage compilers to use 32-bit operand-size for imul
    //  uint32_t first8_x = msd + (uint32_t)(pair32 >> 32);
    //  uint32_t nanos = first8_x * 10 + ((unsigned char)str[14] - '0');   // total*10 + lowest digit
    
        uint32_t msd = 2 * (uint32_t)pair32;     // most significant dword was at lower address (in printing order), so low bits of qword on little-endian x86.
    //  uint32_t first8_x = msd + (uint32_t)(pair32 >> 32);
        uint32_t first8_x = msd + (uint32_t)_lrotr(pair32, 32);  // try to get it to use rorx to copy-and-extract the high half
        // FIXME: _rotr64 not available everywhere, but _lrotr is 32-bit on Windows.
    
        uint32_t nanos = first8_x * 10 + ((unsigned char)str[14] - '0');   // total*10 + lowest digit
        out->nanos = nanos;
    }
    

    (_lrotr requires GCC11 or later. And on Windows it's a 32-bit rotate. But _rotr64 isn't available everywhere. On earlier GCC, look for a different intrinsic or rotate idiom that convinces the compiler to use rorx dst, src, 32 instead of mov+shr.)
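    For example, a plain shift/OR idiom (a sketch; whether a particular GCC version actually picks rorx for it depends on the version and surrounding code) can stand in for _lrotr:

    static inline uint64_t rotate_halves(uint64_t x) {
        return (x >> 32) | (x << 32);   // a 64-bit rotate by 32; compilers generally recognize this pattern
    }
    // then:  uint32_t first8_x = msd + (uint32_t)rotate_halves(pair32);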

    Inlined into testloop() in the Godbolt link (which can hoist the constants out of the loop, but forces the work to happen repeatedly), uiCA (https://uica.uops.info/) predicts that Skylake could run it at one iteration per approximately 3.78 cycles, including a dec/jnz at the bottom of the loop and a store of the result, but no pointer increment. (uiCA is significantly more accurate than LLVM-MCA)

    Ice Lake / Rocket Lake might run this at one iter per 3.14 cycles.