# /lib/tuned/hpc-compute/tuned.conf
#
# tuned configuration
#

[main]
summary=Optimize for HPC compute workloads
description=Configures virtual memory, CPU governors, and network settings for HPC compute workloads.
include=latency-performance

[vm]
# Most HPC applications can take advantage of hugepages. Force them on.
transparent_hugepages=always

[disk]
# Increase the readahead value to support large, contiguous files.
readahead=>4096

[sysctl]
# Keep a reasonable amount of memory free to support large memory requests.
vm.min_free_kbytes=135168

# Most HPC applications are NUMA aware. Enabling zone reclaim ensures
# memory is reclaimed and reallocated from local pages. Disabling
# automatic NUMA balancing prevents unwanted memory unmapping.
vm.zone_reclaim_mode=1
kernel.numa_balancing=0

# Busy polling helps reduce latency in the network receive path by
# allowing socket-layer code to poll the receive queue of a network
# device and by disabling network interrupts.
# A busy_read value greater than 0 enables busy polling; the
# recommended net.core.busy_read value is 50.
# A busy_poll value greater than 0 enables polling globally; the
# recommended net.core.busy_poll value is 50.
net.core.busy_read=50
net.core.busy_poll=50

# TCP Fast Open reduces network latency by enabling data exchange
# during the sender's initial TCP SYN. The value 3 enables Fast Open
# on both client and server connections.
net.ipv4.tcp_fastopen=3
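
The profile is applied through the tuned daemon (for example, tuned-adm profile hpc-compute). Below is a minimal verification sketch in Python, assuming the profile is active and the standard /proc/sys and /sys paths on Linux; the script itself and its expected-value table are illustrative additions, not part of the profile.

#!/usr/bin/env python3
# Illustrative check: read back the kernel tunables this profile sets
# and compare them to the values in tuned.conf above.

from pathlib import Path

# Expected values copied from the [sysctl] section of this profile.
EXPECTED_SYSCTLS = {
    "vm/min_free_kbytes": "135168",
    "vm/zone_reclaim_mode": "1",
    "kernel/numa_balancing": "0",
    "net/core/busy_read": "50",
    "net/core/busy_poll": "50",
    "net/ipv4/tcp_fastopen": "3",
}

def check_sysctls() -> None:
    # Each sysctl key maps to a file under /proc/sys.
    for key, expected in EXPECTED_SYSCTLS.items():
        actual = (Path("/proc/sys") / key).read_text().strip()
        status = "OK" if actual == expected else "MISMATCH"
        print(f"{status:8} {key}: expected {expected}, got {actual}")

def check_transparent_hugepages() -> None:
    # The [vm] section sets transparent_hugepages=always; the active
    # choice appears in brackets, e.g. "[always] madvise never".
    text = Path("/sys/kernel/mm/transparent_hugepage/enabled").read_text()
    active = text[text.index("[") + 1 : text.index("]")]
    status = "OK" if active == "always" else "MISMATCH"
    print(f"{status:8} transparent_hugepage: {active}")

if __name__ == "__main__":
    check_sysctls()
    check_transparent_hugepages()

The disk readahead setting (readahead=>4096, meaning "raise to at least 4096") is applied per block device and is not covered by this sketch; it can be inspected under /sys/block/<device>/queue/read_ahead_kb.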