@prefix this: .
@prefix sub: .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix pav: <http://purl.org/pav/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdfg: <http://www.w3.org/2004/03/trix/rdfg-1/> .
@prefix dce: <http://purl.org/dc/elements/1.1/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix npx: <http://purl.org/nanopub/x/> .

sub:Head {
  this: a np:Nanopublication;
    np:hasAssertion sub:assertion;
    np:hasProvenance sub:provenance;
    np:hasPublicationInfo sub:pubinfo .
}

sub:assertion {
  a ; ""; "Applied sciences" .

  a ; "jeani@uio.no"; "Jean Iaquinta"; "0000-0002-8763-1643" .

  a , ; "01xtthb56"; "University of Oslo" .

  a .

  a , , , ;
    dct:doi "10.24424/zcq6-9r81";
    ;
    "False";
    ;
    "2024-01-05 15:11:39.987851+00:00";
    ;
    ;
    ;
    "45133"^^xsd:integer;
    "https://api.rohub.org/api/ros/97b0167c-0cb4-457d-abe8-41d1a9d1b981/crate/download/";
    ;
    ;
    "2024-01-05 14:14:55.022211+00:00";
    "2024-03-05 12:22:12.345762+00:00";
    "2024-01-05 14:14:55.022211+00:00";
    """The Ohio State University (OSU) Micro Benchmarks (OMB) are a widely used suite of benchmarks for measuring and evaluating the performance of MPI operations for point-to-point, multi-pair, and collective communications. These benchmarks are often used for comparing different Message Passing Interface (MPI) implementations and the underlying network interconnect. Here we use the OSU micro-benchmark (version 7.2) to assess the performance in terms of bandwidth achieved with an Apptainer container between 2 processors on different nodes with OpenMPI (version 4.1.6) on the Norwegian academic High Performance Computers (HPC) located in Tromsø (Fram) and Trondheim (Betzy).""";
    "application/ld+json";
    , , , , ;
    "https://w3id.org/ro-id/97b0167c-0cb4-457d-abe8-41d1a9d1b981";
    "Apptainer", "HPC", "MPI", "OSU", "Performance", "bandwidth", "container", "interconnect";
    ;
    "Dataset";
    "OSU MPI Get Bandwidth Test v7.2 with OpenMPI 4.1.6 on Fram & Betzy";
    ;
    ;
    "MANUAL";
    , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , ;
    "https://w3id.org/ro/terms/earth-science#DataCentricResearchObjectTemplate";
    "Iaquinta, Jean. \"OSU MPI Get Bandwidth Test v7.2 with OpenMPI 4.1.6 on Fram & Betzy.\" ROHub. Jan 05, 2024. https://doi.org/10.24424/zcq6-9r81." .

  a , ; ; "data" .
  a , ; "raw data" .
  a , ; ; "biblio" .
  a , ; "metadata" .

  a , , ;
    dct:doi "10.24424/7nkm-2072";
    ;
    "36301"^^xsd:integer;
    "https://api.rohub.org/api/resources/55d5e2f5-c395-4da5-a7d1-9621c480d0ef/download/";
    ;
    "2024-01-05 14:26:46.457298+00:00";
    "2024-01-05 15:11:39.077450+00:00";
    "Plot showing the bandwidth as a function of the message size on Fram and Betzy";
    "image/png";
    ;
    "OSU-2023Dec.png";
    "2024-01-05 14:26:46.457298+00:00" .

  a , , ;
    dct:doi "10.24424/pv5n-vq62";
    ;
    "1338"^^xsd:integer;
    "https://api.rohub.org/api/resources/abbe45f6-a4c4-4ec4-af82-79bb0a95440e/download/";
    ;
    "2024-01-05 14:33:02.319503+00:00";
    "2024-01-05 15:11:39.583041+00:00";
    "Output of the OSU MPI Get Bandwidth Test with OpenMPI 4.1.6 on Fram and Betzy";
    "text/csv";
    "Apptainer", "OpenMPI";
    ;
    "OSU7.2-Fram-Betzy";
    "2024-01-05 14:33:02.319503+00:00" .

  a ; dct:conformsTo ; .

  a ;
    "NICEST-2 - the second phase of the Nordic Collaboration on e-Infrastructures for Earth System Modeling focuses on strengthening the Nordic position within climate modeling by leveraging, reinforcing and complementing ongoing initiatives.";
    "Nordic Collaboration on e-Infrastructures for Earth System Modeling Tools";
    "https://w3id.org/ro-id/ed4e6aa2-9db8-452d-9301-ba1606361034" .

  a , , ;
    "2024-01-09 09:49:11.365811+00:00";
    ;
    prov:wasDerivedFrom .

  a ; "bandwidth"; "8.112874779541444"; "4.6" .
  a ; "Message Passing Interface"; "13.68421052631579"; "9.1" .
  a ; "Trondheim"; .
a ; "Osu micro-benchmark"; "47.61321909424724"; "38.9" . a ; "Ohio State University"; "14.586466165413531"; "9.7" . a ; "High Performance Computer"; "8.721804511278195"; "5.8" . a ; "network interconnect"; "11.627906976744185"; "9.5" . a ; "benchmark"; "22.045855379188712"; "12.5" . a ; "The Ohio State University (OSU) Micro Benchmarks (OMB) are a widely used suite of benchmarks for measuring and evaluating the performance of MPI operations for point-to-point, multi-pair, and collective communications."; "47.12525667351129"; "45.9" . a ; "bench mark"; "24.867724867724867"; "14.1" . a ; "Education"; "Education" . a ; "micro benchmark"; "21.052631578947366"; "17.2" . a ; "University"; "Education/School/Higher education/University" . a ; "MPI operation"; "12.607099143206854"; "10.3" . a ; "computer network"; "9.876543209876543"; "5.6" . a ; "benchmark"; "23.60902255639098"; "15.7" . a ; "interconnect"; "10.225563909774436"; "6.8" . a ; "earth sciences"; "100.0"; "0.4361959993839264" . a ; "computer programming and software"; "100.0"; "0.6090793609619141" . a ; "mathematical and computer sciences"; "100.0"; "0.6090793609619141" . a ; "information technology"; "22.65625"; "2.9" . a ; "interconnect"; "11.28747795414462"; "6.4" . a ; "different Message Passing Inerface"; "7.099143206854345"; "5.8" . a ; "Steeple chase"; "Sport/Competition discipline/Horse racing/Steeple chase" . a ; "micro"; "21.804511278195488"; "14.5" . a ; "Here we use the OSU micro-benchmark (version 7.2) to assess the performance in terms of bandwidth achieved with an Apptainer container between 2 processors on different nodes with OpenMPI (version 4.1.6) on the Norwegian academic High Performance Computers (HPC) located in Tromsø (Fram) and Trondheim (Betzy)"; "22.689938398357288"; "22.1" . a ; "atmospheric sciences"; "100.0"; "0.4361959993839264" . a ; "computer science"; "77.34375"; "9.9" . a ; "Tromsø"; . a ; "microcomputer"; "23.809523809523807"; "13.5" . a ; "Office of Management and Budget"; . a ; "network"; "7.36842105263158"; "4.9" . a ; "These benchmarks are often used for comparing different Message Passing Inerface (MPI) implementations and the underlying network interconnect."; "30.184804928131417"; "29.4" . a , , ; ; "https://www.osti.gov/servlets/purl/1997634"; ; "2024-01-17 10:54:09.114061+00:00"; "2024-01-17 10:54:10.405249+00:00"; """Abstract—Open MPI is an open-source implementation of the MPI-3 standard that is developed and maintained by collaborators from academia, industry, and national laboratories. Oak Ridge National Laboratory (ORNL) and Los Alamos National Laboratory (LANL) are collaborating on porting and optimizing Open MPI and related components for use on HPE Cray EX systems, with a focus on the DOE Frontier and Aurora exa-scale systems. A key component of this effort involves development of a new LinkX Open Fabrics Interface (OFI) provider. In this paper, we describe enhancements to Open MPI, OpenPMIx runtime components, and the LinkX OFI provider. Performance results are presented for point to point and collective communication operations using both the vendor CXI provider and the LinkX provider, including results obtained using GPU accelerators. Recommended deployment options for EX systems will be discussed, along with future work."""; "Slingshot 11", "libfabric"; ; "Open MPI for HPE Cray EX Systems"; "2024-01-17 10:54:09.114061+00:00" . } sub:provenance { sub:assertion prov:wasDerivedFrom . 
}

sub:pubinfo {
  this: a npx:RoCrateNanopub;
    dct:created "2026-03-03T15:15:44.181+01:00"^^xsd:dateTime;
    dct:creator ;
    npx:introduces ;
    rdfs:label "OSU MPI Get Bandwidth Test v7.2 with OpenMPI 4.1.6 on Fram & Betzy" .

  sub:sig npx:hasAlgorithm "RSA";
    npx:hasPublicKey "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxszSDYX5tuCSkP7UiCtftYPFNQVTjgNu0I5fwdML2DLRDlp0xzmsQXRk8oHuvwGvG1aMjj6cpUqO+0rz2Sg/wvHOgUpkRH8VJXvmlkhafMLCMtUtk5JIx7e+fkzCby+fnmD7kMkGLrT+OaExWwEDmNlCAt0TPKcHSdwsjso2isXjtAsGevyCMke8ufnFYpjs746JES1eNzVnHnn2Kp/lqcm60GM+J8dLgRZp7fX0anW098xhKym6+xXFzqeju0vYRIHBPerv+r7skWxwk+a7Sd8msqVeYEv6NTqnyWvyWb6Yh8cvj04N6qm/T6C5FUPLQhzSaQgMVMU6yLqjPuu9DwIDAQAB";
    npx:hasSignature "VKbxj4o74UWPK2L+apSYP/8k+oJZ8L5XAALPqMSTgDTBTjKa/I2TX5QLxA4efh0IGC8TvvX7EkH0z46LDbAwrZNeTaISBUMBMlyckrubIu3UD5ZcmMU/Po5/HJi6Vtb2emiqeM7sfDYwAdqOqersAvk6bn7OJQDPd6fLkWfyajRQnSl17QxCeiLhkGc20hB36mXXsRmrs+r7XgzjDDguC+8C47mxx65ppvVzYwpYM8vVx2lqnrXXt1nz/gZ0mT6GWJdPaXyljsg3BYi0J7GqzAI2NOE2WQxnp9JnMDD0A/PnUH5WRdQy5Qf2smY5zl8/Ek4MCOe5H6ok/nHbK4CM0w==";
    npx:hasSignatureTarget this:;
    npx:signedBy .
}
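For working with this record programmatically, here is a minimal sketch using the Python rdflib package. It assumes the complete signed nanopublication (including the IRIs that are stripped from the copy above) has been saved as nanopub.trig; the filename is illustrative, and rdflib is just one of several libraries that can parse TriG.

from rdflib import Dataset

# Parse the TriG serialization into an RDF dataset of named graphs.
ds = Dataset()
ds.parse("nanopub.trig", format="trig")

# A nanopublication bundles four named graphs, declared in sub:Head:
# Head, assertion, provenance, and pubinfo. Print each graph's IRI
# and the number of triples it contains.
for graph in ds.graphs():
    print(graph.identifier, len(graph))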