2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * physmem.c: Unix physical memory
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #include <vlib/unix/physmem.h>
/* Module-private singleton holding the physmem state (shm id, base
   address, size, heap) shared by all functions in this file. */
42 static physmem_main_t physmem_main;
/* Allocate n_bytes of DMA-able memory from the physmem heap, aligned to at
   least a cache line, such that the allocation never straddles a physical
   page ("DMA chunk") boundary.  Candidate allocations that do straddle a
   boundary are parked on a to-free list and returned to the heap at the
   end.  NOTE(review): this extraction elides interior lines (the retry
   loop, tail of the parameter list, and early-exit paths are not visible),
   so the control flow below is partial. */
45 unix_physmem_alloc_aligned (vlib_physmem_main_t * vpm, uword n_bytes,
48 physmem_main_t *pm = &physmem_main;
49 uword lo_offset, hi_offset;
/* presumably fired when allocating before the heap is set up —
   TODO confirm against the elided condition guarding this line. */
53 clib_warning ("unsafe alloc!");
56 /* IO memory is always at least cache aligned. */
57 alignment = clib_max (alignment, CLIB_CACHE_LINE_BYTES);
61 mheap_get_aligned (pm->heap, n_bytes,
62 /* align */ alignment,
66 /* Allocation failed? */
70 /* Make sure allocation does not span DMA physical chunk boundary. */
/* hi_offset is the offset of the LAST byte, hence the -1. */
71 hi_offset = lo_offset + n_bytes - 1;
/* Same page index for first and last byte => allocation fits within a
   single physical page and is acceptable (success path elided here). */
73 if ((lo_offset >> vpm->log2_n_bytes_per_page) ==
74 (hi_offset >> vpm->log2_n_bytes_per_page))
77 /* Allocation would span chunk boundary, queue it to be freed as soon as
78 we find suitable chunk. */
79 vec_add1 (to_free, lo_offset);
/* Give the rejected boundary-spanning allocations back to the heap. */
85 for (i = 0; i < vec_len (to_free); i++)
86 mheap_put (pm->heap, to_free[i]);
/* ~0 means "no suitable chunk found"; otherwise convert the heap offset
   back into a usable pointer. */
90 return lo_offset != ~0 ? pm->heap + lo_offset : 0;
/* Free an object previously returned by unix_physmem_alloc_aligned.
   The heap tracks objects by offset, so convert the pointer back to an
   offset from the heap base before returning it. */
94 unix_physmem_free (void *x)
96 physmem_main_t *pm = &physmem_main;
98 /* Return object to region's heap. */
99 mheap_put (pm->heap, x - pm->heap);
/* atexit handler (registered in htlb_init): mark the hugepage shared
   memory segment for destruction so it is reclaimed when the process
   exits.  NOTE(review): the function header is elided in this extraction;
   only the body lines are visible. */
105 physmem_main_t *pm = &physmem_main;
109 shmctl (pm->shmid, IPC_RMID, 0);
113 /* try to use huge TLB pgs if possible */
/* Back the physmem region with a SysV SHM_HUGETLB segment, build the
   virtual->physical page table by reading /proc/self/pagemap, and carve a
   non-VM mheap out of the segment.  NOTE(review): interior lines (return
   type, error-path returns, fd/pagesize setup) are elided in this
   extraction. */
115 htlb_init (vlib_main_t * vm)
117 vlib_physmem_main_t *vpm = &vm->physmem_main;
118 physmem_main_t *pm = &physmem_main;
119 u64 hugepagesize, pagesize;
121 u64 cur, physaddr, ptbits;
/* Fixed IPC key 11; hugepage-backed, read/write segment. */
124 pm->shmid = shmget (11 /* key, my amp goes to 11 */ , pm->mem_size,
125 IPC_CREAT | SHM_HUGETLB | SHM_R | SHM_W);
128 clib_unix_warning ("shmget");
132 pm->mem = shmat (pm->shmid, NULL, 0 /* flags */ );
/* Mark segment for destruction on the shmat failure path (elided). */
135 shmctl (pm->shmid, IPC_RMID, 0);
/* Touch every page so physical pages are actually populated before we
   query their addresses below. */
139 memset (pm->mem, 0, pm->mem_size);
141 /* $$$ get page size info from /proc/meminfo */
/* NOTE(review): hard-coded 2 MB hugepage size; wrong on systems using
   e.g. 1 GB hugepages — the $$$ comment above acknowledges this. */
142 hugepagesize = 2 << 20;
144 vpm->log2_n_bytes_per_page = min_log2 (hugepagesize);
/* One page-table slot per hugepage in the region. */
145 vec_resize (vpm->page_table, pm->mem_size / hugepagesize);
147 vpm->page_mask = pow2_mask (vpm->log2_n_bytes_per_page);
148 vpm->virtual.start = pointer_to_uword (pm->mem);
149 vpm->virtual.size = pm->mem_size;
150 vpm->virtual.end = vpm->virtual.start + vpm->virtual.size;
/* pagemap gives one u64 of mapping info per virtual page of this
   process.  NOTE(review): fd is never closed in the visible lines —
   verify it is closed on the elided success path. */
152 fd = open ("/proc/self/pagemap", O_RDONLY);
/* open-failure path: detach the segment (error return elided). */
156 (void) shmdt (pm->mem);
160 pm->heap = mheap_alloc_with_flags (pm->mem, pm->mem_size,
161 /* Don't want mheap mmap/munmap with IO memory. */
162 MHEAP_FLAG_DISABLE_VM);
/* Walk the region one (system) page at a time, resolving each virtual
   page to its physical address via pagemap. */
164 cur = pointer_to_uword (pm->mem);
167 while (cur < pointer_to_uword (pm->mem) + pm->mem_size)
/* Virtual page frame number indexes the pagemap u64 array.
   NOTE(review): pagesize is assigned on an elided line — presumably
   getpagesize(); confirm. */
169 pfn = (u64) cur / pagesize;
170 seek_loc = pfn * sizeof (u64);
171 if (lseek (fd, seek_loc, SEEK_SET) != seek_loc)
173 clib_unix_warning ("lseek to 0x%llx", seek_loc);
/* Clean up the segment before bailing out of the page walk. */
174 shmctl (pm->shmid, IPC_RMID, 0);
178 if (read (fd, &ptbits, sizeof (ptbits)) != (sizeof (ptbits)))
180 clib_unix_warning ("read ptbits");
181 shmctl (pm->shmid, IPC_RMID, 0);
186 /* bits 0-54 are the physical page number */
/* 0x7fffffffffffff masks the low 55 bits, per the pagemap format. */
187 physaddr = (ptbits & 0x7fffffffffffffULL) * pagesize;
189 fformat (stderr, "pm: virtual 0x%llx physical 0x%llx\n",
191 vpm->page_table[i++] = physaddr;
/* Ensure the segment is destroyed when the process exits. */
196 atexit (htlb_shutdown);
/* Weak application hook: an app may override this to take charge of
   physmem setup itself.  The default below (body elided) presumably
   returns 0, meaning "no opinion, proceed with the built-in setup" —
   TODO confirm against the elided return. */
200 int vlib_app_physmem_init (vlib_main_t * vm,
201 physmem_main_t * pm, int) __attribute__ ((weak));
203 vlib_app_physmem_init (vlib_main_t * vm, physmem_main_t * pm, int x)
/* One-time physmem bring-up: install the alloc/free hooks on vm, let the
   application override via the weak hook, then try hugepages; on failure
   fall back to anonymous mmap "fake DMA" pages.  NOTE(review): interior
   lines (return statements, branch braces) are elided in this
   extraction. */
209 unix_physmem_init (vlib_main_t * vm, int physical_memory_required)
211 vlib_physmem_main_t *vpm = &vm->physmem_main;
212 physmem_main_t *pm = &physmem_main;
213 clib_error_t *error = 0;
215 /* Avoid multiple calls. */
216 if (vm->os_physmem_alloc_aligned)
219 vm->os_physmem_alloc_aligned = unix_physmem_alloc_aligned;
220 vm->os_physmem_free = unix_physmem_free;
/* Sentinel so the cleanup path below knows whether mmap succeeded. */
221 pm->mem = MAP_FAILED;
/* Default region size: 16 MB unless configured via "physmem size". */
223 if (pm->mem_size == 0)
224 pm->mem_size = 16 << 20;
226 /* OK, Mr. App, you tell us */
227 if (vlib_app_physmem_init (vm, pm, physical_memory_required))
/* Prefer hugepages unless disabled by the "no-huge" config knob. */
230 if (!pm->no_hugepages && htlb_init (vm))
232 fformat (stderr, "%s: use huge pages\n", __FUNCTION__);
/* Fallback: plain anonymous mapping — virtual addresses double as
   "physical" ones, which only works without real DMA hardware. */
237 mmap (0, pm->mem_size, PROT_READ | PROT_WRITE,
238 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
239 if (pm->mem == MAP_FAILED)
241 error = clib_error_return_unix (0, "mmap");
245 pm->heap = mheap_alloc (pm->mem, pm->mem_size);
247 /* Identity map with a single page. */
/* The whole region is treated as one giant "page" so no allocation can
   ever cross a page boundary. */
248 vpm->log2_n_bytes_per_page = min_log2 (pm->mem_size);
249 vec_add1 (vpm->page_table, pointer_to_uword (pm->mem));
251 vpm->page_mask = pow2_mask (vpm->log2_n_bytes_per_page);
252 vpm->virtual.start = pointer_to_uword (pm->mem);
253 vpm->virtual.size = pm->mem_size;
254 vpm->virtual.end = vpm->virtual.start + vpm->virtual.size;
257 fformat (stderr, "%s: use fake dma pages\n", __FUNCTION__);
/* Error path: undo the mapping if it had been established. */
262 if (pm->mem != MAP_FAILED)
263 munmap (pm->mem, pm->mem_size);
268 static clib_error_t *
/* CLI "show physmem": dump the physmem heap, or explain why there is
   nothing to show.  NOTE(review): the branch structure (likely an #if on
   the DPDK build plus a heap-null check) is elided in this extraction. */
269 show_physmem (vlib_main_t * vm,
270 unformat_input_t * input, vlib_cli_command_t * cmd)
/* presumably the DPDK-build branch, where physmem is owned by DPDK. */
273 vlib_cli_output (vm, "Not supported with DPDK drivers.");
275 physmem_main_t *pm = &physmem_main;
278 vlib_cli_output (vm, "%U", format_mheap, pm->heap, /* verbose */ 1);
280 vlib_cli_output (vm, "No physmem allocated.");
/* Register "show physmem" with the CLI. */
286 VLIB_CLI_COMMAND (show_physmem_command, static) = {
287 .path = "show physmem",
288 .short_help = "Show physical memory allocation",
289 .function = show_physmem,
293 static clib_error_t *
/* CLI "show affinity": print this process's CPU affinity mask as a
   compact list of ranges, e.g. "0-3,8".  NOTE(review): interior lines
   (declarations of set/s/i/rv, several braces) are elided. */
294 show_affinity (vlib_main_t * vm,
295 unformat_input_t * input, vlib_cli_command_t * cmd)
298 cpu_set_t *setp = &set;
/* -1 == "not currently inside a run of consecutive set bits". */
301 int first_set_bit_in_run = -1;
302 int last_set_bit_in_run = -1;
305 rv = sched_getaffinity (0 /* pid, 0 = this proc */ ,
306 sizeof (*setp), setp);
309 vlib_cli_output (vm, "Couldn't get affinity mask: %s\n",
/* NOTE(review): only CPUs 0-63 are examined, although cpu_set_t can
   represent many more — higher CPUs are silently omitted. */
314 for (i = 0; i < 64; i++)
316 if (CPU_ISSET (i, setp))
/* Start of a new run of consecutive set bits. */
318 if (first_set_bit_in_run == -1)
320 first_set_bit_in_run = i;
321 last_set_bit_in_run = i;
/* Emit the run start optimistically as "N-"; fixed up below if the
   run turns out to be a single CPU. */
324 s = format (s, "%d-", i);
/* Bit i set and adjacent to the current run: extend the run. */
329 if (i == (last_set_bit_in_run + 1))
330 last_set_bit_in_run = i;
/* Bit i clear while a run is open: close the run. */
335 if (first_set_bit_in_run != -1)
/* Single-CPU run: back out the trailing "-" (and the digits of the
   start value) that were emitted optimistically above. */
337 if (first_set_bit_in_run == (i - 1))
339 _vec_len (s) -= 2 + ((first_set_bit_in_run / 10));
341 s = format (s, "%d", last_set_bit_in_run);
342 first_set_bit_in_run = -1;
343 last_set_bit_in_run = -1;
/* A run still open after the loop ends at the last scanned CPU. */
348 if (first_set_bit_in_run != -1)
349 s = format (s, "%d", first_set_bit_in_run);
351 vlib_cli_output (vm, "Process runs on: %v", s);
/* Register "show affinity" with the CLI. */
356 VLIB_CLI_COMMAND (show_affinity_command, static) = {
357 .path = "show affinity",
358 .short_help = "Show process cpu affinity",
359 .function = show_affinity,
363 static clib_error_t *
/* CLI "set affinity": parse a comma-separated list of CPUs and CPU
   ranges (e.g. "0-3,8"), build a cpu_set_t, and apply it via
   sched_setaffinity.  NOTE(review): interior lines (declarations,
   CPU_SET calls inside the range loops, the error return) are elided. */
364 set_affinity (vlib_main_t * vm,
365 unformat_input_t * input, vlib_cli_command_t * cmd)
368 cpu_set_t *setp = &set;
/* Start from an empty set; only CPUs named on the command line run. */
373 memset (setp, 0, sizeof (*setp));
/* Range followed by a comma: more items follow. */
378 if (unformat (input, "%d-%d,", &first, &last))
/* NOTE(review): bound check is "> 64", so CPU 64 passes although the
   show command above only displays CPUs 0-63 — likely off-by-one. */
380 if (first > 64 || last > 64)
383 vlib_cli_output (vm, "range %d-%d invalid", first, last);
387 for (i = first; i <= last; i++)
/* Trailing range with no comma: last item on the line. */
391 else if (unformat (input, "%d-%d", &first, &last))
393 if (first > 64 || last > 64)
396 for (i = first; i <= last; i++)
/* Single CPU followed by a comma. */
399 else if (unformat (input, "%d,", &first))
404 vlib_cli_output (vm, "cpu %d invalid", first);
407 CPU_SET (first, setp);
/* Trailing single CPU. */
410 else if (unformat (input, "%d", &first))
415 CPU_SET (first, setp);
418 while (another_round);
420 rv = sched_setaffinity (0 /* pid, 0 = this proc */ ,
421 sizeof (*setp), setp);
/* NOTE(review): message says "get" but this is the set path — should
   read "Couldn't set affinity mask". */
425 vlib_cli_output (vm, "Couldn't get affinity mask: %s\n",
/* On success, echo the resulting mask back to the operator. */
429 return show_affinity (vm, input, cmd);
/* Register "set affinity" with the CLI. */
433 VLIB_CLI_COMMAND (set_affinity_command, static) = {
434 .path = "set affinity",
435 .short_help = "Set process cpu affinity",
436 .function = set_affinity,
440 static clib_error_t *
/* Early config handler for the "physmem" stanza: accepts "no-huge" /
   "no-huge-pages" to disable hugepage backing, and "size-in-mb N" /
   "size N" to set the region size in megabytes. */
441 vlib_physmem_configure (vlib_main_t * vm, unformat_input_t * input)
443 physmem_main_t *pm = &physmem_main;
446 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
448 if (unformat (input, "no-huge") || unformat (input, "no-huge-pages"))
449 pm->no_hugepages = 1;
451 else if (unformat (input, "size-in-mb %d", &size_in_mb) ||
452 unformat (input, "size %d", &size_in_mb))
/* NOTE(review): size_in_mb << 20 — presumably a 32-bit shift; values
   >= 4096 MB would overflow.  Confirm size_in_mb's (elided) type. */
453 pm->mem_size = size_in_mb << 20;
455 return unformat_parse_error (input);
458 unformat_free (input);
/* Run this handler early, before physmem init consumes mem_size. */
462 VLIB_EARLY_CONFIG_FUNCTION (vlib_physmem_configure, "physmem");
465 * fd.io coding-style-patch-verification: ON
468 * eval: (c-set-style "gnu")