/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

/**
 * @file
 * Simple slab allocator for equally sized memory allocations.
 * util_slab_alloc and util_slab_free have O(1) time complexity.
 *
 * Good for allocations with a very short lifetime that are allocated
 * and freed very often. Profile first to find out whether it's worth using!
 *
 * Candidates: transfer_map
 *
 * @author Marek Olšák
 */
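
/* A minimal usage sketch. The item type "my_item" and the block count of 64
 * are made-up placeholders; num_blocks is assumed (from the fields below) to
 * mean the number of blocks per page.
 *
 *    struct my_item { int data[16]; };
 *    struct util_slab_mempool pool;
 *    struct my_item *item;
 *
 *    util_slab_create(&pool, sizeof(struct my_item), 64,
 *                     UTIL_SLAB_SINGLETHREADED);
 *
 *    item = util_slab_alloc(&pool);
 *    ... use item ...
 *    util_slab_free(&pool, item);
 *
 *    util_slab_destroy(&pool);
 */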

#ifndef U_SLAB_H
#define U_SLAB_H

#include "os/os_thread.h"

enum util_slab_threading {
   UTIL_SLAB_SINGLETHREADED = FALSE,
   UTIL_SLAB_MULTITHREADED = TRUE
};

/* The page is an array of blocks (allocations). */
struct util_slab_page {
   /* The header (linked-list pointers). */
   struct util_slab_page *prev, *next;

   /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure. */
};
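
/* A rough picture of a page's memory layout, assuming (not verified against
 * u_slab.c) that each block starts with a small util_slab_block bookkeeping
 * header used for the free list, followed by the caller's data:
 *
 *    [util_slab_page header][block 0][block 1] ... [block num_blocks-1]
 *
 * Under that assumption, util_slab_alloc would hand out a pointer just past
 * a block's header.
 */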

struct util_slab_mempool {
   /* Public members. */
   void *(*alloc)(struct util_slab_mempool *pool);
   void (*free)(struct util_slab_mempool *pool, void *ptr);

   /* Private members. */
   struct util_slab_block *first_free;
   struct util_slab_page list;
   unsigned block_size;
   unsigned page_size;
   unsigned num_blocks;
   unsigned num_pages;
   enum util_slab_threading threading;
   pipe_mutex mutex;
};

void util_slab_create(struct util_slab_mempool *pool,
                      unsigned item_size,
                      unsigned num_blocks,
                      enum util_slab_threading threading);

void util_slab_destroy(struct util_slab_mempool *pool);

void util_slab_set_thread_safety(struct util_slab_mempool *pool,
                                 enum util_slab_threading threading);
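
/* A sketch of toggling thread safety after creation (hypothetical caller;
 * whether this swaps the alloc/free function pointers above between a
 * locking and a lock-free variant is an assumption about u_slab.c, not
 * something this header guarantees):
 *
 *    util_slab_set_thread_safety(&pool, UTIL_SLAB_MULTITHREADED);
 */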

static inline void *
util_slab_alloc(struct util_slab_mempool *pool)
{
   return pool->alloc(pool);
}

static inline void
util_slab_free(struct util_slab_mempool *pool, void *ptr)
{
   pool->free(pool, ptr);
}

#endif