|
19 | 19 | */
|
20 | 20 |
|
21 | 21 | #include "cbuf.h"
|
| 22 | +#include "esp32-hal-log.h" |
| 23 | + |
| 24 | +#if CONFIG_DISABLE_HAL_LOCKS |
| 25 | +#define CBUF_MUTEX_CREATE() |
| 26 | +#define CBUF_MUTEX_LOCK() |
| 27 | +#define CBUF_MUTEX_UNLOCK() |
| 28 | +#define CBUF_MUTEX_DELETE() |
| 29 | +#else |
| 30 | +#define CBUF_MUTEX_CREATE() if(_lock == NULL){_lock = xSemaphoreCreateMutex(); if(_lock == NULL){log_e("failed to create mutex");}} |
| 31 | +#define CBUF_MUTEX_LOCK() if(_lock != NULL){xSemaphoreTakeRecursive(_lock, portMAX_DELAY);} |
| 32 | +#define CBUF_MUTEX_UNLOCK() if(_lock != NULL){xSemaphoreGiveRecursive(_lock);} |
| 33 | +#define CBUF_MUTEX_DELETE() if(_lock != NULL){SemaphoreHandle_t l = _lock; _lock = NULL; vSemaphoreDelete(l);} |
| 34 | +#endif |
22 | 35 |
|
// Construct a circular buffer of `size` bytes backed by a FreeRTOS byte-type
// ring buffer. On allocation failure `_buf` stays NULL (logged here); the
// other methods all tolerate a NULL `_buf` and degrade to no-ops/zeroes.
cbuf::cbuf(size_t size) :
    next(NULL),
    has_peek(false),   // no byte cached by peek() yet
    peek_byte(0),
    _buf(xRingbufferCreate(size, RINGBUF_TYPE_BYTEBUF))
{
    if(_buf == NULL) {
        log_e("failed to allocate ring buffer");
    }
    // Lazily create the guarding mutex (no-op when HAL locks are disabled).
    CBUF_MUTEX_CREATE();
}
|
27 | 47 |
|
// Destructor: take the lock so no other task is mid-operation, release the
// ring buffer, then destroy the mutex itself.
// NOTE(review): deleting the mutex immediately after unlocking is racy if
// another task is still blocked on it — FreeRTOS requires that no task holds
// or waits on a semaphore when vSemaphoreDelete() runs; confirm callers
// guarantee exclusive access at destruction time.
cbuf::~cbuf()
{
    CBUF_MUTEX_LOCK();
    if(_buf != NULL){
        // Clear the handle before deleting so any concurrent reader that
        // checks `_buf != NULL` sees the buffer as gone.
        RingbufHandle_t b = _buf;
        _buf = NULL;
        vRingbufferDelete(b);
    }
    CBUF_MUTEX_UNLOCK();
    CBUF_MUTEX_DELETE();
}
|
32 | 59 |
|
33 | 60 | size_t cbuf::resizeAdd(size_t addSize)
|
34 | 61 | {
|
35 |
| - return resize(_size + addSize); |
| 62 | + return resize(size() + addSize); |
36 | 63 | }
|
37 | 64 |
|
38 | 65 | size_t cbuf::resize(size_t newSize)
|
39 | 66 | {
|
| 67 | + CBUF_MUTEX_LOCK(); |
| 68 | + size_t _size = size(); |
| 69 | + if(newSize == _size) { |
| 70 | + return _size; |
| 71 | + } |
40 | 72 |
|
41 |
| - size_t bytes_available = available(); |
42 |
| - newSize += 1; |
43 | 73 | // not lose any data
|
44 | 74 | // if data can be lost use remove or flush before resize
|
45 |
| - if((newSize < bytes_available) || (newSize == _size)) { |
| 75 | + size_t bytes_available = available(); |
| 76 | + if(newSize < bytes_available) { |
| 77 | + CBUF_MUTEX_UNLOCK(); |
| 78 | + log_e("new size is less than the currently available data size"); |
46 | 79 | return _size;
|
47 | 80 | }
|
48 | 81 |
|
49 |
| - char *newbuf = new char[newSize]; |
50 |
| - char *oldbuf = _buf; |
51 |
| - |
52 |
| - if(!newbuf) { |
| 82 | + RingbufHandle_t newbuf = xRingbufferCreate(newSize, RINGBUF_TYPE_BYTEBUF); |
| 83 | + if(newbuf == NULL) { |
| 84 | + CBUF_MUTEX_UNLOCK(); |
| 85 | + log_e("failed to allocate new ring buffer"); |
53 | 86 | return _size;
|
54 | 87 | }
|
55 | 88 |
|
56 |
| - if(_buf) { |
57 |
| - read(newbuf, bytes_available); |
58 |
| - memset((newbuf + bytes_available), 0x00, (newSize - bytes_available)); |
| 89 | + if(_buf != NULL) { |
| 90 | + if(bytes_available){ |
| 91 | + char * old_data = (char *)malloc(bytes_available); |
| 92 | + if(old_data == NULL){ |
| 93 | + vRingbufferDelete(newbuf); |
| 94 | + CBUF_MUTEX_UNLOCK(); |
| 95 | + log_e("failed to allocate temporary buffer"); |
| 96 | + return _size; |
| 97 | + } |
| 98 | + bytes_available = read(old_data, bytes_available); |
| 99 | + if(!bytes_available){ |
| 100 | + free(old_data); |
| 101 | + vRingbufferDelete(newbuf); |
| 102 | + CBUF_MUTEX_UNLOCK(); |
| 103 | + log_e("failed to read previous data"); |
| 104 | + return _size; |
| 105 | + } |
| 106 | + if(xRingbufferSend(newbuf, (void*)old_data, bytes_available, 0) != pdTRUE){ |
| 107 | + write(old_data, bytes_available); |
| 108 | + free(old_data); |
| 109 | + vRingbufferDelete(newbuf); |
| 110 | + CBUF_MUTEX_UNLOCK(); |
| 111 | + log_e("failed to restore previous data"); |
| 112 | + return _size; |
| 113 | + } |
| 114 | + free(old_data); |
| 115 | + } |
| 116 | + |
| 117 | + RingbufHandle_t b = _buf; |
| 118 | + _buf = newbuf; |
| 119 | + vRingbufferDelete(b); |
| 120 | + } else { |
| 121 | + _buf = newbuf; |
59 | 122 | }
|
60 |
| - |
61 |
| - _begin = newbuf; |
62 |
| - _end = newbuf + bytes_available; |
63 |
| - _bufend = newbuf + newSize; |
64 |
| - _size = newSize; |
65 |
| - |
66 |
| - _buf = newbuf; |
67 |
| - delete[] oldbuf; |
68 |
| - |
69 |
| - return _size; |
| 123 | + CBUF_MUTEX_UNLOCK(); |
| 124 | + return newSize; |
70 | 125 | }
|
71 | 126 |
|
72 | 127 | size_t cbuf::available() const
|
73 | 128 | {
|
74 |
| - if(_end >= _begin) { |
75 |
| - return _end - _begin; |
| 129 | + size_t available = 0; |
| 130 | + if(_buf != NULL){ |
| 131 | + vRingbufferGetInfo(_buf, NULL, NULL, NULL, NULL, (UBaseType_t *)&available); |
76 | 132 | }
|
77 |
| - return _size - (_begin - _end); |
| 133 | + if (has_peek) available++; |
| 134 | + return available; |
78 | 135 | }
|
79 | 136 |
|
80 | 137 | size_t cbuf::size()
|
81 | 138 | {
|
| 139 | + size_t _size = 0; |
| 140 | + if(_buf != NULL){ |
| 141 | + _size = xRingbufferGetMaxItemSize(_buf); |
| 142 | + } |
82 | 143 | return _size;
|
83 | 144 | }
|
84 | 145 |
|
85 | 146 | size_t cbuf::room() const
|
86 | 147 | {
|
87 |
| - if(_end >= _begin) { |
88 |
| - return _size - (_end - _begin) - 1; |
| 148 | + size_t _room = 0; |
| 149 | + if(_buf != NULL){ |
| 150 | + _room = xRingbufferGetCurFreeSize(_buf); |
89 | 151 | }
|
90 |
| - return _begin - _end - 1; |
| 152 | + return _room; |
| 153 | +} |
| 154 | + |
| 155 | +bool cbuf::empty() const |
| 156 | +{ |
| 157 | + return available() == 0; |
| 158 | +} |
| 159 | + |
| 160 | +bool cbuf::full() const |
| 161 | +{ |
| 162 | + return room() == 0; |
91 | 163 | }
|
92 | 164 |
|
// Return the next readable byte without consuming it, or -1 when empty.
// Because the FreeRTOS ring buffer has no non-destructive peek, the byte is
// actually read out once and cached in `peek_byte`; read() hands it back
// before touching the ring buffer again.
// NOTE(review): the available() check runs before the lock is taken, so
// another task could drain the buffer in between — confirm the subsequent
// read() returning -1 is an acceptable result on that race.
int cbuf::peek()
{
    if (!available()) {
        return -1;
    }

    int c;

    CBUF_MUTEX_LOCK();
    if (has_peek) {
        // A byte is already cached from an earlier peek().
        c = peek_byte;
    } else {
        // Consume one byte from the ring buffer and stash it for read().
        c = read();
        if (c >= 0) {
            has_peek = true;
            peek_byte = c;
        }
    }
    CBUF_MUTEX_UNLOCK();
    return c;
}
|
118 | 186 |
|
119 | 187 | int cbuf::read()
|
120 | 188 | {
|
121 |
| - if(empty()) { |
| 189 | + char result = 0; |
| 190 | + if(!read(&result, 1)){ |
122 | 191 | return -1;
|
123 | 192 | }
|
124 |
| - |
125 |
| - char result = *_begin; |
126 |
| - _begin = wrap_if_bufend(_begin + 1); |
127 | 193 | return static_cast<int>(result);
|
128 | 194 | }
|
129 | 195 |
|
// Read up to `size` bytes into `dst` (pass dst == NULL to discard bytes).
// Returns the number of bytes consumed, 0 when the buffer is empty or
// `size` is 0. Serves the peek()-cached byte first, then drains the ring
// buffer — possibly in two xRingbufferReceiveUpTo() calls, because a
// byte-type ring buffer returns data only up to its internal wrap point.
size_t cbuf::read(char* dst, size_t size)
{
    CBUF_MUTEX_LOCK();
    size_t bytes_available = available();
    if(!bytes_available || !size){
        CBUF_MUTEX_UNLOCK();
        return 0;
    }

    // Hand out the cached peek byte first; it was already removed from the
    // ring buffer by peek().
    if (has_peek) {
        if (dst != NULL) {
            *dst++ = peek_byte;
        }
        size--;
    }

    size_t size_read = 0;
    if (size) {
        size_t received_size = 0;
        // NOTE(review): bytes_available still includes the peek byte here, so
        // size_to_read can exceed the ring contents by one; ReceiveUpTo just
        // returns fewer bytes in that case — confirm this is intended.
        size_t size_to_read = (size < bytes_available) ? size : bytes_available;
        uint8_t *received_buff = (uint8_t *)xRingbufferReceiveUpTo(_buf, &received_size, 0, size_to_read);
        if (received_buff != NULL) {
            if(dst != NULL){
                memcpy(dst, received_buff, received_size);
            }
            vRingbufferReturnItem(_buf, received_buff);
            size_read = received_size;
            size_to_read -= received_size;
            // wrap around data
            if(size_to_read){
                received_size = 0;
                received_buff = (uint8_t *)xRingbufferReceiveUpTo(_buf, &received_size, 0, size_to_read);
                if (received_buff != NULL) {
                    if(dst != NULL){
                        memcpy(dst+size_read, received_buff, received_size);
                    }
                    vRingbufferReturnItem(_buf, received_buff);
                    size_read += received_size;
                } else {
                    log_e("failed to read wrap around data from ring buffer");
                }
            }
        } else {
            log_e("failed to read from ring buffer");
        }
    }

    // Count the peek byte in the returned total and clear the cache.
    if (has_peek) {
        has_peek = false;
        size_read++;
    }

    CBUF_MUTEX_UNLOCK();
    return size_read;
}
|
146 | 251 |
|
147 | 252 | size_t cbuf::write(char c)
|
148 | 253 | {
|
149 |
| - if(full()) { |
150 |
| - return 0; |
151 |
| - } |
152 |
| - |
153 |
| - *_end = c; |
154 |
| - _end = wrap_if_bufend(_end + 1); |
155 |
| - return 1; |
| 254 | + return write(&c, 1); |
156 | 255 | }
|
157 | 256 |
|
// Append up to `size` bytes from `src`, truncating to the free space
// available. Returns the number of bytes actually written (0 when the
// buffer is full, `size` is 0, or the ring-buffer send fails).
size_t cbuf::write(const char* src, size_t size)
{
    CBUF_MUTEX_LOCK();
    size_t bytes_available = room();
    if(!bytes_available || !size){
        CBUF_MUTEX_UNLOCK();
        return 0;
    }
    // Clamp to free space so the non-blocking send (timeout 0) cannot fail
    // for lack of room.
    size_t size_to_write = (size < bytes_available) ? size : bytes_available;
    if(xRingbufferSend(_buf, (void*)src, size_to_write, 0) != pdTRUE){
        CBUF_MUTEX_UNLOCK();
        log_e("failed to write to ring buffer");
        return 0;
    }
    CBUF_MUTEX_UNLOCK();
    return size_to_write;
}
|
174 | 274 |
|
175 | 275 | void cbuf::flush()
|
176 | 276 | {
|
177 |
| - _begin = _buf; |
178 |
| - _end = _buf; |
| 277 | + read(NULL, available()); |
179 | 278 | }
|
180 | 279 |
|
181 | 280 | size_t cbuf::remove(size_t size)
|
182 | 281 | {
|
| 282 | + CBUF_MUTEX_LOCK(); |
183 | 283 | size_t bytes_available = available();
|
184 |
| - if(size >= bytes_available) { |
185 |
| - flush(); |
186 |
| - return 0; |
187 |
| - } |
188 |
| - size_t size_to_remove = (size < bytes_available) ? size : bytes_available; |
189 |
| - if(_end < _begin && size_to_remove > (size_t) (_bufend - _begin)) { |
190 |
| - size_t top_size = _bufend - _begin; |
191 |
| - _begin = _buf; |
192 |
| - size_to_remove -= top_size; |
| 284 | + if(bytes_available && size){ |
| 285 | + size_t size_to_remove = (size < bytes_available) ? size : bytes_available; |
| 286 | + bytes_available -= read(NULL, size_to_remove); |
193 | 287 | }
|
194 |
| - _begin = wrap_if_bufend(_begin + size_to_remove); |
195 |
| - return available(); |
| 288 | + CBUF_MUTEX_UNLOCK(); |
| 289 | + return bytes_available; |
196 | 290 | }
|
0 commit comments