Skip to content

Remove usage of register keyword in Extension Modules #26264

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
May 5, 2019
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions doc/source/whatsnew/v0.25.0.rst
Original file line number Diff line number Diff line change
Expand Up @@ -427,6 +427,7 @@ Other
^^^^^

- Removed unused C functions from vendored UltraJSON implementation (:issue:`26198`)
- Removed usage of ``register`` keyword in C extension modules (:issue:`26263`)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not necessary, as the introduction was in 0.25 (you can add this issue number to the other note if you want)



.. _whatsnew_0.250.contributors:
Expand Down
48 changes: 24 additions & 24 deletions pandas/_libs/src/parser/tokenizer.c
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ void coliter_setup(coliter_t *self, parser_t *parser, int i, int start) {
self->line_start = parser->line_start + start;
}

coliter_t *coliter_new(register parser_t *self, int i) {
coliter_t *coliter_new(parser_t *self, int i) {
// column i, starting at 0
coliter_t *iter = (coliter_t *)malloc(sizeof(coliter_t));

Expand Down Expand Up @@ -97,7 +97,7 @@ static void *grow_buffer(void *buffer, int64_t length, int64_t *capacity,
return newbuffer;
}

void parser_set_default_options(register parser_t *self) {
void parser_set_default_options(parser_t *self) {
self->decimal = '.';
self->sci = 'E';

Expand Down Expand Up @@ -131,11 +131,11 @@ void parser_set_default_options(register parser_t *self) {
self->skip_footer = 0;
}

int get_parser_memory_footprint(register parser_t *self) { return 0; }
int get_parser_memory_footprint(parser_t *self) { return 0; }

parser_t *parser_new() { return (parser_t *)calloc(1, sizeof(parser_t)); }

int parser_clear_data_buffers(register parser_t *self) {
int parser_clear_data_buffers(parser_t *self) {
free_if_not_null((void *)&self->stream);
free_if_not_null((void *)&self->words);
free_if_not_null((void *)&self->word_starts);
Expand All @@ -144,7 +144,7 @@ int parser_clear_data_buffers(register parser_t *self) {
return 0;
}

int parser_cleanup(register parser_t *self) {
int parser_cleanup(parser_t *self) {
int status = 0;

// XXX where to put this
Expand All @@ -170,7 +170,7 @@ int parser_cleanup(register parser_t *self) {
return status;
}

int parser_init(register parser_t *self) {
int parser_init(parser_t *self) {
int64_t sz;

/*
Expand Down Expand Up @@ -240,16 +240,16 @@ int parser_init(register parser_t *self) {
return 0;
}

void parser_free(register parser_t *self) {
void parser_free(parser_t *self) {
// opposite of parser_init
parser_cleanup(self);
}

void parser_del(register parser_t *self) {
void parser_del(parser_t *self) {
free(self);
}

static int make_stream_space(register parser_t *self, size_t nbytes) {
static int make_stream_space(parser_t *self, size_t nbytes) {
int64_t i, cap, length;
int status;
void *orig_ptr, *newptr;
Expand Down Expand Up @@ -363,7 +363,7 @@ static int make_stream_space(register parser_t *self, size_t nbytes) {
return 0;
}

static int push_char(register parser_t *self, char c) {
static int push_char(parser_t *self, char c) {
TRACE(("push_char: self->stream[%zu] = %x, stream_cap=%zu\n",
self->stream_len + 1, c, self->stream_cap))
if (self->stream_len >= self->stream_cap) {
Expand All @@ -381,7 +381,7 @@ static int push_char(register parser_t *self, char c) {
return 0;
}

int PANDAS_INLINE end_field(register parser_t *self) {
int PANDAS_INLINE end_field(parser_t *self) {
// XXX cruft
if (self->words_len >= self->words_cap) {
TRACE(
Expand Down Expand Up @@ -419,7 +419,7 @@ int PANDAS_INLINE end_field(register parser_t *self) {
return 0;
}

static void append_warning(register parser_t *self, const char *msg) {
static void append_warning(parser_t *self, const char *msg) {
int64_t ex_length;
int64_t length = strlen(msg);
void *newptr;
Expand All @@ -437,7 +437,7 @@ static void append_warning(register parser_t *self, const char *msg) {
}
}

static int end_line(register parser_t *self) {
static int end_line(parser_t *self) {
char *msg;
int64_t fields;
int ex_fields = self->expected_fields;
Expand Down Expand Up @@ -556,7 +556,7 @@ static int end_line(register parser_t *self) {
return 0;
}

int parser_add_skiprow(register parser_t *self, int64_t row) {
int parser_add_skiprow(parser_t *self, int64_t row) {
khiter_t k;
kh_int64_t *set;
int ret = 0;
Expand All @@ -573,7 +573,7 @@ int parser_add_skiprow(register parser_t *self, int64_t row) {
return 0;
}

int parser_set_skipfirstnrows(register parser_t *self, int64_t nrows) {
int parser_set_skipfirstnrows(parser_t *self, int64_t nrows) {
// self->file_lines is zero based so subtract 1 from nrows
if (nrows > 0) {
self->skip_first_N_rows = nrows - 1;
Expand All @@ -582,7 +582,7 @@ int parser_set_skipfirstnrows(register parser_t *self, int64_t nrows) {
return 0;
}

static int parser_buffer_bytes(register parser_t *self, size_t nbytes) {
static int parser_buffer_bytes(parser_t *self, size_t nbytes) {
int status;
size_t bytes_read;

Expand Down Expand Up @@ -708,7 +708,7 @@ static int parser_buffer_bytes(register parser_t *self, size_t nbytes) {
self->datapos += 3; \
}

int skip_this_line(register parser_t *self, int64_t rownum) {
int skip_this_line(parser_t *self, int64_t rownum) {
int should_skip;
PyObject *result;
PyGILState_STATE state;
Expand Down Expand Up @@ -737,7 +737,7 @@ int skip_this_line(register parser_t *self, int64_t rownum) {
}
}

int tokenize_bytes(register parser_t *self,
int tokenize_bytes(parser_t *self,
size_t line_limit, int64_t start_lines) {
int64_t i, slen;
int should_skip;
Expand Down Expand Up @@ -1159,7 +1159,7 @@ int tokenize_bytes(register parser_t *self,
return 0;
}

static int parser_handle_eof(register parser_t *self) {
static int parser_handle_eof(parser_t *self) {
int64_t bufsize = 100;

TRACE(
Expand Down Expand Up @@ -1204,7 +1204,7 @@ static int parser_handle_eof(register parser_t *self) {
return 0;
}

int parser_consume_rows(register parser_t *self, size_t nrows) {
int parser_consume_rows(parser_t *self, size_t nrows) {
int64_t i, offset, word_deletions, char_count;

if (nrows > self->lines) {
Expand Down Expand Up @@ -1260,7 +1260,7 @@ static size_t _next_pow2(size_t sz) {
return result;
}

int parser_trim_buffers(register parser_t *self) {
int parser_trim_buffers(parser_t *self) {
/*
Free memory
*/
Expand Down Expand Up @@ -1363,7 +1363,7 @@ int parser_trim_buffers(register parser_t *self) {
all : tokenize all the data vs. certain number of rows
*/

int _tokenize_helper(register parser_t *self, size_t nrows, int all) {
int _tokenize_helper(parser_t *self, size_t nrows, int all) {
int status = 0;
int64_t start_lines = self->lines;

Expand Down Expand Up @@ -1412,12 +1412,12 @@ int _tokenize_helper(register parser_t *self, size_t nrows, int all) {
return status;
}

int tokenize_nrows(register parser_t *self, size_t nrows) {
int tokenize_nrows(parser_t *self, size_t nrows) {
int status = _tokenize_helper(self, nrows, 0);
return status;
}

int tokenize_all_rows(register parser_t *self) {
int tokenize_all_rows(parser_t *self) {
int status = _tokenize_helper(self, -1, 1);
return status;
}
Expand Down
22 changes: 11 additions & 11 deletions pandas/_libs/src/parser/tokenizer.h
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ typedef struct coliter_t {
} coliter_t;

void coliter_setup(coliter_t *self, parser_t *parser, int i, int start);
coliter_t *coliter_new(register parser_t *self, int i);
coliter_t *coliter_new(parser_t *self, int i);

#define COLITER_NEXT(iter, word) \
do { \
Expand All @@ -222,25 +222,25 @@ coliter_t *coliter_new(register parser_t *self, int i);

parser_t *parser_new(void);

int parser_init(register parser_t *self);
int parser_init(parser_t *self);

int parser_consume_rows(register parser_t *self, size_t nrows);
int parser_consume_rows(parser_t *self, size_t nrows);

int parser_trim_buffers(register parser_t *self);
int parser_trim_buffers(parser_t *self);

int parser_add_skiprow(register parser_t *self, int64_t row);
int parser_add_skiprow(parser_t *self, int64_t row);

int parser_set_skipfirstnrows(register parser_t *self, int64_t nrows);
int parser_set_skipfirstnrows(parser_t *self, int64_t nrows);

void parser_free(register parser_t *self);
void parser_free(parser_t *self);

void parser_del(register parser_t *self);
void parser_del(parser_t *self);

void parser_set_default_options(register parser_t *self);
void parser_set_default_options(parser_t *self);

int tokenize_nrows(register parser_t *self, size_t nrows);
int tokenize_nrows(parser_t *self, size_t nrows);

int tokenize_all_rows(register parser_t *self);
int tokenize_all_rows(parser_t *self);

// Have parsed / type-converted a chunk of data
// and want to free memory from the token stream
Expand Down