diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/addr2line.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/addr2line.h new file mode 100644 index 0000000..d99f010 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/addr2line.h @@ -0,0 +1,21 @@ +/********************************************************************** + + addr2line.h - + + $Author$ + + Copyright (C) 2010 Shinichiro Hamaji + +**********************************************************************/ + +#ifndef RUBY_ADDR2LINE_H +#define RUBY_ADDR2LINE_H + +#ifdef USE_ELF + +void +rb_dump_backtrace_with_lines(int num_traces, void **traces); + +#endif /* USE_ELF */ + +#endif /* RUBY_ADDR2LINE_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/build_assert/build_assert.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/build_assert/build_assert.h new file mode 100644 index 0000000..a04d1d4 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/build_assert/build_assert.h @@ -0,0 +1,40 @@ +/* CC0 (Public domain) - see ccan/licenses/CC0 file for details */ +#ifndef CCAN_BUILD_ASSERT_H +#define CCAN_BUILD_ASSERT_H + +/** + * BUILD_ASSERT - assert a build-time dependency. + * @cond: the compile-time condition which must be true. + * + * Your compile will fail if the condition isn't true, or can't be evaluated + * by the compiler. This can only be used within a function. + * + * Example: + * #include + * ... + * static char *foo_to_char(struct foo *foo) + * { + * // This code needs string to be at start of foo. + * BUILD_ASSERT(offsetof(struct foo, string) == 0); + * return (char *)foo; + * } + */ +#define BUILD_ASSERT(cond) \ + do { (void) sizeof(char [1 - 2*!(cond)]); } while(0) + +/** + * BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression. + * @cond: the compile-time condition which must be true. + * + * Your compile will fail if the condition isn't true, or can't be evaluated + * by the compiler. This can be used in an expression: its value is "0". + * + * Example: + * #define foo_to_char(foo) \ + * ((char *)(foo) \ + * + BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0)) + */ +#define BUILD_ASSERT_OR_ZERO(cond) \ + (sizeof(char [1 - 2*!(cond)]) - 1) + +#endif /* CCAN_BUILD_ASSERT_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/check_type/check_type.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/check_type/check_type.h new file mode 100644 index 0000000..1f77a53 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/check_type/check_type.h @@ -0,0 +1,63 @@ +/* CC0 (Public domain) - see ccan/licenses/CC0 file for details */ +#ifndef CCAN_CHECK_TYPE_H +#define CCAN_CHECK_TYPE_H + +/** + * check_type - issue a warning or build failure if type is not correct. + * @expr: the expression whose type we should check (not evaluated). + * @type: the exact type we expect the expression to be. + * + * This macro is usually used within other macros to try to ensure that a macro + * argument is of the expected type. No type promotion of the expression is + * done: an unsigned int is not the same as an int! + * + * check_type() always evaluates to 0. + * + * If your compiler does not support typeof, then the best we can do is fail + * to compile if the sizes of the types are unequal (a less complete check). + * + * Example: + * // They should always pass a 64-bit value to _set_some_value! 
+ * #define set_some_value(expr) \ + * _set_some_value((check_type((expr), uint64_t), (expr))) + */ + +/** + * check_types_match - issue a warning or build failure if types are not same. + * @expr1: the first expression (not evaluated). + * @expr2: the second expression (not evaluated). + * + * This macro is usually used within other macros to try to ensure that + * arguments are of identical types. No type promotion of the expressions is + * done: an unsigned int is not the same as an int! + * + * check_types_match() always evaluates to 0. + * + * If your compiler does not support typeof, then the best we can do is fail + * to compile if the sizes of the types are unequal (a less complete check). + * + * Example: + * // Do subtraction to get to enclosing type, but make sure that + * // pointer is of correct type for that member. + * #define container_of(mbr_ptr, encl_type, mbr) \ + * (check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \ + * ((encl_type *) \ + * ((char *)(mbr_ptr) - offsetof(enclosing_type, mbr)))) + */ +#if HAVE_TYPEOF +#define check_type(expr, type) \ + ((typeof(expr) *)0 != (type *)0) + +#define check_types_match(expr1, expr2) \ + ((typeof(expr1) *)0 != (typeof(expr2) *)0) +#else +#include "ccan/build_assert/build_assert.h" +/* Without typeof, we can only test the sizes. */ +#define check_type(expr, type) \ + BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type)) + +#define check_types_match(expr1, expr2) \ + BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2)) +#endif /* HAVE_TYPEOF */ + +#endif /* CCAN_CHECK_TYPE_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/container_of/container_of.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/container_of/container_of.h new file mode 100644 index 0000000..ae3e1fc --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/container_of/container_of.h @@ -0,0 +1,142 @@ +/* CC0 (Public domain) - see ccan/licenses/CC0 file for details */ +#ifndef CCAN_CONTAINER_OF_H +#define CCAN_CONTAINER_OF_H +#include "ccan/check_type/check_type.h" + +/** + * container_of - get pointer to enclosing structure + * @member_ptr: pointer to the structure member + * @containing_type: the type this member is within + * @member: the name of this member within the structure. + * + * Given a pointer to a member of a structure, this macro does pointer + * subtraction to return the pointer to the enclosing type. + * + * Example: + * struct foo { + * int fielda, fieldb; + * // ... + * }; + * struct info { + * int some_other_field; + * struct foo my_foo; + * }; + * + * static struct info *foo_to_info(struct foo *foo) + * { + * return container_of(foo, struct info, my_foo); + * } + */ +#define container_of(member_ptr, containing_type, member) \ + ((containing_type *) \ + ((char *)(member_ptr) \ + - container_off(containing_type, member)) \ + + check_types_match(*(member_ptr), ((containing_type *)0)->member)) + + +/** + * container_of_or_null - get pointer to enclosing structure, or NULL + * @member_ptr: pointer to the structure member + * @containing_type: the type this member is within + * @member: the name of this member within the structure. + * + * Given a pointer to a member of a structure, this macro does pointer + * subtraction to return the pointer to the enclosing type, unless it + * is given NULL, in which case it also returns NULL. + * + * Example: + * struct foo { + * int fielda, fieldb; + * // ... 
+ * }; + * struct info { + * int some_other_field; + * struct foo my_foo; + * }; + * + * static struct info *foo_to_info_allowing_null(struct foo *foo) + * { + * return container_of_or_null(foo, struct info, my_foo); + * } + */ +static inline char *container_of_or_null_(void *member_ptr, size_t offset) +{ + return member_ptr ? (char *)member_ptr - offset : NULL; +} +#define container_of_or_null(member_ptr, containing_type, member) \ + ((containing_type *) \ + container_of_or_null_(member_ptr, \ + container_off(containing_type, member)) \ + + check_types_match(*(member_ptr), ((containing_type *)0)->member)) + +/** + * container_off - get offset to enclosing structure + * @containing_type: the type this member is within + * @member: the name of this member within the structure. + * + * Given a pointer to a member of a structure, this macro does + * typechecking and figures out the offset to the enclosing type. + * + * Example: + * struct foo { + * int fielda, fieldb; + * // ... + * }; + * struct info { + * int some_other_field; + * struct foo my_foo; + * }; + * + * static struct info *foo_to_info(struct foo *foo) + * { + * size_t off = container_off(struct info, my_foo); + * return (void *)((char *)foo - off); + * } + */ +#define container_off(containing_type, member) \ + offsetof(containing_type, member) + +/** + * container_of_var - get pointer to enclosing structure using a variable + * @member_ptr: pointer to the structure member + * @container_var: a pointer of same type as this member's container + * @member: the name of this member within the structure. + * + * Given a pointer to a member of a structure, this macro does pointer + * subtraction to return the pointer to the enclosing type. + * + * Example: + * static struct info *foo_to_i(struct foo *foo) + * { + * struct info *i = container_of_var(foo, i, my_foo); + * return i; + * } + */ +#if HAVE_TYPEOF +#define container_of_var(member_ptr, container_var, member) \ + container_of(member_ptr, typeof(*container_var), member) +#else +#define container_of_var(member_ptr, container_var, member) \ + ((void *)((char *)(member_ptr) - \ + container_off_var(container_var, member))) +#endif + +/** + * container_off_var - get offset of a field in enclosing structure + * @container_var: a pointer to a container structure + * @member: the name of a member within the structure. + * + * Given (any) pointer to a structure and a its member name, this + * macro does pointer subtraction to return offset of member in a + * structure memory layout. + * + */ +#if HAVE_TYPEOF +#define container_off_var(var, member) \ + container_off(typeof(*var), member) +#else +#define container_off_var(var, member) \ + ((const char *)&(var)->member - (const char *)(var)) +#endif + +#endif /* CCAN_CONTAINER_OF_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/list/list.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/list/list.h new file mode 100644 index 0000000..ca9f9f1 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/list/list.h @@ -0,0 +1,773 @@ +/* Licensed under BSD-MIT - see ccan/licenses/BSD-MIT file for details */ +#ifndef CCAN_LIST_H +#define CCAN_LIST_H +#include +#include "ccan/str/str.h" +#include "ccan/container_of/container_of.h" +#include "ccan/check_type/check_type.h" + +/** + * struct list_node - an entry in a doubly-linked list + * @next: next entry (self if empty) + * @prev: previous entry (self if empty) + * + * This is used as an entry in a linked list. 
+ * Example: + * struct child { + * const char *name; + * // Linked list of all us children. + * struct list_node list; + * }; + */ +struct list_node +{ + struct list_node *next, *prev; +}; + +/** + * struct list_head - the head of a doubly-linked list + * @h: the list_head (containing next and prev pointers) + * + * This is used as the head of a linked list. + * Example: + * struct parent { + * const char *name; + * struct list_head children; + * unsigned int num_children; + * }; + */ +struct list_head +{ + struct list_node n; +}; + +#define LIST_LOC __FILE__ ":" stringify(__LINE__) +#define list_debug(h, loc) ((void)loc, h) +#define list_debug_node(n, loc) ((void)loc, n) + +/** + * LIST_HEAD_INIT - initializer for an empty list_head + * @name: the name of the list. + * + * Explicit initializer for an empty list. + * + * See also: + * LIST_HEAD, list_head_init() + * + * Example: + * static struct list_head my_list = LIST_HEAD_INIT(my_list); + */ +#define LIST_HEAD_INIT(name) { { &name.n, &name.n } } + +/** + * LIST_HEAD - define and initialize an empty list_head + * @name: the name of the list. + * + * The LIST_HEAD macro defines a list_head and initializes it to an empty + * list. It can be prepended by "static" to define a static list_head. + * + * See also: + * LIST_HEAD_INIT, list_head_init() + * + * Example: + * static LIST_HEAD(my_global_list); + */ +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +/** + * list_head_init - initialize a list_head + * @h: the list_head to set to the empty list + * + * Example: + * ... + * struct parent *parent = malloc(sizeof(*parent)); + * + * list_head_init(&parent->children); + * parent->num_children = 0; + */ +static inline void list_head_init(struct list_head *h) +{ + h->n.next = h->n.prev = &h->n; +} + +/** + * list_node_init - initialize a list_node + * @n: the list_node to link to itself. + * + * You don't need to use this normally! But it lets you list_del(@n) + * safely. + */ +static inline void list_node_init(struct list_node *n) +{ + n->next = n->prev = n; +} + +/** + * list_add_after - add an entry after an existing node in a linked list + * @h: the list_head to add the node to (for debugging) + * @p: the existing list_node to add the node after + * @n: the new list_node to add to the list. + * + * The existing list_node must already be a member of the list. + * The new list_node does not need to be initialized; it will be overwritten. + * + * Example: + * struct child c1, c2, c3; + * LIST_HEAD(h); + * + * list_add_tail(&h, &c1.list); + * list_add_tail(&h, &c3.list); + * list_add_after(&h, &c1.list, &c2.list); + */ +#define list_add_after(h, p, n) list_add_after_(h, p, n, LIST_LOC) +static inline void list_add_after_(struct list_head *h, + struct list_node *p, + struct list_node *n, + const char *abortstr) +{ + n->next = p->next; + n->prev = p; + p->next->prev = n; + p->next = n; + (void)list_debug(h, abortstr); +} + +/** + * list_add - add an entry at the start of a linked list. + * @h: the list_head to add the node to + * @n: the list_node to add to the list. + * + * The list_node does not need to be initialized; it will be overwritten. 
+ * Example: + * struct child *child = malloc(sizeof(*child)); + * + * child->name = "marvin"; + * list_add(&parent->children, &child->list); + * parent->num_children++; + */ +#define list_add(h, n) list_add_(h, n, LIST_LOC) +static inline void list_add_(struct list_head *h, + struct list_node *n, + const char *abortstr) +{ + list_add_after_(h, &h->n, n, abortstr); +} + +/** + * list_add_before - add an entry before an existing node in a linked list + * @h: the list_head to add the node to (for debugging) + * @p: the existing list_node to add the node before + * @n: the new list_node to add to the list. + * + * The existing list_node must already be a member of the list. + * The new list_node does not need to be initialized; it will be overwritten. + * + * Example: + * list_head_init(&h); + * list_add_tail(&h, &c1.list); + * list_add_tail(&h, &c3.list); + * list_add_before(&h, &c3.list, &c2.list); + */ +#define list_add_before(h, p, n) list_add_before_(h, p, n, LIST_LOC) +static inline void list_add_before_(struct list_head *h, + struct list_node *p, + struct list_node *n, + const char *abortstr) +{ + n->next = p; + n->prev = p->prev; + p->prev->next = n; + p->prev = n; + (void)list_debug(h, abortstr); +} + +/** + * list_add_tail - add an entry at the end of a linked list. + * @h: the list_head to add the node to + * @n: the list_node to add to the list. + * + * The list_node does not need to be initialized; it will be overwritten. + * Example: + * list_add_tail(&parent->children, &child->list); + * parent->num_children++; + */ +#define list_add_tail(h, n) list_add_tail_(h, n, LIST_LOC) +static inline void list_add_tail_(struct list_head *h, + struct list_node *n, + const char *abortstr) +{ + list_add_before_(h, &h->n, n, abortstr); +} + +/** + * list_empty - is a list empty? + * @h: the list_head + * + * If the list is empty, returns true. + * + * Example: + * assert(list_empty(&parent->children) == (parent->num_children == 0)); + */ +#define list_empty(h) list_empty_(h, LIST_LOC) +static inline int list_empty_(const struct list_head *h, const char* abortstr) +{ + (void)list_debug(h, abortstr); + return h->n.next == &h->n; +} + +/** + * list_empty_nodebug - is a list empty (and don't perform debug checks)? + * @h: the list_head + * + * If the list is empty, returns true. + * This differs from list_empty() in that if CCAN_LIST_DEBUG is set it + * will NOT perform debug checks. Only use this function if you REALLY + * know what you're doing. + * + * Example: + * assert(list_empty_nodebug(&parent->children) == (parent->num_children == 0)); + */ +#ifndef CCAN_LIST_DEBUG +#define list_empty_nodebug(h) list_empty(h) +#else +static inline int list_empty_nodebug(const struct list_head *h) +{ + return h->n.next == &h->n; +} +#endif + +/** + * list_del - delete an entry from an (unknown) linked list. + * @n: the list_node to delete from the list. + * + * Note that this leaves @n in an undefined state; it can be added to + * another list, but not deleted again. + * + * See also: + * list_del_from(), list_del_init() + * + * Example: + * list_del(&child->list); + * parent->num_children--; + */ +#define list_del(n) list_del_(n, LIST_LOC) +static inline void list_del_(struct list_node *n, const char* abortstr) +{ + (void)list_debug_node(n, abortstr); + n->next->prev = n->prev; + n->prev->next = n->next; +#ifdef CCAN_LIST_DEBUG + /* Catch use-after-del. */ + n->next = n->prev = NULL; +#endif +} + +/** + * list_del_init - delete a node, and reset it so it can be deleted again. 
+ * @n: the list_node to be deleted. + * + * list_del(@n) or list_del_init() again after this will be safe, + * which can be useful in some cases. + * + * See also: + * list_del_from(), list_del() + * + * Example: + * list_del_init(&child->list); + * parent->num_children--; + */ +#define list_del_init(n) list_del_init_(n, LIST_LOC) +static inline void list_del_init_(struct list_node *n, const char *abortstr) +{ + list_del_(n, abortstr); + list_node_init(n); +} + +/** + * list_del_from - delete an entry from a known linked list. + * @h: the list_head the node is in. + * @n: the list_node to delete from the list. + * + * This explicitly indicates which list a node is expected to be in, + * which is better documentation and can catch more bugs. + * + * See also: list_del() + * + * Example: + * list_del_from(&parent->children, &child->list); + * parent->num_children--; + */ +static inline void list_del_from(struct list_head *h, struct list_node *n) +{ +#ifdef CCAN_LIST_DEBUG + { + /* Thorough check: make sure it was in list! */ + struct list_node *i; + for (i = h->n.next; i != n; i = i->next) + assert(i != &h->n); + } +#endif /* CCAN_LIST_DEBUG */ + + /* Quick test that catches a surprising number of bugs. */ + assert(!list_empty(h)); + list_del(n); +} + +/** + * list_swap - swap out an entry from an (unknown) linked list for a new one. + * @o: the list_node to replace from the list. + * @n: the list_node to insert in place of the old one. + * + * Note that this leaves @o in an undefined state; it can be added to + * another list, but not deleted/swapped again. + * + * See also: + * list_del() + * + * Example: + * struct child x1, x2; + * LIST_HEAD(xh); + * + * list_add(&xh, &x1.list); + * list_swap(&x1.list, &x2.list); + */ +#define list_swap(o, n) list_swap_(o, n, LIST_LOC) +static inline void list_swap_(struct list_node *o, + struct list_node *n, + const char* abortstr) +{ + (void)list_debug_node(o, abortstr); + *n = *o; + n->next->prev = n; + n->prev->next = n; +#ifdef CCAN_LIST_DEBUG + /* Catch use-after-del. */ + o->next = o->prev = NULL; +#endif +} + +/** + * list_entry - convert a list_node back into the structure containing it. + * @n: the list_node + * @type: the type of the entry + * @member: the list_node member of the type + * + * Example: + * // First list entry is children.next; convert back to child. + * child = list_entry(parent->children.n.next, struct child, list); + * + * See Also: + * list_top(), list_for_each() + */ +#define list_entry(n, type, member) container_of(n, type, member) + +/** + * list_top - get the first entry in a list + * @h: the list_head + * @type: the type of the entry + * @member: the list_node member of the type + * + * If the list is empty, returns NULL. + * + * Example: + * struct child *first; + * first = list_top(&parent->children, struct child, list); + * if (!first) + * printf("Empty list!\n"); + */ +#define list_top(h, type, member) \ + ((type *)list_top_((h), list_off_(type, member))) + +static inline const void *list_top_(const struct list_head *h, size_t off) +{ + if (list_empty(h)) + return NULL; + return (const char *)h->n.next - off; +} + +/** + * list_pop - remove the first entry in a list + * @h: the list_head + * @type: the type of the entry + * @member: the list_node member of the type + * + * If the list is empty, returns NULL. 
+ * + * Example: + * struct child *one; + * one = list_pop(&parent->children, struct child, list); + * if (!one) + * printf("Empty list!\n"); + */ +#define list_pop(h, type, member) \ + ((type *)list_pop_((h), list_off_(type, member))) + +static inline const void *list_pop_(const struct list_head *h, size_t off) +{ + struct list_node *n; + + if (list_empty(h)) + return NULL; + n = h->n.next; + list_del(n); + return (const char *)n - off; +} + +/** + * list_tail - get the last entry in a list + * @h: the list_head + * @type: the type of the entry + * @member: the list_node member of the type + * + * If the list is empty, returns NULL. + * + * Example: + * struct child *last; + * last = list_tail(&parent->children, struct child, list); + * if (!last) + * printf("Empty list!\n"); + */ +#define list_tail(h, type, member) \ + ((type *)list_tail_((h), list_off_(type, member))) + +static inline const void *list_tail_(const struct list_head *h, size_t off) +{ + if (list_empty(h)) + return NULL; + return (const char *)h->n.prev - off; +} + +/** + * list_for_each - iterate through a list. + * @h: the list_head (warning: evaluated multiple times!) + * @i: the structure containing the list_node + * @member: the list_node member of the structure + * + * This is a convenient wrapper to iterate @i over the entire list. It's + * a for loop, so you can break and continue as normal. + * + * Example: + * list_for_each(&parent->children, child, list) + * printf("Name: %s\n", child->name); + */ +#define list_for_each(h, i, member) \ + list_for_each_off(h, i, list_off_var_(i, member)) + +/** + * list_for_each_rev - iterate through a list backwards. + * @h: the list_head + * @i: the structure containing the list_node + * @member: the list_node member of the structure + * + * This is a convenient wrapper to iterate @i over the entire list. It's + * a for loop, so you can break and continue as normal. + * + * Example: + * list_for_each_rev(&parent->children, child, list) + * printf("Name: %s\n", child->name); + */ +#define list_for_each_rev(h, i, member) \ + list_for_each_rev_off(h, i, list_off_var_(i, member)) + +/** + * list_for_each_rev_safe - iterate through a list backwards, + * maybe during deletion + * @h: the list_head + * @i: the structure containing the list_node + * @nxt: the structure containing the list_node + * @member: the list_node member of the structure + * + * This is a convenient wrapper to iterate @i over the entire list backwards. + * It's a for loop, so you can break and continue as normal. The extra + * variable * @nxt is used to hold the next element, so you can delete @i + * from the list. + * + * Example: + * struct child *next; + * list_for_each_rev_safe(&parent->children, child, next, list) { + * printf("Name: %s\n", child->name); + * } + */ +#define list_for_each_rev_safe(h, i, nxt, member) \ + list_for_each_rev_safe_off(h, i, nxt, list_off_var_(i, member)) + +/** + * list_for_each_safe - iterate through a list, maybe during deletion + * @h: the list_head + * @i: the structure containing the list_node + * @nxt: the structure containing the list_node + * @member: the list_node member of the structure + * + * This is a convenient wrapper to iterate @i over the entire list. It's + * a for loop, so you can break and continue as normal. The extra variable + * @nxt is used to hold the next element, so you can delete @i from the list. 
+ * + * Example: + * list_for_each_safe(&parent->children, child, next, list) { + * list_del(&child->list); + * parent->num_children--; + * } + */ +#define list_for_each_safe(h, i, nxt, member) \ + list_for_each_safe_off(h, i, nxt, list_off_var_(i, member)) + +/** + * list_next - get the next entry in a list + * @h: the list_head + * @i: a pointer to an entry in the list. + * @member: the list_node member of the structure + * + * If @i was the last entry in the list, returns NULL. + * + * Example: + * struct child *second; + * second = list_next(&parent->children, first, list); + * if (!second) + * printf("No second child!\n"); + */ +#define list_next(h, i, member) \ + ((list_typeof(i))list_entry_or_null(list_debug(h, \ + __FILE__ ":" stringify(__LINE__)), \ + (i)->member.next, \ + list_off_var_((i), member))) + +/** + * list_prev - get the previous entry in a list + * @h: the list_head + * @i: a pointer to an entry in the list. + * @member: the list_node member of the structure + * + * If @i was the first entry in the list, returns NULL. + * + * Example: + * first = list_prev(&parent->children, second, list); + * if (!first) + * printf("Can't go back to first child?!\n"); + */ +#define list_prev(h, i, member) \ + ((list_typeof(i))list_entry_or_null(list_debug(h, \ + __FILE__ ":" stringify(__LINE__)), \ + (i)->member.prev, \ + list_off_var_((i), member))) + +/** + * list_append_list - empty one list onto the end of another. + * @to: the list to append into + * @from: the list to empty. + * + * This takes the entire contents of @from and moves it to the end of + * @to. After this @from will be empty. + * + * Example: + * struct list_head adopter; + * + * list_append_list(&adopter, &parent->children); + * assert(list_empty(&parent->children)); + * parent->num_children = 0; + */ +#define list_append_list(t, f) list_append_list_(t, f, \ + __FILE__ ":" stringify(__LINE__)) +static inline void list_append_list_(struct list_head *to, + struct list_head *from, + const char *abortstr) +{ + struct list_node *from_tail = list_debug(from, abortstr)->n.prev; + struct list_node *to_tail = list_debug(to, abortstr)->n.prev; + + /* Sew in head and entire list. */ + to->n.prev = from_tail; + from_tail->next = &to->n; + to_tail->next = &from->n; + from->n.prev = to_tail; + + /* Now remove head. */ + list_del(&from->n); + list_head_init(from); +} + +/** + * list_prepend_list - empty one list into the start of another. + * @to: the list to prepend into + * @from: the list to empty. + * + * This takes the entire contents of @from and moves it to the start + * of @to. After this @from will be empty. + * + * Example: + * list_prepend_list(&adopter, &parent->children); + * assert(list_empty(&parent->children)); + * parent->num_children = 0; + */ +#define list_prepend_list(t, f) list_prepend_list_(t, f, LIST_LOC) +static inline void list_prepend_list_(struct list_head *to, + struct list_head *from, + const char *abortstr) +{ + struct list_node *from_tail = list_debug(from, abortstr)->n.prev; + struct list_node *to_head = list_debug(to, abortstr)->n.next; + + /* Sew in head and entire list. */ + to->n.next = &from->n; + from->n.prev = &to->n; + to_head->prev = from_tail; + from_tail->next = to_head; + + /* Now remove head. 
*/ + list_del(&from->n); + list_head_init(from); +} + +/* internal macros, do not use directly */ +#define list_for_each_off_dir_(h, i, off, dir) \ + for (i = list_node_to_off_(list_debug(h, LIST_LOC)->n.dir, \ + (off)); \ + list_node_from_off_((void *)i, (off)) != &(h)->n; \ + i = list_node_to_off_(list_node_from_off_((void *)i, (off))->dir, \ + (off))) + +#define list_for_each_safe_off_dir_(h, i, nxt, off, dir) \ + for (i = list_node_to_off_(list_debug(h, LIST_LOC)->n.dir, \ + (off)), \ + nxt = list_node_to_off_(list_node_from_off_(i, (off))->dir, \ + (off)); \ + list_node_from_off_(i, (off)) != &(h)->n; \ + i = nxt, \ + nxt = list_node_to_off_(list_node_from_off_(i, (off))->dir, \ + (off))) + +/** + * list_for_each_off - iterate through a list of memory regions. + * @h: the list_head + * @i: the pointer to a memory region which contains list node data. + * @off: offset(relative to @i) at which list node data resides. + * + * This is a low-level wrapper to iterate @i over the entire list, used to + * implement all other, more high-level, for-each constructs. It's a for loop, + * so you can break and continue as normal. + * + * WARNING! Being the low-level macro that it is, this wrapper doesn't know + * nor care about the type of @i. The only assumption made is that @i points + * to a chunk of memory that at some @offset, relative to @i, contains a + * properly filled `struct list_node' which in turn contains pointers to + * memory chunks and it's turtles all the way down. With all that in mind, + * remember that given the wrong pointer/offset couple this macro will + * happily churn all your memory until SEGFAULT stops it; in other words, + * caveat emptor. + * + * It is worth mentioning that one of the legitimate use-cases for this wrapper + * is operation on opaque types with a known offset for `struct list_node' + * member(preferably 0), because it allows you not to disclose the type of + * @i. + * + * Example: + * list_for_each_off(&parent->children, child, + * offsetof(struct child, list)) + * printf("Name: %s\n", child->name); + */ +#define list_for_each_off(h, i, off) \ + list_for_each_off_dir_((h),(i),(off),next) + +/** + * list_for_each_rev_off - iterate through a list of memory regions backwards + * @h: the list_head + * @i: the pointer to a memory region which contains list node data. + * @off: offset(relative to @i) at which list node data resides. + * + * See list_for_each_off for details + */ +#define list_for_each_rev_off(h, i, off) \ + list_for_each_off_dir_((h),(i),(off),prev) + +/** + * list_for_each_safe_off - iterate through a list of memory regions, maybe + * during deletion + * @h: the list_head + * @i: the pointer to a memory region which contains list node data. + * @nxt: the structure containing the list_node + * @off: offset(relative to @i) at which list node data resides. + * + * For details see `list_for_each_off' and `list_for_each_safe' + * descriptions. + * + * Example: + * list_for_each_safe_off(&parent->children, child, + * next, offsetof(struct child, list)) + * printf("Name: %s\n", child->name); + */ +#define list_for_each_safe_off(h, i, nxt, off) \ + list_for_each_safe_off_dir_((h),(i),(nxt),(off),next) + +/** + * list_for_each_rev_safe_off - iterate backwards through a list of + * memory regions, maybe during deletion + * @h: the list_head + * @i: the pointer to a memory region which contains list node data. + * @nxt: the structure containing the list_node + * @off: offset(relative to @i) at which list node data resides.
+ * + * For details see `list_for_each_rev_off' and `list_for_each_rev_safe' + * descriptions. + * + * Example: + * list_for_each_rev_safe_off(&parent->children, child, + * next, offsetof(struct child, list)) + * printf("Name: %s\n", child->name); + */ +#define list_for_each_rev_safe_off(h, i, nxt, off) \ + list_for_each_safe_off_dir_((h),(i),(nxt),(off),prev) + +/* Other -off variants. */ +#define list_entry_off(n, type, off) \ + ((type *)list_node_from_off_((n), (off))) + +#define list_head_off(h, type, off) \ + ((type *)list_head_off((h), (off))) + +#define list_tail_off(h, type, off) \ + ((type *)list_tail_((h), (off))) + +#define list_add_off(h, n, off) \ + list_add((h), list_node_from_off_((n), (off))) + +#define list_del_off(n, off) \ + list_del(list_node_from_off_((n), (off))) + +#define list_del_from_off(h, n, off) \ + list_del_from(h, list_node_from_off_((n), (off))) + +/* Offset helper functions so we only single-evaluate. */ +static inline void *list_node_to_off_(struct list_node *node, size_t off) +{ + return (void *)((char *)node - off); +} +static inline struct list_node *list_node_from_off_(void *ptr, size_t off) +{ + return (struct list_node *)((char *)ptr + off); +} + +/* Get the offset of the member, but make sure it's a list_node. */ +#define list_off_(type, member) \ + (container_off(type, member) + \ + check_type(((type *)0)->member, struct list_node)) + +#define list_off_var_(var, member) \ + (container_off_var(var, member) + \ + check_type(var->member, struct list_node)) + +#if HAVE_TYPEOF +#define list_typeof(var) typeof(var) +#else +#define list_typeof(var) void * +#endif + +/* Returns member, or NULL if at end of list. */ +static inline void *list_entry_or_null(const struct list_head *h, + const struct list_node *n, + size_t off) +{ + if (n == &h->n) + return NULL; + return (char *)n - off; +} +#endif /* CCAN_LIST_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/str/str.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/str/str.h new file mode 100644 index 0000000..9a9da9c --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ccan/str/str.h @@ -0,0 +1,16 @@ +/* CC0 (Public domain) - see ccan/licenses/CC0 file for details */ +#ifndef CCAN_STR_H +#define CCAN_STR_H +/** + * stringify - Turn expression into a string literal + * @expr: any C expression + * + * Example: + * #define PRINT_COND_IF_FALSE(cond) \ + * ((cond) || printf("%s is false!", stringify(cond))) + */ +#define stringify(expr) stringify_1(expr) +/* Double-indirection required to stringify expansions */ +#define stringify_1(expr) #expr + +#endif /* CCAN_STR_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/constant.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/constant.h new file mode 100644 index 0000000..fcccf07 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/constant.h @@ -0,0 +1,51 @@ +/********************************************************************** + + constant.h - + + $Author$ + created at: Sun Nov 15 00:09:33 2009 + + Copyright (C) 2009 Yusuke Endoh + +**********************************************************************/ +#ifndef CONSTANT_H +#define CONSTANT_H + +typedef enum { + CONST_DEPRECATED = 0x100, + + CONST_VISIBILITY_MASK = 0xff, + CONST_PUBLIC = 0x00, + CONST_PRIVATE, + CONST_VISIBILITY_MAX +} rb_const_flag_t; + +#define RB_CONST_PRIVATE_P(ce) \ + (((ce)->flag & CONST_VISIBILITY_MASK) == CONST_PRIVATE) +#define RB_CONST_PUBLIC_P(ce) \ + (((ce)->flag & CONST_VISIBILITY_MASK) == CONST_PUBLIC) + +#define 
RB_CONST_DEPRECATED_P(ce) \ + ((ce)->flag & CONST_DEPRECATED) + +typedef struct rb_const_entry_struct { + rb_const_flag_t flag; + int line; + const VALUE value; /* should be mark */ + const VALUE file; /* should be mark */ +} rb_const_entry_t; + +VALUE rb_mod_private_constant(int argc, const VALUE *argv, VALUE obj); +VALUE rb_mod_public_constant(int argc, const VALUE *argv, VALUE obj); +VALUE rb_mod_deprecate_constant(int argc, const VALUE *argv, VALUE obj); +void rb_free_const_table(struct rb_id_table *tbl); +VALUE rb_public_const_get(VALUE klass, ID id); +VALUE rb_public_const_get_at(VALUE klass, ID id); +VALUE rb_public_const_get_from(VALUE klass, ID id); +int rb_public_const_defined(VALUE klass, ID id); +int rb_public_const_defined_at(VALUE klass, ID id); +int rb_public_const_defined_from(VALUE klass, ID id); +rb_const_entry_t *rb_const_lookup(VALUE klass, ID id); +int rb_autoloading_value(VALUE mod, ID id, VALUE *value, rb_const_flag_t *flag); + +#endif /* CONSTANT_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/debug_counter.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/debug_counter.h new file mode 100644 index 0000000..f0f4e5e --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/debug_counter.h @@ -0,0 +1,109 @@ +/********************************************************************** + + debug_counter.h - + + created at: Tue Feb 21 16:51:18 2017 + + Copyright (C) 2017 Koichi Sasada + +**********************************************************************/ + +#ifndef USE_DEBUG_COUNTER +#define USE_DEBUG_COUNTER 0 +#endif + +#ifdef RB_DEBUG_COUNTER + +/* method search */ +RB_DEBUG_COUNTER(mc_inline_hit) +RB_DEBUG_COUNTER(mc_inline_miss) +RB_DEBUG_COUNTER(mc_global_hit) +RB_DEBUG_COUNTER(mc_global_miss) +RB_DEBUG_COUNTER(mc_global_state_miss) +RB_DEBUG_COUNTER(mc_class_serial_miss) +RB_DEBUG_COUNTER(mc_cme_complement) +RB_DEBUG_COUNTER(mc_cme_complement_hit) +RB_DEBUG_COUNTER(mc_search_super) + +/* ivar access */ +RB_DEBUG_COUNTER(ivar_get_ic_hit) +RB_DEBUG_COUNTER(ivar_get_ic_miss) +RB_DEBUG_COUNTER(ivar_get_ic_miss_serial) +RB_DEBUG_COUNTER(ivar_get_ic_miss_unset) +RB_DEBUG_COUNTER(ivar_get_ic_miss_noobject) +RB_DEBUG_COUNTER(ivar_set_ic_hit) +RB_DEBUG_COUNTER(ivar_set_ic_miss) +RB_DEBUG_COUNTER(ivar_set_ic_miss_serial) +RB_DEBUG_COUNTER(ivar_set_ic_miss_unset) +RB_DEBUG_COUNTER(ivar_set_ic_miss_oorange) +RB_DEBUG_COUNTER(ivar_set_ic_miss_noobject) +RB_DEBUG_COUNTER(ivar_get_base) +RB_DEBUG_COUNTER(ivar_set_base) + +/* lvar access */ +RB_DEBUG_COUNTER(lvar_get) +RB_DEBUG_COUNTER(lvar_get_dynamic) +RB_DEBUG_COUNTER(lvar_set) +RB_DEBUG_COUNTER(lvar_set_dynamic) +RB_DEBUG_COUNTER(lvar_set_slowpath) + +/* object counts */ +RB_DEBUG_COUNTER(obj_free) + +RB_DEBUG_COUNTER(obj_str_ptr) +RB_DEBUG_COUNTER(obj_str_embed) +RB_DEBUG_COUNTER(obj_str_shared) +RB_DEBUG_COUNTER(obj_str_nofree) +RB_DEBUG_COUNTER(obj_str_fstr) + +RB_DEBUG_COUNTER(obj_ary_ptr) +RB_DEBUG_COUNTER(obj_ary_embed) + +RB_DEBUG_COUNTER(obj_obj_ptr) +RB_DEBUG_COUNTER(obj_obj_embed) + +/* load */ +RB_DEBUG_COUNTER(load_files) +RB_DEBUG_COUNTER(load_path_is_not_realpath) + +#endif + +#ifndef RUBY_DEBUG_COUNTER_H +#define RUBY_DEBUG_COUNTER_H 1 + +#if !defined(__GNUC__) && USE_DEBUG_COUNTER +#error "USE_DEBUG_COUNTER is not supported by other than __GNUC__" +#endif + +enum rb_debug_counter_type { +#define RB_DEBUG_COUNTER(name) RB_DEBUG_COUNTER_##name, +#include "debug_counter.h" + RB_DEBUG_COUNTER_MAX +#undef RB_DEBUG_COUNTER +}; + +#if USE_DEBUG_COUNTER +#include "ruby/ruby.h" + +extern size_t 
rb_debug_counter[]; + +inline static int +rb_debug_counter_add(enum rb_debug_counter_type type, int add, int cond) +{ + if (cond) { + rb_debug_counter[(int)type] += add; + } + return cond; +} + +#define RB_DEBUG_COUNTER_INC(type) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, 1) +#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (!rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, !(cond))) +#define RB_DEBUG_COUNTER_INC_IF(type, cond) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, (cond)) + +#else +#define RB_DEBUG_COUNTER_INC(type) ((void)0) +#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (cond) +#define RB_DEBUG_COUNTER_INC_IF(type, cond) (cond) +#endif + +#endif /* RUBY_DEBUG_COUNTER_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/dln.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/dln.h new file mode 100644 index 0000000..25aaa7f --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/dln.h @@ -0,0 +1,51 @@ +/********************************************************************** + + dln.h - + + $Author: nobu $ + created at: Wed Jan 19 16:53:09 JST 1994 + + Copyright (C) 1993-2007 Yukihiro Matsumoto + +**********************************************************************/ + +#ifndef DLN_H +#define DLN_H + +#ifdef __cplusplus +# ifndef HAVE_PROTOTYPES +# define HAVE_PROTOTYPES 1 +# endif +# ifndef HAVE_STDARG_PROTOTYPES +# define HAVE_STDARG_PROTOTYPES 1 +# endif +#endif + +#undef _ +#ifdef HAVE_PROTOTYPES +# define _(args) args +#else +# define _(args) () +#endif + +RUBY_SYMBOL_EXPORT_BEGIN + +#ifndef DLN_FIND_EXTRA_ARG +#define DLN_FIND_EXTRA_ARG +#endif +#ifndef DLN_FIND_EXTRA_ARG_DECL +#define DLN_FIND_EXTRA_ARG_DECL +#endif + +char *dln_find_exe_r(const char*,const char*,char*,size_t DLN_FIND_EXTRA_ARG_DECL); +char *dln_find_file_r(const char*,const char*,char*,size_t DLN_FIND_EXTRA_ARG_DECL); + +#ifdef USE_DLN_A_OUT +extern char *dln_argv0; +#endif + +void *dln_load(const char*); + +RUBY_SYMBOL_EXPORT_END + +#endif diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/encindex.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/encindex.h new file mode 100644 index 0000000..658b60a --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/encindex.h @@ -0,0 +1,69 @@ +/********************************************************************** + + encindex.h - + + $Author$ + created at: Tue Sep 15 13:21:14 JST 2015 + + Copyright (C) 2015 Yukihiro Matsumoto + +**********************************************************************/ + +#ifndef RUBY_ENCINDEX_H +#define RUBY_ENCINDEX_H 1 +#if defined(__cplusplus) +extern "C" { +#if 0 +} /* satisfy cc-mode */ +#endif +#endif + +enum ruby_preserved_encindex { + RUBY_ENCINDEX_ASCII, + RUBY_ENCINDEX_UTF_8, + RUBY_ENCINDEX_US_ASCII, + + /* preserved indexes */ + RUBY_ENCINDEX_UTF_16BE, + RUBY_ENCINDEX_UTF_16LE, + RUBY_ENCINDEX_UTF_32BE, + RUBY_ENCINDEX_UTF_32LE, + RUBY_ENCINDEX_UTF_16, + RUBY_ENCINDEX_UTF_32, + RUBY_ENCINDEX_UTF8_MAC, + + /* for old options of regexp */ + RUBY_ENCINDEX_EUC_JP, + RUBY_ENCINDEX_Windows_31J, + + RUBY_ENCINDEX_BUILTIN_MAX +}; + +#define ENCINDEX_ASCII RUBY_ENCINDEX_ASCII +#define ENCINDEX_UTF_8 RUBY_ENCINDEX_UTF_8 +#define ENCINDEX_US_ASCII RUBY_ENCINDEX_US_ASCII +#define ENCINDEX_UTF_16BE RUBY_ENCINDEX_UTF_16BE +#define ENCINDEX_UTF_16LE RUBY_ENCINDEX_UTF_16LE +#define ENCINDEX_UTF_32BE RUBY_ENCINDEX_UTF_32BE +#define ENCINDEX_UTF_32LE RUBY_ENCINDEX_UTF_32LE +#define ENCINDEX_UTF_16 RUBY_ENCINDEX_UTF_16 +#define ENCINDEX_UTF_32 RUBY_ENCINDEX_UTF_32 +#define ENCINDEX_UTF8_MAC 
RUBY_ENCINDEX_UTF8_MAC +#define ENCINDEX_EUC_JP RUBY_ENCINDEX_EUC_JP +#define ENCINDEX_Windows_31J RUBY_ENCINDEX_Windows_31J +#define ENCINDEX_BUILTIN_MAX RUBY_ENCINDEX_BUILTIN_MAX + +#define rb_ascii8bit_encindex() RUBY_ENCINDEX_ASCII +#define rb_utf8_encindex() RUBY_ENCINDEX_UTF_8 +#define rb_usascii_encindex() RUBY_ENCINDEX_US_ASCII + +int rb_enc_find_index2(const char *name, long len); + +#if defined(__cplusplus) +#if 0 +{ /* satisfy cc-mode */ +#endif +} /* extern "C" { */ +#endif + +#endif /* RUBY_ENCINDEX_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/eval_intern.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/eval_intern.h new file mode 100644 index 0000000..420f3d3 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/eval_intern.h @@ -0,0 +1,334 @@ +#ifndef RUBY_EVAL_INTERN_H +#define RUBY_EVAL_INTERN_H + +#include "ruby/ruby.h" +#include "vm_core.h" + +static inline void +vm_passed_block_handler_set(rb_execution_context_t *ec, VALUE block_handler) +{ + vm_block_handler_verify(block_handler); + ec->passed_block_handler = block_handler; +} + +static inline void +pass_passed_block_handler(rb_execution_context_t *ec) +{ + VALUE block_handler = rb_vm_frame_block_handler(ec->cfp); + vm_block_handler_verify(block_handler); + vm_passed_block_handler_set(ec, block_handler); + VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_PASSED); +} + +#define PASS_PASSED_BLOCK_HANDLER_EC(ec) pass_passed_block_handler(ec) +#define PASS_PASSED_BLOCK_HANDLER() pass_passed_block_handler(GET_EC()) + +#ifdef HAVE_STDLIB_H +#include +#endif +#ifndef EXIT_SUCCESS +#define EXIT_SUCCESS 0 +#endif +#ifndef EXIT_FAILURE +#define EXIT_FAILURE 1 +#endif + +#include +#include + +#ifdef __APPLE__ +# ifdef HAVE_CRT_EXTERNS_H +# include +# else +# include "missing/crt_externs.h" +# endif +#endif + +#ifndef HAVE_STRING_H +char *strrchr(const char *, const char); +#endif + +#ifdef HAVE_UNISTD_H +#include +#endif + +#ifdef HAVE_NET_SOCKET_H +#include +#endif + +#define ruby_setjmp(env) RUBY_SETJMP(env) +#define ruby_longjmp(env,val) RUBY_LONGJMP((env),(val)) +#ifdef __CYGWIN__ +# ifndef _setjmp +int _setjmp(jmp_buf); +# endif +# ifndef _longjmp +NORETURN(void _longjmp(jmp_buf, int)); +# endif +#endif + +#include +#include +#include + +#ifdef HAVE_SYS_SELECT_H +#include +#endif + +/* + Solaris sys/select.h switches select to select_large_fdset to support larger + file descriptors if FD_SETSIZE is larger than 1024 on 32bit environment. + But Ruby doesn't change FD_SETSIZE because fd_set is allocated dynamically. + So following definition is required to use select_large_fdset. +*/ +#ifdef HAVE_SELECT_LARGE_FDSET +#define select(n, r, w, e, t) select_large_fdset((n), (r), (w), (e), (t)) +extern int select_large_fdset(int, fd_set *, fd_set *, fd_set *, struct timeval *); +#endif + +#ifdef HAVE_SYS_PARAM_H +#include +#endif + +#include + +#ifdef _MSC_VER +#define SAVE_ROOT_JMPBUF_BEFORE_STMT \ + __try { +#define SAVE_ROOT_JMPBUF_AFTER_STMT \ + } \ + __except (GetExceptionCode() == EXCEPTION_STACK_OVERFLOW ? 
\ + (rb_ec_raised_set(GET_EC(), RAISED_STACKOVERFLOW), \ + raise(SIGSEGV), \ + EXCEPTION_EXECUTE_HANDLER) : \ + EXCEPTION_CONTINUE_SEARCH) { \ + /* never reaches here */ \ + } +#elif defined(__MINGW32__) +LONG WINAPI rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *); +#define SAVE_ROOT_JMPBUF_BEFORE_STMT \ + do { \ + PVOID _handler = AddVectoredExceptionHandler(1, rb_w32_stack_overflow_handler); + +#define SAVE_ROOT_JMPBUF_AFTER_STMT \ + RemoveVectoredExceptionHandler(_handler); \ + } while (0); +#else +#define SAVE_ROOT_JMPBUF_BEFORE_STMT +#define SAVE_ROOT_JMPBUF_AFTER_STMT +#endif + +#define SAVE_ROOT_JMPBUF(th, stmt) do \ + if (ruby_setjmp((th)->root_jmpbuf) == 0) { \ + SAVE_ROOT_JMPBUF_BEFORE_STMT \ + stmt; \ + SAVE_ROOT_JMPBUF_AFTER_STMT \ + } \ + else { \ + rb_fiber_start(); \ + } while (0) + +#define EC_PUSH_TAG(ec) do { \ + rb_execution_context_t * const _ec = (ec); \ + struct rb_vm_tag _tag; \ + _tag.state = TAG_NONE; \ + _tag.tag = Qundef; \ + _tag.prev = _ec->tag; + +#define EC_POP_TAG() \ + _ec->tag = _tag.prev; \ +} while (0) + +#define EC_TMPPOP_TAG() \ + _ec->tag = _tag.prev + +#define EC_REPUSH_TAG() (void)(_ec->tag = &_tag) + +#if defined __GNUC__ && __GNUC__ == 4 && (__GNUC_MINOR__ >= 6 && __GNUC_MINOR__ <= 8) || __clang__ +/* This macro prevents GCC 4.6--4.8 from emitting maybe-uninitialized warnings. + * This macro also prevents Clang from dumping core in EC_EXEC_TAG(). + * (I confirmed Clang 4.0.1 and 5.0.0.) + */ +# define VAR_FROM_MEMORY(var) __extension__(*(__typeof__(var) volatile *)&(var)) +# define VAR_INITIALIZED(var) ((var) = VAR_FROM_MEMORY(var)) +# define VAR_NOCLOBBERED(var) volatile var +#else +# define VAR_FROM_MEMORY(var) (var) +# define VAR_INITIALIZED(var) ((void)&(var)) +# define VAR_NOCLOBBERED(var) var +#endif + +#if defined(USE_UNALIGNED_MEMBER_ACCESS) && USE_UNALIGNED_MEMBER_ACCESS && \ + defined(__clang__) +# define UNALIGNED_MEMBER_ACCESS(expr) __extension__({ \ + _Pragma("GCC diagnostic push"); \ + _Pragma("GCC diagnostic ignored \"-Waddress-of-packed-member\""); \ + typeof(expr) unaligned_member_access_result = (expr); \ + _Pragma("GCC diagnostic pop"); \ + unaligned_member_access_result; \ +}) +#else +# define UNALIGNED_MEMBER_ACCESS(expr) expr +#endif +#define UNALIGNED_MEMBER_PTR(ptr, mem) UNALIGNED_MEMBER_ACCESS(&(ptr)->mem) + +#undef RB_OBJ_WRITE +#define RB_OBJ_WRITE(a, slot, b) UNALIGNED_MEMBER_ACCESS(rb_obj_write((VALUE)(a), (VALUE *)(slot), (VALUE)(b), __FILE__, __LINE__)) + +/* clear ec->tag->state, and return the value */ +static inline int +rb_ec_tag_state(const rb_execution_context_t *ec) +{ + enum ruby_tag_type state = ec->tag->state; + ec->tag->state = TAG_NONE; + return state; +} + +NORETURN(static inline void rb_ec_tag_jump(const rb_execution_context_t *ec, enum ruby_tag_type st)); +static inline void +rb_ec_tag_jump(const rb_execution_context_t *ec, enum ruby_tag_type st) +{ + ec->tag->state = st; + ruby_longjmp(ec->tag->buf, 1); +} + +/* + setjmp() in assignment expression rhs is undefined behavior + [ISO/IEC 9899:1999] 7.13.1.1 +*/ +#define EC_EXEC_TAG() \ + (ruby_setjmp(_tag.buf) ? 
rb_ec_tag_state(VAR_FROM_MEMORY(_ec)) : (EC_REPUSH_TAG(), 0)) + +#define EC_JUMP_TAG(ec, st) rb_ec_tag_jump(ec, st) + +#define INTERNAL_EXCEPTION_P(exc) FIXNUM_P(exc) + +/* CREF operators */ + +#define CREF_FL_PUSHED_BY_EVAL IMEMO_FL_USER1 +#define CREF_FL_OMOD_SHARED IMEMO_FL_USER2 + +static inline VALUE +CREF_CLASS(const rb_cref_t *cref) +{ + return cref->klass; +} + +static inline rb_cref_t * +CREF_NEXT(const rb_cref_t *cref) +{ + return cref->next; +} + +static inline const rb_scope_visibility_t * +CREF_SCOPE_VISI(const rb_cref_t *cref) +{ + return &cref->scope_visi; +} + +static inline VALUE +CREF_REFINEMENTS(const rb_cref_t *cref) +{ + return cref->refinements; +} + +static inline void +CREF_REFINEMENTS_SET(rb_cref_t *cref, VALUE refs) +{ + RB_OBJ_WRITE(cref, &cref->refinements, refs); +} + +static inline int +CREF_PUSHED_BY_EVAL(const rb_cref_t *cref) +{ + return cref->flags & CREF_FL_PUSHED_BY_EVAL; +} + +static inline void +CREF_PUSHED_BY_EVAL_SET(rb_cref_t *cref) +{ + cref->flags |= CREF_FL_PUSHED_BY_EVAL; +} + +static inline int +CREF_OMOD_SHARED(const rb_cref_t *cref) +{ + return cref->flags & CREF_FL_OMOD_SHARED; +} + +static inline void +CREF_OMOD_SHARED_SET(rb_cref_t *cref) +{ + cref->flags |= CREF_FL_OMOD_SHARED; +} + +static inline void +CREF_OMOD_SHARED_UNSET(rb_cref_t *cref) +{ + cref->flags &= ~CREF_FL_OMOD_SHARED; +} + +void rb_thread_cleanup(void); +void rb_thread_wait_other_threads(void); + +enum { + RAISED_EXCEPTION = 1, + RAISED_STACKOVERFLOW = 2, + RAISED_NOMEMORY = 4 +}; +#define rb_ec_raised_set(ec, f) ((ec)->raised_flag |= (f)) +#define rb_ec_raised_reset(ec, f) ((ec)->raised_flag &= ~(f)) +#define rb_ec_raised_p(ec, f) (((ec)->raised_flag & (f)) != 0) +#define rb_ec_raised_clear(ec) ((ec)->raised_flag = 0) +int rb_ec_set_raised(rb_execution_context_t *ec); +int rb_ec_reset_raised(rb_execution_context_t *ec); +int rb_ec_stack_check(rb_execution_context_t *ec); + +VALUE rb_f_eval(int argc, const VALUE *argv, VALUE self); +VALUE rb_make_exception(int argc, const VALUE *argv); + +NORETURN(void rb_method_name_error(VALUE, VALUE)); + +NORETURN(void rb_fiber_start(void)); + +NORETURN(void rb_print_undef(VALUE, ID, rb_method_visibility_t)); +NORETURN(void rb_print_undef_str(VALUE, VALUE)); +NORETURN(void rb_print_inaccessible(VALUE, ID, rb_method_visibility_t)); +NORETURN(void rb_vm_localjump_error(const char *,VALUE, int)); +#if 0 +NORETURN(void rb_vm_jump_tag_but_local_jump(int)); +#endif + +VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val); +rb_cref_t *rb_vm_cref(void); +rb_cref_t *rb_vm_cref_replace_with_duplicated_cref(void); +VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, VALUE block_handler, VALUE filename); +void rb_vm_set_progname(VALUE filename); +void rb_thread_terminate_all(void); +VALUE rb_vm_cbase(void); + +/* vm_backtrace.c */ +VALUE rb_ec_backtrace_object(const rb_execution_context_t *ec); +VALUE rb_ec_backtrace_str_ary(const rb_execution_context_t *ec, long lev, long n); + +#ifndef CharNext /* defined as CharNext[AW] on Windows. 
*/ +# ifdef HAVE_MBLEN +# define CharNext(p) ((p) + mblen((p), RUBY_MBCHAR_MAXSIZE)) +# else +# define CharNext(p) ((p) + 1) +# endif +#endif + +#if defined DOSISH || defined __CYGWIN__ +static inline void +translit_char(char *p, int from, int to) +{ + while (*p) { + if ((unsigned char)*p == from) + *p = to; + p = CharNext(p); + } +} +#endif + +#endif /* RUBY_EVAL_INTERN_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/gc.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/gc.h new file mode 100644 index 0000000..2c91e06 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/gc.h @@ -0,0 +1,116 @@ + +#ifndef RUBY_GC_H +#define RUBY_GC_H 1 + +#if defined(__x86_64__) && !defined(_ILP32) && defined(__GNUC__) +#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movq\t%%rsp, %0" : "=r" (*(p))) +#elif defined(__i386) && defined(__GNUC__) +#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movl\t%%esp, %0" : "=r" (*(p))) +#else +NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p)); +#define SET_MACHINE_STACK_END(p) rb_gc_set_stack_end(p) +#define USE_CONSERVATIVE_STACK_END +#endif + +/* for GC debug */ + +#ifndef RUBY_MARK_FREE_DEBUG +#define RUBY_MARK_FREE_DEBUG 0 +#endif + +#if RUBY_MARK_FREE_DEBUG +extern int ruby_gc_debug_indent; + +static inline void +rb_gc_debug_indent(void) +{ + printf("%*s", ruby_gc_debug_indent, ""); +} + +static inline void +rb_gc_debug_body(const char *mode, const char *msg, int st, void *ptr) +{ + if (st == 0) { + ruby_gc_debug_indent--; + } + rb_gc_debug_indent(); + printf("%s: %s %s (%p)\n", mode, st ? "->" : "<-", msg, ptr); + + if (st) { + ruby_gc_debug_indent++; + } + + fflush(stdout); +} + +#define RUBY_MARK_ENTER(msg) rb_gc_debug_body("mark", (msg), 1, ptr) +#define RUBY_MARK_LEAVE(msg) rb_gc_debug_body("mark", (msg), 0, ptr) +#define RUBY_FREE_ENTER(msg) rb_gc_debug_body("free", (msg), 1, ptr) +#define RUBY_FREE_LEAVE(msg) rb_gc_debug_body("free", (msg), 0, ptr) +#define RUBY_GC_INFO rb_gc_debug_indent(); printf + +#else +#define RUBY_MARK_ENTER(msg) +#define RUBY_MARK_LEAVE(msg) +#define RUBY_FREE_ENTER(msg) +#define RUBY_FREE_LEAVE(msg) +#define RUBY_GC_INFO if(0)printf +#endif + +#define RUBY_MARK_UNLESS_NULL(ptr) do { \ + VALUE markobj = (ptr); \ + if (RTEST(markobj)) {rb_gc_mark(markobj);} \ +} while (0) +#define RUBY_FREE_UNLESS_NULL(ptr) if(ptr){ruby_xfree(ptr);(ptr)=NULL;} + +#if STACK_GROW_DIRECTION > 0 +# define STACK_UPPER(x, a, b) (a) +#elif STACK_GROW_DIRECTION < 0 +# define STACK_UPPER(x, a, b) (b) +#else +RUBY_EXTERN int ruby_stack_grow_direction; +int ruby_get_stack_grow_direction(volatile VALUE *addr); +# define stack_growup_p(x) ( \ + (ruby_stack_grow_direction ? \ + ruby_stack_grow_direction : \ + ruby_get_stack_grow_direction(x)) > 0) +# define STACK_UPPER(x, a, b) (stack_growup_p(x) ? 
(a) : (b)) +#endif + +#if STACK_GROW_DIRECTION +#define STACK_GROW_DIR_DETECTION +#define STACK_DIR_UPPER(a,b) STACK_UPPER(0, (a), (b)) +#else +#define STACK_GROW_DIR_DETECTION VALUE stack_grow_dir_detection +#define STACK_DIR_UPPER(a,b) STACK_UPPER(&stack_grow_dir_detection, (a), (b)) +#endif +#define IS_STACK_DIR_UPPER() STACK_DIR_UPPER(1,0) + +const char *rb_obj_info(VALUE obj); +const char *rb_raw_obj_info(char *buff, const int buff_size, VALUE obj); +void rb_obj_info_dump(VALUE obj); + +struct rb_thread_struct; + +RUBY_SYMBOL_EXPORT_BEGIN + +/* exports for objspace module */ +size_t rb_objspace_data_type_memsize(VALUE obj); +void rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data); +void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *data); +int rb_objspace_markable_object_p(VALUE obj); +int rb_objspace_internal_object_p(VALUE obj); +int rb_objspace_marked_object_p(VALUE obj); +int rb_objspace_garbage_object_p(VALUE obj); + +void rb_objspace_each_objects( + int (*callback)(void *start, void *end, size_t stride, void *data), + void *data); + +void rb_objspace_each_objects_without_setup( + int (*callback)(void *, void *, size_t, void *), + void *data); + +RUBY_SYMBOL_EXPORT_END + +#endif /* RUBY_GC_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/id.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/id.h new file mode 100644 index 0000000..f1fd07a --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/id.h @@ -0,0 +1,250 @@ +/* DO NOT EDIT THIS FILE DIRECTLY */ +/********************************************************************** + + id.h - + + $Author: nobu $ + created at: Sun Oct 19 21:12:51 2008 + + Copyright (C) 2007 Koichi Sasada + +**********************************************************************/ + +#ifndef RUBY_ID_H +#define RUBY_ID_H + +enum ruby_id_types { + RUBY_ID_STATIC_SYM = 0x01, + RUBY_ID_LOCAL = 0x00, + RUBY_ID_INSTANCE = (0x01<<1), + RUBY_ID_GLOBAL = (0x03<<1), + RUBY_ID_ATTRSET = (0x04<<1), + RUBY_ID_CONST = (0x05<<1), + RUBY_ID_CLASS = (0x06<<1), + RUBY_ID_JUNK = (0x07<<1), + RUBY_ID_INTERNAL = RUBY_ID_JUNK, + RUBY_ID_SCOPE_SHIFT = 4, + RUBY_ID_SCOPE_MASK = (~(~0U<<(RUBY_ID_SCOPE_SHIFT-1))<<1) +}; + +#define ID_STATIC_SYM RUBY_ID_STATIC_SYM +#define ID_SCOPE_SHIFT RUBY_ID_SCOPE_SHIFT +#define ID_SCOPE_MASK RUBY_ID_SCOPE_MASK +#define ID_LOCAL RUBY_ID_LOCAL +#define ID_INSTANCE RUBY_ID_INSTANCE +#define ID_GLOBAL RUBY_ID_GLOBAL +#define ID_ATTRSET RUBY_ID_ATTRSET +#define ID_CONST RUBY_ID_CONST +#define ID_CLASS RUBY_ID_CLASS +#define ID_JUNK RUBY_ID_JUNK +#define ID_INTERNAL RUBY_ID_INTERNAL + +#define symIFUNC ID2SYM(idIFUNC) +#define symCFUNC ID2SYM(idCFUNC) + +#define RUBY_TOKEN_DOT2 128 +#define RUBY_TOKEN_DOT3 129 +#define RUBY_TOKEN_UPLUS 130 +#define RUBY_TOKEN_UMINUS 131 +#define RUBY_TOKEN_POW 132 +#define RUBY_TOKEN_CMP 133 +#define RUBY_TOKEN_LSHFT 134 +#define RUBY_TOKEN_RSHFT 135 +#define RUBY_TOKEN_LEQ 136 +#define RUBY_TOKEN_GEQ 137 +#define RUBY_TOKEN_EQ 138 +#define RUBY_TOKEN_EQQ 139 +#define RUBY_TOKEN_NEQ 140 +#define RUBY_TOKEN_MATCH 141 +#define RUBY_TOKEN_NMATCH 142 +#define RUBY_TOKEN_AREF 143 +#define RUBY_TOKEN_ASET 144 +#define RUBY_TOKEN_COLON2 145 +#define RUBY_TOKEN_ANDOP 146 +#define RUBY_TOKEN_OROP 147 +#define RUBY_TOKEN_ANDDOT 148 +#define RUBY_TOKEN(t) RUBY_TOKEN_##t + +#define RUBY_TOKEN2ID_TYPE(tok, type) ((tok<> ID_SCOPE_SHIFT +}; + +#endif /* RUBY_ID_H */ diff --git 
a/lib/debase/ruby_core_source/ruby-2.5.4-p155/id_table.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/id_table.h new file mode 100644 index 0000000..b10b4ac --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/id_table.h @@ -0,0 +1,31 @@ +#ifndef RUBY_ID_TABLE_H +#define RUBY_ID_TABLE_H 1 +#include "ruby/ruby.h" + +struct rb_id_table; + +/* compatible with ST_* */ +enum rb_id_table_iterator_result { + ID_TABLE_CONTINUE = ST_CONTINUE, + ID_TABLE_STOP = ST_STOP, + ID_TABLE_DELETE = ST_DELETE, + ID_TABLE_ITERATOR_RESULT_END +}; + +struct rb_id_table *rb_id_table_create(size_t size); +void rb_id_table_free(struct rb_id_table *tbl); +void rb_id_table_clear(struct rb_id_table *tbl); + +size_t rb_id_table_size(const struct rb_id_table *tbl); +size_t rb_id_table_memsize(const struct rb_id_table *tbl); + +int rb_id_table_insert(struct rb_id_table *tbl, ID id, VALUE val); +int rb_id_table_lookup(struct rb_id_table *tbl, ID id, VALUE *valp); +int rb_id_table_delete(struct rb_id_table *tbl, ID id); + +typedef enum rb_id_table_iterator_result rb_id_table_foreach_func_t(ID id, VALUE val, void *data); +typedef enum rb_id_table_iterator_result rb_id_table_foreach_values_func_t(VALUE val, void *data); +void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data); +void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data); + +#endif /* RUBY_ID_TABLE_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/insns.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/insns.inc new file mode 100644 index 0000000..d5433f0 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/insns.inc @@ -0,0 +1,217 @@ +/** -*-c-*- + This file contains YARV instructions list. + + ---- + This file is auto generated by insns2vm.rb + DO NOT TOUCH! 
+ + If you want to fix something, you must edit 'template/insns.inc.tmpl' + or tool/insns2vm.rb + */ + + +/* BIN : Basic Instruction Name */ +#define BIN(n) YARVINSN_##n + +enum ruby_vminsn_type { + BIN(nop), + BIN(getlocal), + BIN(setlocal), + BIN(getblockparam), + BIN(setblockparam), + BIN(getspecial), + BIN(setspecial), + BIN(getinstancevariable), + BIN(setinstancevariable), + BIN(getclassvariable), + BIN(setclassvariable), + BIN(getconstant), + BIN(setconstant), + BIN(getglobal), + BIN(setglobal), + BIN(putnil), + BIN(putself), + BIN(putobject), + BIN(putspecialobject), + BIN(putiseq), + BIN(putstring), + BIN(concatstrings), + BIN(tostring), + BIN(freezestring), + BIN(toregexp), + BIN(intern), + BIN(newarray), + BIN(duparray), + BIN(expandarray), + BIN(concatarray), + BIN(splatarray), + BIN(newhash), + BIN(newrange), + BIN(pop), + BIN(dup), + BIN(dupn), + BIN(swap), + BIN(reverse), + BIN(reput), + BIN(topn), + BIN(setn), + BIN(adjuststack), + BIN(defined), + BIN(checkmatch), + BIN(checkkeyword), + BIN(tracecoverage), + BIN(defineclass), + BIN(send), + BIN(opt_str_freeze), + BIN(opt_str_uminus), + BIN(opt_newarray_max), + BIN(opt_newarray_min), + BIN(opt_send_without_block), + BIN(invokesuper), + BIN(invokeblock), + BIN(leave), + BIN(throw), + BIN(jump), + BIN(branchif), + BIN(branchunless), + BIN(branchnil), + BIN(branchiftype), + BIN(getinlinecache), + BIN(setinlinecache), + BIN(once), + BIN(opt_case_dispatch), + BIN(opt_plus), + BIN(opt_minus), + BIN(opt_mult), + BIN(opt_div), + BIN(opt_mod), + BIN(opt_eq), + BIN(opt_neq), + BIN(opt_lt), + BIN(opt_le), + BIN(opt_gt), + BIN(opt_ge), + BIN(opt_ltlt), + BIN(opt_aref), + BIN(opt_aset), + BIN(opt_aset_with), + BIN(opt_aref_with), + BIN(opt_length), + BIN(opt_size), + BIN(opt_empty_p), + BIN(opt_succ), + BIN(opt_not), + BIN(opt_regexpmatch1), + BIN(opt_regexpmatch2), + BIN(opt_call_c_function), + BIN(bitblt), + BIN(answer), + BIN(getlocal_OP__WC__0), + BIN(getlocal_OP__WC__1), + BIN(setlocal_OP__WC__0), + BIN(setlocal_OP__WC__1), + BIN(putobject_OP_INT2FIX_O_0_C_), + BIN(putobject_OP_INT2FIX_O_1_C_), + BIN(trace_nop), + BIN(trace_getlocal), + BIN(trace_setlocal), + BIN(trace_getblockparam), + BIN(trace_setblockparam), + BIN(trace_getspecial), + BIN(trace_setspecial), + BIN(trace_getinstancevariable), + BIN(trace_setinstancevariable), + BIN(trace_getclassvariable), + BIN(trace_setclassvariable), + BIN(trace_getconstant), + BIN(trace_setconstant), + BIN(trace_getglobal), + BIN(trace_setglobal), + BIN(trace_putnil), + BIN(trace_putself), + BIN(trace_putobject), + BIN(trace_putspecialobject), + BIN(trace_putiseq), + BIN(trace_putstring), + BIN(trace_concatstrings), + BIN(trace_tostring), + BIN(trace_freezestring), + BIN(trace_toregexp), + BIN(trace_intern), + BIN(trace_newarray), + BIN(trace_duparray), + BIN(trace_expandarray), + BIN(trace_concatarray), + BIN(trace_splatarray), + BIN(trace_newhash), + BIN(trace_newrange), + BIN(trace_pop), + BIN(trace_dup), + BIN(trace_dupn), + BIN(trace_swap), + BIN(trace_reverse), + BIN(trace_reput), + BIN(trace_topn), + BIN(trace_setn), + BIN(trace_adjuststack), + BIN(trace_defined), + BIN(trace_checkmatch), + BIN(trace_checkkeyword), + BIN(trace_tracecoverage), + BIN(trace_defineclass), + BIN(trace_send), + BIN(trace_opt_str_freeze), + BIN(trace_opt_str_uminus), + BIN(trace_opt_newarray_max), + BIN(trace_opt_newarray_min), + BIN(trace_opt_send_without_block), + BIN(trace_invokesuper), + BIN(trace_invokeblock), + BIN(trace_leave), + BIN(trace_throw), + BIN(trace_jump), + BIN(trace_branchif), + 
BIN(trace_branchunless), + BIN(trace_branchnil), + BIN(trace_branchiftype), + BIN(trace_getinlinecache), + BIN(trace_setinlinecache), + BIN(trace_once), + BIN(trace_opt_case_dispatch), + BIN(trace_opt_plus), + BIN(trace_opt_minus), + BIN(trace_opt_mult), + BIN(trace_opt_div), + BIN(trace_opt_mod), + BIN(trace_opt_eq), + BIN(trace_opt_neq), + BIN(trace_opt_lt), + BIN(trace_opt_le), + BIN(trace_opt_gt), + BIN(trace_opt_ge), + BIN(trace_opt_ltlt), + BIN(trace_opt_aref), + BIN(trace_opt_aset), + BIN(trace_opt_aset_with), + BIN(trace_opt_aref_with), + BIN(trace_opt_length), + BIN(trace_opt_size), + BIN(trace_opt_empty_p), + BIN(trace_opt_succ), + BIN(trace_opt_not), + BIN(trace_opt_regexpmatch1), + BIN(trace_opt_regexpmatch2), + BIN(trace_opt_call_c_function), + BIN(trace_bitblt), + BIN(trace_answer), + BIN(trace_getlocal_OP__WC__0), + BIN(trace_getlocal_OP__WC__1), + BIN(trace_setlocal_OP__WC__0), + BIN(trace_setlocal_OP__WC__1), + BIN(trace_putobject_OP_INT2FIX_O_0_C_), + BIN(trace_putobject_OP_INT2FIX_O_1_C_), + VM_INSTRUCTION_SIZE +}; + +#define ASSERT_VM_INSTRUCTION_SIZE(array) \ + STATIC_ASSERT(numberof_##array, numberof(array) == VM_INSTRUCTION_SIZE) diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/insns_info.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/insns_info.inc new file mode 100644 index 0000000..a0f79ca --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/insns_info.inc @@ -0,0 +1,1570 @@ +/** -*-c-*- + This file contains instruction information for yarv instruction sequence. + + ---- + This file is auto generated by insns2vm.rb + DO NOT TOUCH! + + If you want to fix something, you must edit 'template/insns_info.inc.tmpl' + or tool/insns2vm.rb + */ + +#define TS_OFFSET 'O' +#define TS_NUM 'N' +#define TS_LINDEX 'L' +#define TS_VALUE 'V' +#define TS_ID 'I' +#define TS_GENTRY 'G' +#define TS_IC 'K' +#define TS_CALLINFO 'C' +#define TS_CALLCACHE 'E' +#define TS_CDHASH 'H' +#define TS_ISEQ 'S' +#define TS_VARIABLE '.' 
+#define TS_FUNCPTR 'F' + +static const unsigned short insn_name_info_offset[] = { + 0, + 4, + 13, + 22, + 36, + 50, + 61, + 72, + 92, + 112, + 129, + 146, + 158, + 170, + 180, + 190, + 197, + 205, + 215, + 232, + 240, + 250, + 264, + 273, + 286, + 295, + 302, + 311, + 320, + 332, + 344, + 355, + 363, + 372, + 376, + 380, + 385, + 390, + 398, + 404, + 409, + 414, + 426, + 434, + 445, + 458, + 472, + 484, + 489, + 504, + 519, + 536, + 553, + 576, + 588, + 600, + 606, + 612, + 617, + 626, + 639, + 649, + 662, + 677, + 692, + 697, + 715, + 724, + 734, + 743, + 751, + 759, + 766, + 774, + 781, + 788, + 795, + 802, + 811, + 820, + 829, + 843, + 857, + 868, + 877, + 889, + 898, + 906, + 923, + 940, + 960, + 967, + 974, + 993, + 1012, + 1031, + 1050, + 1078, + 1106, + 1116, + 1131, + 1146, + 1166, + 1186, + 1203, + 1220, + 1246, + 1272, + 1295, + 1318, + 1336, + 1354, + 1370, + 1386, + 1399, + 1413, + 1429, + 1452, + 1466, + 1482, + 1502, + 1517, + 1536, + 1551, + 1564, + 1579, + 1594, + 1612, + 1630, + 1647, + 1661, + 1676, + 1686, + 1696, + 1707, + 1718, + 1732, + 1744, + 1755, + 1766, + 1784, + 1798, + 1815, + 1834, + 1854, + 1872, + 1883, + 1904, + 1925, + 1948, + 1971, + 2000, + 2018, + 2036, + 2048, + 2060, + 2071, + 2086, + 2105, + 2121, + 2140, + 2161, + 2182, + 2193, + 2217, + 2232, + 2248, + 2263, + 2277, + 2291, + 2304, + 2318, + 2331, + 2344, + 2357, + 2370, + 2385, + 2400, + 2415, + 2435, + 2455, + 2472, + 2487, + 2505, + 2520, + 2534, + 2557, + 2580, + 2606, + 2619, + 2632, + 2657, + 2682, + 2707, + 2732, + 2766, +}; + +ASSERT_VM_INSTRUCTION_SIZE(insn_name_info_offset); + +static const char insn_name_info_base[2800] = "" + "nop\0" + "getlocal\0" + "setlocal\0" + "getblockparam\0" + "setblockparam\0" + "getspecial\0" + "setspecial\0" + "getinstancevariable\0" + "setinstancevariable\0" + "getclassvariable\0" + "setclassvariable\0" + "getconstant\0" + "setconstant\0" + "getglobal\0" + "setglobal\0" + "putnil\0" + "putself\0" + "putobject\0" + "putspecialobject\0" + "putiseq\0" + "putstring\0" + "concatstrings\0" + "tostring\0" + "freezestring\0" + "toregexp\0" + "intern\0" + "newarray\0" + "duparray\0" + "expandarray\0" + "concatarray\0" + "splatarray\0" + "newhash\0" + "newrange\0" + "pop\0" + "dup\0" + "dupn\0" + "swap\0" + "reverse\0" + "reput\0" + "topn\0" + "setn\0" + "adjuststack\0" + "defined\0" + "checkmatch\0" + "checkkeyword\0" + "tracecoverage\0" + "defineclass\0" + "send\0" + "opt_str_freeze\0" + "opt_str_uminus\0" + "opt_newarray_max\0" + "opt_newarray_min\0" + "opt_send_without_block\0" + "invokesuper\0" + "invokeblock\0" + "leave\0" + "throw\0" + "jump\0" + "branchif\0" + "branchunless\0" + "branchnil\0" + "branchiftype\0" + "getinlinecache\0" + "setinlinecache\0" + "once\0" + "opt_case_dispatch\0" + "opt_plus\0" + "opt_minus\0" + "opt_mult\0" + "opt_div\0" + "opt_mod\0" + "opt_eq\0" + "opt_neq\0" + "opt_lt\0" + "opt_le\0" + "opt_gt\0" + "opt_ge\0" + "opt_ltlt\0" + "opt_aref\0" + "opt_aset\0" + "opt_aset_with\0" + "opt_aref_with\0" + "opt_length\0" + "opt_size\0" + "opt_empty_p\0" + "opt_succ\0" + "opt_not\0" + "opt_regexpmatch1\0" + "opt_regexpmatch2\0" + "opt_call_c_function\0" + "bitblt\0" + "answer\0" + "getlocal_OP__WC__0\0" + "getlocal_OP__WC__1\0" + "setlocal_OP__WC__0\0" + "setlocal_OP__WC__1\0" + "putobject_OP_INT2FIX_O_0_C_\0" + "putobject_OP_INT2FIX_O_1_C_\0" + "trace_nop\0" + "trace_getlocal\0" + "trace_setlocal\0" + "trace_getblockparam\0" + "trace_setblockparam\0" + "trace_getspecial\0" + "trace_setspecial\0" + "trace_getinstancevariable\0" + 
"trace_setinstancevariable\0" + "trace_getclassvariable\0" + "trace_setclassvariable\0" + "trace_getconstant\0" + "trace_setconstant\0" + "trace_getglobal\0" + "trace_setglobal\0" + "trace_putnil\0" + "trace_putself\0" + "trace_putobject\0" + "trace_putspecialobject\0" + "trace_putiseq\0" + "trace_putstring\0" + "trace_concatstrings\0" + "trace_tostring\0" + "trace_freezestring\0" + "trace_toregexp\0" + "trace_intern\0" + "trace_newarray\0" + "trace_duparray\0" + "trace_expandarray\0" + "trace_concatarray\0" + "trace_splatarray\0" + "trace_newhash\0" + "trace_newrange\0" + "trace_pop\0" + "trace_dup\0" + "trace_dupn\0" + "trace_swap\0" + "trace_reverse\0" + "trace_reput\0" + "trace_topn\0" + "trace_setn\0" + "trace_adjuststack\0" + "trace_defined\0" + "trace_checkmatch\0" + "trace_checkkeyword\0" + "trace_tracecoverage\0" + "trace_defineclass\0" + "trace_send\0" + "trace_opt_str_freeze\0" + "trace_opt_str_uminus\0" + "trace_opt_newarray_max\0" + "trace_opt_newarray_min\0" + "trace_opt_send_without_block\0" + "trace_invokesuper\0" + "trace_invokeblock\0" + "trace_leave\0" + "trace_throw\0" + "trace_jump\0" + "trace_branchif\0" + "trace_branchunless\0" + "trace_branchnil\0" + "trace_branchiftype\0" + "trace_getinlinecache\0" + "trace_setinlinecache\0" + "trace_once\0" + "trace_opt_case_dispatch\0" + "trace_opt_plus\0" + "trace_opt_minus\0" + "trace_opt_mult\0" + "trace_opt_div\0" + "trace_opt_mod\0" + "trace_opt_eq\0" + "trace_opt_neq\0" + "trace_opt_lt\0" + "trace_opt_le\0" + "trace_opt_gt\0" + "trace_opt_ge\0" + "trace_opt_ltlt\0" + "trace_opt_aref\0" + "trace_opt_aset\0" + "trace_opt_aset_with\0" + "trace_opt_aref_with\0" + "trace_opt_length\0" + "trace_opt_size\0" + "trace_opt_empty_p\0" + "trace_opt_succ\0" + "trace_opt_not\0" + "trace_opt_regexpmatch1\0" + "trace_opt_regexpmatch2\0" + "trace_opt_call_c_function\0" + "trace_bitblt\0" + "trace_answer\0" + "trace_getlocal_OP__WC__0\0" + "trace_getlocal_OP__WC__1\0" + "trace_setlocal_OP__WC__0\0" + "trace_setlocal_OP__WC__1\0" + "trace_putobject_OP_INT2FIX_O_0_C_\0" + "trace_putobject_OP_INT2FIX_O_1_C_\0" +; + +#define insn_name_info insn_name_info_base+insn_name_info_offset + +static const char insn_operand_info[][8] = { + "\1""", + "\3""LN", + "\3""LN", + "\3""LN", + "\3""LN", + "\3""NN", + "\2""N", + "\3""IK", + "\3""IK", + "\2""I", + "\2""I", + "\2""I", + "\2""I", + "\2""G", + "\2""G", + "\1""", + "\1""", + "\2""V", + "\2""N", + "\2""S", + "\2""V", + "\2""N", + "\1""", + "\2""V", + "\3""NN", + "\1""", + "\2""N", + "\2""V", + "\3""NN", + "\1""", + "\2""V", + "\2""N", + "\2""N", + "\1""", + "\1""", + "\2""N", + "\1""", + "\2""N", + "\1""", + "\2""N", + "\2""N", + "\2""N", + "\4""NVV", + "\2""N", + "\3""LL", + "\3""NV", + "\4""ISN", + "\4""CES", + "\2""V", + "\2""V", + "\2""N", + "\2""N", + "\3""CE", + "\4""CES", + "\2""C", + "\1""", + "\2""N", + "\2""O", + "\2""O", + "\2""O", + "\2""O", + "\3""NO", + "\3""OK", + "\2""K", + "\3""SK", + "\3""HO", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\5""CECE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\4""CEV", + "\4""CEV", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\2""V", + "\3""CE", + "\2""F", + "\1""", + "\1""", + "\2""L", + "\2""L", + "\2""L", + "\2""L", + "\1""", + "\1""", + "\1""", + "\3""LN", + "\3""LN", + "\3""LN", + "\3""LN", + "\3""NN", + "\2""N", + "\3""IK", + "\3""IK", + "\2""I", + "\2""I", + "\2""I", + "\2""I", + "\2""G", + "\2""G", + "\1""", + "\1""", + "\2""V", + "\2""N", + "\2""S", + 
"\2""V", + "\2""N", + "\1""", + "\2""V", + "\3""NN", + "\1""", + "\2""N", + "\2""V", + "\3""NN", + "\1""", + "\2""V", + "\2""N", + "\2""N", + "\1""", + "\1""", + "\2""N", + "\1""", + "\2""N", + "\1""", + "\2""N", + "\2""N", + "\2""N", + "\4""NVV", + "\2""N", + "\3""LL", + "\3""NV", + "\4""ISN", + "\4""CES", + "\2""V", + "\2""V", + "\2""N", + "\2""N", + "\3""CE", + "\4""CES", + "\2""C", + "\1""", + "\2""N", + "\2""O", + "\2""O", + "\2""O", + "\2""O", + "\3""NO", + "\3""OK", + "\2""K", + "\3""SK", + "\3""HO", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\5""CECE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\4""CEV", + "\4""CEV", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\3""CE", + "\2""V", + "\3""CE", + "\2""F", + "\1""", + "\1""", + "\2""L", + "\2""L", + "\2""L", + "\2""L", + "\1""", + "\1""", +}; + +ASSERT_VM_INSTRUCTION_SIZE(insn_operand_info); + +#ifdef USE_INSN_RET_NUM +static const unsigned short insn_stack_push_num_info[] = { + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 2, + 1, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 0, + 0, + 1, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 2, + 1, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 0, + 0, + 1, + 1, +}; + +ASSERT_VM_INSTRUCTION_SIZE(insn_stack_push_num_info); +#endif + +#ifdef USE_INSN_STACK_INCREASE +static int +insn_stack_increase(int depth, int insn, VALUE *opes) +{ + switch (insn) { + case BIN(nop): { + return depth + 0; + } + case BIN(getlocal): { + return depth + 1; + } + case BIN(setlocal): { + return depth + -1; + } + case BIN(getblockparam): { + return depth + 1; + } + case BIN(setblockparam): { + return depth + -1; + } + case BIN(getspecial): { + return depth + 1; + } + case BIN(setspecial): { + return depth + -1; + } + case BIN(getinstancevariable): { + return depth + 1; + } + case BIN(setinstancevariable): { + return depth + -1; + } + case BIN(getclassvariable): { + return depth + 1; + } + case BIN(setclassvariable): { + return depth + -1; + } + case BIN(getconstant): { + return depth + 0; + } + case BIN(setconstant): { + return depth + -2; + } + case BIN(getglobal): { + return depth + 1; + } + case BIN(setglobal): { + return depth + -1; + } + case BIN(putnil): { + return depth + 1; + } + case BIN(putself): { + return depth + 1; + } + case BIN(putobject): { + return depth + 1; + } + case BIN(putspecialobject): { + return depth + 1; + } + case BIN(putiseq): { + return depth + 1; + } + case BIN(putstring): { + return depth + 1; + } + case BIN(concatstrings): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(tostring): { + return depth + -1; + } + case BIN(freezestring): { + return depth + 0; + } + case BIN(toregexp): { + int inc = 0; + int cnt = 
FIX2INT(opes[1]); + inc += 1 - cnt;; + return depth + inc; + } + case BIN(intern): { + return depth + 0; + } + case BIN(newarray): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(duparray): { + return depth + 1; + } + case BIN(expandarray): { + int inc = 0; + int num = FIX2INT(opes[0]); + int flag = FIX2INT(opes[1]); + inc += num - 1 + (flag & 1 ? 1 : 0);; + return depth + inc; + } + case BIN(concatarray): { + return depth + -1; + } + case BIN(splatarray): { + return depth + 0; + } + case BIN(newhash): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(newrange): { + return depth + -1; + } + case BIN(pop): { + return depth + -1; + } + case BIN(dup): { + return depth + 1; + } + case BIN(dupn): { + int inc = 0; + int n = FIX2INT(opes[0]); + inc += n;; + return depth + inc; + } + case BIN(swap): { + return depth + 0; + } + case BIN(reverse): { + int inc = 0; + inc += 0;; + return depth + inc; + } + case BIN(reput): { + int inc = 0; + inc += 0;; + return depth + inc; + } + case BIN(topn): { + int inc = 0; + inc += 1;; + return depth + inc; + } + case BIN(setn): { + int inc = 0; + inc += 0; + return depth + inc; + } + case BIN(adjuststack): { + int inc = 0; + int n = FIX2INT(opes[0]); + inc -= n; + return depth + inc; + } + case BIN(defined): { + return depth + 0; + } + case BIN(checkmatch): { + return depth + -1; + } + case BIN(checkkeyword): { + return depth + 1; + } + case BIN(tracecoverage): { + return depth + 0; + } + case BIN(defineclass): { + return depth + -1; + } + case BIN(send): { + int inc = 0; + CALL_INFO ci = (CALL_INFO)(opes[0]); + inc += - (int)(ci->orig_argc + ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0));; + return depth + inc; + } + case BIN(opt_str_freeze): { + return depth + 1; + } + case BIN(opt_str_uminus): { + return depth + 1; + } + case BIN(opt_newarray_max): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(opt_newarray_min): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(opt_send_without_block): { + int inc = 0; + CALL_INFO ci = (CALL_INFO)(opes[0]); + inc += -ci->orig_argc;; + return depth + inc; + } + case BIN(invokesuper): { + int inc = 0; + CALL_INFO ci = (CALL_INFO)(opes[0]); + inc += - (int)(ci->orig_argc + ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 
1 : 0));; + return depth + inc; + } + case BIN(invokeblock): { + int inc = 0; + CALL_INFO ci = (CALL_INFO)(opes[0]); + inc += 1 - ci->orig_argc;; + return depth + inc; + } + case BIN(leave): { + return depth + 0; + } + case BIN(throw): { + return depth + 0; + } + case BIN(jump): { + return depth + 0; + } + case BIN(branchif): { + return depth + -1; + } + case BIN(branchunless): { + return depth + -1; + } + case BIN(branchnil): { + return depth + -1; + } + case BIN(branchiftype): { + return depth + -1; + } + case BIN(getinlinecache): { + return depth + 1; + } + case BIN(setinlinecache): { + return depth + 0; + } + case BIN(once): { + return depth + 1; + } + case BIN(opt_case_dispatch): { + int inc = 0; + inc += -1;; + return depth + inc; + } + case BIN(opt_plus): { + return depth + -1; + } + case BIN(opt_minus): { + return depth + -1; + } + case BIN(opt_mult): { + return depth + -1; + } + case BIN(opt_div): { + return depth + -1; + } + case BIN(opt_mod): { + return depth + -1; + } + case BIN(opt_eq): { + return depth + -1; + } + case BIN(opt_neq): { + return depth + -1; + } + case BIN(opt_lt): { + return depth + -1; + } + case BIN(opt_le): { + return depth + -1; + } + case BIN(opt_gt): { + return depth + -1; + } + case BIN(opt_ge): { + return depth + -1; + } + case BIN(opt_ltlt): { + return depth + -1; + } + case BIN(opt_aref): { + return depth + -1; + } + case BIN(opt_aset): { + return depth + -2; + } + case BIN(opt_aset_with): { + return depth + -1; + } + case BIN(opt_aref_with): { + return depth + 0; + } + case BIN(opt_length): { + return depth + 0; + } + case BIN(opt_size): { + return depth + 0; + } + case BIN(opt_empty_p): { + return depth + 0; + } + case BIN(opt_succ): { + return depth + 0; + } + case BIN(opt_not): { + return depth + 0; + } + case BIN(opt_regexpmatch1): { + return depth + 0; + } + case BIN(opt_regexpmatch2): { + return depth + -1; + } + case BIN(opt_call_c_function): { + return depth + 0; + } + case BIN(bitblt): { + return depth + 1; + } + case BIN(answer): { + return depth + 1; + } + case BIN(getlocal_OP__WC__0): { + return depth + 1; + } + case BIN(getlocal_OP__WC__1): { + return depth + 1; + } + case BIN(setlocal_OP__WC__0): { + return depth + -1; + } + case BIN(setlocal_OP__WC__1): { + return depth + -1; + } + case BIN(putobject_OP_INT2FIX_O_0_C_): { + return depth + 1; + } + case BIN(putobject_OP_INT2FIX_O_1_C_): { + return depth + 1; + } + case BIN(trace_nop): { + return depth + 0; + } + case BIN(trace_getlocal): { + return depth + 1; + } + case BIN(trace_setlocal): { + return depth + -1; + } + case BIN(trace_getblockparam): { + return depth + 1; + } + case BIN(trace_setblockparam): { + return depth + -1; + } + case BIN(trace_getspecial): { + return depth + 1; + } + case BIN(trace_setspecial): { + return depth + -1; + } + case BIN(trace_getinstancevariable): { + return depth + 1; + } + case BIN(trace_setinstancevariable): { + return depth + -1; + } + case BIN(trace_getclassvariable): { + return depth + 1; + } + case BIN(trace_setclassvariable): { + return depth + -1; + } + case BIN(trace_getconstant): { + return depth + 0; + } + case BIN(trace_setconstant): { + return depth + -2; + } + case BIN(trace_getglobal): { + return depth + 1; + } + case BIN(trace_setglobal): { + return depth + -1; + } + case BIN(trace_putnil): { + return depth + 1; + } + case BIN(trace_putself): { + return depth + 1; + } + case BIN(trace_putobject): { + return depth + 1; + } + case BIN(trace_putspecialobject): { + return depth + 1; + } + case BIN(trace_putiseq): { + return depth + 1; + } 
+ case BIN(trace_putstring): { + return depth + 1; + } + case BIN(trace_concatstrings): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(trace_tostring): { + return depth + -1; + } + case BIN(trace_freezestring): { + return depth + 0; + } + case BIN(trace_toregexp): { + int inc = 0; + int cnt = FIX2INT(opes[1]); + inc += 1 - cnt;; + return depth + inc; + } + case BIN(trace_intern): { + return depth + 0; + } + case BIN(trace_newarray): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(trace_duparray): { + return depth + 1; + } + case BIN(trace_expandarray): { + int inc = 0; + int num = FIX2INT(opes[0]); + int flag = FIX2INT(opes[1]); + inc += num - 1 + (flag & 1 ? 1 : 0);; + return depth + inc; + } + case BIN(trace_concatarray): { + return depth + -1; + } + case BIN(trace_splatarray): { + return depth + 0; + } + case BIN(trace_newhash): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(trace_newrange): { + return depth + -1; + } + case BIN(trace_pop): { + return depth + -1; + } + case BIN(trace_dup): { + return depth + 1; + } + case BIN(trace_dupn): { + int inc = 0; + int n = FIX2INT(opes[0]); + inc += n;; + return depth + inc; + } + case BIN(trace_swap): { + return depth + 0; + } + case BIN(trace_reverse): { + int inc = 0; + inc += 0;; + return depth + inc; + } + case BIN(trace_reput): { + int inc = 0; + inc += 0;; + return depth + inc; + } + case BIN(trace_topn): { + int inc = 0; + inc += 1;; + return depth + inc; + } + case BIN(trace_setn): { + int inc = 0; + inc += 0; + return depth + inc; + } + case BIN(trace_adjuststack): { + int inc = 0; + int n = FIX2INT(opes[0]); + inc -= n; + return depth + inc; + } + case BIN(trace_defined): { + return depth + 0; + } + case BIN(trace_checkmatch): { + return depth + -1; + } + case BIN(trace_checkkeyword): { + return depth + 1; + } + case BIN(trace_tracecoverage): { + return depth + 0; + } + case BIN(trace_defineclass): { + return depth + -1; + } + case BIN(trace_send): { + int inc = 0; + CALL_INFO ci = (CALL_INFO)(opes[0]); + inc += - (int)(ci->orig_argc + ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0));; + return depth + inc; + } + case BIN(trace_opt_str_freeze): { + return depth + 1; + } + case BIN(trace_opt_str_uminus): { + return depth + 1; + } + case BIN(trace_opt_newarray_max): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(trace_opt_newarray_min): { + int inc = 0; + int num = FIX2INT(opes[0]); + inc += 1 - num;; + return depth + inc; + } + case BIN(trace_opt_send_without_block): { + int inc = 0; + CALL_INFO ci = (CALL_INFO)(opes[0]); + inc += -ci->orig_argc;; + return depth + inc; + } + case BIN(trace_invokesuper): { + int inc = 0; + CALL_INFO ci = (CALL_INFO)(opes[0]); + inc += - (int)(ci->orig_argc + ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 
1 : 0));; + return depth + inc; + } + case BIN(trace_invokeblock): { + int inc = 0; + CALL_INFO ci = (CALL_INFO)(opes[0]); + inc += 1 - ci->orig_argc;; + return depth + inc; + } + case BIN(trace_leave): { + return depth + 0; + } + case BIN(trace_throw): { + return depth + 0; + } + case BIN(trace_jump): { + return depth + 0; + } + case BIN(trace_branchif): { + return depth + -1; + } + case BIN(trace_branchunless): { + return depth + -1; + } + case BIN(trace_branchnil): { + return depth + -1; + } + case BIN(trace_branchiftype): { + return depth + -1; + } + case BIN(trace_getinlinecache): { + return depth + 1; + } + case BIN(trace_setinlinecache): { + return depth + 0; + } + case BIN(trace_once): { + return depth + 1; + } + case BIN(trace_opt_case_dispatch): { + int inc = 0; + inc += -1;; + return depth + inc; + } + case BIN(trace_opt_plus): { + return depth + -1; + } + case BIN(trace_opt_minus): { + return depth + -1; + } + case BIN(trace_opt_mult): { + return depth + -1; + } + case BIN(trace_opt_div): { + return depth + -1; + } + case BIN(trace_opt_mod): { + return depth + -1; + } + case BIN(trace_opt_eq): { + return depth + -1; + } + case BIN(trace_opt_neq): { + return depth + -1; + } + case BIN(trace_opt_lt): { + return depth + -1; + } + case BIN(trace_opt_le): { + return depth + -1; + } + case BIN(trace_opt_gt): { + return depth + -1; + } + case BIN(trace_opt_ge): { + return depth + -1; + } + case BIN(trace_opt_ltlt): { + return depth + -1; + } + case BIN(trace_opt_aref): { + return depth + -1; + } + case BIN(trace_opt_aset): { + return depth + -2; + } + case BIN(trace_opt_aset_with): { + return depth + -1; + } + case BIN(trace_opt_aref_with): { + return depth + 0; + } + case BIN(trace_opt_length): { + return depth + 0; + } + case BIN(trace_opt_size): { + return depth + 0; + } + case BIN(trace_opt_empty_p): { + return depth + 0; + } + case BIN(trace_opt_succ): { + return depth + 0; + } + case BIN(trace_opt_not): { + return depth + 0; + } + case BIN(trace_opt_regexpmatch1): { + return depth + 0; + } + case BIN(trace_opt_regexpmatch2): { + return depth + -1; + } + case BIN(trace_opt_call_c_function): { + return depth + 0; + } + case BIN(trace_bitblt): { + return depth + 1; + } + case BIN(trace_answer): { + return depth + 1; + } + case BIN(trace_getlocal_OP__WC__0): { + return depth + 1; + } + case BIN(trace_getlocal_OP__WC__1): { + return depth + 1; + } + case BIN(trace_setlocal_OP__WC__0): { + return depth + -1; + } + case BIN(trace_setlocal_OP__WC__1): { + return depth + -1; + } + case BIN(trace_putobject_OP_INT2FIX_O_0_C_): { + return depth + 1; + } + case BIN(trace_putobject_OP_INT2FIX_O_1_C_): { + return depth + 1; + } + default: + rb_bug("insn_sp_increase: unreachable"); + } + return 0; +} +#endif + +/* some utilities */ + +static int +insn_len(VALUE insn) +{ + return (unsigned char)insn_operand_info[(int)insn][0]; +} + +static const char * +insn_name(VALUE insn) +{ + return insn_name_info[(int)insn]; +} + +static const char * +insn_op_types(VALUE insn) +{ + return insn_operand_info[(int)insn]+1; +} + +static int +insn_op_type(VALUE insn, long pos) +{ + int len = insn_len(insn) - 1; + if (pos < len) { + return insn_operand_info[(int)insn][pos+1]; + } + else{ + return 0; + } +} + +#ifdef USE_INSN_RET_NUM +static int +insn_ret_num(VALUE insn) +{ + return insn_stack_push_num_info[(int)insn]; +} +#endif diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/internal.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/internal.h new file mode 100644 index 0000000..2e2fa7b --- /dev/null +++ 
b/lib/debase/ruby_core_source/ruby-2.5.4-p155/internal.h @@ -0,0 +1,2036 @@ +/********************************************************************** + + internal.h - + + $Author$ + created at: Tue May 17 11:42:20 JST 2011 + + Copyright (C) 2011 Yukihiro Matsumoto + +**********************************************************************/ + +#ifndef RUBY_INTERNAL_H +#define RUBY_INTERNAL_H 1 + +#include "ruby.h" +#include "ruby/encoding.h" +#include "ruby/io.h" + +#if defined(__cplusplus) +extern "C" { +#if 0 +} /* satisfy cc-mode */ +#endif +#endif + +#ifdef HAVE_STDBOOL_H +# include <stdbool.h> +#endif + +#ifndef __bool_true_false_are_defined +# ifndef __cplusplus +# undef bool +# undef false +# undef true +# define bool signed char +# define false 0 +# define true 1 +# define __bool_true_false_are_defined 1 +# endif +#endif + +/* The most significant bit of the lower part of half-long integer. + * If sizeof(long) == 4, this is 0x8000. + * If sizeof(long) == 8, this is 0x80000000. + */ +#define HALF_LONG_MSB ((SIGNED_VALUE)1<<((SIZEOF_LONG*CHAR_BIT-1)/2)) + +#define LIKELY(x) RB_LIKELY(x) +#define UNLIKELY(x) RB_UNLIKELY(x) + +#ifndef MAYBE_UNUSED +# define MAYBE_UNUSED(x) x +#endif + +#ifndef WARN_UNUSED_RESULT +# define WARN_UNUSED_RESULT(x) x +#endif + +#ifdef HAVE_VALGRIND_MEMCHECK_H +# include <valgrind/memcheck.h> +# ifndef VALGRIND_MAKE_MEM_DEFINED +# define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n)) +# endif +# ifndef VALGRIND_MAKE_MEM_UNDEFINED +# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n)) +# endif +#else +# define VALGRIND_MAKE_MEM_DEFINED(p, n) 0 +# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) 0 +#endif + +#define numberof(array) ((int)(sizeof(array) / sizeof((array)[0]))) + +#ifndef __has_feature +# define __has_feature(x) 0 +#endif + +#ifndef __has_extension +# define __has_extension __has_feature +#endif + +#if GCC_VERSION_SINCE(4, 6, 0) || __has_extension(c_static_assert) +# define STATIC_ASSERT(name, expr) _Static_assert(expr, #name ": " #expr) +#else +# define STATIC_ASSERT(name, expr) typedef int static_assert_##name##_check[1 - 2*!(expr)] +#endif + +#define SIGNED_INTEGER_TYPE_P(int_type) (0 > ((int_type)0)-1) +#define SIGNED_INTEGER_MAX(sint_type) \ + (sint_type) \ + ((((sint_type)1) << (sizeof(sint_type) * CHAR_BIT - 2)) | \ + ((((sint_type)1) << (sizeof(sint_type) * CHAR_BIT - 2)) - 1)) +#define SIGNED_INTEGER_MIN(sint_type) (-SIGNED_INTEGER_MAX(sint_type)-1) +#define UNSIGNED_INTEGER_MAX(uint_type) (~(uint_type)0) + +#if SIGNEDNESS_OF_TIME_T < 0 /* signed */ +# define TIMET_MAX SIGNED_INTEGER_MAX(time_t) +# define TIMET_MIN SIGNED_INTEGER_MIN(time_t) +#elif SIGNEDNESS_OF_TIME_T > 0 /* unsigned */ +# define TIMET_MAX UNSIGNED_INTEGER_MAX(time_t) +# define TIMET_MIN ((time_t)0) +#endif +#define TIMET_MAX_PLUS_ONE (2*(double)(TIMET_MAX/2+1)) + +#ifdef HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW_P +#define MUL_OVERFLOW_P(a, b) \ + __builtin_mul_overflow_p((a), (b), (__typeof__(a * b))0) +#elif defined HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW +#define MUL_OVERFLOW_P(a, b) \ + ({__typeof__(a) c; __builtin_mul_overflow((a), (b), &c);}) +#endif + +#define MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \ + (a) == 0 ? 0 : \ + (a) == -1 ? (b) < -(max) : \ + (a) > 0 ? \ + ((b) > 0 ? (max) / (a) < (b) : (min) / (a) > (b)) : \ + ((b) > 0 ?
(min) / (a) < (b) : (max) / (a) > (b))) + +#ifdef HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW_P +/* __builtin_mul_overflow_p can take bitfield */ +/* and GCC permits bitfields for integers other than int */ +#define MUL_OVERFLOW_FIXNUM_P(a, b) ({ \ + struct { long fixnum : SIZEOF_LONG * CHAR_BIT - 1; } c; \ + __builtin_mul_overflow_p((a), (b), c.fixnum); \ +}) +#else +#define MUL_OVERFLOW_FIXNUM_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX) +#endif + +#ifdef MUL_OVERFLOW_P +#define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_P(a, b) +#define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_P(a, b) +#define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_P(a, b) +#else +#define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX) +#define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX) +#define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX) +#endif + +#ifndef swap16 +# ifdef HAVE_BUILTIN___BUILTIN_BSWAP16 +# define swap16(x) __builtin_bswap16(x) +# endif +#endif + +#ifndef swap16 +# define swap16(x) ((uint16_t)((((x)&0xFF)<<8) | (((x)>>8)&0xFF))) +#endif + +#ifndef swap32 +# ifdef HAVE_BUILTIN___BUILTIN_BSWAP32 +# define swap32(x) __builtin_bswap32(x) +# endif +#endif + +#ifndef swap32 +# define swap32(x) ((uint32_t)((((x)&0xFF)<<24) \ + |(((x)>>24)&0xFF) \ + |(((x)&0x0000FF00)<<8) \ + |(((x)&0x00FF0000)>>8) )) +#endif + +#ifndef swap64 +# ifdef HAVE_BUILTIN___BUILTIN_BSWAP64 +# define swap64(x) __builtin_bswap64(x) +# endif +#endif + +#ifndef swap64 +# ifdef HAVE_INT64_T +# define byte_in_64bit(n) ((uint64_t)0xff << (n)) +# define swap64(x) ((uint64_t)((((x)&byte_in_64bit(0))<<56) \ + |(((x)>>56)&0xFF) \ + |(((x)&byte_in_64bit(8))<<40) \ + |(((x)&byte_in_64bit(48))>>40) \ + |(((x)&byte_in_64bit(16))<<24) \ + |(((x)&byte_in_64bit(40))>>24) \ + |(((x)&byte_in_64bit(24))<<8) \ + |(((x)&byte_in_64bit(32))>>8))) +# endif +#endif + +static inline unsigned int +nlz_int(unsigned int x) +{ +#if defined(HAVE_BUILTIN___BUILTIN_CLZ) + if (x == 0) return SIZEOF_INT * CHAR_BIT; + return (unsigned int)__builtin_clz(x); +#else + unsigned int y; +# if 64 < SIZEOF_INT * CHAR_BIT + unsigned int n = 128; +# elif 32 < SIZEOF_INT * CHAR_BIT + unsigned int n = 64; +# else + unsigned int n = 32; +# endif +# if 64 < SIZEOF_INT * CHAR_BIT + y = x >> 64; if (y) {n -= 64; x = y;} +# endif +# if 32 < SIZEOF_INT * CHAR_BIT + y = x >> 32; if (y) {n -= 32; x = y;} +# endif + y = x >> 16; if (y) {n -= 16; x = y;} + y = x >> 8; if (y) {n -= 8; x = y;} + y = x >> 4; if (y) {n -= 4; x = y;} + y = x >> 2; if (y) {n -= 2; x = y;} + y = x >> 1; if (y) {return n - 2;} + return (unsigned int)(n - x); +#endif +} + +static inline unsigned int +nlz_long(unsigned long x) +{ +#if defined(HAVE_BUILTIN___BUILTIN_CLZL) + if (x == 0) return SIZEOF_LONG * CHAR_BIT; + return (unsigned int)__builtin_clzl(x); +#else + unsigned long y; +# if 64 < SIZEOF_LONG * CHAR_BIT + unsigned int n = 128; +# elif 32 < SIZEOF_LONG * CHAR_BIT + unsigned int n = 64; +# else + unsigned int n = 32; +# endif +# if 64 < SIZEOF_LONG * CHAR_BIT + y = x >> 64; if (y) {n -= 64; x = y;} +# endif +# if 32 < SIZEOF_LONG * CHAR_BIT + y = x >> 32; if (y) {n -= 32; x = y;} +# endif + y = x >> 16; if (y) {n -= 16; x = y;} + y = x >> 8; if (y) {n -= 8; x = y;} + y = x >> 4; if (y) {n -= 4; x = y;} + y = x >> 2; if (y) {n -= 2; x = y;} + y = x >> 1; if (y) {return n - 2;} + return (unsigned int)(n - x); +#endif +} + +#ifdef HAVE_LONG_LONG +static inline unsigned int 
+nlz_long_long(unsigned LONG_LONG x) +{ +#if defined(HAVE_BUILTIN___BUILTIN_CLZLL) + if (x == 0) return SIZEOF_LONG_LONG * CHAR_BIT; + return (unsigned int)__builtin_clzll(x); +#else + unsigned LONG_LONG y; +# if 64 < SIZEOF_LONG_LONG * CHAR_BIT + unsigned int n = 128; +# elif 32 < SIZEOF_LONG_LONG * CHAR_BIT + unsigned int n = 64; +# else + unsigned int n = 32; +# endif +# if 64 < SIZEOF_LONG_LONG * CHAR_BIT + y = x >> 64; if (y) {n -= 64; x = y;} +# endif +# if 32 < SIZEOF_LONG_LONG * CHAR_BIT + y = x >> 32; if (y) {n -= 32; x = y;} +# endif + y = x >> 16; if (y) {n -= 16; x = y;} + y = x >> 8; if (y) {n -= 8; x = y;} + y = x >> 4; if (y) {n -= 4; x = y;} + y = x >> 2; if (y) {n -= 2; x = y;} + y = x >> 1; if (y) {return n - 2;} + return (unsigned int)(n - x); +#endif +} +#endif + +#ifdef HAVE_UINT128_T +static inline unsigned int +nlz_int128(uint128_t x) +{ + uint128_t y; + unsigned int n = 128; + y = x >> 64; if (y) {n -= 64; x = y;} + y = x >> 32; if (y) {n -= 32; x = y;} + y = x >> 16; if (y) {n -= 16; x = y;} + y = x >> 8; if (y) {n -= 8; x = y;} + y = x >> 4; if (y) {n -= 4; x = y;} + y = x >> 2; if (y) {n -= 2; x = y;} + y = x >> 1; if (y) {return n - 2;} + return (unsigned int)(n - x); +} +#endif + +static inline unsigned int +nlz_intptr(uintptr_t x) +{ +#if SIZEOF_VOIDP == 8 + return nlz_long_long(x); +#elif SIZEOF_VOIDP == 4 + return nlz_int(x); +#endif +} + +static inline unsigned int +rb_popcount32(uint32_t x) +{ +#ifdef HAVE_BUILTIN___BUILTIN_POPCOUNT + return (unsigned int)__builtin_popcount(x); +#else + x = (x & 0x55555555) + (x >> 1 & 0x55555555); + x = (x & 0x33333333) + (x >> 2 & 0x33333333); + x = (x & 0x0f0f0f0f) + (x >> 4 & 0x0f0f0f0f); + x = (x & 0x001f001f) + (x >> 8 & 0x001f001f); + return (x & 0x0000003f) + (x >>16 & 0x0000003f); +#endif +} + +static inline int +rb_popcount64(uint64_t x) +{ +#ifdef HAVE_BUILTIN___BUILTIN_POPCOUNT + return __builtin_popcountll(x); +#else + x = (x & 0x5555555555555555) + (x >> 1 & 0x5555555555555555); + x = (x & 0x3333333333333333) + (x >> 2 & 0x3333333333333333); + x = (x & 0x0707070707070707) + (x >> 4 & 0x0707070707070707); + x = (x & 0x001f001f001f001f) + (x >> 8 & 0x001f001f001f001f); + x = (x & 0x0000003f0000003f) + (x >>16 & 0x0000003f0000003f); + return (x & 0x7f) + (x >>32 & 0x7f); +#endif +} + +static inline int +rb_popcount_intptr(uintptr_t x) +{ +#if SIZEOF_VOIDP == 8 + return rb_popcount64(x); +#elif SIZEOF_VOIDP == 4 + return rb_popcount32(x); +#endif +} + +static inline int +ntz_int32(uint32_t x) +{ +#ifdef HAVE_BUILTIN___BUILTIN_CTZ + return __builtin_ctz(x); +#else + return rb_popcount32((~x) & (x-1)); +#endif +} + +static inline int +ntz_int64(uint64_t x) +{ +#ifdef HAVE_BUILTIN___BUILTIN_CTZLL + return __builtin_ctzll(x); +#else + return rb_popcount64((~x) & (x-1)); +#endif +} + +static inline int +ntz_intptr(uintptr_t x) +{ +#if SIZEOF_VOIDP == 8 + return ntz_int64(x); +#elif SIZEOF_VOIDP == 4 + return ntz_int32(x); +#endif +} + +#if HAVE_LONG_LONG && SIZEOF_LONG * 2 <= SIZEOF_LONG_LONG +# define DLONG LONG_LONG +# define DL2NUM(x) LL2NUM(x) +#elif defined(HAVE_INT128_T) +# define DLONG int128_t +# define DL2NUM(x) (RB_FIXABLE(x) ? 
LONG2FIX(x) : rb_int128t2big(x)) +VALUE rb_int128t2big(int128_t n); +#endif + +static inline long +rb_overflowed_fix_to_int(long x) +{ + return (long)((unsigned long)(x >> 1) ^ (1LU << (SIZEOF_LONG * CHAR_BIT - 1))); +} + +static inline VALUE +rb_fix_plus_fix(VALUE x, VALUE y) +{ +#ifdef HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW + long lz; + /* NOTE + * (1) `LONG2FIX(FIX2LONG(x)+FIX2LONG(y))` + + = `((lx*2+1)/2 + (ly*2+1)/2)*2+1` + + = `lx*2 + ly*2 + 1` + + = `(lx*2+1) + (ly*2+1) - 1` + + = `x + y - 1` + * (2) Fixnum's LSB is always 1. + * It means you can always run `x - 1` without overflow. + * (3) Of course `z = x + (y-1)` may overflow. + * At that time true value is + * * positive: 0b0 1xxx...1, and z = 0b1xxx...1 + * * nevative: 0b1 0xxx...1, and z = 0b0xxx...1 + * To convert this true value to long, + * (a) Use arithmetic shift + * * positive: 0b11xxx... + * * negative: 0b00xxx... + * (b) invert MSB + * * positive: 0b01xxx... + * * negative: 0b10xxx... + */ + if (__builtin_add_overflow((long)x, (long)y-1, &lz)) { + return rb_int2big(rb_overflowed_fix_to_int(lz)); + } + else { + return (VALUE)lz; + } +#else + long lz = FIX2LONG(x) + FIX2LONG(y); + return LONG2NUM(lz); +#endif +} + +static inline VALUE +rb_fix_minus_fix(VALUE x, VALUE y) +{ +#ifdef HAVE_BUILTIN___BUILTIN_SUB_OVERFLOW + long lz; + if (__builtin_sub_overflow((long)x, (long)y-1, &lz)) { + return rb_int2big(rb_overflowed_fix_to_int(lz)); + } + else { + return (VALUE)lz; + } +#else + long lz = FIX2LONG(x) - FIX2LONG(y); + return LONG2NUM(lz); +#endif +} + +/* arguments must be Fixnum */ +static inline VALUE +rb_fix_mul_fix(VALUE x, VALUE y) +{ + long lx = FIX2LONG(x); + long ly = FIX2LONG(y); +#ifdef DLONG + return DL2NUM((DLONG)lx * (DLONG)ly); +#else + if (MUL_OVERFLOW_FIXNUM_P(lx, ly)) { + return rb_big_mul(rb_int2big(lx), rb_int2big(ly)); + } + else { + return LONG2FIX(lx * ly); + } +#endif +} + +/* + * This behaves different from C99 for negative arguments. + * Note that div may overflow fixnum. + */ +static inline void +rb_fix_divmod_fix(VALUE a, VALUE b, VALUE *divp, VALUE *modp) +{ + /* assume / and % comply C99. + * ldiv(3) won't be inlined by GCC and clang. + * I expect / and % are compiled as single idiv. + */ + long x = FIX2LONG(a); + long y = FIX2LONG(b); + long div, mod; + if (x == FIXNUM_MIN && y == -1) { + if (divp) *divp = LONG2NUM(-FIXNUM_MIN); + if (modp) *modp = LONG2FIX(0); + return; + } + div = x / y; + mod = x % y; + if (y > 0 ? mod < 0 : mod > 0) { + mod += y; + div -= 1; + } + if (divp) *divp = LONG2FIX(div); + if (modp) *modp = LONG2FIX(mod); +} + +/* div() for Ruby + * This behaves different from C99 for negative arguments. + */ +static inline VALUE +rb_fix_div_fix(VALUE x, VALUE y) +{ + VALUE div; + rb_fix_divmod_fix(x, y, &div, NULL); + return div; +} + +/* mod() for Ruby + * This behaves different from C99 for negative arguments. + */ +static inline VALUE +rb_fix_mod_fix(VALUE x, VALUE y) +{ + VALUE mod; + rb_fix_divmod_fix(x, y, NULL, &mod); + return mod; +} + +#if defined(HAVE_UINT128_T) +# define bit_length(x) \ + (unsigned int) \ + (sizeof(x) <= SIZEOF_INT ? SIZEOF_INT * CHAR_BIT - nlz_int((unsigned int)(x)) : \ + sizeof(x) <= SIZEOF_LONG ? SIZEOF_LONG * CHAR_BIT - nlz_long((unsigned long)(x)) : \ + sizeof(x) <= SIZEOF_LONG_LONG ? SIZEOF_LONG_LONG * CHAR_BIT - nlz_long_long((unsigned LONG_LONG)(x)) : \ + SIZEOF_INT128_T * CHAR_BIT - nlz_int128((uint128_t)(x))) +#elif defined(HAVE_LONG_LONG) +# define bit_length(x) \ + (unsigned int) \ + (sizeof(x) <= SIZEOF_INT ? 
SIZEOF_INT * CHAR_BIT - nlz_int((unsigned int)(x)) : \ + sizeof(x) <= SIZEOF_LONG ? SIZEOF_LONG * CHAR_BIT - nlz_long((unsigned long)(x)) : \ + SIZEOF_LONG_LONG * CHAR_BIT - nlz_long_long((unsigned LONG_LONG)(x))) +#else +# define bit_length(x) \ + (unsigned int) \ + (sizeof(x) <= SIZEOF_INT ? SIZEOF_INT * CHAR_BIT - nlz_int((unsigned int)(x)) : \ + SIZEOF_LONG * CHAR_BIT - nlz_long((unsigned long)(x))) +#endif + +#ifndef BDIGIT +# if SIZEOF_INT*2 <= SIZEOF_LONG_LONG +# define BDIGIT unsigned int +# define SIZEOF_BDIGIT SIZEOF_INT +# define BDIGIT_DBL unsigned LONG_LONG +# define BDIGIT_DBL_SIGNED LONG_LONG +# define PRI_BDIGIT_PREFIX "" +# define PRI_BDIGIT_DBL_PREFIX PRI_LL_PREFIX +# elif SIZEOF_INT*2 <= SIZEOF_LONG +# define BDIGIT unsigned int +# define SIZEOF_BDIGIT SIZEOF_INT +# define BDIGIT_DBL unsigned long +# define BDIGIT_DBL_SIGNED long +# define PRI_BDIGIT_PREFIX "" +# define PRI_BDIGIT_DBL_PREFIX "l" +# elif SIZEOF_SHORT*2 <= SIZEOF_LONG +# define BDIGIT unsigned short +# define SIZEOF_BDIGIT SIZEOF_SHORT +# define BDIGIT_DBL unsigned long +# define BDIGIT_DBL_SIGNED long +# define PRI_BDIGIT_PREFIX "h" +# define PRI_BDIGIT_DBL_PREFIX "l" +# else +# define BDIGIT unsigned short +# define SIZEOF_BDIGIT (SIZEOF_LONG/2) +# define SIZEOF_ACTUAL_BDIGIT SIZEOF_LONG +# define BDIGIT_DBL unsigned long +# define BDIGIT_DBL_SIGNED long +# define PRI_BDIGIT_PREFIX "h" +# define PRI_BDIGIT_DBL_PREFIX "l" +# endif +#endif +#ifndef SIZEOF_ACTUAL_BDIGIT +# define SIZEOF_ACTUAL_BDIGIT SIZEOF_BDIGIT +#endif + +#ifdef PRI_BDIGIT_PREFIX +# define PRIdBDIGIT PRI_BDIGIT_PREFIX"d" +# define PRIiBDIGIT PRI_BDIGIT_PREFIX"i" +# define PRIoBDIGIT PRI_BDIGIT_PREFIX"o" +# define PRIuBDIGIT PRI_BDIGIT_PREFIX"u" +# define PRIxBDIGIT PRI_BDIGIT_PREFIX"x" +# define PRIXBDIGIT PRI_BDIGIT_PREFIX"X" +#endif + +#ifdef PRI_BDIGIT_DBL_PREFIX +# define PRIdBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"d" +# define PRIiBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"i" +# define PRIoBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"o" +# define PRIuBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"u" +# define PRIxBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"x" +# define PRIXBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"X" +#endif + +#define BIGNUM_EMBED_LEN_NUMBITS 3 +#ifndef BIGNUM_EMBED_LEN_MAX +# if (SIZEOF_VALUE*3/SIZEOF_ACTUAL_BDIGIT) < (1 << BIGNUM_EMBED_LEN_NUMBITS)-1 +# define BIGNUM_EMBED_LEN_MAX (SIZEOF_VALUE*3/SIZEOF_ACTUAL_BDIGIT) +# else +# define BIGNUM_EMBED_LEN_MAX ((1 << BIGNUM_EMBED_LEN_NUMBITS)-1) +# endif +#endif + +struct RBignum { + struct RBasic basic; + union { + struct { + size_t len; + BDIGIT *digits; + } heap; + BDIGIT ary[BIGNUM_EMBED_LEN_MAX]; + } as; +}; +#define BIGNUM_SIGN_BIT ((VALUE)FL_USER1) +/* sign: positive:1, negative:0 */ +#define BIGNUM_SIGN(b) ((RBASIC(b)->flags & BIGNUM_SIGN_BIT) != 0) +#define BIGNUM_SET_SIGN(b,sign) \ + ((sign) ? (RBASIC(b)->flags |= BIGNUM_SIGN_BIT) \ + : (RBASIC(b)->flags &= ~BIGNUM_SIGN_BIT)) +#define BIGNUM_POSITIVE_P(b) BIGNUM_SIGN(b) +#define BIGNUM_NEGATIVE_P(b) (!BIGNUM_SIGN(b)) +#define BIGNUM_NEGATE(b) (RBASIC(b)->flags ^= BIGNUM_SIGN_BIT) + +#define BIGNUM_EMBED_FLAG ((VALUE)FL_USER2) +#define BIGNUM_EMBED_LEN_MASK ((VALUE)(FL_USER5|FL_USER4|FL_USER3)) +#define BIGNUM_EMBED_LEN_SHIFT (FL_USHIFT+BIGNUM_EMBED_LEN_NUMBITS) +#define BIGNUM_LEN(b) \ + ((RBASIC(b)->flags & BIGNUM_EMBED_FLAG) ? 
\ + (size_t)((RBASIC(b)->flags >> BIGNUM_EMBED_LEN_SHIFT) & \ + (BIGNUM_EMBED_LEN_MASK >> BIGNUM_EMBED_LEN_SHIFT)) : \ + RBIGNUM(b)->as.heap.len) +/* LSB:BIGNUM_DIGITS(b)[0], MSB:BIGNUM_DIGITS(b)[BIGNUM_LEN(b)-1] */ +#define BIGNUM_DIGITS(b) \ + ((RBASIC(b)->flags & BIGNUM_EMBED_FLAG) ? \ + RBIGNUM(b)->as.ary : \ + RBIGNUM(b)->as.heap.digits) +#define BIGNUM_LENINT(b) rb_long2int(BIGNUM_LEN(b)) + +#define RBIGNUM(obj) (R_CAST(RBignum)(obj)) + +struct RRational { + struct RBasic basic; + const VALUE num; + const VALUE den; +}; + +#define RRATIONAL(obj) (R_CAST(RRational)(obj)) +#define RRATIONAL_SET_NUM(rat, n) RB_OBJ_WRITE((rat), &((struct RRational *)(rat))->num,(n)) +#define RRATIONAL_SET_DEN(rat, d) RB_OBJ_WRITE((rat), &((struct RRational *)(rat))->den,(d)) + +struct RFloat { + struct RBasic basic; + double float_value; +}; + +#define RFLOAT(obj) (R_CAST(RFloat)(obj)) + +struct RComplex { + struct RBasic basic; + const VALUE real; + const VALUE imag; +}; + +#define RCOMPLEX(obj) (R_CAST(RComplex)(obj)) + +#ifdef RCOMPLEX_SET_REAL /* shortcut macro for internal only */ +#undef RCOMPLEX_SET_REAL +#undef RCOMPLEX_SET_IMAG +#define RCOMPLEX_SET_REAL(cmp, r) RB_OBJ_WRITE((cmp), &((struct RComplex *)(cmp))->real,(r)) +#define RCOMPLEX_SET_IMAG(cmp, i) RB_OBJ_WRITE((cmp), &((struct RComplex *)(cmp))->imag,(i)) +#endif + +struct RHash { + struct RBasic basic; + struct st_table *ntbl; /* possibly 0 */ + int iter_lev; + const VALUE ifnone; +}; + +#define RHASH(obj) (R_CAST(RHash)(obj)) + +#ifdef RHASH_ITER_LEV +#undef RHASH_ITER_LEV +#undef RHASH_IFNONE +#undef RHASH_SIZE +#define RHASH_ITER_LEV(h) (RHASH(h)->iter_lev) +#define RHASH_IFNONE(h) (RHASH(h)->ifnone) +#define RHASH_SIZE(h) (RHASH(h)->ntbl ? RHASH(h)->ntbl->num_entries : (st_index_t)0) +#endif + +/* missing/setproctitle.c */ +#ifndef HAVE_SETPROCTITLE +extern void ruby_init_setproctitle(int argc, char *argv[]); +#endif + +#define RSTRUCT_EMBED_LEN_MAX RSTRUCT_EMBED_LEN_MAX +#define RSTRUCT_EMBED_LEN_MASK RSTRUCT_EMBED_LEN_MASK +#define RSTRUCT_EMBED_LEN_SHIFT RSTRUCT_EMBED_LEN_SHIFT +enum { + RSTRUCT_EMBED_LEN_MAX = 3, + RSTRUCT_EMBED_LEN_MASK = (RUBY_FL_USER2|RUBY_FL_USER1), + RSTRUCT_EMBED_LEN_SHIFT = (RUBY_FL_USHIFT+1), + + RSTRUCT_ENUM_END +}; + +struct RStruct { + struct RBasic basic; + union { + struct { + long len; + const VALUE *ptr; + } heap; + const VALUE ary[RSTRUCT_EMBED_LEN_MAX]; + } as; +}; + +#undef RSTRUCT_LEN +#undef RSTRUCT_PTR +#undef RSTRUCT_SET +#undef RSTRUCT_GET +#define RSTRUCT_EMBED_LEN(st) \ + (long)((RBASIC(st)->flags >> RSTRUCT_EMBED_LEN_SHIFT) & \ + (RSTRUCT_EMBED_LEN_MASK >> RSTRUCT_EMBED_LEN_SHIFT)) +#define RSTRUCT_LEN(st) rb_struct_len(st) +#define RSTRUCT_LENINT(st) rb_long2int(RSTRUCT_LEN(st)) +#define RSTRUCT_CONST_PTR(st) rb_struct_const_ptr(st) +#define RSTRUCT_PTR(st) ((VALUE *)RSTRUCT_CONST_PTR(RB_OBJ_WB_UNPROTECT_FOR(STRUCT, st))) +#define RSTRUCT_SET(st, idx, v) RB_OBJ_WRITE(st, &RSTRUCT_CONST_PTR(st)[idx], (v)) +#define RSTRUCT_GET(st, idx) (RSTRUCT_CONST_PTR(st)[idx]) +#define RSTRUCT(obj) (R_CAST(RStruct)(obj)) + +static inline long +rb_struct_len(VALUE st) +{ + return (RBASIC(st)->flags & RSTRUCT_EMBED_LEN_MASK) ? + RSTRUCT_EMBED_LEN(st) : RSTRUCT(st)->as.heap.len; +} + +static inline const VALUE * +rb_struct_const_ptr(VALUE st) +{ + return FIX_CONST_VALUE_PTR((RBASIC(st)->flags & RSTRUCT_EMBED_LEN_MASK) ? 
+ RSTRUCT(st)->as.ary : RSTRUCT(st)->as.heap.ptr); +} + +/* class.c */ + +struct rb_deprecated_classext_struct { + char conflict[sizeof(VALUE) * 3]; +}; + +struct rb_subclass_entry; +typedef struct rb_subclass_entry rb_subclass_entry_t; + +struct rb_subclass_entry { + VALUE klass; + rb_subclass_entry_t *next; +}; + +#if defined(HAVE_LONG_LONG) +typedef unsigned LONG_LONG rb_serial_t; +#define SERIALT2NUM ULL2NUM +#elif defined(HAVE_UINT64_T) +typedef uint64_t rb_serial_t; +#define SERIALT2NUM SIZET2NUM +#else +typedef unsigned long rb_serial_t; +#define SERIALT2NUM ULONG2NUM +#endif + +struct rb_classext_struct { + struct st_table *iv_index_tbl; + struct st_table *iv_tbl; + struct rb_id_table *const_tbl; + struct rb_id_table *callable_m_tbl; + rb_subclass_entry_t *subclasses; + rb_subclass_entry_t **parent_subclasses; + /** + * In the case that this is an `ICLASS`, `module_subclasses` points to the link + * in the module's `subclasses` list that indicates that the klass has been + * included. Hopefully that makes sense. + */ + rb_subclass_entry_t **module_subclasses; + rb_serial_t class_serial; + const VALUE origin_; + VALUE refined_class; + rb_alloc_func_t allocator; +}; + +typedef struct rb_classext_struct rb_classext_t; + +#undef RClass +struct RClass { + struct RBasic basic; + VALUE super; + rb_classext_t *ptr; + struct rb_id_table *m_tbl; +}; + +void rb_class_subclass_add(VALUE super, VALUE klass); +void rb_class_remove_from_super_subclasses(VALUE); +int rb_singleton_class_internal_p(VALUE sklass); + +#define RCLASS_EXT(c) (RCLASS(c)->ptr) +#define RCLASS_IV_TBL(c) (RCLASS_EXT(c)->iv_tbl) +#define RCLASS_CONST_TBL(c) (RCLASS_EXT(c)->const_tbl) +#define RCLASS_M_TBL(c) (RCLASS(c)->m_tbl) +#define RCLASS_CALLABLE_M_TBL(c) (RCLASS_EXT(c)->callable_m_tbl) +#define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl) +#define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin_) +#define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class) +#define RCLASS_SERIAL(c) (RCLASS_EXT(c)->class_serial) + +#define RICLASS_IS_ORIGIN FL_USER5 + +static inline void +RCLASS_SET_ORIGIN(VALUE klass, VALUE origin) +{ + RB_OBJ_WRITE(klass, &RCLASS_ORIGIN(klass), origin); + if (klass != origin) FL_SET(origin, RICLASS_IS_ORIGIN); +} + +#undef RCLASS_SUPER +static inline VALUE +RCLASS_SUPER(VALUE klass) +{ + return RCLASS(klass)->super; +} + +static inline VALUE +RCLASS_SET_SUPER(VALUE klass, VALUE super) +{ + if (super) { + rb_class_remove_from_super_subclasses(klass); + rb_class_subclass_add(super, klass); + } + RB_OBJ_WRITE(klass, &RCLASS(klass)->super, super); + return super; +} +/* IMEMO: Internal memo object */ + +#ifndef IMEMO_DEBUG +#define IMEMO_DEBUG 0 +#endif + +struct RIMemo { + VALUE flags; + VALUE v0; + VALUE v1; + VALUE v2; + VALUE v3; +}; + +enum imemo_type { + imemo_env = 0, + imemo_cref = 1, /*!< class reference */ + imemo_svar = 2, /*!< special variable */ + imemo_throw_data = 3, + imemo_ifunc = 4, /*!< iterator function */ + imemo_memo = 5, + imemo_ment = 6, + imemo_iseq = 7, + imemo_alloc = 8, + imemo_ast = 9, + imemo_parser_strterm = 10 +}; +#define IMEMO_MASK 0x0f + +static inline enum imemo_type +imemo_type(VALUE imemo) +{ + return (RBASIC(imemo)->flags >> FL_USHIFT) & IMEMO_MASK; +} + +static inline int +imemo_type_p(VALUE imemo, enum imemo_type imemo_type) +{ + if (LIKELY(!RB_SPECIAL_CONST_P(imemo))) { + /* fixed at compile time if imemo_type is given. 
*/ + const VALUE mask = (IMEMO_MASK << FL_USHIFT) | RUBY_T_MASK; + const VALUE expected_type = (imemo_type << FL_USHIFT) | T_IMEMO; + /* fixed at runtime. */ + return expected_type == (RBASIC(imemo)->flags & mask); + } + else { + return 0; + } +} + +/* FL_USER0 to FL_USER3 is for type */ +#define IMEMO_FL_USHIFT (FL_USHIFT + 4) +#define IMEMO_FL_USER0 FL_USER4 +#define IMEMO_FL_USER1 FL_USER5 +#define IMEMO_FL_USER2 FL_USER6 +#define IMEMO_FL_USER3 FL_USER7 +#define IMEMO_FL_USER4 FL_USER8 + +/* CREF (Class REFerence) is defined in method.h */ + +/*! SVAR (Special VARiable) */ +struct vm_svar { + VALUE flags; + const VALUE cref_or_me; /*!< class reference or rb_method_entry_t */ + const VALUE lastline; + const VALUE backref; + const VALUE others; +}; + + +#define THROW_DATA_CONSUMED IMEMO_FL_USER0 + +/*! THROW_DATA */ +struct vm_throw_data { + VALUE flags; + VALUE reserved; + const VALUE throw_obj; + const struct rb_control_frame_struct *catch_frame; + VALUE throw_state; +}; + +#define THROW_DATA_P(err) RB_TYPE_P((VALUE)(err), T_IMEMO) + +/* IFUNC (Internal FUNCtion) */ + +struct vm_ifunc_argc { +#if SIZEOF_INT * 2 > SIZEOF_VALUE + signed int min: (SIZEOF_VALUE * CHAR_BIT) / 2; + signed int max: (SIZEOF_VALUE * CHAR_BIT) / 2; +#else + int min, max; +#endif +}; + +/*! IFUNC (Internal FUNCtion) */ +struct vm_ifunc { + VALUE flags; + VALUE reserved; + VALUE (*func)(ANYARGS); + const void *data; + struct vm_ifunc_argc argc; +}; + +#define IFUNC_NEW(a, b, c) ((struct vm_ifunc *)rb_imemo_new(imemo_ifunc, (VALUE)(a), (VALUE)(b), (VALUE)(c), 0)) +struct vm_ifunc *rb_vm_ifunc_new(VALUE (*func)(ANYARGS), const void *data, int min_argc, int max_argc); +static inline struct vm_ifunc * +rb_vm_ifunc_proc_new(VALUE (*func)(ANYARGS), const void *data) +{ + return rb_vm_ifunc_new(func, data, 0, UNLIMITED_ARGUMENTS); +} + +typedef struct rb_imemo_alloc_struct { + VALUE flags; + VALUE reserved; + VALUE *ptr; /* malloc'ed buffer */ + struct rb_imemo_alloc_struct *next; /* next imemo */ + size_t cnt; /* buffer size in VALUE */ +} rb_imemo_alloc_t; + +rb_imemo_alloc_t *rb_imemo_alloc_new(VALUE, VALUE, VALUE, VALUE); + +void rb_strterm_mark(VALUE obj); + +/*! 
MEMO + * + * @see imemo_type + * */ +struct MEMO { + VALUE flags; + VALUE reserved; + const VALUE v1; + const VALUE v2; + union { + long cnt; + long state; + const VALUE value; + VALUE (*func)(ANYARGS); + } u3; +}; + +#define MEMO_V1_SET(m, v) RB_OBJ_WRITE((m), &(m)->v1, (v)) +#define MEMO_V2_SET(m, v) RB_OBJ_WRITE((m), &(m)->v2, (v)) + +#define MEMO_CAST(m) ((struct MEMO *)m) + +#define MEMO_NEW(a, b, c) ((struct MEMO *)rb_imemo_new(imemo_memo, (VALUE)(a), (VALUE)(b), (VALUE)(c), 0)) + +#define roomof(x, y) (((x) + (y) - 1) / (y)) +#define type_roomof(x, y) roomof(sizeof(x), sizeof(y)) +#define MEMO_FOR(type, value) ((type *)RARRAY_PTR(value)) +#define NEW_MEMO_FOR(type, value) \ + ((value) = rb_ary_tmp_new_fill(type_roomof(type, VALUE)), MEMO_FOR(type, value)) +#define NEW_PARTIAL_MEMO_FOR(type, value, member) \ + ((value) = rb_ary_tmp_new_fill(type_roomof(type, VALUE)), \ + rb_ary_set_len((value), offsetof(type, member) / sizeof(VALUE)), \ + MEMO_FOR(type, value)) + +#define STRING_P(s) (RB_TYPE_P((s), T_STRING) && CLASS_OF(s) == rb_cString) + +#ifdef RUBY_INTEGER_UNIFICATION +# define rb_cFixnum rb_cInteger +# define rb_cBignum rb_cInteger +#endif + +enum { + cmp_opt_Fixnum, + cmp_opt_String, + cmp_opt_Float, + cmp_optimizable_count +}; + +struct cmp_opt_data { + unsigned int opt_methods; + unsigned int opt_inited; +}; + +#define NEW_CMP_OPT_MEMO(type, value) \ + NEW_PARTIAL_MEMO_FOR(type, value, cmp_opt) +#define CMP_OPTIMIZABLE_BIT(type) (1U << TOKEN_PASTE(cmp_opt_,type)) +#define CMP_OPTIMIZABLE(data, type) \ + (((data).opt_inited & CMP_OPTIMIZABLE_BIT(type)) ? \ + ((data).opt_methods & CMP_OPTIMIZABLE_BIT(type)) : \ + (((data).opt_inited |= CMP_OPTIMIZABLE_BIT(type)), \ + rb_method_basic_definition_p(TOKEN_PASTE(rb_c,type), id_cmp) && \ + ((data).opt_methods |= CMP_OPTIMIZABLE_BIT(type)))) + +#define OPTIMIZED_CMP(a, b, data) \ + ((FIXNUM_P(a) && FIXNUM_P(b) && CMP_OPTIMIZABLE(data, Fixnum)) ? \ + (((long)a > (long)b) ? 1 : ((long)a < (long)b) ? -1 : 0) : \ + (STRING_P(a) && STRING_P(b) && CMP_OPTIMIZABLE(data, String)) ? \ + rb_str_cmp(a, b) : \ + (RB_FLOAT_TYPE_P(a) && RB_FLOAT_TYPE_P(b) && CMP_OPTIMIZABLE(data, Float)) ? \ + rb_float_cmp(a, b) : \ + rb_cmpint(rb_funcallv(a, id_cmp, 1, &b), a, b)) + +/* ment is in method.h */ + +/* global variable */ + +struct rb_global_entry { + struct rb_global_variable *var; + ID id; +}; + +struct rb_global_entry *rb_global_entry(ID); +VALUE rb_gvar_get(struct rb_global_entry *); +VALUE rb_gvar_set(struct rb_global_entry *, VALUE); +VALUE rb_gvar_defined(struct rb_global_entry *); + +struct vtm; /* defined by timev.h */ + +/* array.c */ +VALUE rb_ary_last(int, const VALUE *, VALUE); +void rb_ary_set_len(VALUE, long); +void rb_ary_delete_same(VALUE, VALUE); +VALUE rb_ary_tmp_new_fill(long capa); +VALUE rb_ary_at(VALUE, VALUE); +VALUE rb_ary_aref1(VALUE ary, VALUE i); +VALUE rb_ary_aref2(VALUE ary, VALUE b, VALUE e); +size_t rb_ary_memsize(VALUE); +VALUE rb_to_array_type(VALUE obj); +#ifdef __GNUC__ +#define rb_ary_new_from_args(n, ...) 
\ + __extension__ ({ \ + const VALUE args_to_new_ary[] = {__VA_ARGS__}; \ + if (__builtin_constant_p(n)) { \ + STATIC_ASSERT(rb_ary_new_from_args, numberof(args_to_new_ary) == (n)); \ + } \ + rb_ary_new_from_values(numberof(args_to_new_ary), args_to_new_ary); \ + }) +#endif + +/* bignum.c */ +extern const char ruby_digitmap[]; +double rb_big_fdiv_double(VALUE x, VALUE y); +VALUE rb_big_uminus(VALUE x); +VALUE rb_big_hash(VALUE); +VALUE rb_big_odd_p(VALUE); +VALUE rb_big_even_p(VALUE); +size_t rb_big_size(VALUE); +VALUE rb_integer_float_cmp(VALUE x, VALUE y); +VALUE rb_integer_float_eq(VALUE x, VALUE y); +VALUE rb_cstr_parse_inum(const char *str, ssize_t len, char **endp, int base); +VALUE rb_big_comp(VALUE x); +VALUE rb_big_aref(VALUE x, VALUE y); +VALUE rb_big_abs(VALUE x); +VALUE rb_big_size_m(VALUE big); +VALUE rb_big_bit_length(VALUE big); +VALUE rb_big_remainder(VALUE x, VALUE y); +VALUE rb_big_gt(VALUE x, VALUE y); +VALUE rb_big_ge(VALUE x, VALUE y); +VALUE rb_big_lt(VALUE x, VALUE y); +VALUE rb_big_le(VALUE x, VALUE y); +VALUE rb_int_powm(int const argc, VALUE * const argv, VALUE const num); + +/* class.c */ +VALUE rb_class_boot(VALUE); +VALUE rb_class_inherited(VALUE, VALUE); +VALUE rb_make_metaclass(VALUE, VALUE); +VALUE rb_include_class_new(VALUE, VALUE); +void rb_class_foreach_subclass(VALUE klass, void (*f)(VALUE, VALUE), VALUE); +void rb_class_detach_subclasses(VALUE); +void rb_class_detach_module_subclasses(VALUE); +void rb_class_remove_from_module_subclasses(VALUE); +VALUE rb_obj_methods(int argc, const VALUE *argv, VALUE obj); +VALUE rb_obj_protected_methods(int argc, const VALUE *argv, VALUE obj); +VALUE rb_obj_private_methods(int argc, const VALUE *argv, VALUE obj); +VALUE rb_obj_public_methods(int argc, const VALUE *argv, VALUE obj); +VALUE rb_special_singleton_class(VALUE); +VALUE rb_singleton_class_clone_and_attach(VALUE obj, VALUE attach); +VALUE rb_singleton_class_get(VALUE obj); +void Init_class_hierarchy(void); + +int rb_class_has_methods(VALUE c); +void rb_undef_methods_from(VALUE klass, VALUE super); + +/* compar.c */ +VALUE rb_invcmp(VALUE, VALUE); + +/* compile.c */ +struct rb_block; +int rb_dvar_defined(ID, const struct rb_block *); +int rb_local_defined(ID, const struct rb_block *); +const char * rb_insns_name(int i); +VALUE rb_insns_name_array(void); + +/* complex.c */ +VALUE rb_complex_plus(VALUE, VALUE); +VALUE rb_complex_mul(VALUE, VALUE); +VALUE rb_complex_abs(VALUE x); +VALUE rb_complex_sqrt(VALUE x); + +/* cont.c */ +VALUE rb_obj_is_fiber(VALUE); +void rb_fiber_reset_root_local_storage(VALUE); +void ruby_register_rollback_func_for_ensure(VALUE (*ensure_func)(ANYARGS), VALUE (*rollback_func)(ANYARGS)); + +/* debug.c */ +PRINTF_ARGS(void ruby_debug_printf(const char*, ...), 1, 2); + +/* dir.c */ +VALUE rb_dir_getwd_ospath(void); + +/* dmyext.c */ +void Init_enc(void); +void Init_ext(void); + +/* encoding.c */ +ID rb_id_encoding(void); +void rb_gc_mark_encodings(void); +rb_encoding *rb_enc_get_from_index(int index); +rb_encoding *rb_enc_check_str(VALUE str1, VALUE str2); +int rb_encdb_replicate(const char *alias, const char *orig); +int rb_encdb_alias(const char *alias, const char *orig); +int rb_encdb_dummy(const char *name); +void rb_encdb_declare(const char *name); +void rb_enc_set_base(const char *name, const char *orig); +int rb_enc_set_dummy(int index); +void rb_encdb_set_unicode(int index); +PUREFUNC(int rb_data_is_encoding(VALUE obj)); + +/* enum.c */ +VALUE rb_f_send(int argc, VALUE *argv, VALUE recv); +VALUE rb_nmin_run(VALUE obj, VALUE num, int 
by, int rev, int ary); + +/* error.c */ +extern VALUE rb_eEAGAIN; +extern VALUE rb_eEWOULDBLOCK; +extern VALUE rb_eEINPROGRESS; +void rb_report_bug_valist(VALUE file, int line, const char *fmt, va_list args); +VALUE rb_syntax_error_append(VALUE, VALUE, int, int, rb_encoding*, const char*, va_list); +VALUE rb_check_backtrace(VALUE); +NORETURN(void rb_async_bug_errno(const char *,int)); +const char *rb_builtin_type_name(int t); +const char *rb_builtin_class_name(VALUE x); +PRINTF_ARGS(void rb_sys_warn(const char *fmt, ...), 1, 2); +PRINTF_ARGS(void rb_syserr_warn(int err, const char *fmt, ...), 2, 3); +PRINTF_ARGS(void rb_enc_warn(rb_encoding *enc, const char *fmt, ...), 2, 3); +PRINTF_ARGS(void rb_sys_enc_warn(rb_encoding *enc, const char *fmt, ...), 2, 3); +PRINTF_ARGS(void rb_syserr_enc_warn(int err, rb_encoding *enc, const char *fmt, ...), 3, 4); +PRINTF_ARGS(void rb_sys_warning(const char *fmt, ...), 1, 2); +PRINTF_ARGS(void rb_syserr_warning(int err, const char *fmt, ...), 2, 3); +PRINTF_ARGS(void rb_enc_warning(rb_encoding *enc, const char *fmt, ...), 2, 3); +PRINTF_ARGS(void rb_sys_enc_warning(rb_encoding *enc, const char *fmt, ...), 2, 3); +PRINTF_ARGS(void rb_syserr_enc_warning(int err, rb_encoding *enc, const char *fmt, ...), 3, 4); + +#define rb_raise_cstr(etype, mesg) \ + rb_exc_raise(rb_exc_new_str(etype, rb_str_new_cstr(mesg))) +#define rb_raise_static(etype, mesg) \ + rb_exc_raise(rb_exc_new_str(etype, rb_str_new_static(mesg, rb_strlen_lit(mesg)))) + +VALUE rb_name_err_new(VALUE mesg, VALUE recv, VALUE method); +#define rb_name_err_raise_str(mesg, recv, name) \ + rb_exc_raise(rb_name_err_new(mesg, recv, name)) +#define rb_name_err_raise(mesg, recv, name) \ + rb_name_err_raise_str(rb_fstring_cstr(mesg), (recv), (name)) +VALUE rb_key_err_new(VALUE mesg, VALUE recv, VALUE name); +#define rb_key_err_raise(mesg, recv, name) \ + rb_exc_raise(rb_key_err_new(mesg, recv, name)) +NORETURN(void ruby_deprecated_internal_feature(const char *)); +#define DEPRECATED_INTERNAL_FEATURE(func) \ + (ruby_deprecated_internal_feature(func), UNREACHABLE) +VALUE rb_warning_warn(VALUE mod, VALUE str); +VALUE rb_warning_string(const char *fmt, ...); + +/* eval.c */ +VALUE rb_refinement_module_get_refined_class(VALUE module); + +/* eval_error.c */ +VALUE rb_get_backtrace(VALUE info); + +/* eval_jump.c */ +void rb_call_end_proc(VALUE data); +void rb_mark_end_proc(void); + +/* file.c */ +VALUE rb_home_dir_of(VALUE user, VALUE result); +VALUE rb_default_home_dir(VALUE result); +VALUE rb_realpath_internal(VALUE basedir, VALUE path, int strict); +VALUE rb_check_realpath(VALUE basedir, VALUE path); +void rb_file_const(const char*, VALUE); +int rb_file_load_ok(const char *); +VALUE rb_file_expand_path_fast(VALUE, VALUE); +VALUE rb_file_expand_path_internal(VALUE, VALUE, int, int, VALUE); +VALUE rb_get_path_check_to_string(VALUE, int); +VALUE rb_get_path_check_convert(VALUE, VALUE, int); +VALUE rb_get_path_check(VALUE, int); +void Init_File(void); +int ruby_is_fd_loadable(int fd); + +#ifdef RUBY_FUNCTION_NAME_STRING +# if defined __GNUC__ && __GNUC__ >= 4 +# pragma GCC visibility push(default) +# endif +NORETURN(void rb_sys_fail_path_in(const char *func_name, VALUE path)); +NORETURN(void rb_syserr_fail_path_in(const char *func_name, int err, VALUE path)); +# if defined __GNUC__ && __GNUC__ >= 4 +# pragma GCC visibility pop +# endif +# define rb_sys_fail_path(path) rb_sys_fail_path_in(RUBY_FUNCTION_NAME_STRING, path) +# define rb_syserr_fail_path(err, path) rb_syserr_fail_path_in(RUBY_FUNCTION_NAME_STRING, 
(err), (path)) +#else +# define rb_sys_fail_path(path) rb_sys_fail_str(path) +# define rb_syserr_fail_path(err, path) rb_syserr_fail_str((err), (path)) +#endif + +/* gc.c */ +extern VALUE *ruby_initial_gc_stress_ptr; +extern int ruby_disable_gc; +void Init_heap(void); +void *ruby_mimmalloc(size_t size); +void ruby_mimfree(void *ptr); +void rb_objspace_set_event_hook(const rb_event_flag_t event); +#if USE_RGENGC +void rb_gc_writebarrier_remember(VALUE obj); +#else +#define rb_gc_writebarrier_remember(obj) 0 +#endif +void ruby_gc_set_params(int safe_level); +void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj); + +#if defined(HAVE_MALLOC_USABLE_SIZE) || defined(HAVE_MALLOC_SIZE) || defined(_WIN32) +#define ruby_sized_xrealloc(ptr, new_size, old_size) ruby_xrealloc(ptr, new_size) +#define ruby_sized_xrealloc2(ptr, new_count, element_size, old_count) ruby_xrealloc(ptr, new_count, element_size) +#define ruby_sized_xfree(ptr, size) ruby_xfree(ptr) +#define SIZED_REALLOC_N(var,type,n,old_n) REALLOC_N(var, type, n) +#else +void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_ALLOC_SIZE((2)); +void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_ALLOC_SIZE((2, 3)); +void ruby_sized_xfree(void *x, size_t size); +#define SIZED_REALLOC_N(var,type,n,old_n) ((var)=(type*)ruby_sized_xrealloc((char*)(var), (n) * sizeof(type), (old_n) * sizeof(type))) +#endif + +/* optimized version of NEWOBJ() */ +#undef NEWOBJF_OF +#undef RB_NEWOBJ_OF +#define RB_NEWOBJ_OF(obj,type,klass,flags) \ + type *(obj) = (type*)(((flags) & FL_WB_PROTECTED) ? \ + rb_wb_protected_newobj_of(klass, (flags) & ~FL_WB_PROTECTED) : \ + rb_wb_unprotected_newobj_of(klass, flags)) +#define NEWOBJ_OF(obj,type,klass,flags) RB_NEWOBJ_OF(obj,type,klass,flags) + +/* hash.c */ +struct st_table *rb_hash_tbl_raw(VALUE hash); +VALUE rb_hash_new_with_size(st_index_t size); +RUBY_SYMBOL_EXPORT_BEGIN +VALUE rb_hash_new_compare_by_id(void); +RUBY_SYMBOL_EXPORT_END +VALUE rb_hash_has_key(VALUE hash, VALUE key); +VALUE rb_hash_default_value(VALUE hash, VALUE key); +VALUE rb_hash_set_default_proc(VALUE hash, VALUE proc); +long rb_objid_hash(st_index_t index); +long rb_dbl_long_hash(double d); +st_table *rb_init_identtable(void); +st_table *rb_init_identtable_with_size(st_index_t size); +VALUE rb_hash_compare_by_id_p(VALUE hash); +VALUE rb_to_hash_type(VALUE obj); + +#define RHASH_TBL_RAW(h) rb_hash_tbl_raw(h) +VALUE rb_hash_keys(VALUE hash); +VALUE rb_hash_values(VALUE hash); +VALUE rb_hash_rehash(VALUE hash); +int rb_hash_add_new_element(VALUE hash, VALUE key, VALUE val); +#define HASH_PROC_DEFAULT FL_USER2 + +/* inits.c */ +void rb_call_inits(void); + +/* io.c */ +const char *ruby_get_inplace_mode(void); +void ruby_set_inplace_mode(const char *); +ssize_t rb_io_bufread(VALUE io, void *buf, size_t size); +void rb_stdio_set_default_encoding(void); +VALUE rb_io_flush_raw(VALUE, int); +size_t rb_io_memsize(const rb_io_t *); +int rb_stderr_tty_p(void); + +/* load.c */ +VALUE rb_get_load_path(void); +VALUE rb_get_expanded_load_path(void); +int rb_require_internal(VALUE fname, int safe); +NORETURN(void rb_load_fail(VALUE, const char*)); + +/* loadpath.c */ +extern const char ruby_exec_prefix[]; +extern const char ruby_initial_load_paths[]; + +/* localeinit.c */ +int Init_enc_set_filesystem_encoding(void); + +/* math.c */ +VALUE rb_math_atan2(VALUE, VALUE); +VALUE rb_math_cos(VALUE); +VALUE rb_math_cosh(VALUE); +VALUE rb_math_exp(VALUE); +VALUE rb_math_hypot(VALUE, VALUE); 
+VALUE rb_math_log(int argc, const VALUE *argv); +VALUE rb_math_sin(VALUE); +VALUE rb_math_sinh(VALUE); +VALUE rb_math_sqrt(VALUE); + +/* newline.c */ +void Init_newline(void); + +/* numeric.c */ + +#define FIXNUM_POSITIVE_P(num) ((SIGNED_VALUE)(num) > (SIGNED_VALUE)INT2FIX(0)) +#define FIXNUM_NEGATIVE_P(num) ((SIGNED_VALUE)(num) < 0) +#define FIXNUM_ZERO_P(num) ((num) == INT2FIX(0)) + +#define INT_NEGATIVE_P(x) (FIXNUM_P(x) ? FIXNUM_NEGATIVE_P(x) : BIGNUM_NEGATIVE_P(x)) + +#ifndef ROUND_DEFAULT +# define ROUND_DEFAULT RUBY_NUM_ROUND_HALF_UP +#endif +enum ruby_num_rounding_mode { + RUBY_NUM_ROUND_HALF_UP, + RUBY_NUM_ROUND_HALF_EVEN, + RUBY_NUM_ROUND_HALF_DOWN, + RUBY_NUM_ROUND_DEFAULT = ROUND_DEFAULT +}; +#define ROUND_TO(mode, even, up, down) \ + ((mode) == RUBY_NUM_ROUND_HALF_EVEN ? even : \ + (mode) == RUBY_NUM_ROUND_HALF_UP ? up : down) +#define ROUND_FUNC(mode, name) \ + ROUND_TO(mode, name##_half_even, name##_half_up, name##_half_down) +#define ROUND_CALL(mode, name, args) \ + ROUND_TO(mode, name##_half_even args, \ + name##_half_up args, name##_half_down args) + +int rb_num_to_uint(VALUE val, unsigned int *ret); +VALUE ruby_num_interval_step_size(VALUE from, VALUE to, VALUE step, int excl); +int ruby_float_step(VALUE from, VALUE to, VALUE step, int excl); +double ruby_float_mod(double x, double y); +int rb_num_negative_p(VALUE); +VALUE rb_int_succ(VALUE num); +VALUE rb_int_pred(VALUE num); +VALUE rb_int_uminus(VALUE num); +VALUE rb_float_uminus(VALUE num); +VALUE rb_int_plus(VALUE x, VALUE y); +VALUE rb_int_minus(VALUE x, VALUE y); +VALUE rb_int_mul(VALUE x, VALUE y); +VALUE rb_int_idiv(VALUE x, VALUE y); +VALUE rb_int_modulo(VALUE x, VALUE y); +VALUE rb_int_round(VALUE num, int ndigits, enum ruby_num_rounding_mode mode); +VALUE rb_int2str(VALUE num, int base); +VALUE rb_dbl_hash(double d); +VALUE rb_fix_plus(VALUE x, VALUE y); +VALUE rb_int_gt(VALUE x, VALUE y); +int rb_float_cmp(VALUE x, VALUE y); +VALUE rb_float_gt(VALUE x, VALUE y); +VALUE rb_int_ge(VALUE x, VALUE y); +enum ruby_num_rounding_mode rb_num_get_rounding_option(VALUE opts); +double rb_int_fdiv_double(VALUE x, VALUE y); +VALUE rb_int_pow(VALUE x, VALUE y); +VALUE rb_float_pow(VALUE x, VALUE y); +VALUE rb_int_cmp(VALUE x, VALUE y); +VALUE rb_int_equal(VALUE x, VALUE y); +VALUE rb_int_divmod(VALUE x, VALUE y); +VALUE rb_int_and(VALUE x, VALUE y); +VALUE rb_int_lshift(VALUE x, VALUE y); +VALUE rb_int_div(VALUE x, VALUE y); +VALUE rb_int_abs(VALUE num); +VALUE rb_int_odd_p(VALUE num); + +static inline VALUE +rb_num_compare_with_zero(VALUE num, ID mid) +{ + VALUE zero = INT2FIX(0); + VALUE r = rb_check_funcall(num, mid, 1, &zero); + if (r == Qundef) { + rb_cmperr(num, zero); + } + return r; +} + +static inline int +rb_num_positive_int_p(VALUE num) +{ + const ID mid = '>'; + + if (FIXNUM_P(num)) { + if (rb_method_basic_definition_p(rb_cInteger, mid)) + return FIXNUM_POSITIVE_P(num); + } + else if (RB_TYPE_P(num, T_BIGNUM)) { + if (rb_method_basic_definition_p(rb_cInteger, mid)) + return BIGNUM_POSITIVE_P(num); + } + return RTEST(rb_num_compare_with_zero(num, mid)); +} + + +static inline int +rb_num_negative_int_p(VALUE num) +{ + const ID mid = '<'; + + if (FIXNUM_P(num)) { + if (rb_method_basic_definition_p(rb_cInteger, mid)) + return FIXNUM_NEGATIVE_P(num); + } + else if (RB_TYPE_P(num, T_BIGNUM)) { + if (rb_method_basic_definition_p(rb_cInteger, mid)) + return BIGNUM_NEGATIVE_P(num); + } + return RTEST(rb_num_compare_with_zero(num, mid)); +} + + +VALUE rb_float_abs(VALUE flt); +VALUE rb_float_equal(VALUE x, VALUE y); 
+VALUE rb_float_eql(VALUE x, VALUE y); + +#if USE_FLONUM +#define RUBY_BIT_ROTL(v, n) (((v) << (n)) | ((v) >> ((sizeof(v) * 8) - n))) +#define RUBY_BIT_ROTR(v, n) (((v) >> (n)) | ((v) << ((sizeof(v) * 8) - n))) +#endif + +static inline double +rb_float_flonum_value(VALUE v) +{ +#if USE_FLONUM + if (v != (VALUE)0x8000000000000002) { /* LIKELY */ + union { + double d; + VALUE v; + } t; + + VALUE b63 = (v >> 63); + /* e: xx1... -> 011... */ + /* xx0... -> 100... */ + /* ^b63 */ + t.v = RUBY_BIT_ROTR((2 - b63) | (v & ~(VALUE)0x03), 3); + return t.d; + } +#endif + return 0.0; +} + +static inline double +rb_float_noflonum_value(VALUE v) +{ + return ((struct RFloat *)v)->float_value; +} + +static inline double +rb_float_value_inline(VALUE v) +{ + if (FLONUM_P(v)) { + return rb_float_flonum_value(v); + } + return rb_float_noflonum_value(v); +} + +static inline VALUE +rb_float_new_inline(double d) +{ +#if USE_FLONUM + union { + double d; + VALUE v; + } t; + int bits; + + t.d = d; + bits = (int)((VALUE)(t.v >> 60) & 0x7); + /* bits contains 3 bits of b62..b60. */ + /* bits - 3 = */ + /* b011 -> b000 */ + /* b100 -> b001 */ + + if (t.v != 0x3000000000000000 /* 1.72723e-77 */ && + !((bits-3) & ~0x01)) { + return (RUBY_BIT_ROTL(t.v, 3) & ~(VALUE)0x01) | 0x02; + } + else if (t.v == (VALUE)0) { + /* +0.0 */ + return 0x8000000000000002; + } + /* out of range */ +#endif + return rb_float_new_in_heap(d); +} + +#define rb_float_value(v) rb_float_value_inline(v) +#define rb_float_new(d) rb_float_new_inline(d) + +/* object.c */ +void rb_obj_copy_ivar(VALUE dest, VALUE obj); +CONSTFUNC(VALUE rb_obj_equal(VALUE obj1, VALUE obj2)); +CONSTFUNC(VALUE rb_obj_not(VALUE obj)); +VALUE rb_class_search_ancestor(VALUE klass, VALUE super); +NORETURN(void rb_undefined_alloc(VALUE klass)); +double rb_num_to_dbl(VALUE val); +VALUE rb_obj_dig(int argc, VALUE *argv, VALUE self, VALUE notfound); +VALUE rb_immutable_obj_clone(int, VALUE *, VALUE); +VALUE rb_obj_not_equal(VALUE obj1, VALUE obj2); +VALUE rb_convert_type_with_id(VALUE,int,const char*,ID); +VALUE rb_check_convert_type_with_id(VALUE,int,const char*,ID); + +struct RBasicRaw { + VALUE flags; + VALUE klass; +}; + +#define RBASIC_CLEAR_CLASS(obj) memset(&(((struct RBasicRaw *)((VALUE)(obj)))->klass), 0, sizeof(VALUE)) +#define RBASIC_SET_CLASS_RAW(obj, cls) memcpy(&((struct RBasicRaw *)((VALUE)(obj)))->klass, &(cls), sizeof(VALUE)) +#define RBASIC_SET_CLASS(obj, cls) do { \ + VALUE _obj_ = (obj); \ + RB_OBJ_WRITE(_obj_, &((struct RBasicRaw *)(_obj_))->klass, cls); \ +} while (0) + +/* parse.y */ +#ifndef USE_SYMBOL_GC +#define USE_SYMBOL_GC 1 +#endif +VALUE rb_parser_get_yydebug(VALUE); +VALUE rb_parser_set_yydebug(VALUE, VALUE); +VALUE rb_parser_set_context(VALUE, const struct rb_block *, int); +void *rb_parser_load_file(VALUE parser, VALUE name); +int rb_is_const_name(VALUE name); +int rb_is_class_name(VALUE name); +int rb_is_global_name(VALUE name); +int rb_is_instance_name(VALUE name); +int rb_is_attrset_name(VALUE name); +int rb_is_local_name(VALUE name); +int rb_is_method_name(VALUE name); +int rb_is_junk_name(VALUE name); +PUREFUNC(int rb_is_const_sym(VALUE sym)); +PUREFUNC(int rb_is_class_sym(VALUE sym)); +PUREFUNC(int rb_is_global_sym(VALUE sym)); +PUREFUNC(int rb_is_instance_sym(VALUE sym)); +PUREFUNC(int rb_is_attrset_sym(VALUE sym)); +PUREFUNC(int rb_is_local_sym(VALUE sym)); +PUREFUNC(int rb_is_method_sym(VALUE sym)); +PUREFUNC(int rb_is_junk_sym(VALUE sym)); +ID rb_make_internal_id(void); +void rb_gc_free_dsymbol(VALUE); +ID rb_id_attrget(ID id); + +/* 
proc.c */ +VALUE rb_proc_location(VALUE self); +st_index_t rb_hash_proc(st_index_t hash, VALUE proc); +int rb_block_arity(void); +int rb_block_min_max_arity(int *max); +VALUE rb_func_proc_new(rb_block_call_func_t func, VALUE val); +VALUE rb_func_lambda_new(rb_block_call_func_t func, VALUE val, int min_argc, int max_argc); +VALUE rb_block_to_s(VALUE self, const struct rb_block *block, const char *additional_info); + +/* process.c */ +#define RB_MAX_GROUPS (65536) + +struct rb_execarg { + union { + struct { + VALUE shell_script; + } sh; + struct { + VALUE command_name; + VALUE command_abspath; /* full path string or nil */ + VALUE argv_str; + VALUE argv_buf; + } cmd; + } invoke; + VALUE redirect_fds; + VALUE envp_str; + VALUE envp_buf; + VALUE dup2_tmpbuf; + unsigned use_shell : 1; + unsigned pgroup_given : 1; + unsigned umask_given : 1; + unsigned unsetenv_others_given : 1; + unsigned unsetenv_others_do : 1; + unsigned close_others_given : 1; + unsigned close_others_do : 1; + unsigned chdir_given : 1; + unsigned new_pgroup_given : 1; + unsigned new_pgroup_flag : 1; + unsigned uid_given : 1; + unsigned gid_given : 1; + rb_pid_t pgroup_pgid; /* asis(-1), new pgroup(0), specified pgroup (0body->mark_ary + +#define ISEQ_COVERAGE(iseq) RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_COVERAGE) +#define ISEQ_COVERAGE_SET(iseq, cov) RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_COVERAGE, cov) +#define ISEQ_LINE_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_LINES) +#define ISEQ_BRANCH_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_BRANCHES) + +#define ISEQ_FLIP_CNT(iseq) FIX2INT(RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_FLIP_CNT)) + +static inline int +ISEQ_FLIP_CNT_INCREMENT(const rb_iseq_t *iseq) +{ + int cnt = ISEQ_FLIP_CNT(iseq); + RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_FLIP_CNT, INT2FIX(cnt+1)); + return cnt; +} + +static inline VALUE * +ISEQ_ORIGINAL_ISEQ(const rb_iseq_t *iseq) +{ + VALUE str = RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ); + if (RTEST(str)) return (VALUE *)RSTRING_PTR(str); + return NULL; +} + +static inline void +ISEQ_ORIGINAL_ISEQ_CLEAR(const rb_iseq_t *iseq) +{ + RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ, Qnil); +} + +static inline VALUE * +ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size) +{ + VALUE str = rb_str_tmp_new(size * sizeof(VALUE)); + RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ, str); + return (VALUE *)RSTRING_PTR(str); +} + +#define ISEQ_TRACE_EVENTS (RUBY_EVENT_LINE | \ + RUBY_EVENT_CLASS | \ + RUBY_EVENT_END | \ + RUBY_EVENT_CALL | \ + RUBY_EVENT_RETURN| \ + RUBY_EVENT_B_CALL| \ + RUBY_EVENT_B_RETURN) + +#define ISEQ_NOT_LOADED_YET IMEMO_FL_USER1 +#define ISEQ_USE_COMPILE_DATA IMEMO_FL_USER2 + +struct iseq_compile_data { + /* GC is needed */ + const VALUE err_info; + VALUE mark_ary; + const VALUE catch_table_ary; /* Array */ + + /* GC is not needed */ + struct iseq_label_data *start_label; + struct iseq_label_data *end_label; + struct iseq_label_data *redo_label; + const rb_iseq_t *current_block; + VALUE ensure_node; + VALUE for_iseq; + struct iseq_compile_data_ensure_node_stack *ensure_node_stack; + int loopval_popped; /* used by NODE_BREAK */ + int cached_const; + struct iseq_compile_data_storage *storage_head; + struct iseq_compile_data_storage *storage_current; + int last_line; + int label_no; + int node_level; + unsigned int ci_index; + unsigned int ci_kw_index; + const rb_compile_option_t *option; + struct rb_id_table *ivar_cache_table; +#if 
SUPPORT_JOKE + st_table *labels_table; +#endif +}; + +static inline struct iseq_compile_data * +ISEQ_COMPILE_DATA(const rb_iseq_t *iseq) +{ + if (iseq->flags & ISEQ_USE_COMPILE_DATA) { + return iseq->aux.compile_data; + } + else { + return NULL; + } +} + +static inline void +ISEQ_COMPILE_DATA_ALLOC(rb_iseq_t *iseq) +{ + iseq->flags |= ISEQ_USE_COMPILE_DATA; + iseq->aux.compile_data = ZALLOC(struct iseq_compile_data); +} + +static inline void +ISEQ_COMPILE_DATA_CLEAR(rb_iseq_t *iseq) +{ + iseq->flags &= ~ISEQ_USE_COMPILE_DATA; + iseq->aux.compile_data = NULL; +} + +static inline rb_iseq_t * +iseq_imemo_alloc(void) +{ + return (rb_iseq_t *)rb_imemo_new(imemo_iseq, 0, 0, 0, 0); +} + +VALUE rb_iseq_ibf_dump(const rb_iseq_t *iseq, VALUE opt); +void rb_ibf_load_iseq_complete(rb_iseq_t *iseq); +const rb_iseq_t *rb_iseq_ibf_load(VALUE str); +VALUE rb_iseq_ibf_load_extra_data(VALUE str); +void rb_iseq_init_trace(rb_iseq_t *iseq); + +RUBY_SYMBOL_EXPORT_BEGIN + +/* compile.c */ +VALUE rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node); +int rb_iseq_translate_threaded_code(rb_iseq_t *iseq); +VALUE *rb_iseq_original_iseq(const rb_iseq_t *iseq); +void rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, + VALUE locals, VALUE args, + VALUE exception, VALUE body); + +/* iseq.c */ +void rb_iseq_add_mark_object(const rb_iseq_t *iseq, VALUE obj); +VALUE rb_iseq_load(VALUE data, VALUE parent, VALUE opt); +VALUE rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc); +struct st_table *ruby_insn_make_insn_table(void); +unsigned int rb_iseq_line_no(const rb_iseq_t *iseq, size_t pos); +void rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events); +void rb_iseq_trace_set_all(rb_event_flag_t turnon_events); +void rb_iseq_trace_on_all(void); + +VALUE rb_iseqw_new(const rb_iseq_t *iseq); +const rb_iseq_t *rb_iseqw_to_iseq(VALUE iseqw); + +VALUE rb_iseq_absolute_path(const rb_iseq_t *iseq); /* obsolete */ +VALUE rb_iseq_label(const rb_iseq_t *iseq); +VALUE rb_iseq_base_label(const rb_iseq_t *iseq); +VALUE rb_iseq_first_lineno(const rb_iseq_t *iseq); +VALUE rb_iseq_method_name(const rb_iseq_t *iseq); +void rb_iseq_code_range(const rb_iseq_t *iseq, int *first_lineno, int *first_column, int *last_lineno, int *last_column); + +/* proc.c */ +const rb_iseq_t *rb_method_iseq(VALUE body); +const rb_iseq_t *rb_proc_get_iseq(VALUE proc, int *is_proc); + +struct rb_compile_option_struct { + unsigned int inline_const_cache: 1; + unsigned int peephole_optimization: 1; + unsigned int tailcall_optimization: 1; + unsigned int specialized_instruction: 1; + unsigned int operands_unification: 1; + unsigned int instructions_unification: 1; + unsigned int stack_caching: 1; + unsigned int frozen_string_literal: 1; + unsigned int debug_frozen_string_literal: 1; + unsigned int coverage_enabled: 1; + int debug_level; +}; + +struct iseq_insn_info_entry { + unsigned int position; + int line_no; + rb_event_flag_t events; +}; + +struct iseq_catch_table_entry { + enum catch_type { + CATCH_TYPE_RESCUE = INT2FIX(1), + CATCH_TYPE_ENSURE = INT2FIX(2), + CATCH_TYPE_RETRY = INT2FIX(3), + CATCH_TYPE_BREAK = INT2FIX(4), + CATCH_TYPE_REDO = INT2FIX(5), + CATCH_TYPE_NEXT = INT2FIX(6) + } type; + + /* + * iseq type: + * CATCH_TYPE_RESCUE, CATCH_TYPE_ENSURE: + * use iseq as continuation. + * + * CATCH_TYPE_BREAK (iter): + * use iseq as key. + * + * CATCH_TYPE_BREAK (while), CATCH_TYPE_RETRY, + * CATCH_TYPE_REDO, CATCH_TYPE_NEXT: + * NULL. 
+ */ + const rb_iseq_t *iseq; + + unsigned int start; + unsigned int end; + unsigned int cont; + unsigned int sp; +}; + +PACKED_STRUCT_UNALIGNED(struct iseq_catch_table { + unsigned int size; + struct iseq_catch_table_entry entries[1]; /* flexible array */ +}); + +static inline int +iseq_catch_table_bytes(int n) +{ + enum { + catch_table_entries_max = (INT_MAX - sizeof(struct iseq_catch_table)) / sizeof(struct iseq_catch_table_entry) + }; + if (n > catch_table_entries_max) rb_fatal("too large iseq_catch_table - %d", n); + return (int)(sizeof(struct iseq_catch_table) + + (n - 1) * sizeof(struct iseq_catch_table_entry)); +} + +#define INITIAL_ISEQ_COMPILE_DATA_STORAGE_BUFF_SIZE (512) + +struct iseq_compile_data_storage { + struct iseq_compile_data_storage *next; + unsigned int pos; + unsigned int size; + char buff[1]; /* flexible array */ +}; + +/* account for flexible array */ +#define SIZEOF_ISEQ_COMPILE_DATA_STORAGE \ + (sizeof(struct iseq_compile_data_storage) - 1) + +/* defined? */ + +enum defined_type { + DEFINED_NIL = 1, + DEFINED_IVAR, + DEFINED_LVAR, + DEFINED_GVAR, + DEFINED_CVAR, + DEFINED_CONST, + DEFINED_METHOD, + DEFINED_YIELD, + DEFINED_ZSUPER, + DEFINED_SELF, + DEFINED_TRUE, + DEFINED_FALSE, + DEFINED_ASGN, + DEFINED_EXPR, + DEFINED_IVAR2, + DEFINED_REF, + DEFINED_FUNC +}; + +VALUE rb_iseq_defined_string(enum defined_type type); +void rb_iseq_make_compile_option(struct rb_compile_option_struct *option, VALUE opt); + +/* vm.c */ +VALUE rb_iseq_local_variables(const rb_iseq_t *iseq); + +RUBY_SYMBOL_EXPORT_END + +#endif /* RUBY_ISEQ_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/known_errors.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/known_errors.inc new file mode 100644 index 0000000..ac4a9ea --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/known_errors.inc @@ -0,0 +1,746 @@ +/** -*-c-*- + * DO NOT EDIT + * This file is automatically generated by tool/generic_erb.rb from + * template/known_errors.inc.tmpl and defs/known_errors.def. 
+ */ + +#ifdef EPERM + defined_error("EPERM", EPERM) +#else + undefined_error("EPERM") +#endif +#ifdef ENOENT + defined_error("ENOENT", ENOENT) +#else + undefined_error("ENOENT") +#endif +#ifdef ESRCH + defined_error("ESRCH", ESRCH) +#else + undefined_error("ESRCH") +#endif +#ifdef EINTR + defined_error("EINTR", EINTR) +#else + undefined_error("EINTR") +#endif +#ifdef EIO + defined_error("EIO", EIO) +#else + undefined_error("EIO") +#endif +#ifdef ENXIO + defined_error("ENXIO", ENXIO) +#else + undefined_error("ENXIO") +#endif +#ifdef E2BIG + defined_error("E2BIG", E2BIG) +#else + undefined_error("E2BIG") +#endif +#ifdef ENOEXEC + defined_error("ENOEXEC", ENOEXEC) +#else + undefined_error("ENOEXEC") +#endif +#ifdef EBADF + defined_error("EBADF", EBADF) +#else + undefined_error("EBADF") +#endif +#ifdef ECHILD + defined_error("ECHILD", ECHILD) +#else + undefined_error("ECHILD") +#endif +#ifdef EAGAIN + defined_error("EAGAIN", EAGAIN) +#else + undefined_error("EAGAIN") +#endif +#ifdef ENOMEM + defined_error("ENOMEM", ENOMEM) +#else + undefined_error("ENOMEM") +#endif +#ifdef EACCES + defined_error("EACCES", EACCES) +#else + undefined_error("EACCES") +#endif +#ifdef EFAULT + defined_error("EFAULT", EFAULT) +#else + undefined_error("EFAULT") +#endif +#ifdef ENOTBLK + defined_error("ENOTBLK", ENOTBLK) +#else + undefined_error("ENOTBLK") +#endif +#ifdef EBUSY + defined_error("EBUSY", EBUSY) +#else + undefined_error("EBUSY") +#endif +#ifdef EEXIST + defined_error("EEXIST", EEXIST) +#else + undefined_error("EEXIST") +#endif +#ifdef EXDEV + defined_error("EXDEV", EXDEV) +#else + undefined_error("EXDEV") +#endif +#ifdef ENODEV + defined_error("ENODEV", ENODEV) +#else + undefined_error("ENODEV") +#endif +#ifdef ENOTDIR + defined_error("ENOTDIR", ENOTDIR) +#else + undefined_error("ENOTDIR") +#endif +#ifdef EISDIR + defined_error("EISDIR", EISDIR) +#else + undefined_error("EISDIR") +#endif +#ifdef EINVAL + defined_error("EINVAL", EINVAL) +#else + undefined_error("EINVAL") +#endif +#ifdef ENFILE + defined_error("ENFILE", ENFILE) +#else + undefined_error("ENFILE") +#endif +#ifdef EMFILE + defined_error("EMFILE", EMFILE) +#else + undefined_error("EMFILE") +#endif +#ifdef ENOTTY + defined_error("ENOTTY", ENOTTY) +#else + undefined_error("ENOTTY") +#endif +#ifdef ETXTBSY + defined_error("ETXTBSY", ETXTBSY) +#else + undefined_error("ETXTBSY") +#endif +#ifdef EFBIG + defined_error("EFBIG", EFBIG) +#else + undefined_error("EFBIG") +#endif +#ifdef ENOSPC + defined_error("ENOSPC", ENOSPC) +#else + undefined_error("ENOSPC") +#endif +#ifdef ESPIPE + defined_error("ESPIPE", ESPIPE) +#else + undefined_error("ESPIPE") +#endif +#ifdef EROFS + defined_error("EROFS", EROFS) +#else + undefined_error("EROFS") +#endif +#ifdef EMLINK + defined_error("EMLINK", EMLINK) +#else + undefined_error("EMLINK") +#endif +#ifdef EPIPE + defined_error("EPIPE", EPIPE) +#else + undefined_error("EPIPE") +#endif +#ifdef EDOM + defined_error("EDOM", EDOM) +#else + undefined_error("EDOM") +#endif +#ifdef ERANGE + defined_error("ERANGE", ERANGE) +#else + undefined_error("ERANGE") +#endif +#ifdef EDEADLK + defined_error("EDEADLK", EDEADLK) +#else + undefined_error("EDEADLK") +#endif +#ifdef ENAMETOOLONG + defined_error("ENAMETOOLONG", ENAMETOOLONG) +#else + undefined_error("ENAMETOOLONG") +#endif +#ifdef ENOLCK + defined_error("ENOLCK", ENOLCK) +#else + undefined_error("ENOLCK") +#endif +#ifdef ENOSYS + defined_error("ENOSYS", ENOSYS) +#else + undefined_error("ENOSYS") +#endif +#ifdef ENOTEMPTY + defined_error("ENOTEMPTY", ENOTEMPTY) +#else + 
undefined_error("ENOTEMPTY") +#endif +#ifdef ELOOP + defined_error("ELOOP", ELOOP) +#else + undefined_error("ELOOP") +#endif +#ifdef EWOULDBLOCK + defined_error("EWOULDBLOCK", EWOULDBLOCK) +#else + undefined_error("EWOULDBLOCK") +#endif +#ifdef ENOMSG + defined_error("ENOMSG", ENOMSG) +#else + undefined_error("ENOMSG") +#endif +#ifdef EIDRM + defined_error("EIDRM", EIDRM) +#else + undefined_error("EIDRM") +#endif +#ifdef ECHRNG + defined_error("ECHRNG", ECHRNG) +#else + undefined_error("ECHRNG") +#endif +#ifdef EL2NSYNC + defined_error("EL2NSYNC", EL2NSYNC) +#else + undefined_error("EL2NSYNC") +#endif +#ifdef EL3HLT + defined_error("EL3HLT", EL3HLT) +#else + undefined_error("EL3HLT") +#endif +#ifdef EL3RST + defined_error("EL3RST", EL3RST) +#else + undefined_error("EL3RST") +#endif +#ifdef ELNRNG + defined_error("ELNRNG", ELNRNG) +#else + undefined_error("ELNRNG") +#endif +#ifdef EUNATCH + defined_error("EUNATCH", EUNATCH) +#else + undefined_error("EUNATCH") +#endif +#ifdef ENOCSI + defined_error("ENOCSI", ENOCSI) +#else + undefined_error("ENOCSI") +#endif +#ifdef EL2HLT + defined_error("EL2HLT", EL2HLT) +#else + undefined_error("EL2HLT") +#endif +#ifdef EBADE + defined_error("EBADE", EBADE) +#else + undefined_error("EBADE") +#endif +#ifdef EBADR + defined_error("EBADR", EBADR) +#else + undefined_error("EBADR") +#endif +#ifdef EXFULL + defined_error("EXFULL", EXFULL) +#else + undefined_error("EXFULL") +#endif +#ifdef ENOANO + defined_error("ENOANO", ENOANO) +#else + undefined_error("ENOANO") +#endif +#ifdef EBADRQC + defined_error("EBADRQC", EBADRQC) +#else + undefined_error("EBADRQC") +#endif +#ifdef EBADSLT + defined_error("EBADSLT", EBADSLT) +#else + undefined_error("EBADSLT") +#endif +#ifdef EDEADLOCK + defined_error("EDEADLOCK", EDEADLOCK) +#else + undefined_error("EDEADLOCK") +#endif +#ifdef EBFONT + defined_error("EBFONT", EBFONT) +#else + undefined_error("EBFONT") +#endif +#ifdef ENOSTR + defined_error("ENOSTR", ENOSTR) +#else + undefined_error("ENOSTR") +#endif +#ifdef ENODATA + defined_error("ENODATA", ENODATA) +#else + undefined_error("ENODATA") +#endif +#ifdef ETIME + defined_error("ETIME", ETIME) +#else + undefined_error("ETIME") +#endif +#ifdef ENOSR + defined_error("ENOSR", ENOSR) +#else + undefined_error("ENOSR") +#endif +#ifdef ENONET + defined_error("ENONET", ENONET) +#else + undefined_error("ENONET") +#endif +#ifdef ENOPKG + defined_error("ENOPKG", ENOPKG) +#else + undefined_error("ENOPKG") +#endif +#ifdef EREMOTE + defined_error("EREMOTE", EREMOTE) +#else + undefined_error("EREMOTE") +#endif +#ifdef ENOLINK + defined_error("ENOLINK", ENOLINK) +#else + undefined_error("ENOLINK") +#endif +#ifdef EADV + defined_error("EADV", EADV) +#else + undefined_error("EADV") +#endif +#ifdef ESRMNT + defined_error("ESRMNT", ESRMNT) +#else + undefined_error("ESRMNT") +#endif +#ifdef ECOMM + defined_error("ECOMM", ECOMM) +#else + undefined_error("ECOMM") +#endif +#ifdef EPROTO + defined_error("EPROTO", EPROTO) +#else + undefined_error("EPROTO") +#endif +#ifdef EMULTIHOP + defined_error("EMULTIHOP", EMULTIHOP) +#else + undefined_error("EMULTIHOP") +#endif +#ifdef EDOTDOT + defined_error("EDOTDOT", EDOTDOT) +#else + undefined_error("EDOTDOT") +#endif +#ifdef EBADMSG + defined_error("EBADMSG", EBADMSG) +#else + undefined_error("EBADMSG") +#endif +#ifdef EOVERFLOW + defined_error("EOVERFLOW", EOVERFLOW) +#else + undefined_error("EOVERFLOW") +#endif +#ifdef ENOTUNIQ + defined_error("ENOTUNIQ", ENOTUNIQ) +#else + undefined_error("ENOTUNIQ") +#endif +#ifdef EBADFD + defined_error("EBADFD", 
EBADFD) +#else + undefined_error("EBADFD") +#endif +#ifdef EREMCHG + defined_error("EREMCHG", EREMCHG) +#else + undefined_error("EREMCHG") +#endif +#ifdef ELIBACC + defined_error("ELIBACC", ELIBACC) +#else + undefined_error("ELIBACC") +#endif +#ifdef ELIBBAD + defined_error("ELIBBAD", ELIBBAD) +#else + undefined_error("ELIBBAD") +#endif +#ifdef ELIBSCN + defined_error("ELIBSCN", ELIBSCN) +#else + undefined_error("ELIBSCN") +#endif +#ifdef ELIBMAX + defined_error("ELIBMAX", ELIBMAX) +#else + undefined_error("ELIBMAX") +#endif +#ifdef ELIBEXEC + defined_error("ELIBEXEC", ELIBEXEC) +#else + undefined_error("ELIBEXEC") +#endif +#ifdef EILSEQ + defined_error("EILSEQ", EILSEQ) +#else + undefined_error("EILSEQ") +#endif +#ifdef ERESTART + defined_error("ERESTART", ERESTART) +#else + undefined_error("ERESTART") +#endif +#ifdef ESTRPIPE + defined_error("ESTRPIPE", ESTRPIPE) +#else + undefined_error("ESTRPIPE") +#endif +#ifdef EUSERS + defined_error("EUSERS", EUSERS) +#else + undefined_error("EUSERS") +#endif +#ifdef ENOTSOCK + defined_error("ENOTSOCK", ENOTSOCK) +#else + undefined_error("ENOTSOCK") +#endif +#ifdef EDESTADDRREQ + defined_error("EDESTADDRREQ", EDESTADDRREQ) +#else + undefined_error("EDESTADDRREQ") +#endif +#ifdef EMSGSIZE + defined_error("EMSGSIZE", EMSGSIZE) +#else + undefined_error("EMSGSIZE") +#endif +#ifdef EPROTOTYPE + defined_error("EPROTOTYPE", EPROTOTYPE) +#else + undefined_error("EPROTOTYPE") +#endif +#ifdef ENOPROTOOPT + defined_error("ENOPROTOOPT", ENOPROTOOPT) +#else + undefined_error("ENOPROTOOPT") +#endif +#ifdef EPROTONOSUPPORT + defined_error("EPROTONOSUPPORT", EPROTONOSUPPORT) +#else + undefined_error("EPROTONOSUPPORT") +#endif +#ifdef ESOCKTNOSUPPORT + defined_error("ESOCKTNOSUPPORT", ESOCKTNOSUPPORT) +#else + undefined_error("ESOCKTNOSUPPORT") +#endif +#ifdef EOPNOTSUPP + defined_error("EOPNOTSUPP", EOPNOTSUPP) +#else + undefined_error("EOPNOTSUPP") +#endif +#ifdef EPFNOSUPPORT + defined_error("EPFNOSUPPORT", EPFNOSUPPORT) +#else + undefined_error("EPFNOSUPPORT") +#endif +#ifdef EAFNOSUPPORT + defined_error("EAFNOSUPPORT", EAFNOSUPPORT) +#else + undefined_error("EAFNOSUPPORT") +#endif +#ifdef EADDRINUSE + defined_error("EADDRINUSE", EADDRINUSE) +#else + undefined_error("EADDRINUSE") +#endif +#ifdef EADDRNOTAVAIL + defined_error("EADDRNOTAVAIL", EADDRNOTAVAIL) +#else + undefined_error("EADDRNOTAVAIL") +#endif +#ifdef ENETDOWN + defined_error("ENETDOWN", ENETDOWN) +#else + undefined_error("ENETDOWN") +#endif +#ifdef ENETUNREACH + defined_error("ENETUNREACH", ENETUNREACH) +#else + undefined_error("ENETUNREACH") +#endif +#ifdef ENETRESET + defined_error("ENETRESET", ENETRESET) +#else + undefined_error("ENETRESET") +#endif +#ifdef ECONNABORTED + defined_error("ECONNABORTED", ECONNABORTED) +#else + undefined_error("ECONNABORTED") +#endif +#ifdef ECONNRESET + defined_error("ECONNRESET", ECONNRESET) +#else + undefined_error("ECONNRESET") +#endif +#ifdef ENOBUFS + defined_error("ENOBUFS", ENOBUFS) +#else + undefined_error("ENOBUFS") +#endif +#ifdef EISCONN + defined_error("EISCONN", EISCONN) +#else + undefined_error("EISCONN") +#endif +#ifdef ENOTCONN + defined_error("ENOTCONN", ENOTCONN) +#else + undefined_error("ENOTCONN") +#endif +#ifdef ESHUTDOWN + defined_error("ESHUTDOWN", ESHUTDOWN) +#else + undefined_error("ESHUTDOWN") +#endif +#ifdef ETOOMANYREFS + defined_error("ETOOMANYREFS", ETOOMANYREFS) +#else + undefined_error("ETOOMANYREFS") +#endif +#ifdef ETIMEDOUT + defined_error("ETIMEDOUT", ETIMEDOUT) +#else + undefined_error("ETIMEDOUT") +#endif +#ifdef ECONNREFUSED + 
defined_error("ECONNREFUSED", ECONNREFUSED) +#else + undefined_error("ECONNREFUSED") +#endif +#ifdef EHOSTDOWN + defined_error("EHOSTDOWN", EHOSTDOWN) +#else + undefined_error("EHOSTDOWN") +#endif +#ifdef EHOSTUNREACH + defined_error("EHOSTUNREACH", EHOSTUNREACH) +#else + undefined_error("EHOSTUNREACH") +#endif +#ifdef EALREADY + defined_error("EALREADY", EALREADY) +#else + undefined_error("EALREADY") +#endif +#ifdef EINPROGRESS + defined_error("EINPROGRESS", EINPROGRESS) +#else + undefined_error("EINPROGRESS") +#endif +#ifdef ESTALE + defined_error("ESTALE", ESTALE) +#else + undefined_error("ESTALE") +#endif +#ifdef EUCLEAN + defined_error("EUCLEAN", EUCLEAN) +#else + undefined_error("EUCLEAN") +#endif +#ifdef ENOTNAM + defined_error("ENOTNAM", ENOTNAM) +#else + undefined_error("ENOTNAM") +#endif +#ifdef ENAVAIL + defined_error("ENAVAIL", ENAVAIL) +#else + undefined_error("ENAVAIL") +#endif +#ifdef EISNAM + defined_error("EISNAM", EISNAM) +#else + undefined_error("EISNAM") +#endif +#ifdef EREMOTEIO + defined_error("EREMOTEIO", EREMOTEIO) +#else + undefined_error("EREMOTEIO") +#endif +#ifdef EDQUOT + defined_error("EDQUOT", EDQUOT) +#else + undefined_error("EDQUOT") +#endif +#ifdef ECANCELED + defined_error("ECANCELED", ECANCELED) +#else + undefined_error("ECANCELED") +#endif +#ifdef EKEYEXPIRED + defined_error("EKEYEXPIRED", EKEYEXPIRED) +#else + undefined_error("EKEYEXPIRED") +#endif +#ifdef EKEYREJECTED + defined_error("EKEYREJECTED", EKEYREJECTED) +#else + undefined_error("EKEYREJECTED") +#endif +#ifdef EKEYREVOKED + defined_error("EKEYREVOKED", EKEYREVOKED) +#else + undefined_error("EKEYREVOKED") +#endif +#ifdef EMEDIUMTYPE + defined_error("EMEDIUMTYPE", EMEDIUMTYPE) +#else + undefined_error("EMEDIUMTYPE") +#endif +#ifdef ENOKEY + defined_error("ENOKEY", ENOKEY) +#else + undefined_error("ENOKEY") +#endif +#ifdef ENOMEDIUM + defined_error("ENOMEDIUM", ENOMEDIUM) +#else + undefined_error("ENOMEDIUM") +#endif +#ifdef ENOTRECOVERABLE + defined_error("ENOTRECOVERABLE", ENOTRECOVERABLE) +#else + undefined_error("ENOTRECOVERABLE") +#endif +#ifdef EOWNERDEAD + defined_error("EOWNERDEAD", EOWNERDEAD) +#else + undefined_error("EOWNERDEAD") +#endif +#ifdef ERFKILL + defined_error("ERFKILL", ERFKILL) +#else + undefined_error("ERFKILL") +#endif +#ifdef EAUTH + defined_error("EAUTH", EAUTH) +#else + undefined_error("EAUTH") +#endif +#ifdef EBADRPC + defined_error("EBADRPC", EBADRPC) +#else + undefined_error("EBADRPC") +#endif +#ifdef EDOOFUS + defined_error("EDOOFUS", EDOOFUS) +#else + undefined_error("EDOOFUS") +#endif +#ifdef EFTYPE + defined_error("EFTYPE", EFTYPE) +#else + undefined_error("EFTYPE") +#endif +#ifdef ENEEDAUTH + defined_error("ENEEDAUTH", ENEEDAUTH) +#else + undefined_error("ENEEDAUTH") +#endif +#ifdef ENOATTR + defined_error("ENOATTR", ENOATTR) +#else + undefined_error("ENOATTR") +#endif +#ifdef ENOTSUP + defined_error("ENOTSUP", ENOTSUP) +#else + undefined_error("ENOTSUP") +#endif +#ifdef EPROCLIM + defined_error("EPROCLIM", EPROCLIM) +#else + undefined_error("EPROCLIM") +#endif +#ifdef EPROCUNAVAIL + defined_error("EPROCUNAVAIL", EPROCUNAVAIL) +#else + undefined_error("EPROCUNAVAIL") +#endif +#ifdef EPROGMISMATCH + defined_error("EPROGMISMATCH", EPROGMISMATCH) +#else + undefined_error("EPROGMISMATCH") +#endif +#ifdef EPROGUNAVAIL + defined_error("EPROGUNAVAIL", EPROGUNAVAIL) +#else + undefined_error("EPROGUNAVAIL") +#endif +#ifdef ERPCMISMATCH + defined_error("ERPCMISMATCH", ERPCMISMATCH) +#else + undefined_error("ERPCMISMATCH") +#endif +#ifdef EIPSEC + defined_error("EIPSEC", 
EIPSEC) +#else + undefined_error("EIPSEC") +#endif +#ifdef EHWPOISON + defined_error("EHWPOISON", EHWPOISON) +#else + undefined_error("EHWPOISON") +#endif +#ifdef ECAPMODE + defined_error("ECAPMODE", ECAPMODE) +#else + undefined_error("ECAPMODE") +#endif +#ifdef ENOTCAPABLE + defined_error("ENOTCAPABLE", ENOTCAPABLE) +#else + undefined_error("ENOTCAPABLE") +#endif diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/method.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/method.h new file mode 100644 index 0000000..1d719e9 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/method.h @@ -0,0 +1,218 @@ +/********************************************************************** + + method.h - + + $Author: mame $ + created at: Wed Jul 15 20:02:33 2009 + + Copyright (C) 2009 Koichi Sasada + +**********************************************************************/ +#ifndef RUBY_METHOD_H +#define RUBY_METHOD_H 1 + +#include "internal.h" + +#ifndef END_OF_ENUMERATION +# if defined(__GNUC__) &&! defined(__STRICT_ANSI__) +# define END_OF_ENUMERATION(key) +# else +# define END_OF_ENUMERATION(key) END_OF_##key##_PLACEHOLDER = 0 +# endif +#endif + +/* cref */ + +typedef enum { + METHOD_VISI_UNDEF = 0x00, + METHOD_VISI_PUBLIC = 0x01, + METHOD_VISI_PRIVATE = 0x02, + METHOD_VISI_PROTECTED = 0x03, + + METHOD_VISI_MASK = 0x03 +} rb_method_visibility_t; + +typedef struct rb_scope_visi_struct { + rb_method_visibility_t method_visi : 3; + unsigned int module_func : 1; +} rb_scope_visibility_t; + +/*! CREF (Class REFerence) */ +typedef struct rb_cref_struct { + VALUE flags; + const VALUE refinements; + const VALUE klass; + struct rb_cref_struct * const next; + const rb_scope_visibility_t scope_visi; +} rb_cref_t; + +/* method data type */ + +typedef struct rb_method_entry_struct { + VALUE flags; + const VALUE defined_class; + struct rb_method_definition_struct * const def; + ID called_id; + const VALUE owner; +} rb_method_entry_t; + +typedef struct rb_callable_method_entry_struct { /* same fields with rb_method_entry_t */ + VALUE flags; + const VALUE defined_class; + struct rb_method_definition_struct * const def; + ID called_id; + const VALUE owner; +} rb_callable_method_entry_t; + +#define METHOD_ENTRY_VISI(me) (rb_method_visibility_t)(((me)->flags & (IMEMO_FL_USER0 | IMEMO_FL_USER1)) >> (IMEMO_FL_USHIFT+0)) +#define METHOD_ENTRY_BASIC(me) (int) (((me)->flags & (IMEMO_FL_USER2 )) >> (IMEMO_FL_USHIFT+2)) +#define METHOD_ENTRY_COMPLEMENTED(me) ((me)->flags & IMEMO_FL_USER3) +#define METHOD_ENTRY_COMPLEMENTED_SET(me) ((me)->flags = (me)->flags | IMEMO_FL_USER3) + +static inline void +METHOD_ENTRY_VISI_SET(rb_method_entry_t *me, rb_method_visibility_t visi) +{ + VM_ASSERT((int)visi >= 0 && visi <= 3); + me->flags = (me->flags & ~(IMEMO_FL_USER0 | IMEMO_FL_USER1)) | (visi << (IMEMO_FL_USHIFT+0)); +} +static inline void +METHOD_ENTRY_BASIC_SET(rb_method_entry_t *me, unsigned int basic) +{ + VM_ASSERT(basic <= 1); + me->flags = (me->flags & ~(IMEMO_FL_USER2 )) | (basic << (IMEMO_FL_USHIFT+2)); +} +static inline void +METHOD_ENTRY_FLAGS_SET(rb_method_entry_t *me, rb_method_visibility_t visi, unsigned int basic) +{ + VM_ASSERT((int)visi >= 0 && visi <= 3); + VM_ASSERT(basic <= 1); + me->flags = + (me->flags & ~(IMEMO_FL_USER0|IMEMO_FL_USER1|IMEMO_FL_USER2)) | + ((visi << (IMEMO_FL_USHIFT+0)) | (basic << (IMEMO_FL_USHIFT+2))); +} +static inline void +METHOD_ENTRY_FLAGS_COPY(rb_method_entry_t *dst, const rb_method_entry_t *src) +{ + dst->flags = + (dst->flags & ~(IMEMO_FL_USER0|IMEMO_FL_USER1|IMEMO_FL_USER2)) 
| + (src->flags & (IMEMO_FL_USER0|IMEMO_FL_USER1|IMEMO_FL_USER2)); +} + +typedef enum { + VM_METHOD_TYPE_ISEQ, /*!< Ruby method */ + VM_METHOD_TYPE_CFUNC, /*!< C method */ + VM_METHOD_TYPE_ATTRSET, /*!< attr_writer or attr_accessor */ + VM_METHOD_TYPE_IVAR, /*!< attr_reader or attr_accessor */ + VM_METHOD_TYPE_BMETHOD, + VM_METHOD_TYPE_ZSUPER, + VM_METHOD_TYPE_ALIAS, + VM_METHOD_TYPE_UNDEF, + VM_METHOD_TYPE_NOTIMPLEMENTED, + VM_METHOD_TYPE_OPTIMIZED, /*!< Kernel#send, Proc#call, etc */ + VM_METHOD_TYPE_MISSING, /*!< wrapper for method_missing(id) */ + VM_METHOD_TYPE_REFINED, /*!< refinement */ + + END_OF_ENUMERATION(VM_METHOD_TYPE) +} rb_method_type_t; + +#ifndef rb_iseq_t +typedef struct rb_iseq_struct rb_iseq_t; +#define rb_iseq_t rb_iseq_t +#endif + +typedef struct rb_method_iseq_struct { + const rb_iseq_t * const iseqptr; /*!< iseq pointer, should be separated from iseqval */ + rb_cref_t * const cref; /*!< class reference, should be marked */ +} rb_method_iseq_t; /* check rb_add_method_iseq() when modify the fields */ + +typedef struct rb_method_cfunc_struct { + VALUE (*func)(ANYARGS); + VALUE (*invoker)(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv); + int argc; +} rb_method_cfunc_t; + +typedef struct rb_method_attr_struct { + ID id; + const VALUE location; /* should be marked */ +} rb_method_attr_t; + +typedef struct rb_method_alias_struct { + const struct rb_method_entry_struct * const original_me; /* original_me->klass is original owner */ +} rb_method_alias_t; + +typedef struct rb_method_refined_struct { + const struct rb_method_entry_struct * const orig_me; + const VALUE owner; +} rb_method_refined_t; + +enum method_optimized_type { + OPTIMIZED_METHOD_TYPE_SEND, + OPTIMIZED_METHOD_TYPE_CALL, + OPTIMIZED_METHOD_TYPE__MAX +}; + +PACKED_STRUCT_UNALIGNED(struct rb_method_definition_struct { + unsigned int type : 4; /* method type */ + int alias_count : 28; + int complemented_count : 28; + + union { + rb_method_iseq_t iseq; + rb_method_cfunc_t cfunc; + rb_method_attr_t attr; + rb_method_alias_t alias; + rb_method_refined_t refined; + + const VALUE proc; /* should be marked */ + enum method_optimized_type optimize_type; + } body; + + ID original_id; +}); + +typedef struct rb_method_definition_struct rb_method_definition_t; + +#define UNDEFINED_METHOD_ENTRY_P(me) (!(me) || !(me)->def || (me)->def->type == VM_METHOD_TYPE_UNDEF) +#define UNDEFINED_REFINED_METHOD_P(def) \ + ((def)->type == VM_METHOD_TYPE_REFINED && \ + UNDEFINED_METHOD_ENTRY_P((def)->body.refined.orig_me)) + +void rb_add_method_cfunc(VALUE klass, ID mid, VALUE (*func)(ANYARGS), int argc, rb_method_visibility_t visi); +void rb_add_method_iseq(VALUE klass, ID mid, const rb_iseq_t *iseq, rb_cref_t *cref, rb_method_visibility_t visi); +void rb_add_refined_method_entry(VALUE refined_class, ID mid); + +rb_method_entry_t *rb_add_method(VALUE klass, ID mid, rb_method_type_t type, void *option, rb_method_visibility_t visi); +rb_method_entry_t *rb_method_entry_set(VALUE klass, ID mid, const rb_method_entry_t *, rb_method_visibility_t noex); +rb_method_entry_t *rb_method_entry_create(ID called_id, VALUE klass, rb_method_visibility_t visi, const rb_method_definition_t *def); + +const rb_method_entry_t *rb_method_entry_at(VALUE obj, ID id); + +const rb_method_entry_t *rb_method_entry(VALUE klass, ID id); +const rb_method_entry_t *rb_method_entry_without_refinements(VALUE klass, ID id, VALUE *defined_class); +const rb_method_entry_t *rb_resolve_refined_method(VALUE refinements, const rb_method_entry_t *me); 
+RUBY_SYMBOL_EXPORT_BEGIN +const rb_method_entry_t *rb_resolve_me_location(const rb_method_entry_t *, VALUE[5]); +RUBY_SYMBOL_EXPORT_END + +const rb_callable_method_entry_t *rb_callable_method_entry(VALUE klass, ID id); +const rb_callable_method_entry_t *rb_callable_method_entry_with_refinements(VALUE klass, ID id, VALUE *defined_class); +const rb_callable_method_entry_t *rb_callable_method_entry_without_refinements(VALUE klass, ID id, VALUE *defined_class); + +int rb_method_entry_arity(const rb_method_entry_t *me); +int rb_method_entry_eq(const rb_method_entry_t *m1, const rb_method_entry_t *m2); +st_index_t rb_hash_method_entry(st_index_t hash, const rb_method_entry_t *me); + +VALUE rb_method_entry_location(const rb_method_entry_t *me); +VALUE rb_mod_method_location(VALUE mod, ID id); +VALUE rb_obj_method_location(VALUE obj, ID id); + +void rb_free_method_entry(const rb_method_entry_t *me); +void rb_sweep_method_entry(void *vm); + +const rb_method_entry_t *rb_method_entry_clone(const rb_method_entry_t *me); +const rb_callable_method_entry_t *rb_method_entry_complement_defined_class(const rb_method_entry_t *src_me, ID called_id, VALUE defined_class); +void rb_method_entry_copy(rb_method_entry_t *dst, const rb_method_entry_t *src); + +void rb_scope_visibility_set(rb_method_visibility_t); + +#endif /* RUBY_METHOD_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/node.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/node.h new file mode 100644 index 0000000..5987dba --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/node.h @@ -0,0 +1,540 @@ +/********************************************************************** + + node.h - + + $Author: mame $ + created at: Fri May 28 15:14:02 JST 1993 + + Copyright (C) 1993-2007 Yukihiro Matsumoto + +**********************************************************************/ + +#ifndef RUBY_NODE_H +#define RUBY_NODE_H 1 + +#if defined(__cplusplus) +extern "C" { +#if 0 +} /* satisfy cc-mode */ +#endif +#endif + +enum node_type { + NODE_SCOPE, +#define NODE_SCOPE NODE_SCOPE + NODE_BLOCK, +#define NODE_BLOCK NODE_BLOCK + NODE_IF, +#define NODE_IF NODE_IF + NODE_UNLESS, +#define NODE_UNLESS NODE_UNLESS + NODE_CASE, +#define NODE_CASE NODE_CASE + NODE_CASE2, +#define NODE_CASE2 NODE_CASE2 + NODE_WHEN, +#define NODE_WHEN NODE_WHEN + NODE_WHILE, +#define NODE_WHILE NODE_WHILE + NODE_UNTIL, +#define NODE_UNTIL NODE_UNTIL + NODE_ITER, +#define NODE_ITER NODE_ITER + NODE_FOR, +#define NODE_FOR NODE_FOR + NODE_BREAK, +#define NODE_BREAK NODE_BREAK + NODE_NEXT, +#define NODE_NEXT NODE_NEXT + NODE_REDO, +#define NODE_REDO NODE_REDO + NODE_RETRY, +#define NODE_RETRY NODE_RETRY + NODE_BEGIN, +#define NODE_BEGIN NODE_BEGIN + NODE_RESCUE, +#define NODE_RESCUE NODE_RESCUE + NODE_RESBODY, +#define NODE_RESBODY NODE_RESBODY + NODE_ENSURE, +#define NODE_ENSURE NODE_ENSURE + NODE_AND, +#define NODE_AND NODE_AND + NODE_OR, +#define NODE_OR NODE_OR + NODE_MASGN, +#define NODE_MASGN NODE_MASGN + NODE_LASGN, +#define NODE_LASGN NODE_LASGN + NODE_DASGN, +#define NODE_DASGN NODE_DASGN + NODE_DASGN_CURR, +#define NODE_DASGN_CURR NODE_DASGN_CURR + NODE_GASGN, +#define NODE_GASGN NODE_GASGN + NODE_IASGN, +#define NODE_IASGN NODE_IASGN + NODE_CDECL, +#define NODE_CDECL NODE_CDECL + NODE_CVASGN, +#define NODE_CVASGN NODE_CVASGN + NODE_OP_ASGN1, +#define NODE_OP_ASGN1 NODE_OP_ASGN1 + NODE_OP_ASGN2, +#define NODE_OP_ASGN2 NODE_OP_ASGN2 + NODE_OP_ASGN_AND, +#define NODE_OP_ASGN_AND NODE_OP_ASGN_AND + NODE_OP_ASGN_OR, +#define NODE_OP_ASGN_OR NODE_OP_ASGN_OR + 
NODE_OP_CDECL, +#define NODE_OP_CDECL NODE_OP_CDECL + NODE_CALL, +#define NODE_CALL NODE_CALL + NODE_OPCALL, +#define NODE_OPCALL NODE_OPCALL + NODE_FCALL, +#define NODE_FCALL NODE_FCALL + NODE_VCALL, +#define NODE_VCALL NODE_VCALL + NODE_QCALL, +#define NODE_QCALL NODE_QCALL + NODE_SUPER, +#define NODE_SUPER NODE_SUPER + NODE_ZSUPER, +#define NODE_ZSUPER NODE_ZSUPER + NODE_ARRAY, +#define NODE_ARRAY NODE_ARRAY + NODE_ZARRAY, +#define NODE_ZARRAY NODE_ZARRAY + NODE_VALUES, +#define NODE_VALUES NODE_VALUES + NODE_HASH, +#define NODE_HASH NODE_HASH + NODE_RETURN, +#define NODE_RETURN NODE_RETURN + NODE_YIELD, +#define NODE_YIELD NODE_YIELD + NODE_LVAR, +#define NODE_LVAR NODE_LVAR + NODE_DVAR, +#define NODE_DVAR NODE_DVAR + NODE_GVAR, +#define NODE_GVAR NODE_GVAR + NODE_IVAR, +#define NODE_IVAR NODE_IVAR + NODE_CONST, +#define NODE_CONST NODE_CONST + NODE_CVAR, +#define NODE_CVAR NODE_CVAR + NODE_NTH_REF, +#define NODE_NTH_REF NODE_NTH_REF + NODE_BACK_REF, +#define NODE_BACK_REF NODE_BACK_REF + NODE_MATCH, +#define NODE_MATCH NODE_MATCH + NODE_MATCH2, +#define NODE_MATCH2 NODE_MATCH2 + NODE_MATCH3, +#define NODE_MATCH3 NODE_MATCH3 + NODE_LIT, +#define NODE_LIT NODE_LIT + NODE_STR, +#define NODE_STR NODE_STR + NODE_DSTR, +#define NODE_DSTR NODE_DSTR + NODE_XSTR, +#define NODE_XSTR NODE_XSTR + NODE_DXSTR, +#define NODE_DXSTR NODE_DXSTR + NODE_EVSTR, +#define NODE_EVSTR NODE_EVSTR + NODE_DREGX, +#define NODE_DREGX NODE_DREGX + NODE_ARGS, +#define NODE_ARGS NODE_ARGS + NODE_ARGS_AUX, +#define NODE_ARGS_AUX NODE_ARGS_AUX + NODE_OPT_ARG, +#define NODE_OPT_ARG NODE_OPT_ARG + NODE_KW_ARG, +#define NODE_KW_ARG NODE_KW_ARG + NODE_POSTARG, +#define NODE_POSTARG NODE_POSTARG + NODE_ARGSCAT, +#define NODE_ARGSCAT NODE_ARGSCAT + NODE_ARGSPUSH, +#define NODE_ARGSPUSH NODE_ARGSPUSH + NODE_SPLAT, +#define NODE_SPLAT NODE_SPLAT + NODE_BLOCK_PASS, +#define NODE_BLOCK_PASS NODE_BLOCK_PASS + NODE_DEFN, +#define NODE_DEFN NODE_DEFN + NODE_DEFS, +#define NODE_DEFS NODE_DEFS + NODE_ALIAS, +#define NODE_ALIAS NODE_ALIAS + NODE_VALIAS, +#define NODE_VALIAS NODE_VALIAS + NODE_UNDEF, +#define NODE_UNDEF NODE_UNDEF + NODE_CLASS, +#define NODE_CLASS NODE_CLASS + NODE_MODULE, +#define NODE_MODULE NODE_MODULE + NODE_SCLASS, +#define NODE_SCLASS NODE_SCLASS + NODE_COLON2, +#define NODE_COLON2 NODE_COLON2 + NODE_COLON3, +#define NODE_COLON3 NODE_COLON3 + NODE_DOT2, +#define NODE_DOT2 NODE_DOT2 + NODE_DOT3, +#define NODE_DOT3 NODE_DOT3 + NODE_FLIP2, +#define NODE_FLIP2 NODE_FLIP2 + NODE_FLIP3, +#define NODE_FLIP3 NODE_FLIP3 + NODE_SELF, +#define NODE_SELF NODE_SELF + NODE_NIL, +#define NODE_NIL NODE_NIL + NODE_TRUE, +#define NODE_TRUE NODE_TRUE + NODE_FALSE, +#define NODE_FALSE NODE_FALSE + NODE_ERRINFO, +#define NODE_ERRINFO NODE_ERRINFO + NODE_DEFINED, +#define NODE_DEFINED NODE_DEFINED + NODE_POSTEXE, +#define NODE_POSTEXE NODE_POSTEXE + NODE_DSYM, +#define NODE_DSYM NODE_DSYM + NODE_ATTRASGN, +#define NODE_ATTRASGN NODE_ATTRASGN + NODE_PRELUDE, +#define NODE_PRELUDE NODE_PRELUDE + NODE_LAMBDA, +#define NODE_LAMBDA NODE_LAMBDA + NODE_LAST +#define NODE_LAST NODE_LAST +}; + +typedef struct rb_code_location_struct { + int lineno; + int column; +} rb_code_location_t; + +typedef struct rb_code_range_struct { + rb_code_location_t first_loc; + rb_code_location_t last_loc; +} rb_code_range_t; + +typedef struct RNode { + VALUE flags; + union { + struct RNode *node; + ID id; + VALUE value; + VALUE (*cfunc)(ANYARGS); + ID *tbl; + } u1; + union { + struct RNode *node; + ID id; + long argc; + VALUE value; + } u2; + union { + struct 
RNode *node; + ID id; + long state; + struct rb_global_entry *entry; + struct rb_args_info *args; + long cnt; + VALUE value; + } u3; + rb_code_range_t nd_loc; +} NODE; + +#define RNODE(obj) (R_CAST(RNode)(obj)) + +/* FL : 0..4: T_TYPES, 5: KEEP_WB, 6: PROMOTED, 7: FINALIZE, 8: TAINT, 9: UNTRUSTED, 10: EXIVAR, 11: FREEZE */ +/* NODE_FL: 0..4: T_TYPES, 5: KEEP_WB, 6: PROMOTED, 7: NODE_FL_NEWLINE, + * 8..14: nd_type, + * 15..: nd_line + */ +#define NODE_FL_NEWLINE (((VALUE)1)<<7) + +#define NODE_TYPESHIFT 8 +#define NODE_TYPEMASK (((VALUE)0x7f)<<NODE_TYPESHIFT) + +#define nd_type(n) ((int) (((n)->flags & NODE_TYPEMASK)>>NODE_TYPESHIFT)) +#define nd_set_type(n,t) \ + (n)->flags=(((n)->flags&~NODE_TYPEMASK)|((((unsigned long)(t))<<NODE_TYPESHIFT)&NODE_TYPEMASK)) + +#define NODE_LSHIFT (NODE_TYPESHIFT+7) +#define NODE_LMASK (((SIGNED_VALUE)1<<(sizeof(VALUE)*CHAR_BIT-NODE_LSHIFT))-1) +#define nd_line(n) (int)(((SIGNED_VALUE)(n)->flags)>>NODE_LSHIFT) +#define nd_set_line(n,l) \ + (n)->flags=(((n)->flags&~((VALUE)(-1)<<NODE_LSHIFT))|((VALUE)((l)&NODE_LMASK)<<NODE_LSHIFT)) + +#define nd_first_column(n) ((int)((n)->nd_loc.first_loc.column)) +#define nd_set_first_column(n, v) ((n)->nd_loc.first_loc.column = (v)) +#define nd_first_lineno(n) ((int)((n)->nd_loc.first_loc.lineno)) +#define nd_set_first_lineno(n, v) ((n)->nd_loc.first_loc.lineno = (v)) + +#define nd_last_column(n) ((int)((n)->nd_loc.last_loc.column)) +#define nd_set_last_column(n, v) ((n)->nd_loc.last_loc.column = (v)) +#define nd_last_lineno(n) ((int)((n)->nd_loc.last_loc.lineno)) +#define nd_set_last_lineno(n, v) ((n)->nd_loc.last_loc.lineno = (v)) +#define nd_last_loc(n) ((n)->nd_loc.last_loc) +#define nd_set_last_loc(n, v) (nd_last_loc(n) = (v)) + +#define nd_head u1.node +#define nd_alen u2.argc +#define nd_next u3.node + +#define nd_cond u1.node +#define nd_body u2.node +#define nd_else u3.node + +#define nd_resq u2.node +#define nd_ensr u3.node + +#define nd_1st u1.node +#define nd_2nd u2.node + +#define nd_stts u1.node + +#define nd_entry u3.entry +#define nd_vid u1.id +#define nd_cflag u2.id +#define nd_cval u3.value + +#define nd_oid u1.id +#define nd_cnt u3.cnt +#define nd_tbl u1.tbl + +#define nd_var u1.node +#define nd_iter u3.node + +#define nd_value u2.node +#define nd_aid u3.id + +#define nd_lit u1.value + +#define nd_frml u2.argc +#define nd_rest u1.id +#define nd_opt u1.node +#define nd_pid u1.id +#define nd_plen u2.argc + +#define nd_recv u1.node +#define nd_mid u2.id +#define nd_args u3.node +#define nd_ainfo u3.args + +#define nd_noex u3.id +#define nd_defn u3.node + +#define nd_cfnc u1.cfunc +#define nd_argc u2.argc + +#define nd_cpath u1.node +#define nd_super u3.node + +#define nd_beg u1.node +#define nd_end u2.node +#define nd_state u3.state +#define nd_rval u2.value + +#define nd_nth u2.argc + +#define nd_tag u1.id + +#define nd_alias u1.id +#define nd_orig u2.id +#define nd_undef u2.node + +#define nd_compile_option u3.value + +#define NEW_NODE(t,a0,a1,a2) rb_node_newnode((t),(VALUE)(a0),(VALUE)(a1),(VALUE)(a2)) + +#define NEW_DEFN(i,a,d,p) NEW_NODE(NODE_DEFN,0,i,NEW_SCOPE(a,d)) +#define NEW_DEFS(r,i,a,d) NEW_NODE(NODE_DEFS,r,i,NEW_SCOPE(a,d)) +#define NEW_SCOPE(a,b) NEW_NODE(NODE_SCOPE,local_tbl(),b,a) +#define NEW_BLOCK(a) NEW_NODE(NODE_BLOCK,a,0,0) +#define NEW_IF(c,t,e) NEW_NODE(NODE_IF,c,t,e) +#define NEW_UNLESS(c,t,e) NEW_NODE(NODE_UNLESS,c,t,e) +#define NEW_CASE(h,b) NEW_NODE(NODE_CASE,h,b,0) +#define NEW_CASE2(b) NEW_NODE(NODE_CASE2,0,b,0) +#define NEW_WHEN(c,t,e) NEW_NODE(NODE_WHEN,c,t,e) +#define NEW_WHILE(c,b,n) NEW_NODE(NODE_WHILE,c,b,n) +#define NEW_UNTIL(c,b,n) NEW_NODE(NODE_UNTIL,c,b,n) +#define NEW_FOR(v,i,b) NEW_NODE(NODE_FOR,v,b,i) +#define NEW_ITER(a,b) NEW_NODE(NODE_ITER,0,NEW_SCOPE(a,b),0) +#define NEW_LAMBDA(a,b) NEW_NODE(NODE_LAMBDA,0,NEW_SCOPE(a,b),0) +#define NEW_BREAK(s) NEW_NODE(NODE_BREAK,s,0,0) +#define NEW_NEXT(s)
NEW_NODE(NODE_NEXT,s,0,0) +#define NEW_REDO() NEW_NODE(NODE_REDO,0,0,0) +#define NEW_RETRY() NEW_NODE(NODE_RETRY,0,0,0) +#define NEW_BEGIN(b) NEW_NODE(NODE_BEGIN,0,b,0) +#define NEW_RESCUE(b,res,e) NEW_NODE(NODE_RESCUE,b,res,e) +#define NEW_RESBODY(a,ex,n) NEW_NODE(NODE_RESBODY,n,ex,a) +#define NEW_ENSURE(b,en) NEW_NODE(NODE_ENSURE,b,0,en) +#define NEW_RETURN(s) NEW_NODE(NODE_RETURN,s,0,0) +#define NEW_YIELD(a) NEW_NODE(NODE_YIELD,a,0,0) +#define NEW_LIST(a) NEW_ARRAY(a) +#define NEW_ARRAY(a) NEW_NODE(NODE_ARRAY,a,1,0) +#define NEW_ZARRAY() NEW_NODE(NODE_ZARRAY,0,0,0) +#define NEW_HASH(a) NEW_NODE(NODE_HASH,a,0,0) +#define NEW_MASGN(l,r) NEW_NODE(NODE_MASGN,l,0,r) +#define NEW_GASGN(v,val) NEW_NODE(NODE_GASGN,v,val,rb_global_entry(v)) +#define NEW_LASGN(v,val) NEW_NODE(NODE_LASGN,v,val,0) +#define NEW_DASGN(v,val) NEW_NODE(NODE_DASGN,v,val,0) +#define NEW_DASGN_CURR(v,val) NEW_NODE(NODE_DASGN_CURR,v,val,0) +#define NEW_IASGN(v,val) NEW_NODE(NODE_IASGN,v,val,0) +#define NEW_CDECL(v,val,path) NEW_NODE(NODE_CDECL,v,val,path) +#define NEW_CVASGN(v,val) NEW_NODE(NODE_CVASGN,v,val,0) +#define NEW_OP_ASGN1(p,id,a) NEW_NODE(NODE_OP_ASGN1,p,id,a) +#define NEW_OP_ASGN2(r,t,i,o,val) NEW_NODE(NODE_OP_ASGN2,r,val,NEW_OP_ASGN22(i,o,t)) +#define NEW_OP_ASGN22(i,o,t) NEW_NODE(NODE_OP_ASGN2,i,o,t) +#define NEW_OP_ASGN_OR(i,val) NEW_NODE(NODE_OP_ASGN_OR,i,val,0) +#define NEW_OP_ASGN_AND(i,val) NEW_NODE(NODE_OP_ASGN_AND,i,val,0) +#define NEW_OP_CDECL(v,op,val) NEW_NODE(NODE_OP_CDECL,v,val,op) +#define NEW_GVAR(v) NEW_NODE(NODE_GVAR,v,0,rb_global_entry(v)) +#define NEW_LVAR(v) NEW_NODE(NODE_LVAR,v,0,0) +#define NEW_DVAR(v) NEW_NODE(NODE_DVAR,v,0,0) +#define NEW_IVAR(v) NEW_NODE(NODE_IVAR,v,0,0) +#define NEW_CONST(v) NEW_NODE(NODE_CONST,v,0,0) +#define NEW_CVAR(v) NEW_NODE(NODE_CVAR,v,0,0) +#define NEW_NTH_REF(n) NEW_NODE(NODE_NTH_REF,0,n,0) +#define NEW_BACK_REF(n) NEW_NODE(NODE_BACK_REF,0,n,0) +#define NEW_MATCH(c) NEW_NODE(NODE_MATCH,c,0,0) +#define NEW_MATCH2(n1,n2) NEW_NODE(NODE_MATCH2,n1,n2,0) +#define NEW_MATCH3(r,n2) NEW_NODE(NODE_MATCH3,r,n2,0) +#define NEW_LIT(l) NEW_NODE(NODE_LIT,l,0,0) +#define NEW_STR(s) NEW_NODE(NODE_STR,s,0,0) +#define NEW_DSTR(s) NEW_NODE(NODE_DSTR,s,1,0) +#define NEW_XSTR(s) NEW_NODE(NODE_XSTR,s,0,0) +#define NEW_DXSTR(s) NEW_NODE(NODE_DXSTR,s,0,0) +#define NEW_DSYM(s) NEW_NODE(NODE_DSYM,s,0,0) +#define NEW_EVSTR(n) NEW_NODE(NODE_EVSTR,0,(n),0) +#define NEW_CALL(r,m,a) NEW_NODE(NODE_CALL,r,m,a) +#define NEW_OPCALL(r,m,a) NEW_NODE(NODE_OPCALL,r,m,a) +#define NEW_FCALL(m,a) NEW_NODE(NODE_FCALL,0,m,a) +#define NEW_VCALL(m) NEW_NODE(NODE_VCALL,0,m,0) +#define NEW_SUPER(a) NEW_NODE(NODE_SUPER,0,0,a) +#define NEW_ZSUPER() NEW_NODE(NODE_ZSUPER,0,0,0) +#define NEW_ARGS_AUX(r,b) NEW_NODE(NODE_ARGS_AUX,r,b,0) +#define NEW_OPT_ARG(i,v) NEW_NODE(NODE_OPT_ARG,i,v,0) +#define NEW_KW_ARG(i,v) NEW_NODE(NODE_KW_ARG,i,v,0) +#define NEW_POSTARG(i,v) NEW_NODE(NODE_POSTARG,i,v,0) +#define NEW_ARGSCAT(a,b) NEW_NODE(NODE_ARGSCAT,a,b,0) +#define NEW_ARGSPUSH(a,b) NEW_NODE(NODE_ARGSPUSH,a,b,0) +#define NEW_SPLAT(a) NEW_NODE(NODE_SPLAT,a,0,0) +#define NEW_BLOCK_PASS(b) NEW_NODE(NODE_BLOCK_PASS,0,b,0) +#define NEW_ALIAS(n,o) NEW_NODE(NODE_ALIAS,n,o,0) +#define NEW_VALIAS(n,o) NEW_NODE(NODE_VALIAS,n,o,0) +#define NEW_UNDEF(i) NEW_NODE(NODE_UNDEF,0,i,0) +#define NEW_CLASS(n,b,s) NEW_NODE(NODE_CLASS,n,NEW_SCOPE(0,b),(s)) +#define NEW_SCLASS(r,b) NEW_NODE(NODE_SCLASS,r,NEW_SCOPE(0,b),0) +#define NEW_MODULE(n,b) NEW_NODE(NODE_MODULE,n,NEW_SCOPE(0,b),0) +#define NEW_COLON2(c,i) NEW_NODE(NODE_COLON2,c,i,0) 
+#define NEW_COLON3(i) NEW_NODE(NODE_COLON3,0,i,0) +#define NEW_DOT2(b,e) NEW_NODE(NODE_DOT2,b,e,0) +#define NEW_DOT3(b,e) NEW_NODE(NODE_DOT3,b,e,0) +#define NEW_SELF() NEW_NODE(NODE_SELF,0,0,0) +#define NEW_NIL() NEW_NODE(NODE_NIL,0,0,0) +#define NEW_TRUE() NEW_NODE(NODE_TRUE,0,0,0) +#define NEW_FALSE() NEW_NODE(NODE_FALSE,0,0,0) +#define NEW_ERRINFO() NEW_NODE(NODE_ERRINFO,0,0,0) +#define NEW_DEFINED(e) NEW_NODE(NODE_DEFINED,e,0,0) +#define NEW_PREEXE(b) NEW_SCOPE(b) +#define NEW_POSTEXE(b) NEW_NODE(NODE_POSTEXE,0,b,0) +#define NEW_ATTRASGN(r,m,a) NEW_NODE(NODE_ATTRASGN,r,m,a) +#define NEW_PRELUDE(p,b,o) NEW_NODE(NODE_PRELUDE,p,b,o) + +#define NODE_SPECIAL_REQUIRED_KEYWORD ((NODE *)-1) +#define NODE_SPECIAL_NO_NAME_REST ((NODE *)-1) + +RUBY_SYMBOL_EXPORT_BEGIN + +typedef struct node_buffer_struct node_buffer_t; +/* T_IMEMO/ast */ +typedef struct rb_ast_struct { + VALUE flags; + VALUE reserved1; + NODE *root; + node_buffer_t *node_buffer; + VALUE mark_ary; +} rb_ast_t; +rb_ast_t *rb_ast_new(); +void rb_ast_mark(rb_ast_t*); +void rb_ast_dispose(rb_ast_t*); +void rb_ast_free(rb_ast_t*); +void rb_ast_add_mark_object(rb_ast_t*, VALUE); +void rb_ast_delete_mark_object(rb_ast_t*, VALUE); +NODE *rb_ast_newnode(rb_ast_t*); +void rb_ast_delete_node(rb_ast_t*, NODE *n); + +VALUE rb_parser_new(void); +VALUE rb_parser_end_seen_p(VALUE); +VALUE rb_parser_encoding(VALUE); +VALUE rb_parser_get_yydebug(VALUE); +VALUE rb_parser_set_yydebug(VALUE, VALUE); +VALUE rb_parser_dump_tree(NODE *node, int comment); +void rb_parser_set_options(VALUE, int, int, int, int); + +rb_ast_t *rb_parser_compile_cstr(VALUE, const char*, const char*, int, int); +rb_ast_t *rb_parser_compile_string(VALUE, const char*, VALUE, int); +rb_ast_t *rb_parser_compile_file(VALUE, const char*, VALUE, int); +rb_ast_t *rb_parser_compile_string_path(VALUE vparser, VALUE fname, VALUE src, int line); +rb_ast_t *rb_parser_compile_file_path(VALUE vparser, VALUE fname, VALUE input, int line); + +rb_ast_t *rb_compile_cstr(const char*, const char*, int, int); +rb_ast_t *rb_compile_string(const char*, VALUE, int); +rb_ast_t *rb_compile_file(const char*, VALUE, int); + +void rb_node_init(NODE *n, enum node_type type, VALUE a0, VALUE a1, VALUE a2); + +const struct kwtable *rb_reserved_word(const char *, unsigned int); + +struct rb_args_info { + NODE *pre_init; + NODE *post_init; + + int pre_args_num; /* count of mandatory pre-arguments */ + int post_args_num; /* count of mandatory post-arguments */ + + ID first_post_arg; + + ID rest_arg; + ID block_arg; + + NODE *kw_args; + NODE *kw_rest_arg; + + NODE *opt_args; +}; + +struct parser_params; +void *rb_parser_malloc(struct parser_params *, size_t); +void *rb_parser_realloc(struct parser_params *, void *, size_t); +void *rb_parser_calloc(struct parser_params *, size_t, size_t); +void rb_parser_free(struct parser_params *, void *); +void rb_parser_printf(struct parser_params *parser, const char *fmt, ...); + +RUBY_SYMBOL_EXPORT_END + +#if defined(__cplusplus) +#if 0 +{ /* satisfy cc-mode */ +#endif +} /* extern "C" { */ +#endif + +#endif /* RUBY_NODE_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/node_name.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/node_name.inc new file mode 100644 index 0000000..c86368a --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/node_name.inc @@ -0,0 +1,198 @@ + case NODE_SCOPE: + return "NODE_SCOPE"; + case NODE_BLOCK: + return "NODE_BLOCK"; + case NODE_IF: + return "NODE_IF"; + case NODE_UNLESS: + return "NODE_UNLESS"; + case NODE_CASE: + 
return "NODE_CASE"; + case NODE_CASE2: + return "NODE_CASE2"; + case NODE_WHEN: + return "NODE_WHEN"; + case NODE_WHILE: + return "NODE_WHILE"; + case NODE_UNTIL: + return "NODE_UNTIL"; + case NODE_ITER: + return "NODE_ITER"; + case NODE_FOR: + return "NODE_FOR"; + case NODE_BREAK: + return "NODE_BREAK"; + case NODE_NEXT: + return "NODE_NEXT"; + case NODE_REDO: + return "NODE_REDO"; + case NODE_RETRY: + return "NODE_RETRY"; + case NODE_BEGIN: + return "NODE_BEGIN"; + case NODE_RESCUE: + return "NODE_RESCUE"; + case NODE_RESBODY: + return "NODE_RESBODY"; + case NODE_ENSURE: + return "NODE_ENSURE"; + case NODE_AND: + return "NODE_AND"; + case NODE_OR: + return "NODE_OR"; + case NODE_MASGN: + return "NODE_MASGN"; + case NODE_LASGN: + return "NODE_LASGN"; + case NODE_DASGN: + return "NODE_DASGN"; + case NODE_DASGN_CURR: + return "NODE_DASGN_CURR"; + case NODE_GASGN: + return "NODE_GASGN"; + case NODE_IASGN: + return "NODE_IASGN"; + case NODE_CDECL: + return "NODE_CDECL"; + case NODE_CVASGN: + return "NODE_CVASGN"; + case NODE_OP_ASGN1: + return "NODE_OP_ASGN1"; + case NODE_OP_ASGN2: + return "NODE_OP_ASGN2"; + case NODE_OP_ASGN_AND: + return "NODE_OP_ASGN_AND"; + case NODE_OP_ASGN_OR: + return "NODE_OP_ASGN_OR"; + case NODE_OP_CDECL: + return "NODE_OP_CDECL"; + case NODE_CALL: + return "NODE_CALL"; + case NODE_OPCALL: + return "NODE_OPCALL"; + case NODE_FCALL: + return "NODE_FCALL"; + case NODE_VCALL: + return "NODE_VCALL"; + case NODE_QCALL: + return "NODE_QCALL"; + case NODE_SUPER: + return "NODE_SUPER"; + case NODE_ZSUPER: + return "NODE_ZSUPER"; + case NODE_ARRAY: + return "NODE_ARRAY"; + case NODE_ZARRAY: + return "NODE_ZARRAY"; + case NODE_VALUES: + return "NODE_VALUES"; + case NODE_HASH: + return "NODE_HASH"; + case NODE_RETURN: + return "NODE_RETURN"; + case NODE_YIELD: + return "NODE_YIELD"; + case NODE_LVAR: + return "NODE_LVAR"; + case NODE_DVAR: + return "NODE_DVAR"; + case NODE_GVAR: + return "NODE_GVAR"; + case NODE_IVAR: + return "NODE_IVAR"; + case NODE_CONST: + return "NODE_CONST"; + case NODE_CVAR: + return "NODE_CVAR"; + case NODE_NTH_REF: + return "NODE_NTH_REF"; + case NODE_BACK_REF: + return "NODE_BACK_REF"; + case NODE_MATCH: + return "NODE_MATCH"; + case NODE_MATCH2: + return "NODE_MATCH2"; + case NODE_MATCH3: + return "NODE_MATCH3"; + case NODE_LIT: + return "NODE_LIT"; + case NODE_STR: + return "NODE_STR"; + case NODE_DSTR: + return "NODE_DSTR"; + case NODE_XSTR: + return "NODE_XSTR"; + case NODE_DXSTR: + return "NODE_DXSTR"; + case NODE_EVSTR: + return "NODE_EVSTR"; + case NODE_DREGX: + return "NODE_DREGX"; + case NODE_ARGS: + return "NODE_ARGS"; + case NODE_ARGS_AUX: + return "NODE_ARGS_AUX"; + case NODE_OPT_ARG: + return "NODE_OPT_ARG"; + case NODE_KW_ARG: + return "NODE_KW_ARG"; + case NODE_POSTARG: + return "NODE_POSTARG"; + case NODE_ARGSCAT: + return "NODE_ARGSCAT"; + case NODE_ARGSPUSH: + return "NODE_ARGSPUSH"; + case NODE_SPLAT: + return "NODE_SPLAT"; + case NODE_BLOCK_PASS: + return "NODE_BLOCK_PASS"; + case NODE_DEFN: + return "NODE_DEFN"; + case NODE_DEFS: + return "NODE_DEFS"; + case NODE_ALIAS: + return "NODE_ALIAS"; + case NODE_VALIAS: + return "NODE_VALIAS"; + case NODE_UNDEF: + return "NODE_UNDEF"; + case NODE_CLASS: + return "NODE_CLASS"; + case NODE_MODULE: + return "NODE_MODULE"; + case NODE_SCLASS: + return "NODE_SCLASS"; + case NODE_COLON2: + return "NODE_COLON2"; + case NODE_COLON3: + return "NODE_COLON3"; + case NODE_DOT2: + return "NODE_DOT2"; + case NODE_DOT3: + return "NODE_DOT3"; + case NODE_FLIP2: + return "NODE_FLIP2"; + case NODE_FLIP3: 
+ return "NODE_FLIP3"; + case NODE_SELF: + return "NODE_SELF"; + case NODE_NIL: + return "NODE_NIL"; + case NODE_TRUE: + return "NODE_TRUE"; + case NODE_FALSE: + return "NODE_FALSE"; + case NODE_ERRINFO: + return "NODE_ERRINFO"; + case NODE_DEFINED: + return "NODE_DEFINED"; + case NODE_POSTEXE: + return "NODE_POSTEXE"; + case NODE_DSYM: + return "NODE_DSYM"; + case NODE_ATTRASGN: + return "NODE_ATTRASGN"; + case NODE_PRELUDE: + return "NODE_PRELUDE"; + case NODE_LAMBDA: + return "NODE_LAMBDA"; diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/opt_sc.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/opt_sc.inc new file mode 100644 index 0000000..288808a --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/opt_sc.inc @@ -0,0 +1,1601 @@ +/* -*-c-*- *********************************************************/ +/*******************************************************************/ +/*******************************************************************/ +/** + This file is for threaded code. + + ---- + This file is auto generated by insns2vm.rb + DO NOT TOUCH! + + If you want to fix something, you must edit 'template/opt_sc.inc.tmpl' + or tool/insns2vm.rb + */ + +#define SC_STATE_SIZE 6 + +#define SCS_XX 1 +#define SCS_AX 2 +#define SCS_BX 3 +#define SCS_AB 4 +#define SCS_BA 5 + +#define SC_ERROR 0xffffffff + +static const VALUE sc_insn_info[][SC_STATE_SIZE] = { + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, 
+SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, 
+SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, 
+SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, 
+SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR}, + { +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR, +SC_ERROR} +}; + +ASSERT_VM_INSTRUCTION_SIZE(sc_insn_info); + +static const VALUE sc_insn_next[] = { + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX, + SCS_XX +}; + +ASSERT_VM_INSTRUCTION_SIZE(sc_insn_next); diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/optinsn.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/optinsn.inc new file mode 100644 index 0000000..72cbafd --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/optinsn.inc @@ -0,0 +1,103 @@ +/* -*-c-*- *********************************************************/ 
+/*******************************************************************/ +/*******************************************************************/ +/** + This file is for threaded code. + + ---- + This file is auto generated by insns2vm.rb + DO NOT TOUCH! + + If you want to fix something, you must edit 'template/optinsn.inc.tmpl' + or tool/insns2vm.rb + */ + +static INSN * +insn_operands_unification(INSN *insnobj) +{ +#ifdef OPT_OPERANDS_UNIFICATION + /* optimize rule */ + switch(insnobj->insn_id){ + case BIN(getlocal): + if ( + insnobj->operands[1] == INT2FIX(0) && + 1) { + insnobj->insn_id = BIN(getlocal_OP__WC__0); + insnobj->operand_size = 1; + break; + } + if ( + insnobj->operands[1] == INT2FIX(1) && + 1) { + insnobj->insn_id = BIN(getlocal_OP__WC__1); + insnobj->operand_size = 1; + break; + } + break; + case BIN(setlocal): + if ( + insnobj->operands[1] == INT2FIX(0) && + 1) { + insnobj->insn_id = BIN(setlocal_OP__WC__0); + insnobj->operand_size = 1; + break; + } + if ( + insnobj->operands[1] == INT2FIX(1) && + 1) { + insnobj->insn_id = BIN(setlocal_OP__WC__1); + insnobj->operand_size = 1; + break; + } + break; + case BIN(putobject): + if ( + insnobj->operands[0] == INT2FIX(0) && + 1) { + insnobj->insn_id = BIN(putobject_OP_INT2FIX_O_0_C_); + insnobj->operand_size = 0; + break; + } + if ( + insnobj->operands[0] == INT2FIX(1) && + 1) { + insnobj->insn_id = BIN(putobject_OP_INT2FIX_O_1_C_); + insnobj->operand_size = 0; + break; + } + break; + + default: + /* do nothing */; + break; + } +#endif + return insnobj; +} + +int +rb_insn_unified_local_var_level(VALUE insn) +{ +#ifdef OPT_OPERANDS_UNIFICATION + /* optimize rule */ + switch (insn) { + case BIN(getlocal_OP__WC__0): + return 0; + case BIN(getlocal_OP__WC__1): + return 1; + case BIN(setlocal_OP__WC__0): + return 0; + case BIN(setlocal_OP__WC__1): + return 1; + case BIN(putobject_OP_INT2FIX_O_0_C_): + return INT2FIX(0); + case BIN(putobject_OP_INT2FIX_O_1_C_): + return INT2FIX(1); + + default: + /* do nothing */; + break; + } +#endif + return -1; +} diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/optunifs.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/optunifs.inc new file mode 100644 index 0000000..86e8653 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/optunifs.inc @@ -0,0 +1,61 @@ +/* -*-c-*- *********************************************************/ +/*******************************************************************/ +/*******************************************************************/ +/** + This file is for threaded code. + + ---- + This file is auto generated by insns2vm.rb + DO NOT TOUCH! + + If you want to fix something, you must edit 'template/optunifs.inc.tmpl' + or tool/insns2vm.rb + */ + +/* + static const int UNIFIED_insn_name_1[] = {id, size, ...}; + static const int UNIFIED_insn_name_2[] = {id, size, ...}; + ... + + static const int *const UNIFIED_insn_name[] = {size, + UNIFIED_insn_name_1, + UNIFIED_insn_name_2, ...}; + ... 
+ + static const int *const *const unified_insns_data[] = { + UNIFIED_insn_nameA, + UNIFIED_insn_nameB, ...}; + */ + + +static const int *const *const unified_insns_data[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +}; + +#undef GET_INSN_NAME + +ASSERT_VM_INSTRUCTION_SIZE(unified_insns_data); diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/parse.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/parse.h new file mode 100644 index 0000000..cfc2b83 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/parse.h @@ -0,0 +1,200 @@ +/* A Bison parser, made by GNU Bison 2.5. */ + +/* Bison interface for Yacc-like parsers in C + + Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +/* As a special exception, you may create a larger work that contains + part or all of the Bison parser skeleton and distribute that work + under terms of your choice, so long as that work isn't itself a + parser generator using the skeleton or a modified version thereof + as a parser skeleton. Alternatively, if you modify or redistribute + the parser skeleton itself, you may (at your option) remove this + special exception, which will cause the skeleton and the resulting + Bison output files to be licensed under the GNU General Public + License without this special exception. + + This special exception was added by the Free Software Foundation in + version 2.2 of Bison. */ + + +/* Tokens. */ +#ifndef YYTOKENTYPE +# define YYTOKENTYPE + /* Put the tokens into the symbol table, so that GDB and other debuggers + know about them. 
*/ + enum yytokentype { + END_OF_INPUT = 0, + keyword_class = 258, + keyword_module = 259, + keyword_def = 260, + keyword_undef = 261, + keyword_begin = 262, + keyword_rescue = 263, + keyword_ensure = 264, + keyword_end = 265, + keyword_if = 266, + keyword_unless = 267, + keyword_then = 268, + keyword_elsif = 269, + keyword_else = 270, + keyword_case = 271, + keyword_when = 272, + keyword_while = 273, + keyword_until = 274, + keyword_for = 275, + keyword_break = 276, + keyword_next = 277, + keyword_redo = 278, + keyword_retry = 279, + keyword_in = 280, + keyword_do = 281, + keyword_do_cond = 282, + keyword_do_block = 283, + keyword_do_LAMBDA = 284, + keyword_return = 285, + keyword_yield = 286, + keyword_super = 287, + keyword_self = 288, + keyword_nil = 289, + keyword_true = 290, + keyword_false = 291, + keyword_and = 292, + keyword_or = 293, + keyword_not = 294, + modifier_if = 295, + modifier_unless = 296, + modifier_while = 297, + modifier_until = 298, + modifier_rescue = 299, + keyword_alias = 300, + keyword_defined = 301, + keyword_BEGIN = 302, + keyword_END = 303, + keyword__LINE__ = 304, + keyword__FILE__ = 305, + keyword__ENCODING__ = 306, + tIDENTIFIER = 307, + tFID = 308, + tGVAR = 309, + tIVAR = 310, + tCONSTANT = 311, + tCVAR = 312, + tLABEL = 313, + tINTEGER = 314, + tFLOAT = 315, + tRATIONAL = 316, + tIMAGINARY = 317, + tSTRING_CONTENT = 318, + tCHAR = 319, + tNTH_REF = 320, + tBACK_REF = 321, + tREGEXP_END = 322, + tUPLUS = 130, + tUMINUS = 131, + tPOW = 132, + tCMP = 133, + tEQ = 138, + tEQQ = 139, + tNEQ = 140, + tGEQ = 137, + tLEQ = 136, + tANDOP = 146, + tOROP = 147, + tMATCH = 141, + tNMATCH = 142, + tDOT2 = 128, + tDOT3 = 129, + tAREF = 143, + tASET = 144, + tLSHFT = 134, + tRSHFT = 135, + tANDDOT = 148, + tCOLON2 = 145, + tCOLON3 = 323, + tOP_ASGN = 324, + tASSOC = 325, + tLPAREN = 326, + tLPAREN_ARG = 327, + tRPAREN = 328, + tLBRACK = 329, + tLBRACE = 330, + tLBRACE_ARG = 331, + tSTAR = 332, + tDSTAR = 333, + tAMPER = 334, + tLAMBDA = 335, + tSYMBEG = 336, + tSTRING_BEG = 337, + tXSTRING_BEG = 338, + tREGEXP_BEG = 339, + tWORDS_BEG = 340, + tQWORDS_BEG = 341, + tSYMBOLS_BEG = 342, + tQSYMBOLS_BEG = 343, + tSTRING_DBEG = 344, + tSTRING_DEND = 345, + tSTRING_DVAR = 346, + tSTRING_END = 347, + tLAMBEG = 348, + tLABEL_END = 349, + tLOWEST = 350, + tUMINUS_NUM = 351, + tLAST_TOKEN = 352 + }; +#endif + + + +#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED +typedef union YYSTYPE +{ + +/* Line 2068 of yacc.c */ + + VALUE val; + NODE *node; + ID id; + int num; + const struct vtable *vars; + struct rb_strterm_struct *strterm; + + + +/* Line 2068 of yacc.c */ +} YYSTYPE; +# define YYSTYPE_IS_TRIVIAL 1 +# define yystype YYSTYPE /* obsolescent; will be withdrawn */ +# define YYSTYPE_IS_DECLARED 1 +#endif + + + +#if ! defined YYLTYPE && ! 
defined YYLTYPE_IS_DECLARED +typedef struct YYLTYPE +{ + int first_line; + int first_column; + int last_line; + int last_column; +} YYLTYPE; +# define yyltype YYLTYPE /* obsolescent; will be withdrawn */ +# define YYLTYPE_IS_DECLARED 1 +# define YYLTYPE_IS_TRIVIAL 1 +#endif + + + diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/probes_helper.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/probes_helper.h new file mode 100644 index 0000000..1393436 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/probes_helper.h @@ -0,0 +1,43 @@ +#ifndef RUBY_PROBES_HELPER_H +#define RUBY_PROBES_HELPER_H + +#include "ruby/ruby.h" +#include "probes.h" + +struct ruby_dtrace_method_hook_args { + const char *classname; + const char *methodname; + const char *filename; + int line_no; + volatile VALUE klass; + volatile VALUE name; +}; + +NOINLINE(int rb_dtrace_setup(rb_execution_context_t *, VALUE, ID, struct ruby_dtrace_method_hook_args *)); + +#define RUBY_DTRACE_METHOD_HOOK(name, ec, klazz, id) \ +do { \ + if (UNLIKELY(RUBY_DTRACE_##name##_ENABLED())) { \ + struct ruby_dtrace_method_hook_args args; \ + if (rb_dtrace_setup(ec, klazz, id, &args)) { \ + RUBY_DTRACE_##name(args.classname, \ + args.methodname, \ + args.filename, \ + args.line_no); \ + } \ + } \ +} while (0) + +#define RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, klass, id) \ + RUBY_DTRACE_METHOD_HOOK(METHOD_ENTRY, ec, klass, id) + +#define RUBY_DTRACE_METHOD_RETURN_HOOK(ec, klass, id) \ + RUBY_DTRACE_METHOD_HOOK(METHOD_RETURN, ec, klass, id) + +#define RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, klass, id) \ + RUBY_DTRACE_METHOD_HOOK(CMETHOD_ENTRY, ec, klass, id) + +#define RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, klass, id) \ + RUBY_DTRACE_METHOD_HOOK(CMETHOD_RETURN, ec, klass, id) + +#endif /* RUBY_PROBES_HELPER_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/regenc.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/regenc.h new file mode 100644 index 0000000..969e114 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/regenc.h @@ -0,0 +1,254 @@ +#ifndef ONIGMO_REGENC_H +#define ONIGMO_REGENC_H +/********************************************************************** + regenc.h - Onigmo (Oniguruma-mod) (regular expression library) +**********************************************************************/ +/*- + * Copyright (c) 2002-2008 K.Kosako + * Copyright (c) 2011-2016 K.Takata + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if !defined(RUBY) && (defined(RUBY_EXPORT) || defined(ONIG_ENC_REGISTER)) +# define RUBY +#endif +#ifdef RUBY +# ifndef ONIGMO_REGINT_H +# ifndef RUBY_EXTERN +# include "ruby/config.h" +# include "ruby/defines.h" +# endif +# endif +#else /* RUBY */ +# ifndef PACKAGE +/* PACKAGE is defined in config.h */ +# include "config.h" +# endif +#endif /* RUBY */ + +#ifdef ONIG_ESCAPE_UCHAR_COLLISION +# undef ONIG_ESCAPE_UCHAR_COLLISION +#endif + +#ifdef RUBY +# include "ruby/onigmo.h" +#else +# include "onigmo.h" +#endif + +RUBY_SYMBOL_EXPORT_BEGIN + +typedef struct { + OnigCodePoint from; + OnigCodePoint to; +} OnigPairCaseFoldCodes; + + +#ifndef NULL +# define NULL ((void* )0) +#endif + +#ifndef TRUE +# define TRUE 1 +#endif + +#ifndef FALSE +# define FALSE 0 +#endif + +#ifndef ARG_UNUSED +# if defined(__GNUC__) +# define ARG_UNUSED __attribute__ ((unused)) +# else +# define ARG_UNUSED +# endif +#endif + +#define ONIG_IS_NULL(p) (((void*)(p)) == (void*)0) +#define ONIG_IS_NOT_NULL(p) (((void*)(p)) != (void*)0) +#define ONIG_CHECK_NULL_RETURN(p) if (ONIG_IS_NULL(p)) return NULL +#define ONIG_CHECK_NULL_RETURN_VAL(p,val) if (ONIG_IS_NULL(p)) return (val) + +#define enclen(enc,p,e) ((enc->max_enc_len == enc->min_enc_len) ? enc->min_enc_len : ONIGENC_MBC_ENC_LEN(enc,p,e)) + +/* character types bit flag */ +#define BIT_CTYPE_NEWLINE (1<< ONIGENC_CTYPE_NEWLINE) +#define BIT_CTYPE_ALPHA (1<< ONIGENC_CTYPE_ALPHA) +#define BIT_CTYPE_BLANK (1<< ONIGENC_CTYPE_BLANK) +#define BIT_CTYPE_CNTRL (1<< ONIGENC_CTYPE_CNTRL) +#define BIT_CTYPE_DIGIT (1<< ONIGENC_CTYPE_DIGIT) +#define BIT_CTYPE_GRAPH (1<< ONIGENC_CTYPE_GRAPH) +#define BIT_CTYPE_LOWER (1<< ONIGENC_CTYPE_LOWER) +#define BIT_CTYPE_PRINT (1<< ONIGENC_CTYPE_PRINT) +#define BIT_CTYPE_PUNCT (1<< ONIGENC_CTYPE_PUNCT) +#define BIT_CTYPE_SPACE (1<< ONIGENC_CTYPE_SPACE) +#define BIT_CTYPE_UPPER (1<< ONIGENC_CTYPE_UPPER) +#define BIT_CTYPE_XDIGIT (1<< ONIGENC_CTYPE_XDIGIT) +#define BIT_CTYPE_WORD (1<< ONIGENC_CTYPE_WORD) +#define BIT_CTYPE_ALNUM (1<< ONIGENC_CTYPE_ALNUM) +#define BIT_CTYPE_ASCII (1<< ONIGENC_CTYPE_ASCII) + +#define CTYPE_TO_BIT(ctype) (1<<(ctype)) +#define CTYPE_IS_WORD_GRAPH_PRINT(ctype) \ + ((ctype) == ONIGENC_CTYPE_WORD || (ctype) == ONIGENC_CTYPE_GRAPH ||\ + (ctype) == ONIGENC_CTYPE_PRINT) + + +typedef struct { + short int len; + const UChar name[6]; + int ctype; +} PosixBracketEntryType; + +#define POSIX_BRACKET_ENTRY_INIT(name, ctype) \ + {(short int )(sizeof(name) - 1), (name), (ctype)} + +#ifndef numberof +# define numberof(array) (int )(sizeof(array) / sizeof((array)[0])) +#endif + + +#define USE_CRNL_AS_LINE_TERMINATOR +#define USE_UNICODE_PROPERTIES +#define USE_UNICODE_AGE_PROPERTIES +/* #define USE_UNICODE_CASE_FOLD_TURKISH_AZERI */ +/* #define USE_UNICODE_ALL_LINE_TERMINATORS */ /* see Unicode.org UTS #18 */ + + +#define ONIG_ENCODING_INIT_DEFAULT ONIG_ENCODING_ASCII + +/* for encoding system implementation (internal) */ +ONIG_EXTERN int onigenc_ascii_apply_all_case_fold(OnigCaseFoldType flag, 
OnigApplyAllCaseFoldFunc f, void* arg, OnigEncoding enc); +ONIG_EXTERN int onigenc_ascii_get_case_fold_codes_by_str(OnigCaseFoldType flag, const OnigUChar* p, const OnigUChar* end, OnigCaseFoldCodeItem items[], OnigEncoding enc); +ONIG_EXTERN int onigenc_apply_all_case_fold_with_map(int map_size, const OnigPairCaseFoldCodes map[], int ess_tsett_flag, OnigCaseFoldType flag, OnigApplyAllCaseFoldFunc f, void* arg); +ONIG_EXTERN int onigenc_get_case_fold_codes_by_str_with_map(int map_size, const OnigPairCaseFoldCodes map[], int ess_tsett_flag, OnigCaseFoldType flag, const OnigUChar* p, const OnigUChar* end, OnigCaseFoldCodeItem items[]); +ONIG_EXTERN int onigenc_not_support_get_ctype_code_range(OnigCtype ctype, OnigCodePoint* sb_out, const OnigCodePoint* ranges[], OnigEncoding enc); +ONIG_EXTERN int onigenc_is_mbc_newline_0x0a(const UChar* p, const UChar* end, OnigEncoding enc); +ONIG_EXTERN int onigenc_single_byte_ascii_only_case_map(OnigCaseFoldType* flagP, const OnigUChar** pp, const OnigUChar* end, OnigUChar* to, OnigUChar* to_end, const struct OnigEncodingTypeST* enc); + + +/* methods for single byte encoding */ +ONIG_EXTERN int onigenc_ascii_mbc_case_fold(OnigCaseFoldType flag, const UChar** p, const UChar* end, UChar* lower, OnigEncoding enc); +ONIG_EXTERN int onigenc_single_byte_mbc_enc_len(const UChar* p, const UChar* e, OnigEncoding enc); +ONIG_EXTERN OnigCodePoint onigenc_single_byte_mbc_to_code(const UChar* p, const UChar* end, OnigEncoding enc); +ONIG_EXTERN int onigenc_single_byte_code_to_mbclen(OnigCodePoint code, OnigEncoding enc); +ONIG_EXTERN int onigenc_single_byte_code_to_mbc(OnigCodePoint code, UChar *buf, OnigEncoding enc); +ONIG_EXTERN UChar* onigenc_single_byte_left_adjust_char_head(const UChar* start, const UChar* s, const OnigUChar* end, OnigEncoding enc); +ONIG_EXTERN int onigenc_always_true_is_allowed_reverse_match(const UChar* s, const UChar* end, OnigEncoding enc); +ONIG_EXTERN int onigenc_always_false_is_allowed_reverse_match(const UChar* s, const UChar* end, OnigEncoding enc); +ONIG_EXTERN int onigenc_ascii_is_code_ctype(OnigCodePoint code, unsigned int ctype, OnigEncoding enc); + +/* methods for multi byte encoding */ +ONIG_EXTERN OnigCodePoint onigenc_mbn_mbc_to_code(OnigEncoding enc, const UChar* p, const UChar* end); +ONIG_EXTERN int onigenc_mbn_mbc_case_fold(OnigEncoding enc, OnigCaseFoldType flag, const UChar** p, const UChar* end, UChar* lower); +ONIG_EXTERN int onigenc_mb2_code_to_mbclen(OnigCodePoint code, OnigEncoding enc); +ONIG_EXTERN int onigenc_mb2_code_to_mbc(OnigEncoding enc, OnigCodePoint code, UChar *buf); +ONIG_EXTERN int onigenc_minimum_property_name_to_ctype(OnigEncoding enc, const UChar* p, const UChar* end); +ONIG_EXTERN int onigenc_unicode_property_name_to_ctype(OnigEncoding enc, const UChar* p, const UChar* end); +ONIG_EXTERN int onigenc_mb2_is_code_ctype(OnigEncoding enc, OnigCodePoint code, unsigned int ctype); +ONIG_EXTERN int onigenc_mb4_code_to_mbclen(OnigCodePoint code, OnigEncoding enc); +ONIG_EXTERN int onigenc_mb4_code_to_mbc(OnigEncoding enc, OnigCodePoint code, UChar *buf); +ONIG_EXTERN int onigenc_mb4_is_code_ctype(OnigEncoding enc, OnigCodePoint code, unsigned int ctype); + +ONIG_EXTERN int onigenc_unicode_case_map(OnigCaseFoldType* flagP, const OnigUChar** pp, const OnigUChar* end, OnigUChar* to, OnigUChar* to_end, const struct OnigEncodingTypeST* enc); + + +/* in enc/unicode.c */ +ONIG_EXTERN int onigenc_unicode_is_code_ctype(OnigCodePoint code, unsigned int ctype, OnigEncoding enc); +ONIG_EXTERN int 
onigenc_utf16_32_get_ctype_code_range(OnigCtype ctype, OnigCodePoint *sb_out, const OnigCodePoint* ranges[], OnigEncoding enc); +ONIG_EXTERN int onigenc_unicode_ctype_code_range(int ctype, const OnigCodePoint* ranges[]); +ONIG_EXTERN int onigenc_unicode_get_case_fold_codes_by_str(OnigEncoding enc, OnigCaseFoldType flag, const OnigUChar* p, const OnigUChar* end, OnigCaseFoldCodeItem items[]); +ONIG_EXTERN int onigenc_unicode_mbc_case_fold(OnigEncoding enc, OnigCaseFoldType flag, const UChar** pp, const UChar* end, UChar* fold); +ONIG_EXTERN int onigenc_unicode_apply_all_case_fold(OnigCaseFoldType flag, OnigApplyAllCaseFoldFunc f, void* arg, OnigEncoding enc); + + +#define UTF16_IS_SURROGATE_FIRST(c) (((c) & 0xfc) == 0xd8) +#define UTF16_IS_SURROGATE_SECOND(c) (((c) & 0xfc) == 0xdc) +#define UTF16_IS_SURROGATE(c) (((c) & 0xf8) == 0xd8) +#define UNICODE_VALID_CODEPOINT_P(c) ( \ + ((c) <= 0x10ffff) && \ + !((c) < 0x10000 && UTF16_IS_SURROGATE((c) >> 8))) + +#define ONIGENC_ISO_8859_1_TO_LOWER_CASE(c) \ + OnigEncISO_8859_1_ToLowerCaseTable[c] +#define ONIGENC_ISO_8859_1_TO_UPPER_CASE(c) \ + OnigEncISO_8859_1_ToUpperCaseTable[c] + +ONIG_EXTERN const UChar OnigEncISO_8859_1_ToLowerCaseTable[]; +ONIG_EXTERN const UChar OnigEncISO_8859_1_ToUpperCaseTable[]; + +ONIG_EXTERN int +onigenc_with_ascii_strncmp(OnigEncoding enc, const UChar* p, const UChar* end, const UChar* sascii /* ascii */, int n); +ONIG_EXTERN int +onigenc_with_ascii_strnicmp(OnigEncoding enc, const UChar* p, const UChar* end, const UChar* sascii /* ascii */, int n); +ONIG_EXTERN UChar* +onigenc_step(OnigEncoding enc, const UChar* p, const UChar* end, int n); + +/* defined in regexec.c, but used in enc/xxx.c */ +extern int onig_is_in_code_range(const UChar* p, OnigCodePoint code); + +ONIG_EXTERN OnigEncoding OnigEncDefaultCharEncoding; +ONIG_EXTERN const UChar OnigEncAsciiToLowerCaseTable[]; +ONIG_EXTERN const UChar OnigEncAsciiToUpperCaseTable[]; +ONIG_EXTERN const unsigned short OnigEncAsciiCtypeTable[]; + +#define ONIGENC_IS_ASCII_CODE(code) ((code) < 0x80) +#define ONIGENC_ASCII_CODE_TO_LOWER_CASE(c) OnigEncAsciiToLowerCaseTable[c] +#define ONIGENC_ASCII_CODE_TO_UPPER_CASE(c) OnigEncAsciiToUpperCaseTable[c] +#define ONIGENC_IS_ASCII_CODE_CTYPE(code,ctype) \ + ((OnigEncAsciiCtypeTable[code] & CTYPE_TO_BIT(ctype)) != 0) +#define ONIGENC_IS_ASCII_CODE_CASE_AMBIG(code) \ + (ONIGENC_IS_ASCII_CODE_CTYPE(code, ONIGENC_CTYPE_UPPER) ||\ + ONIGENC_IS_ASCII_CODE_CTYPE(code, ONIGENC_CTYPE_LOWER)) + +/* Check if the code is in the range. 
(from <= code && code <= to) */ +#define ONIGENC_IS_IN_RANGE(code, from, to) \ + ((OnigCodePoint )((code) - (from)) <= (OnigCodePoint )((to) - (from))) + + +#ifdef ONIG_ENC_REGISTER +extern int ONIG_ENC_REGISTER(const char *, OnigEncoding); +# define OnigEncodingName(n) encoding_##n +# define OnigEncodingDeclare(n) static const OnigEncodingType OnigEncodingName(n) +# define OnigEncodingDefine(f,n) \ + OnigEncodingDeclare(n); \ + void Init_##f(void) { \ + ONIG_ENC_REGISTER(OnigEncodingName(n).name, \ + &OnigEncodingName(n)); \ + } \ + OnigEncodingDeclare(n) +#else +# define OnigEncodingName(n) OnigEncoding##n +# define OnigEncodingDeclare(n) const OnigEncodingType OnigEncodingName(n) +# define OnigEncodingDefine(f,n) OnigEncodingDeclare(n) +#endif + +/* macros for define replica encoding and encoding alias */ +#define ENC_REPLICATE(name, orig) +#define ENC_ALIAS(name, orig) +#define ENC_DUMMY(name) + +RUBY_SYMBOL_EXPORT_END + +#endif /* ONIGMO_REGENC_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/regint.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/regint.h new file mode 100644 index 0000000..a2f5bbb --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/regint.h @@ -0,0 +1,938 @@ +#ifndef ONIGMO_REGINT_H +#define ONIGMO_REGINT_H +/********************************************************************** + regint.h - Onigmo (Oniguruma-mod) (regular expression library) +**********************************************************************/ +/*- + * Copyright (c) 2002-2013 K.Kosako + * Copyright (c) 2011-2016 K.Takata + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* for debug */ +/* #define ONIG_DEBUG_PARSE_TREE */ +/* #define ONIG_DEBUG_COMPILE */ +/* #define ONIG_DEBUG_SEARCH */ +/* #define ONIG_DEBUG_MATCH */ +/* #define ONIG_DEBUG_MEMLEAK */ +/* #define ONIG_DONT_OPTIMIZE */ + +/* for byte-code statistical data. 
*/ +/* #define ONIG_DEBUG_STATISTICS */ + +#if defined(ONIG_DEBUG_PARSE_TREE) || defined(ONIG_DEBUG_MATCH) || \ + defined(ONIG_DEBUG_SEARCH) || defined(ONIG_DEBUG_COMPILE) || \ + defined(ONIG_DEBUG_STATISTICS) || defined(ONIG_DEBUG_MEMLEAK) +# ifndef ONIG_DEBUG +# define ONIG_DEBUG +# endif +#endif + +#ifndef UNALIGNED_WORD_ACCESS +# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \ + defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || \ + defined(__powerpc64__) || \ + defined(__mc68020__) +# define UNALIGNED_WORD_ACCESS 1 +# else +# define UNALIGNED_WORD_ACCESS 0 +# endif +#endif + +#if UNALIGNED_WORD_ACCESS +# define PLATFORM_UNALIGNED_WORD_ACCESS +#endif + +/* config */ +/* spec. config */ +#define USE_NAMED_GROUP +#define USE_SUBEXP_CALL +#define USE_PERL_SUBEXP_CALL +#define USE_CAPITAL_P_NAMED_GROUP +#define USE_BACKREF_WITH_LEVEL /* \k, \k */ +#define USE_MONOMANIAC_CHECK_CAPTURES_IN_ENDLESS_REPEAT /* /(?:()|())*\2/ */ +#define USE_NEWLINE_AT_END_OF_STRING_HAS_EMPTY_LINE /* /\n$/ =~ "\n" */ +#define USE_WARNING_REDUNDANT_NESTED_REPEAT_OPERATOR +/* !!! moved to regenc.h. */ /* #define USE_CRNL_AS_LINE_TERMINATOR */ +#define USE_NO_INVALID_QUANTIFIER + +/* internal config */ +/* #define USE_OP_PUSH_OR_JUMP_EXACT */ +#define USE_QTFR_PEEK_NEXT +#define USE_ST_LIBRARY +#define USE_SUNDAY_QUICK_SEARCH + +#define INIT_MATCH_STACK_SIZE 160 +#define DEFAULT_MATCH_STACK_LIMIT_SIZE 0 /* unlimited */ +#define DEFAULT_PARSE_DEPTH_LIMIT 4096 + +#define OPT_EXACT_MAXLEN 24 + +/* check config */ +#if defined(USE_PERL_SUBEXP_CALL) || defined(USE_CAPITAL_P_NAMED_GROUP) +# if !defined(USE_NAMED_GROUP) || !defined(USE_SUBEXP_CALL) +# error USE_NAMED_GROUP and USE_SUBEXP_CALL must be defined. +# endif +#endif + +#if defined(__GNUC__) +# define ARG_UNUSED __attribute__ ((unused)) +#else +# define ARG_UNUSED +#endif + +#if !defined(RUBY) && defined(RUBY_EXPORT) +# define RUBY +#endif +#ifdef RUBY +# ifndef RUBY_DEFINES_H +# include "ruby/ruby.h" +# undef xmalloc +# undef xrealloc +# undef xcalloc +# undef xfree +# endif +#else /* RUBY */ +# include "config.h" +# if SIZEOF_LONG_LONG > 0 +# define LONG_LONG long long +# endif +#endif /* RUBY */ + +#include + +/* */ +/* escape other system UChar definition */ +#ifdef ONIG_ESCAPE_UCHAR_COLLISION +# undef ONIG_ESCAPE_UCHAR_COLLISION +#endif + +#define USE_WORD_BEGIN_END /* "\<": word-begin, "\>": word-end */ +#ifdef RUBY +# undef USE_CAPTURE_HISTORY +#else +# define USE_CAPTURE_HISTORY +#endif +#define USE_VARIABLE_META_CHARS +#define USE_FIND_LONGEST_SEARCH_ALL_OF_RANGE +/* #define USE_COMBINATION_EXPLOSION_CHECK */ /* (X*)* */ + + +#ifndef xmalloc +# define xmalloc malloc +# define xrealloc realloc +# define xcalloc calloc +# define xfree free +#endif + +#ifdef RUBY + +# define CHECK_INTERRUPT_IN_MATCH_AT rb_thread_check_ints() +# define onig_st_init_table st_init_table +# define onig_st_init_table_with_size st_init_table_with_size +# define onig_st_init_numtable st_init_numtable +# define onig_st_init_numtable_with_size st_init_numtable_with_size +# define onig_st_init_strtable st_init_strtable +# define onig_st_init_strtable_with_size st_init_strtable_with_size +# define onig_st_delete st_delete +# define onig_st_delete_safe st_delete_safe +# define onig_st_insert st_insert +# define onig_st_lookup st_lookup +# define onig_st_foreach st_foreach +# define onig_st_add_direct st_add_direct +# define onig_st_free_table st_free_table +# define onig_st_cleanup_safe st_cleanup_safe +# define onig_st_copy st_copy +# define 
onig_st_nothing_key_clone st_nothing_key_clone +# define onig_st_nothing_key_free st_nothing_key_free +# define onig_st_is_member st_is_member + +# define USE_UPPER_CASE_TABLE +#else /* RUBY */ + +# define CHECK_INTERRUPT_IN_MATCH_AT + +# define st_init_table onig_st_init_table +# define st_init_table_with_size onig_st_init_table_with_size +# define st_init_numtable onig_st_init_numtable +# define st_init_numtable_with_size onig_st_init_numtable_with_size +# define st_init_strtable onig_st_init_strtable +# define st_init_strtable_with_size onig_st_init_strtable_with_size +# define st_delete onig_st_delete +# define st_delete_safe onig_st_delete_safe +# define st_insert onig_st_insert +# define st_lookup onig_st_lookup +# define st_foreach onig_st_foreach +# define st_add_direct onig_st_add_direct +# define st_free_table onig_st_free_table +# define st_cleanup_safe onig_st_cleanup_safe +# define st_copy onig_st_copy +# define st_nothing_key_clone onig_st_nothing_key_clone +# define st_nothing_key_free onig_st_nothing_key_free +/* */ +# define onig_st_is_member st_is_member + +#endif /* RUBY */ + +#define STATE_CHECK_STRING_THRESHOLD_LEN 7 +#define STATE_CHECK_BUFF_MAX_SIZE 0x4000 + +#define xmemset memset +#define xmemcpy memcpy +#define xmemmove memmove + +#if ((defined(RUBY_MSVCRT_VERSION) && RUBY_MSVCRT_VERSION >= 90) \ + || (!defined(RUBY_MSVCRT_VERSION) && defined(_WIN32))) \ + && !defined(__GNUC__) +# define xalloca _alloca +# define xvsnprintf(buf,size,fmt,args) _vsnprintf_s(buf,size,_TRUNCATE,fmt,args) +# define xsnprintf sprintf_s +# define xstrcat(dest,src,size) strcat_s(dest,size,src) +#else +# define xalloca alloca +# define xvsnprintf vsnprintf +# define xsnprintf snprintf +# define xstrcat(dest,src,size) strcat(dest,src) +#endif + +#if defined(ONIG_DEBUG_MEMLEAK) && defined(_MSC_VER) +# define _CRTDBG_MAP_ALLOC +# include +# include +#endif + +#include + +#if defined(HAVE_ALLOCA_H) && (defined(_AIX) || !defined(__GNUC__)) +# include +#endif + +#include + +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif + +#ifdef HAVE_STDINT_H +# include +#endif + +#ifdef HAVE_INTTYPES_H +# include +#endif + +#include + +#ifdef _WIN32 +# include /* for alloca() */ +#endif + +#ifdef ONIG_DEBUG +# include +#endif + +#ifdef _WIN32 +# if defined(_MSC_VER) && (_MSC_VER < 1300) +# ifndef _INTPTR_T_DEFINED +# define _INTPTR_T_DEFINED +typedef int intptr_t; +# endif +# ifndef _UINTPTR_T_DEFINED +# define _UINTPTR_T_DEFINED +typedef unsigned int uintptr_t; +# endif +# endif +#endif /* _WIN32 */ + +#ifndef PRIdPTR +# ifdef _WIN64 +# define PRIdPTR "I64d" +# define PRIuPTR "I64u" +# define PRIxPTR "I64x" +# else +# define PRIdPTR "ld" +# define PRIuPTR "lu" +# define PRIxPTR "lx" +# endif +#endif + +#ifndef PRIdPTRDIFF +# define PRIdPTRDIFF PRIdPTR +#endif + +#include "regenc.h" + +RUBY_SYMBOL_EXPORT_BEGIN + +#ifdef MIN +# undef MIN +#endif +#ifdef MAX +# undef MAX +#endif +#define MIN(a,b) (((a)>(b))?(b):(a)) +#define MAX(a,b) (((a)<(b))?(b):(a)) + +#define IS_NULL(p) (((void*)(p)) == (void*)0) +#define IS_NOT_NULL(p) (((void*)(p)) != (void*)0) +#define CHECK_NULL_RETURN(p) if (IS_NULL(p)) return NULL +#define CHECK_NULL_RETURN_MEMERR(p) if (IS_NULL(p)) return ONIGERR_MEMORY +#define NULL_UCHARP ((UChar* )0) + +#define ONIG_LAST_CODE_POINT (~((OnigCodePoint )0)) + +#ifdef PLATFORM_UNALIGNED_WORD_ACCESS + +# define PLATFORM_GET_INC(val,p,type) do{\ + val = *(type* )p;\ + (p) += sizeof(type);\ +} while(0) + +#else + +# define PLATFORM_GET_INC(val,p,type) do{\ + xmemcpy(&val, (p), sizeof(type));\ + (p) 
+= sizeof(type);\ +} while(0) + +/* sizeof(OnigCodePoint) */ +# define WORD_ALIGNMENT_SIZE SIZEOF_LONG + +# define GET_ALIGNMENT_PAD_SIZE(addr,pad_size) do {\ + (pad_size) = WORD_ALIGNMENT_SIZE \ + - ((uintptr_t )(addr) % WORD_ALIGNMENT_SIZE);\ + if ((pad_size) == WORD_ALIGNMENT_SIZE) (pad_size) = 0;\ +} while (0) + +# define ALIGNMENT_RIGHT(addr) do {\ + (addr) += (WORD_ALIGNMENT_SIZE - 1);\ + (addr) -= ((uintptr_t )(addr) % WORD_ALIGNMENT_SIZE);\ +} while (0) + +#endif /* PLATFORM_UNALIGNED_WORD_ACCESS */ + +/* stack pop level */ +#define STACK_POP_LEVEL_FREE 0 +#define STACK_POP_LEVEL_MEM_START 1 +#define STACK_POP_LEVEL_ALL 2 + +/* optimize flags */ +#define ONIG_OPTIMIZE_NONE 0 +#define ONIG_OPTIMIZE_EXACT 1 /* Slow Search */ +#define ONIG_OPTIMIZE_EXACT_BM 2 /* Boyer Moore Search */ +#define ONIG_OPTIMIZE_EXACT_BM_NOT_REV 3 /* BM (applied to a multibyte string) */ +#define ONIG_OPTIMIZE_EXACT_IC 4 /* Slow Search (ignore case) */ +#define ONIG_OPTIMIZE_MAP 5 /* char map */ +#define ONIG_OPTIMIZE_EXACT_BM_IC 6 /* BM (ignore case) */ +#define ONIG_OPTIMIZE_EXACT_BM_NOT_REV_IC 7 /* BM (applied to a multibyte string) (ignore case) */ + +/* bit status */ +typedef unsigned int BitStatusType; + +#define BIT_STATUS_BITS_NUM (sizeof(BitStatusType) * 8) +#define BIT_STATUS_CLEAR(stats) (stats) = 0 +#define BIT_STATUS_ON_ALL(stats) (stats) = ~((BitStatusType )0) +#define BIT_STATUS_AT(stats,n) \ + ((n) < (int )BIT_STATUS_BITS_NUM ? ((stats) & ((BitStatusType )1 << n)) : ((stats) & 1)) + +#define BIT_STATUS_ON_AT(stats,n) do {\ + if ((n) < (int )BIT_STATUS_BITS_NUM)\ + (stats) |= (1 << (n));\ + else\ + (stats) |= 1;\ +} while (0) + +#define BIT_STATUS_ON_AT_SIMPLE(stats,n) do {\ + if ((n) < (int )BIT_STATUS_BITS_NUM)\ + (stats) |= (1 << (n));\ +} while (0) + + +#define INT_MAX_LIMIT ((1UL << (SIZEOF_INT * 8 - 1)) - 1) + +#define DIGITVAL(code) ((code) - '0') +#define ODIGITVAL(code) DIGITVAL(code) +#define XDIGITVAL(enc,code) \ + (ONIGENC_IS_CODE_DIGIT(enc,code) ? DIGITVAL(code) \ + : (ONIGENC_IS_CODE_UPPER(enc,code) ? (code) - 'A' + 10 : (code) - 'a' + 10)) + +#define IS_SINGLELINE(option) ((option) & ONIG_OPTION_SINGLELINE) +#define IS_MULTILINE(option) ((option) & ONIG_OPTION_MULTILINE) +#define IS_IGNORECASE(option) ((option) & ONIG_OPTION_IGNORECASE) +#define IS_EXTEND(option) ((option) & ONIG_OPTION_EXTEND) +#define IS_FIND_LONGEST(option) ((option) & ONIG_OPTION_FIND_LONGEST) +#define IS_FIND_NOT_EMPTY(option) ((option) & ONIG_OPTION_FIND_NOT_EMPTY) +#define IS_FIND_CONDITION(option) ((option) & \ + (ONIG_OPTION_FIND_LONGEST | ONIG_OPTION_FIND_NOT_EMPTY)) +#define IS_NOTBOL(option) ((option) & ONIG_OPTION_NOTBOL) +#define IS_NOTEOL(option) ((option) & ONIG_OPTION_NOTEOL) +#define IS_NOTBOS(option) ((option) & ONIG_OPTION_NOTBOS) +#define IS_NOTEOS(option) ((option) & ONIG_OPTION_NOTEOS) +#define IS_ASCII_RANGE(option) ((option) & ONIG_OPTION_ASCII_RANGE) +#define IS_POSIX_BRACKET_ALL_RANGE(option) ((option) & ONIG_OPTION_POSIX_BRACKET_ALL_RANGE) +#define IS_WORD_BOUND_ALL_RANGE(option) ((option) & ONIG_OPTION_WORD_BOUND_ALL_RANGE) +#define IS_NEWLINE_CRLF(option) ((option) & ONIG_OPTION_NEWLINE_CRLF) + +/* OP_SET_OPTION is required for these options. +#define IS_DYNAMIC_OPTION(option) \ + (((option) & (ONIG_OPTION_MULTILINE | ONIG_OPTION_IGNORECASE)) != 0) +*/ +/* ignore-case and multibyte status are included in compiled code. 
*/ +#define IS_DYNAMIC_OPTION(option) 0 + +#define DISABLE_CASE_FOLD_MULTI_CHAR(case_fold_flag) \ + ((case_fold_flag) & ~INTERNAL_ONIGENC_CASE_FOLD_MULTI_CHAR) + +#define REPEAT_INFINITE -1 +#define IS_REPEAT_INFINITE(n) ((n) == REPEAT_INFINITE) + +/* bitset */ +#define BITS_PER_BYTE 8 +#define SINGLE_BYTE_SIZE (1 << BITS_PER_BYTE) +#define BITS_IN_ROOM ((int )sizeof(Bits) * BITS_PER_BYTE) +#define BITSET_SIZE (SINGLE_BYTE_SIZE / BITS_IN_ROOM) + +#ifdef PLATFORM_UNALIGNED_WORD_ACCESS +typedef unsigned int Bits; +#else +typedef unsigned char Bits; +#endif +typedef Bits BitSet[BITSET_SIZE]; +typedef Bits* BitSetRef; + +#define SIZE_BITSET (int )sizeof(BitSet) + +#define BITSET_CLEAR(bs) do {\ + int i;\ + for (i = 0; i < BITSET_SIZE; i++) { (bs)[i] = 0; } \ +} while (0) + +#define BS_ROOM(bs,pos) (bs)[(int )(pos) / BITS_IN_ROOM] +#define BS_BIT(pos) (1U << ((int )(pos) % BITS_IN_ROOM)) + +#define BITSET_AT(bs, pos) (BS_ROOM(bs,pos) & BS_BIT(pos)) +#define BITSET_SET_BIT(bs, pos) BS_ROOM(bs,pos) |= BS_BIT(pos) +#define BITSET_CLEAR_BIT(bs, pos) BS_ROOM(bs,pos) &= ~(BS_BIT(pos)) +#define BITSET_INVERT_BIT(bs, pos) BS_ROOM(bs,pos) ^= BS_BIT(pos) + +/* bytes buffer */ +typedef struct _BBuf { + UChar* p; + unsigned int used; + unsigned int alloc; +} BBuf; + +#define BBUF_INIT(buf,size) onig_bbuf_init((BBuf* )(buf), (size)) + +#define BBUF_SIZE_INC(buf,inc) do{\ + UChar *tmp;\ + (buf)->alloc += (inc);\ + tmp = (UChar* )xrealloc((buf)->p, (buf)->alloc);\ + if (IS_NULL(tmp)) return(ONIGERR_MEMORY);\ + (buf)->p = tmp;\ +} while (0) + +#define BBUF_EXPAND(buf,low) do{\ + UChar *tmp;\ + do { (buf)->alloc *= 2; } while ((buf)->alloc < (unsigned int )low);\ + tmp = (UChar* )xrealloc((buf)->p, (buf)->alloc);\ + if (IS_NULL(tmp)) return(ONIGERR_MEMORY);\ + (buf)->p = tmp;\ +} while (0) + +#define BBUF_ENSURE_SIZE(buf,size) do{\ + unsigned int new_alloc = (buf)->alloc;\ + while (new_alloc < (unsigned int )(size)) { new_alloc *= 2; }\ + if ((buf)->alloc != new_alloc) {\ + UChar *tmp;\ + tmp = (UChar* )xrealloc((buf)->p, new_alloc);\ + if (IS_NULL(tmp)) return(ONIGERR_MEMORY);\ + (buf)->p = tmp;\ + (buf)->alloc = new_alloc;\ + }\ +} while (0) + +#define BBUF_WRITE(buf,pos,bytes,n) do{\ + int used = (pos) + (int )(n);\ + if ((buf)->alloc < (unsigned int )used) BBUF_EXPAND((buf),used);\ + xmemcpy((buf)->p + (pos), (bytes), (n));\ + if ((buf)->used < (unsigned int )used) (buf)->used = used;\ +} while (0) + +#define BBUF_WRITE1(buf,pos,byte) do{\ + int used = (pos) + 1;\ + if ((buf)->alloc < (unsigned int )used) BBUF_EXPAND((buf),used);\ + (buf)->p[(pos)] = (UChar )(byte);\ + if ((buf)->used < (unsigned int )used) (buf)->used = used;\ +} while (0) + +#define BBUF_ADD(buf,bytes,n) BBUF_WRITE((buf),(buf)->used,(bytes),(n)) +#define BBUF_ADD1(buf,byte) BBUF_WRITE1((buf),(buf)->used,(byte)) +#define BBUF_GET_ADD_ADDRESS(buf) ((buf)->p + (buf)->used) +#define BBUF_GET_OFFSET_POS(buf) ((buf)->used) + +/* from < to */ +#define BBUF_MOVE_RIGHT(buf,from,to,n) do {\ + if ((unsigned int )((to)+(n)) > (buf)->alloc) BBUF_EXPAND((buf),(to) + (n));\ + xmemmove((buf)->p + (to), (buf)->p + (from), (n));\ + if ((unsigned int )((to)+(n)) > (buf)->used) (buf)->used = (to) + (n);\ +} while (0) + +/* from > to */ +#define BBUF_MOVE_LEFT(buf,from,to,n) do {\ + xmemmove((buf)->p + (to), (buf)->p + (from), (n));\ +} while (0) + +/* from > to */ +#define BBUF_MOVE_LEFT_REDUCE(buf,from,to) do {\ + xmemmove((buf)->p + (to), (buf)->p + (from), (buf)->used - (from));\ + (buf)->used -= (from - to);\ +} while (0) + +#define 
BBUF_INSERT(buf,pos,bytes,n) do {\ + if (pos >= (buf)->used) {\ + BBUF_WRITE(buf,pos,bytes,n);\ + }\ + else {\ + BBUF_MOVE_RIGHT((buf),(pos),(pos) + (n),((buf)->used - (pos)));\ + xmemcpy((buf)->p + (pos), (bytes), (n));\ + }\ +} while (0) + +#define BBUF_GET_BYTE(buf, pos) (buf)->p[(pos)] + + +#define ANCHOR_BEGIN_BUF (1<<0) +#define ANCHOR_BEGIN_LINE (1<<1) +#define ANCHOR_BEGIN_POSITION (1<<2) +#define ANCHOR_END_BUF (1<<3) +#define ANCHOR_SEMI_END_BUF (1<<4) +#define ANCHOR_END_LINE (1<<5) + +#define ANCHOR_WORD_BOUND (1<<6) +#define ANCHOR_NOT_WORD_BOUND (1<<7) +#define ANCHOR_WORD_BEGIN (1<<8) +#define ANCHOR_WORD_END (1<<9) +#define ANCHOR_PREC_READ (1<<10) +#define ANCHOR_PREC_READ_NOT (1<<11) +#define ANCHOR_LOOK_BEHIND (1<<12) +#define ANCHOR_LOOK_BEHIND_NOT (1<<13) + +#define ANCHOR_ANYCHAR_STAR (1<<14) /* ".*" optimize info */ +#define ANCHOR_ANYCHAR_STAR_ML (1<<15) /* ".*" optimize info (multi-line) */ + +#define ANCHOR_KEEP (1<<16) + +/* operation code */ +enum OpCode { + OP_FINISH = 0, /* matching process terminator (no more alternative) */ + OP_END = 1, /* pattern code terminator (success end) */ + + OP_EXACT1 = 2, /* single byte, N = 1 */ + OP_EXACT2, /* single byte, N = 2 */ + OP_EXACT3, /* single byte, N = 3 */ + OP_EXACT4, /* single byte, N = 4 */ + OP_EXACT5, /* single byte, N = 5 */ + OP_EXACTN, /* single byte */ + OP_EXACTMB2N1, /* mb-length = 2 N = 1 */ + OP_EXACTMB2N2, /* mb-length = 2 N = 2 */ + OP_EXACTMB2N3, /* mb-length = 2 N = 3 */ + OP_EXACTMB2N, /* mb-length = 2 */ + OP_EXACTMB3N, /* mb-length = 3 */ + OP_EXACTMBN, /* other length */ + + OP_EXACT1_IC, /* single byte, N = 1, ignore case */ + OP_EXACTN_IC, /* single byte, ignore case */ + + OP_CCLASS, + OP_CCLASS_MB, + OP_CCLASS_MIX, + OP_CCLASS_NOT, + OP_CCLASS_MB_NOT, + OP_CCLASS_MIX_NOT, + + OP_ANYCHAR, /* "." */ + OP_ANYCHAR_ML, /* "." multi-line */ + OP_ANYCHAR_STAR, /* ".*" */ + OP_ANYCHAR_ML_STAR, /* ".*" multi-line */ + OP_ANYCHAR_STAR_PEEK_NEXT, + OP_ANYCHAR_ML_STAR_PEEK_NEXT, + + OP_WORD, + OP_NOT_WORD, + OP_WORD_BOUND, + OP_NOT_WORD_BOUND, + OP_WORD_BEGIN, + OP_WORD_END, + + OP_ASCII_WORD, + OP_NOT_ASCII_WORD, + OP_ASCII_WORD_BOUND, + OP_NOT_ASCII_WORD_BOUND, + OP_ASCII_WORD_BEGIN, + OP_ASCII_WORD_END, + + OP_BEGIN_BUF, + OP_END_BUF, + OP_BEGIN_LINE, + OP_END_LINE, + OP_SEMI_END_BUF, + OP_BEGIN_POSITION, + + OP_BACKREF1, + OP_BACKREF2, + OP_BACKREFN, + OP_BACKREFN_IC, + OP_BACKREF_MULTI, + OP_BACKREF_MULTI_IC, + OP_BACKREF_WITH_LEVEL, /* \k, \k */ + + OP_MEMORY_START, + OP_MEMORY_START_PUSH, /* push back-tracker to stack */ + OP_MEMORY_END_PUSH, /* push back-tracker to stack */ + OP_MEMORY_END_PUSH_REC, /* push back-tracker to stack */ + OP_MEMORY_END, + OP_MEMORY_END_REC, /* push marker to stack */ + + OP_KEEP, + + OP_FAIL, /* pop stack and move */ + OP_JUMP, + OP_PUSH, + OP_POP, + OP_PUSH_OR_JUMP_EXACT1, /* if match exact then push, else jump. */ + OP_PUSH_IF_PEEK_NEXT, /* if match exact then push, else none. */ + OP_REPEAT, /* {n,m} */ + OP_REPEAT_NG, /* {n,m}? (non greedy) */ + OP_REPEAT_INC, + OP_REPEAT_INC_NG, /* non greedy */ + OP_REPEAT_INC_SG, /* search and get in stack */ + OP_REPEAT_INC_NG_SG, /* search and get in stack (non greedy) */ + OP_NULL_CHECK_START, /* null loop checker start */ + OP_NULL_CHECK_END, /* null loop checker end */ + OP_NULL_CHECK_END_MEMST, /* null loop checker end (with capture status) */ + OP_NULL_CHECK_END_MEMST_PUSH, /* with capture status and push check-end */ + + OP_PUSH_POS, /* (?=...) start */ + OP_POP_POS, /* (?=...) end */ + OP_PUSH_POS_NOT, /* (?!...) 
start */ + OP_FAIL_POS, /* (?!...) end */ + OP_PUSH_STOP_BT, /* (?>...) start */ + OP_POP_STOP_BT, /* (?>...) end */ + OP_LOOK_BEHIND, /* (?<=...) start (no needs end opcode) */ + OP_PUSH_LOOK_BEHIND_NOT, /* (?<!...) start */ + OP_FAIL_LOOK_BEHIND_NOT, /* (?<!...) end */ + OP_PUSH_ABSENT_POS, /* (?~...) start */ + OP_ABSENT, /* (?~...) start of inner loop */ + OP_ABSENT_END, /* (?~...) end */ + + OP_CALL, /* \g<name> */ + OP_RETURN, + + OP_CONDITION, + + OP_STATE_CHECK_PUSH, /* combination explosion check and push */ + OP_STATE_CHECK_PUSH_OR_JUMP, /* check ok -> push, else jump */ + OP_STATE_CHECK, /* check only */ + OP_STATE_CHECK_ANYCHAR_STAR, + OP_STATE_CHECK_ANYCHAR_ML_STAR, + + /* no need: IS_DYNAMIC_OPTION() == 0 */ + OP_SET_OPTION_PUSH, /* set option and push recover option */ + OP_SET_OPTION /* set option */ +}; + +typedef int RelAddrType; +typedef int AbsAddrType; +typedef int LengthType; +typedef int RepeatNumType; +typedef short int MemNumType; +typedef short int StateCheckNumType; +typedef void* PointerType; + +#define SIZE_OPCODE 1 +#define SIZE_RELADDR (int )sizeof(RelAddrType) +#define SIZE_ABSADDR (int )sizeof(AbsAddrType) +#define SIZE_LENGTH (int )sizeof(LengthType) +#define SIZE_MEMNUM (int )sizeof(MemNumType) +#define SIZE_STATE_CHECK_NUM (int )sizeof(StateCheckNumType) +#define SIZE_REPEATNUM (int )sizeof(RepeatNumType) +#define SIZE_OPTION (int )sizeof(OnigOptionType) +#define SIZE_CODE_POINT (int )sizeof(OnigCodePoint) +#define SIZE_POINTER (int )sizeof(PointerType) + + +#define GET_RELADDR_INC(addr,p) PLATFORM_GET_INC(addr, p, RelAddrType) +#define GET_ABSADDR_INC(addr,p) PLATFORM_GET_INC(addr, p, AbsAddrType) +#define GET_LENGTH_INC(len,p) PLATFORM_GET_INC(len, p, LengthType) +#define GET_MEMNUM_INC(num,p) PLATFORM_GET_INC(num, p, MemNumType) +#define GET_REPEATNUM_INC(num,p) PLATFORM_GET_INC(num, p, RepeatNumType) +#define GET_OPTION_INC(option,p) PLATFORM_GET_INC(option, p, OnigOptionType) +#define GET_POINTER_INC(ptr,p) PLATFORM_GET_INC(ptr, p, PointerType) +#define GET_STATE_CHECK_NUM_INC(num,p) PLATFORM_GET_INC(num, p, StateCheckNumType) + +/* code point's address must be aligned address.
*/ +#define GET_CODE_POINT(code,p) code = *((OnigCodePoint* )(p)) +#define GET_BYTE_INC(byte,p) do{\ + byte = *(p);\ + (p)++;\ +} while(0) + + +/* op-code + arg size */ +#define SIZE_OP_ANYCHAR_STAR SIZE_OPCODE +#define SIZE_OP_ANYCHAR_STAR_PEEK_NEXT (SIZE_OPCODE + 1) +#define SIZE_OP_JUMP (SIZE_OPCODE + SIZE_RELADDR) +#define SIZE_OP_PUSH (SIZE_OPCODE + SIZE_RELADDR) +#define SIZE_OP_POP SIZE_OPCODE +#define SIZE_OP_PUSH_OR_JUMP_EXACT1 (SIZE_OPCODE + SIZE_RELADDR + 1) +#define SIZE_OP_PUSH_IF_PEEK_NEXT (SIZE_OPCODE + SIZE_RELADDR + 1) +#define SIZE_OP_REPEAT_INC (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_REPEAT_INC_NG (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_PUSH_POS SIZE_OPCODE +#define SIZE_OP_PUSH_POS_NOT (SIZE_OPCODE + SIZE_RELADDR) +#define SIZE_OP_POP_POS SIZE_OPCODE +#define SIZE_OP_FAIL_POS SIZE_OPCODE +#define SIZE_OP_SET_OPTION (SIZE_OPCODE + SIZE_OPTION) +#define SIZE_OP_SET_OPTION_PUSH (SIZE_OPCODE + SIZE_OPTION) +#define SIZE_OP_FAIL SIZE_OPCODE +#define SIZE_OP_MEMORY_START (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_MEMORY_START_PUSH (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_MEMORY_END_PUSH (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_MEMORY_END_PUSH_REC (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_MEMORY_END (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_MEMORY_END_REC (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_PUSH_STOP_BT SIZE_OPCODE +#define SIZE_OP_POP_STOP_BT SIZE_OPCODE +#define SIZE_OP_NULL_CHECK_START (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_NULL_CHECK_END (SIZE_OPCODE + SIZE_MEMNUM) +#define SIZE_OP_LOOK_BEHIND (SIZE_OPCODE + SIZE_LENGTH) +#define SIZE_OP_PUSH_LOOK_BEHIND_NOT (SIZE_OPCODE + SIZE_RELADDR + SIZE_LENGTH) +#define SIZE_OP_FAIL_LOOK_BEHIND_NOT SIZE_OPCODE +#define SIZE_OP_CALL (SIZE_OPCODE + SIZE_ABSADDR) +#define SIZE_OP_RETURN SIZE_OPCODE +#define SIZE_OP_CONDITION (SIZE_OPCODE + SIZE_MEMNUM + SIZE_RELADDR) +#define SIZE_OP_PUSH_ABSENT_POS SIZE_OPCODE +#define SIZE_OP_ABSENT (SIZE_OPCODE + SIZE_RELADDR) +#define SIZE_OP_ABSENT_END SIZE_OPCODE + +#ifdef USE_COMBINATION_EXPLOSION_CHECK +# define SIZE_OP_STATE_CHECK (SIZE_OPCODE + SIZE_STATE_CHECK_NUM) +# define SIZE_OP_STATE_CHECK_PUSH (SIZE_OPCODE + SIZE_STATE_CHECK_NUM + SIZE_RELADDR) +# define SIZE_OP_STATE_CHECK_PUSH_OR_JUMP (SIZE_OPCODE + SIZE_STATE_CHECK_NUM + SIZE_RELADDR) +# define SIZE_OP_STATE_CHECK_ANYCHAR_STAR (SIZE_OPCODE + SIZE_STATE_CHECK_NUM) +#endif + +#define MC_ESC(syn) (syn)->meta_char_table.esc +#define MC_ANYCHAR(syn) (syn)->meta_char_table.anychar +#define MC_ANYTIME(syn) (syn)->meta_char_table.anytime +#define MC_ZERO_OR_ONE_TIME(syn) (syn)->meta_char_table.zero_or_one_time +#define MC_ONE_OR_MORE_TIME(syn) (syn)->meta_char_table.one_or_more_time +#define MC_ANYCHAR_ANYTIME(syn) (syn)->meta_char_table.anychar_anytime + +#define IS_MC_ESC_CODE(code, syn) \ + ((code) == MC_ESC(syn) && \ + !IS_SYNTAX_OP2((syn), ONIG_SYN_OP2_INEFFECTIVE_ESCAPE)) + + +#define SYN_POSIX_COMMON_OP \ + ( ONIG_SYN_OP_DOT_ANYCHAR | ONIG_SYN_OP_POSIX_BRACKET | \ + ONIG_SYN_OP_DECIMAL_BACKREF | \ + ONIG_SYN_OP_BRACKET_CC | ONIG_SYN_OP_ASTERISK_ZERO_INF | \ + ONIG_SYN_OP_LINE_ANCHOR | \ + ONIG_SYN_OP_ESC_CONTROL_CHARS ) + +#define SYN_GNU_REGEX_OP \ + ( ONIG_SYN_OP_DOT_ANYCHAR | ONIG_SYN_OP_BRACKET_CC | \ + ONIG_SYN_OP_POSIX_BRACKET | ONIG_SYN_OP_DECIMAL_BACKREF | \ + ONIG_SYN_OP_BRACE_INTERVAL | ONIG_SYN_OP_LPAREN_SUBEXP | \ + ONIG_SYN_OP_VBAR_ALT | \ + ONIG_SYN_OP_ASTERISK_ZERO_INF | ONIG_SYN_OP_PLUS_ONE_INF | \ + ONIG_SYN_OP_QMARK_ZERO_ONE | \ + ONIG_SYN_OP_ESC_AZ_BUF_ANCHOR | 
ONIG_SYN_OP_ESC_CAPITAL_G_BEGIN_ANCHOR | \ + ONIG_SYN_OP_ESC_W_WORD | \ + ONIG_SYN_OP_ESC_B_WORD_BOUND | ONIG_SYN_OP_ESC_LTGT_WORD_BEGIN_END | \ + ONIG_SYN_OP_ESC_S_WHITE_SPACE | ONIG_SYN_OP_ESC_D_DIGIT | \ + ONIG_SYN_OP_LINE_ANCHOR ) + +#define SYN_GNU_REGEX_BV \ + ( ONIG_SYN_CONTEXT_INDEP_ANCHORS | ONIG_SYN_CONTEXT_INDEP_REPEAT_OPS | \ + ONIG_SYN_CONTEXT_INVALID_REPEAT_OPS | ONIG_SYN_ALLOW_INVALID_INTERVAL | \ + ONIG_SYN_BACKSLASH_ESCAPE_IN_CC | ONIG_SYN_ALLOW_DOUBLE_RANGE_OP_IN_CC ) + + +#define NCCLASS_FLAGS(cc) ((cc)->flags) +#define NCCLASS_FLAG_SET(cc,flag) (NCCLASS_FLAGS(cc) |= (flag)) +#define NCCLASS_FLAG_CLEAR(cc,flag) (NCCLASS_FLAGS(cc) &= ~(flag)) +#define IS_NCCLASS_FLAG_ON(cc,flag) ((NCCLASS_FLAGS(cc) & (flag)) != 0) + +/* cclass node */ +#define FLAG_NCCLASS_NOT (1<<0) + +#define NCCLASS_SET_NOT(nd) NCCLASS_FLAG_SET(nd, FLAG_NCCLASS_NOT) +#define NCCLASS_CLEAR_NOT(nd) NCCLASS_FLAG_CLEAR(nd, FLAG_NCCLASS_NOT) +#define IS_NCCLASS_NOT(nd) IS_NCCLASS_FLAG_ON(nd, FLAG_NCCLASS_NOT) + +typedef struct { + int type; + /* struct _Node* next; */ + /* unsigned int flags; */ +} NodeBase; + +typedef struct { + NodeBase base; + unsigned int flags; + BitSet bs; + BBuf* mbuf; /* multi-byte info or NULL */ +} CClassNode; + +typedef intptr_t OnigStackIndex; + +typedef struct _OnigStackType { + unsigned int type; + union { + struct { + UChar *pcode; /* byte code position */ + UChar *pstr; /* string position */ + UChar *pstr_prev; /* previous char position of pstr */ +#ifdef USE_COMBINATION_EXPLOSION_CHECK + unsigned int state_check; +#endif + UChar *pkeep; /* keep pattern position */ + } state; + struct { + int count; /* for OP_REPEAT_INC, OP_REPEAT_INC_NG */ + UChar *pcode; /* byte code position (head of repeated target) */ + int num; /* repeat id */ + } repeat; + struct { + OnigStackIndex si; /* index of stack */ + } repeat_inc; + struct { + int num; /* memory num */ + UChar *pstr; /* start/end position */ + /* Following information is set, if this stack type is MEM-START */ + OnigStackIndex start; /* prev. info (for backtrack "(...)*" ) */ + OnigStackIndex end; /* prev. 
info (for backtrack "(...)*" ) */ + } mem; + struct { + int num; /* null check id */ + UChar *pstr; /* start position */ + } null_check; +#ifdef USE_SUBEXP_CALL + struct { + UChar *ret_addr; /* byte code position */ + int num; /* null check id */ + UChar *pstr; /* string position */ + } call_frame; +#endif + struct { + UChar *abs_pstr; /* absent start position */ + const UChar *end_pstr; /* end position */ + } absent_pos; + } u; +} OnigStackType; + +typedef struct { + void* stack_p; + size_t stack_n; + OnigOptionType options; + OnigRegion* region; + const UChar* start; /* search start position */ + const UChar* gpos; /* global position (for \G: BEGIN_POSITION) */ +#ifdef USE_FIND_LONGEST_SEARCH_ALL_OF_RANGE + OnigPosition best_len; /* for ONIG_OPTION_FIND_LONGEST */ + UChar* best_s; +#endif +#ifdef USE_COMBINATION_EXPLOSION_CHECK + void* state_check_buff; + int state_check_buff_size; +#endif +} OnigMatchArg; + + +#define IS_CODE_SB_WORD(enc,code) \ + (ONIGENC_IS_CODE_ASCII(code) && ONIGENC_IS_CODE_WORD(enc,code)) + +typedef struct OnigEndCallListItem { + struct OnigEndCallListItem* next; + void (*func)(void); +} OnigEndCallListItemType; + +extern void onig_add_end_call(void (*func)(void)); + + +#ifdef ONIG_DEBUG + +typedef struct { + short int opcode; + const char* name; + short int arg_type; +} OnigOpInfoType; + +extern OnigOpInfoType OnigOpInfo[]; + + +extern void onig_print_compiled_byte_code(FILE* f, UChar* bp, UChar* bpend, UChar** nextp, OnigEncoding enc); + +# ifdef ONIG_DEBUG_STATISTICS +extern void onig_statistics_init(void); +extern void onig_print_statistics(FILE* f); +# endif +#endif + +extern UChar* onig_error_code_to_format(OnigPosition code); +extern void onig_vsnprintf_with_pattern(UChar buf[], int bufsize, OnigEncoding enc, UChar* pat, UChar* pat_end, const UChar *fmt, va_list args); +extern void onig_snprintf_with_pattern(UChar buf[], int bufsize, OnigEncoding enc, UChar* pat, UChar* pat_end, const UChar *fmt, ...); +extern int onig_bbuf_init(BBuf* buf, OnigDistance size); +extern int onig_compile(regex_t* reg, const UChar* pattern, const UChar* pattern_end, OnigErrorInfo* einfo); +#ifdef RUBY +extern int onig_compile_ruby(regex_t* reg, const UChar* pattern, const UChar* pattern_end, OnigErrorInfo* einfo, const char *sourcefile, int sourceline); +#endif +extern void onig_transfer(regex_t* to, regex_t* from); +extern int onig_is_code_in_cc(OnigEncoding enc, OnigCodePoint code, CClassNode* cc); +extern int onig_is_code_in_cc_len(int enclen, OnigCodePoint code, CClassNode* cc); + +/* strend hash */ +typedef void hash_table_type; +#ifdef RUBY +# include "ruby/st.h" +#else +# include "st.h" +#endif +typedef st_data_t hash_data_type; + +extern hash_table_type* onig_st_init_strend_table_with_size(st_index_t size); +extern int onig_st_lookup_strend(hash_table_type* table, const UChar* str_key, const UChar* end_key, hash_data_type *value); +extern int onig_st_insert_strend(hash_table_type* table, const UChar* str_key, const UChar* end_key, hash_data_type value); + +#ifdef RUBY +extern size_t onig_memsize(const regex_t *reg); +extern size_t onig_region_memsize(const struct re_registers *regs); +#endif + +RUBY_SYMBOL_EXPORT_END + +#endif /* ONIGMO_REGINT_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/regparse.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/regparse.h new file mode 100644 index 0000000..888ebf4 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/regparse.h @@ -0,0 +1,370 @@ +#ifndef ONIGMO_REGPARSE_H +#define ONIGMO_REGPARSE_H 
+/********************************************************************** + regparse.h - Onigmo (Oniguruma-mod) (regular expression library) +**********************************************************************/ +/*- + * Copyright (c) 2002-2007 K.Kosako + * Copyright (c) 2011-2016 K.Takata + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "regint.h" + +RUBY_SYMBOL_EXPORT_BEGIN + +/* node type */ +#define NT_STR 0 +#define NT_CCLASS 1 +#define NT_CTYPE 2 +#define NT_CANY 3 +#define NT_BREF 4 +#define NT_QTFR 5 +#define NT_ENCLOSE 6 +#define NT_ANCHOR 7 +#define NT_LIST 8 +#define NT_ALT 9 +#define NT_CALL 10 + +/* node type bit */ +#define NTYPE2BIT(type) (1<<(type)) + +#define BIT_NT_STR NTYPE2BIT(NT_STR) +#define BIT_NT_CCLASS NTYPE2BIT(NT_CCLASS) +#define BIT_NT_CTYPE NTYPE2BIT(NT_CTYPE) +#define BIT_NT_CANY NTYPE2BIT(NT_CANY) +#define BIT_NT_BREF NTYPE2BIT(NT_BREF) +#define BIT_NT_QTFR NTYPE2BIT(NT_QTFR) +#define BIT_NT_ENCLOSE NTYPE2BIT(NT_ENCLOSE) +#define BIT_NT_ANCHOR NTYPE2BIT(NT_ANCHOR) +#define BIT_NT_LIST NTYPE2BIT(NT_LIST) +#define BIT_NT_ALT NTYPE2BIT(NT_ALT) +#define BIT_NT_CALL NTYPE2BIT(NT_CALL) + +#define IS_NODE_TYPE_SIMPLE(type) \ + ((NTYPE2BIT(type) & (BIT_NT_STR | BIT_NT_CCLASS | BIT_NT_CTYPE |\ + BIT_NT_CANY | BIT_NT_BREF)) != 0) + +#define NTYPE(node) ((node)->u.base.type) +#define SET_NTYPE(node, ntype) \ + do { \ + int value = ntype; \ + memcpy(&((node)->u.base.type), &value, sizeof(int)); \ + } while (0) + +#define NSTR(node) (&((node)->u.str)) +#define NCCLASS(node) (&((node)->u.cclass)) +#define NCTYPE(node) (&((node)->u.ctype)) +#define NBREF(node) (&((node)->u.bref)) +#define NQTFR(node) (&((node)->u.qtfr)) +#define NENCLOSE(node) (&((node)->u.enclose)) +#define NANCHOR(node) (&((node)->u.anchor)) +#define NCONS(node) (&((node)->u.cons)) +#define NCALL(node) (&((node)->u.call)) + +#define NCAR(node) (NCONS(node)->car) +#define NCDR(node) (NCONS(node)->cdr) + + + +#define ANCHOR_ANYCHAR_STAR_MASK (ANCHOR_ANYCHAR_STAR | ANCHOR_ANYCHAR_STAR_ML) +#define ANCHOR_END_BUF_MASK (ANCHOR_END_BUF | ANCHOR_SEMI_END_BUF) + +#define ENCLOSE_MEMORY (1<<0) +#define ENCLOSE_OPTION (1<<1) +#define ENCLOSE_STOP_BACKTRACK (1<<2) +#define ENCLOSE_CONDITION (1<<3) +#define ENCLOSE_ABSENT (1<<4) + +#define 
NODE_STR_MARGIN 16 +#define NODE_STR_BUF_SIZE 24 /* sizeof(CClassNode) - sizeof(int)*4 */ +#define NODE_BACKREFS_SIZE 6 + +#define NSTR_RAW (1<<0) /* by backslashed number */ +#define NSTR_AMBIG (1<<1) +#define NSTR_DONT_GET_OPT_INFO (1<<2) + +#define NSTRING_LEN(node) (OnigDistance )((node)->u.str.end - (node)->u.str.s) +#define NSTRING_SET_RAW(node) (node)->u.str.flag |= NSTR_RAW +#define NSTRING_CLEAR_RAW(node) (node)->u.str.flag &= ~NSTR_RAW +#define NSTRING_SET_AMBIG(node) (node)->u.str.flag |= NSTR_AMBIG +#define NSTRING_SET_DONT_GET_OPT_INFO(node) \ + (node)->u.str.flag |= NSTR_DONT_GET_OPT_INFO +#define NSTRING_IS_RAW(node) (((node)->u.str.flag & NSTR_RAW) != 0) +#define NSTRING_IS_AMBIG(node) (((node)->u.str.flag & NSTR_AMBIG) != 0) +#define NSTRING_IS_DONT_GET_OPT_INFO(node) \ + (((node)->u.str.flag & NSTR_DONT_GET_OPT_INFO) != 0) + +#define BACKREFS_P(br) \ + (IS_NOT_NULL((br)->back_dynamic) ? (br)->back_dynamic : (br)->back_static); + +#define NQ_TARGET_ISNOT_EMPTY 0 +#define NQ_TARGET_IS_EMPTY 1 +#define NQ_TARGET_IS_EMPTY_MEM 2 +#define NQ_TARGET_IS_EMPTY_REC 3 + +/* status bits */ +#define NST_MIN_FIXED (1<<0) +#define NST_MAX_FIXED (1<<1) +#define NST_CLEN_FIXED (1<<2) +#define NST_MARK1 (1<<3) +#define NST_MARK2 (1<<4) +#define NST_MEM_BACKREFED (1<<5) +#define NST_STOP_BT_SIMPLE_REPEAT (1<<6) +#define NST_RECURSION (1<<7) +#define NST_CALLED (1<<8) +#define NST_ADDR_FIXED (1<<9) +#define NST_NAMED_GROUP (1<<10) +#define NST_NAME_REF (1<<11) +#define NST_IN_REPEAT (1<<12) /* STK_REPEAT is nested in stack. */ +#define NST_NEST_LEVEL (1<<13) +#define NST_BY_NUMBER (1<<14) /* {n,m} */ + +#define SET_ENCLOSE_STATUS(node,f) (node)->u.enclose.state |= (f) +#define CLEAR_ENCLOSE_STATUS(node,f) (node)->u.enclose.state &= ~(f) + +#define IS_ENCLOSE_CALLED(en) (((en)->state & NST_CALLED) != 0) +#define IS_ENCLOSE_ADDR_FIXED(en) (((en)->state & NST_ADDR_FIXED) != 0) +#define IS_ENCLOSE_RECURSION(en) (((en)->state & NST_RECURSION) != 0) +#define IS_ENCLOSE_MARK1(en) (((en)->state & NST_MARK1) != 0) +#define IS_ENCLOSE_MARK2(en) (((en)->state & NST_MARK2) != 0) +#define IS_ENCLOSE_MIN_FIXED(en) (((en)->state & NST_MIN_FIXED) != 0) +#define IS_ENCLOSE_MAX_FIXED(en) (((en)->state & NST_MAX_FIXED) != 0) +#define IS_ENCLOSE_CLEN_FIXED(en) (((en)->state & NST_CLEN_FIXED) != 0) +#define IS_ENCLOSE_STOP_BT_SIMPLE_REPEAT(en) \ + (((en)->state & NST_STOP_BT_SIMPLE_REPEAT) != 0) +#define IS_ENCLOSE_NAMED_GROUP(en) (((en)->state & NST_NAMED_GROUP) != 0) +#define IS_ENCLOSE_NAME_REF(en) (((en)->state & NST_NAME_REF) != 0) + +#define SET_CALL_RECURSION(node) (node)->u.call.state |= NST_RECURSION +#define IS_CALL_RECURSION(cn) (((cn)->state & NST_RECURSION) != 0) +#define IS_CALL_NAME_REF(cn) (((cn)->state & NST_NAME_REF) != 0) +#define IS_BACKREF_NAME_REF(bn) (((bn)->state & NST_NAME_REF) != 0) +#define IS_BACKREF_NEST_LEVEL(bn) (((bn)->state & NST_NEST_LEVEL) != 0) +#define IS_QUANTIFIER_IN_REPEAT(qn) (((qn)->state & NST_IN_REPEAT) != 0) +#define IS_QUANTIFIER_BY_NUMBER(qn) (((qn)->state & NST_BY_NUMBER) != 0) + +#define CALLNODE_REFNUM_UNDEF -1 + +typedef struct { + NodeBase base; + UChar* s; + UChar* end; + unsigned int flag; + int capa; /* (allocated size - 1) or 0: use buf[] */ + UChar buf[NODE_STR_BUF_SIZE]; +} StrNode; + +typedef struct { + NodeBase base; + int state; + struct _Node* target; + int lower; + int upper; + int greedy; + int target_empty_info; + struct _Node* head_exact; + struct _Node* next_head_exact; + int is_refered; /* include called node. 
don't eliminate even if {0} */ +#ifdef USE_COMBINATION_EXPLOSION_CHECK + int comb_exp_check_num; /* 1,2,3...: check, 0: no check */ +#endif +} QtfrNode; + +typedef struct { + NodeBase base; + int state; + int type; + int regnum; + OnigOptionType option; + AbsAddrType call_addr; + struct _Node* target; + /* for multiple call reference */ + OnigDistance min_len; /* min length (byte) */ + OnigDistance max_len; /* max length (byte) */ + int char_len; /* character length */ + int opt_count; /* referenced count in optimize_node_left() */ +} EncloseNode; + +#ifdef USE_SUBEXP_CALL + +typedef struct { + int offset; + struct _Node* target; +} UnsetAddr; + +typedef struct { + int num; + int alloc; + UnsetAddr* us; +} UnsetAddrList; + +typedef struct { + NodeBase base; + int state; + int group_num; + UChar* name; + UChar* name_end; + struct _Node* target; /* EncloseNode : ENCLOSE_MEMORY */ + UnsetAddrList* unset_addr_list; +} CallNode; + +#endif + +typedef struct { + NodeBase base; + int state; + int back_num; + int back_static[NODE_BACKREFS_SIZE]; + int* back_dynamic; + int nest_level; +} BRefNode; + +typedef struct { + NodeBase base; + int type; + struct _Node* target; + int char_len; + int ascii_range; +} AnchorNode; + +typedef struct { + NodeBase base; + struct _Node* car; + struct _Node* cdr; +} ConsAltNode; + +typedef struct { + NodeBase base; + int ctype; + int not; + int ascii_range; +} CtypeNode; + +typedef struct _Node { + union { + NodeBase base; + StrNode str; + CClassNode cclass; + QtfrNode qtfr; + EncloseNode enclose; + BRefNode bref; + AnchorNode anchor; + ConsAltNode cons; + CtypeNode ctype; +#ifdef USE_SUBEXP_CALL + CallNode call; +#endif + } u; +} Node; + + +#define NULL_NODE ((Node* )0) + +#define SCANENV_MEMNODES_SIZE 8 +#define SCANENV_MEM_NODES(senv) \ + (IS_NOT_NULL((senv)->mem_nodes_dynamic) ? 
\ + (senv)->mem_nodes_dynamic : (senv)->mem_nodes_static) + +typedef struct { + OnigOptionType option; + OnigCaseFoldType case_fold_flag; + OnigEncoding enc; + const OnigSyntaxType* syntax; + BitStatusType capture_history; + BitStatusType bt_mem_start; + BitStatusType bt_mem_end; + BitStatusType backrefed_mem; + UChar* pattern; + UChar* pattern_end; + UChar* error; + UChar* error_end; + regex_t* reg; /* for reg->names only */ +#ifdef USE_SUBEXP_CALL + UnsetAddrList* unset_addr_list; +#endif + int num_call; + int num_mem; +#ifdef USE_NAMED_GROUP + int num_named; +#endif + int mem_alloc; + Node* mem_nodes_static[SCANENV_MEMNODES_SIZE]; + Node** mem_nodes_dynamic; +#ifdef USE_COMBINATION_EXPLOSION_CHECK + int num_comb_exp_check; + int comb_exp_max_regnum; + int curr_max_regnum; + int has_recursion; +#endif + unsigned int parse_depth; + int warnings_flag; +#ifdef RUBY + const char* sourcefile; + int sourceline; +#endif +} ScanEnv; + + +#define IS_SYNTAX_OP(syn, opm) (((syn)->op & (opm)) != 0) +#define IS_SYNTAX_OP2(syn, opm) (((syn)->op2 & (opm)) != 0) +#define IS_SYNTAX_BV(syn, bvm) (((syn)->behavior & (bvm)) != 0) + +#ifdef USE_NAMED_GROUP +typedef struct { + int new_val; +} GroupNumRemap; + +extern int onig_renumber_name_table(regex_t* reg, GroupNumRemap* map); +#endif + +extern int onig_strncmp(const UChar* s1, const UChar* s2, int n); +extern void onig_strcpy(UChar* dest, const UChar* src, const UChar* end); +extern void onig_scan_env_set_error_string(ScanEnv* env, int ecode, UChar* arg, UChar* arg_end); +extern int onig_scan_unsigned_number(UChar** src, const UChar* end, OnigEncoding enc); +extern void onig_reduce_nested_quantifier(Node* pnode, Node* cnode); +extern void onig_node_conv_to_str_node(Node* node, int raw); +extern int onig_node_str_cat(Node* node, const UChar* s, const UChar* end); +extern int onig_node_str_set(Node* node, const UChar* s, const UChar* end); +extern void onig_node_free(Node* node); +extern Node* onig_node_new_enclose(int type); +extern Node* onig_node_new_anchor(int type); +extern Node* onig_node_new_str(const UChar* s, const UChar* end); +extern Node* onig_node_new_list(Node* left, Node* right); +extern Node* onig_node_list_add(Node* list, Node* x); +extern Node* onig_node_new_alt(Node* left, Node* right); +extern void onig_node_str_clear(Node* node); +extern int onig_names_free(regex_t* reg); +extern int onig_parse_make_tree(Node** root, const UChar* pattern, const UChar* end, regex_t* reg, ScanEnv* env); +extern int onig_free_shared_cclass_table(void); + +#ifdef ONIG_DEBUG +# ifdef USE_NAMED_GROUP +extern int onig_print_names(FILE*, regex_t*); +# endif +#endif + +RUBY_SYMBOL_EXPORT_END + +#endif /* ONIGMO_REGPARSE_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/revision.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/revision.h new file mode 100644 index 0000000..e506e3e --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/revision.h @@ -0,0 +1 @@ +#define RUBY_REVISION 67245 diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/ruby_assert.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ruby_assert.h new file mode 100644 index 0000000..3383e4f --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ruby_assert.h @@ -0,0 +1,54 @@ +#ifndef RUBY_ASSERT_H +#define RUBY_ASSERT_H + +#include "ruby/ruby.h" + +#if defined(__cplusplus) +extern "C" { +#if 0 +} /* satisfy cc-mode */ +#endif +#endif + +NORETURN(void rb_assert_failure(const char *, int, const char *, const char *)); +#ifdef RUBY_FUNCTION_NAME_STRING +# define 
RUBY_ASSERT_FAIL(expr) \ + rb_assert_failure(__FILE__, __LINE__, RUBY_FUNCTION_NAME_STRING, expr) +#else +# define RUBY_ASSERT_FAIL(expr) \ + rb_assert_failure(__FILE__, __LINE__, NULL, expr) +#endif +#define RUBY_ASSERT_MESG(expr, mesg) \ + ((expr) ? (void)0 : RUBY_ASSERT_FAIL(mesg)) +#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P +# define RUBY_ASSERT_MESG_WHEN(cond, expr, mesg) \ + __builtin_choose_expr( \ + __builtin_constant_p(cond), \ + __builtin_choose_expr(cond, RUBY_ASSERT_MESG(expr, mesg), (void)0), \ + RUBY_ASSERT_MESG(!(cond) || (expr), mesg)) +#else +# define RUBY_ASSERT_MESG_WHEN(cond, expr, mesg) \ + RUBY_ASSERT_MESG(!(cond) || (expr), mesg) +#endif +#define RUBY_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(!RUBY_NDEBUG+0, expr, #expr) +#define RUBY_ASSERT_WHEN(cond, expr) RUBY_ASSERT_MESG_WHEN(cond, expr, #expr) + +#undef assert +#define assert RUBY_ASSERT + +#ifndef RUBY_NDEBUG +# ifdef NDEBUG +# define RUBY_NDEBUG 1 +# else +# define RUBY_NDEBUG 0 +# endif +#endif + +#if defined(__cplusplus) +#if 0 +{ /* satisfy cc-mode */ +#endif +} /* extern "C" { */ +#endif + +#endif diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/ruby_atomic.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ruby_atomic.h new file mode 100644 index 0000000..4bc9f37 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/ruby_atomic.h @@ -0,0 +1,233 @@ +#ifndef RUBY_ATOMIC_H +#define RUBY_ATOMIC_H + +#if 0 +#elif defined HAVE_GCC_ATOMIC_BUILTINS +typedef unsigned int rb_atomic_t; +# define ATOMIC_SET(var, val) (void)__atomic_exchange_n(&(var), (val), __ATOMIC_SEQ_CST) +# define ATOMIC_INC(var) __atomic_fetch_add(&(var), 1, __ATOMIC_SEQ_CST) +# define ATOMIC_DEC(var) __atomic_fetch_sub(&(var), 1, __ATOMIC_SEQ_CST) +# define ATOMIC_OR(var, val) __atomic_fetch_or(&(var), (val), __ATOMIC_SEQ_CST) +# define ATOMIC_EXCHANGE(var, val) __atomic_exchange_n(&(var), (val), __ATOMIC_SEQ_CST) +# define ATOMIC_CAS(var, oldval, newval) \ +({ __typeof__(var) oldvaldup = (oldval); /* oldval should not be modified */ \ + __atomic_compare_exchange_n(&(var), &oldvaldup, (newval), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \ + oldvaldup; }) + +# define ATOMIC_SIZE_ADD(var, val) __atomic_fetch_add(&(var), (val), __ATOMIC_SEQ_CST) +# define ATOMIC_SIZE_SUB(var, val) __atomic_fetch_sub(&(var), (val), __ATOMIC_SEQ_CST) + +# define RUBY_ATOMIC_GENERIC_MACRO 1 + +#elif defined HAVE_GCC_SYNC_BUILTINS +/* @shyouhei hack to support atomic operations in case of gcc. Gcc + * has its own pseudo-insns to support them. 
See info, or + * http://gcc.gnu.org/onlinedocs/gcc/Atomic-Builtins.html */ + +typedef unsigned int rb_atomic_t; /* Anything OK */ +# define ATOMIC_SET(var, val) (void)__sync_lock_test_and_set(&(var), (val)) +# define ATOMIC_INC(var) __sync_fetch_and_add(&(var), 1) +# define ATOMIC_DEC(var) __sync_fetch_and_sub(&(var), 1) +# define ATOMIC_OR(var, val) __sync_fetch_and_or(&(var), (val)) +# define ATOMIC_EXCHANGE(var, val) __sync_lock_test_and_set(&(var), (val)) +# define ATOMIC_CAS(var, oldval, newval) __sync_val_compare_and_swap(&(var), (oldval), (newval)) + +# define ATOMIC_SIZE_ADD(var, val) __sync_fetch_and_add(&(var), (val)) +# define ATOMIC_SIZE_SUB(var, val) __sync_fetch_and_sub(&(var), (val)) + +# define RUBY_ATOMIC_GENERIC_MACRO 1 + +#elif defined _WIN32 +#if defined _MSC_VER && _MSC_VER > 1200 +#pragma intrinsic(_InterlockedOr) +#endif +typedef LONG rb_atomic_t; + +# define ATOMIC_SET(var, val) InterlockedExchange(&(var), (val)) +# define ATOMIC_INC(var) InterlockedIncrement(&(var)) +# define ATOMIC_DEC(var) InterlockedDecrement(&(var)) +#if defined __GNUC__ +# define ATOMIC_OR(var, val) __asm__("lock\n\t" "orl\t%1, %0" : "=m"(var) : "Ir"(val)) +#elif defined _MSC_VER && _MSC_VER <= 1200 +# define ATOMIC_OR(var, val) rb_w32_atomic_or(&(var), (val)) +static inline void +rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val) +{ +#ifdef _M_IX86 + __asm mov eax, var; + __asm mov ecx, val; + __asm lock or [eax], ecx; +#else +#error unsupported architecture +#endif +} +#else +# define ATOMIC_OR(var, val) _InterlockedOr(&(var), (val)) +#endif +# define ATOMIC_EXCHANGE(var, val) InterlockedExchange(&(var), (val)) +# define ATOMIC_CAS(var, oldval, newval) InterlockedCompareExchange(&(var), (newval), (oldval)) +# if defined _MSC_VER && _MSC_VER <= 1200 +static inline rb_atomic_t +rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval) +{ + return (rb_atomic_t)InterlockedCompareExchange((PVOID *)var, (PVOID)newval, (PVOID)oldval); +} +# undef ATOMIC_CAS +# define ATOMIC_CAS(var, oldval, newval) rb_w32_atomic_cas(&(var), (oldval), (newval)) +# endif +# ifdef _M_AMD64 +# define ATOMIC_SIZE_ADD(var, val) InterlockedExchangeAdd64((LONG_LONG *)&(var), (val)) +# define ATOMIC_SIZE_SUB(var, val) InterlockedExchangeAdd64((LONG_LONG *)&(var), -(LONG)(val)) +# define ATOMIC_SIZE_INC(var) InterlockedIncrement64(&(var)) +# define ATOMIC_SIZE_DEC(var) InterlockedDecrement64(&(var)) +# define ATOMIC_SIZE_EXCHANGE(var, val) InterlockedExchange64(&(var), (val)) +# define ATOMIC_SIZE_CAS(var, oldval, newval) InterlockedCompareExchange64(&(var), (newval), (oldval)) +# else +# define ATOMIC_SIZE_ADD(var, val) InterlockedExchangeAdd((LONG *)&(var), (val)) +# define ATOMIC_SIZE_SUB(var, val) InterlockedExchangeAdd((LONG *)&(var), -(LONG)(val)) +# define ATOMIC_SIZE_INC(var) InterlockedIncrement((LONG *)&(var)) +# define ATOMIC_SIZE_DEC(var) InterlockedDecrement((LONG *)&(var)) +# define ATOMIC_SIZE_EXCHANGE(var, val) InterlockedExchange((LONG *)&(var), (val)) +# endif + +#elif defined(__sun) && defined(HAVE_ATOMIC_H) +#include +typedef unsigned int rb_atomic_t; + +# define ATOMIC_SET(var, val) (void)atomic_swap_uint(&(var), (val)) +# define ATOMIC_INC(var) atomic_inc_uint(&(var)) +# define ATOMIC_DEC(var) atomic_dec_uint(&(var)) +# define ATOMIC_OR(var, val) atomic_or_uint(&(var), (val)) +# define ATOMIC_EXCHANGE(var, val) atomic_swap_uint(&(var), (val)) +# define ATOMIC_CAS(var, oldval, newval) atomic_cas_uint(&(var), (oldval), (newval)) + +# if SIZEOF_SIZE_T == SIZEOF_LONG +# 
define ATOMIC_SIZE_ADD(var, val) atomic_add_long(&(var), (val)) +# define ATOMIC_SIZE_SUB(var, val) atomic_add_long(&(var), -(val)) +# define ATOMIC_SIZE_INC(var) atomic_inc_ulong(&(var)) +# define ATOMIC_SIZE_DEC(var) atomic_dec_ulong(&(var)) +# define ATOMIC_SIZE_EXCHANGE(var, val) atomic_swap_ulong(&(var), (val)) +# define ATOMIC_SIZE_CAS(var, oldval, val) atomic_cas_ulong(&(var), (oldval), (val)) +# else +# define ATOMIC_SIZE_ADD(var, val) atomic_add_int(&(var), (val)) +# define ATOMIC_SIZE_SUB(var, val) atomic_add_int(&(var), -(val)) +# define ATOMIC_SIZE_INC(var) atomic_inc_uint(&(var)) +# define ATOMIC_SIZE_DEC(var) atomic_dec_uint(&(var)) +# define ATOMIC_SIZE_EXCHANGE(var, val) atomic_swap_uint(&(var), (val)) +# endif + +#else +typedef int rb_atomic_t; +#define NEED_RUBY_ATOMIC_OPS +extern rb_atomic_t ruby_atomic_exchange(rb_atomic_t *ptr, rb_atomic_t val); +extern rb_atomic_t ruby_atomic_compare_and_swap(rb_atomic_t *ptr, + rb_atomic_t cmp, + rb_atomic_t newval); + +# define ATOMIC_SET(var, val) (void)((var) = (val)) +# define ATOMIC_INC(var) ((var)++) +# define ATOMIC_DEC(var) ((var)--) +# define ATOMIC_OR(var, val) ((var) |= (val)) +# define ATOMIC_EXCHANGE(var, val) ruby_atomic_exchange(&(var), (val)) +# define ATOMIC_CAS(var, oldval, newval) ruby_atomic_compare_and_swap(&(var), (oldval), (newval)) + +# define ATOMIC_SIZE_ADD(var, val) (void)((var) += (val)) +# define ATOMIC_SIZE_SUB(var, val) (void)((var) -= (val)) +# define ATOMIC_SIZE_EXCHANGE(var, val) ruby_atomic_size_exchange(&(var), (val)) +static inline size_t +ruby_atomic_size_exchange(size_t *ptr, size_t val) +{ + size_t old = *ptr; + *ptr = val; + return old; +} +#endif + +#ifndef ATOMIC_SIZE_INC +# define ATOMIC_SIZE_INC(var) ATOMIC_INC(var) +#endif +#ifndef ATOMIC_SIZE_DEC +# define ATOMIC_SIZE_DEC(var) ATOMIC_DEC(var) +#endif +#ifndef ATOMIC_SIZE_EXCHANGE +# define ATOMIC_SIZE_EXCHANGE(var, val) ATOMIC_EXCHANGE(var, val) +#endif +#ifndef ATOMIC_SIZE_CAS +# define ATOMIC_SIZE_CAS(var, oldval, val) ATOMIC_CAS(var, oldval, val) +#endif + +#if RUBY_ATOMIC_GENERIC_MACRO +# ifndef ATOMIC_PTR_EXCHANGE +# define ATOMIC_PTR_EXCHANGE(var, val) ATOMIC_EXCHANGE(var, val) +# endif +# ifndef ATOMIC_PTR_CAS +# define ATOMIC_PTR_CAS(var, oldval, newval) ATOMIC_CAS(var, oldval, newval) +# endif + +# ifndef ATOMIC_VALUE_EXCHANGE +# define ATOMIC_VALUE_EXCHANGE(var, val) ATOMIC_EXCHANGE(var, val) +# endif +# ifndef ATOMIC_VALUE_CAS +# define ATOMIC_VALUE_CAS(var, oldval, val) ATOMIC_CAS(var, oldval, val) +# endif +#endif + +#ifndef ATOMIC_PTR_EXCHANGE +# if SIZEOF_VOIDP == SIZEOF_SIZE_T +# define ATOMIC_PTR_EXCHANGE(var, val) (void *)ATOMIC_SIZE_EXCHANGE(*(size_t *)&(var), (size_t)(val)) +# else +# define ATOMIC_PTR_EXCHANGE(var, val) ruby_atomic_ptr_exchange((const void **)&(var), (val)) +static inline void * +ruby_atomic_ptr_exchange(const void **ptr, const void *val) +{ + const void *const old = *ptr; + *ptr = val; + return (void *)old; +} +# endif +#endif +#ifndef ATOMIC_PTR_CAS +# if SIZEOF_VOIDP == SIZEOF_SIZE_T +# define ATOMIC_PTR_CAS(var, oldval, val) (void *)ATOMIC_SIZE_CAS(*(size_t *)&(var), (size_t)(oldval), (size_t)(val)) +# else +# define ATOMIC_PTR_CAS(var, oldval, val) ruby_atomic_ptr_cas(&(var), (oldval), (val)) +static inline void * +ruby_atomic_ptr_cas(const void **ptr, const void *oldval, const void *val) +{ + const void *const old = *ptr; + if (old == oldval) *ptr = val; + return (void *)old; +} +# endif +#endif + +#ifndef ATOMIC_VALUE_EXCHANGE +# if SIZEOF_VALUE == SIZEOF_SIZE_T +# define 
ATOMIC_VALUE_EXCHANGE(var, val) ATOMIC_SIZE_EXCHANGE(*(size_t *)&(var), (size_t)(val)) +# else +# define ATOMIC_VALUE_EXCHANGE(var, val) ruby_atomic_value_exchange(&(var), (val)) +static inline VALUE +ruby_atomic_value_exchange(VALUE *ptr, VALUE val) +{ + const VALUE old = *ptr; + *ptr = val; + return old; +} +# endif +#endif +#ifndef ATOMIC_VALUE_CAS +# if SIZEOF_VALUE == SIZEOF_SIZE_T +# define ATOMIC_VALUE_CAS(var, oldval, val) ATOMIC_SIZE_CAS(*(size_t *)&(var), (size_t)(oldval), (size_t)(val)) +# else +# define ATOMIC_VALUE_CAS(var, oldval, val) ruby_atomic_value_cas(&(var), (oldval), (val)) +static inline VALUE +ruby_atomic_value_cas(VALUE *ptr, VALUE oldval, VALUE val) +{ + const VALUE old = *ptr; + if (old == oldval) *ptr = val; + return old; +} +# endif +#endif + +#endif /* RUBY_ATOMIC_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/siphash.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/siphash.h new file mode 100644 index 0000000..2e7553f --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/siphash.h @@ -0,0 +1,48 @@ +#ifndef SIPHASH_H +#define SIPHASH_H 1 +#include +#ifdef HAVE_STDINT_H +#include <stdint.h> +#endif +#ifdef HAVE_INTTYPES_H +#include <inttypes.h> +#endif + +#ifndef HAVE_UINT64_T +typedef struct { + uint32_t u32[2]; +} sip_uint64_t; +#define uint64_t sip_uint64_t +#else +typedef uint64_t sip_uint64_t; +#endif + +typedef struct { + int c; + int d; + uint64_t v[4]; + uint8_t buf[sizeof(uint64_t)]; + uint8_t buflen; + uint8_t msglen_byte; +} sip_state; + +typedef struct sip_interface_st sip_interface; + +typedef struct { + sip_state state[1]; + const sip_interface *methods; +} sip_hash; + +sip_hash *sip_hash_new(const uint8_t key[16], int c, int d); +sip_hash *sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d); +int sip_hash_update(sip_hash *h, const uint8_t *data, size_t len); +int sip_hash_final(sip_hash *h, uint8_t **digest, size_t *len); +int sip_hash_final_integer(sip_hash *h, uint64_t *digest); +int sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len); +int sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest); +void sip_hash_free(sip_hash *h); +void sip_hash_dump(sip_hash *h); + +uint64_t sip_hash13(const uint8_t key[16], const uint8_t *data, size_t len); + +#endif diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/symbol.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/symbol.h new file mode 100644 index 0000000..1f0b139 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/symbol.h @@ -0,0 +1,108 @@ +/********************************************************************** + + symbol.h - + + $Author$ + created at: Tue Jul 8 15:49:54 JST 2014 + + Copyright (C) 2014 Yukihiro Matsumoto + +**********************************************************************/ + +#ifndef RUBY_SYMBOL_H +#define RUBY_SYMBOL_H 1 + +#include "id.h" + +#define DYNAMIC_ID_P(id) (!(id&ID_STATIC_SYM)&&id>tLAST_OP_ID) +#define STATIC_ID2SYM(id) (((VALUE)(id)<<RUBY_SPECIAL_SHIFT)|SYMBOL_FLAG) +#define STATIC_SYM2ID(sym) RSHIFT((unsigned long)(sym), RUBY_SPECIAL_SHIFT) +#define is_notop_id(id) ((id)>tLAST_OP_ID) +#define is_local_id(id) (id_type(id)==ID_LOCAL) +#define is_global_id(id) (id_type(id)==ID_GLOBAL) +#define is_instance_id(id) (id_type(id)==ID_INSTANCE) +#define is_attrset_id(id) ((id)==idASET||id_type(id)==ID_ATTRSET) +#define is_const_id(id) (id_type(id)==ID_CONST) +#define is_class_id(id) (id_type(id)==ID_CLASS) +#define is_junk_id(id) (id_type(id)==ID_JUNK) + +static inline int +id_type(ID id) +{ + if (is_notop_id(id)) { + return (int)(id&ID_SCOPE_MASK); + } + else { + return -1; + } +} + +typedef
uint32_t rb_id_serial_t; + +static inline rb_id_serial_t +rb_id_to_serial(ID id) +{ + if (is_notop_id(id)) { + return (rb_id_serial_t)(id >> ID_SCOPE_SHIFT); + } + else { + return (rb_id_serial_t)id; + } +} + +static inline int +sym_type(VALUE sym) +{ + ID id; + if (STATIC_SYM_P(sym)) { + id = RSHIFT(sym, RUBY_SPECIAL_SHIFT); + if (id<=tLAST_OP_ID) { + return -1; + } + } + else { + id = RSYMBOL(sym)->id; + } + return (int)(id&ID_SCOPE_MASK); +} + +#define is_local_sym(sym) (sym_type(sym)==ID_LOCAL) +#define is_global_sym(sym) (sym_type(sym)==ID_GLOBAL) +#define is_instance_sym(sym) (sym_type(sym)==ID_INSTANCE) +#define is_attrset_sym(sym) (sym_type(sym)==ID_ATTRSET) +#define is_const_sym(sym) (sym_type(sym)==ID_CONST) +#define is_class_sym(sym) (sym_type(sym)==ID_CLASS) +#define is_junk_sym(sym) (sym_type(sym)==ID_JUNK) + +RUBY_FUNC_EXPORTED const unsigned int ruby_global_name_punct_bits[(0x7e - 0x20 + 31) / 32]; + +static inline int +is_global_name_punct(const int c) +{ + if (c <= 0x20 || 0x7e < c) return 0; + return (ruby_global_name_punct_bits[(c - 0x20) / 32] >> (c % 32)) & 1; +} + +ID rb_intern_cstr_without_pindown(const char *, long, rb_encoding *); + +RUBY_SYMBOL_EXPORT_BEGIN + +size_t rb_sym_immortal_count(void); + +RUBY_SYMBOL_EXPORT_END +#endif diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/thread_pthread.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/thread_pthread.h new file mode 100644 index 0000000..31930e0 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/thread_pthread.h @@ -0,0 +1,54 @@ +/********************************************************************** + + thread_pthread.h - + + $Author: kosaki $ + + Copyright (C) 2004-2007 Koichi Sasada + +**********************************************************************/ + +#ifndef RUBY_THREAD_PTHREAD_H +#define RUBY_THREAD_PTHREAD_H + +#ifdef HAVE_PTHREAD_NP_H +#include +#endif + +#define RB_NATIVETHREAD_LOCK_INIT PTHREAD_MUTEX_INITIALIZER +#define RB_NATIVETHREAD_COND_INIT { PTHREAD_COND_INITIALIZER, } + +typedef struct rb_thread_cond_struct { + pthread_cond_t cond; +#ifdef HAVE_CLOCKID_T + clockid_t clockid; +#endif +} rb_nativethread_cond_t; + +typedef struct native_thread_data_struct { + struct list_node ubf_list; + rb_nativethread_cond_t sleep_cond; +} native_thread_data_t; + +#undef except +#undef try +#undef leave +#undef finally + +typedef struct rb_global_vm_lock_struct { + /* fast path */ + unsigned long acquired; + rb_nativethread_lock_t lock; + + /* slow path */ + volatile unsigned long waiting; + rb_nativethread_cond_t cond; + + /* yield */ + rb_nativethread_cond_t switch_cond; + rb_nativethread_cond_t switch_wait_cond; + int need_yield; + int wait_yield; +} rb_global_vm_lock_t; + +#endif /* RUBY_THREAD_PTHREAD_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/thread_win32.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/thread_win32.h new file mode 100644 index 0000000..c2c04b1 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/thread_win32.h @@ -0,0 +1,36 @@ +/********************************************************************** + + thread_win32.h - + + $Author: ko1 $ + + Copyright (C) 2004-2007 Koichi Sasada + +**********************************************************************/ + +/* interface */ +#ifndef RUBY_THREAD_WIN32_H +#define RUBY_THREAD_WIN32_H + +# ifdef __CYGWIN__ +# undef _WIN32 +# endif + +WINBASEAPI BOOL WINAPI +TryEnterCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection); + +typedef struct rb_thread_cond_struct { + struct 
cond_event_entry *next; + struct cond_event_entry *prev; +} rb_nativethread_cond_t; + +typedef struct native_thread_data_struct { + HANDLE interrupt_event; +} native_thread_data_t; + +typedef struct rb_global_vm_lock_struct { + HANDLE lock; +} rb_global_vm_lock_t; + +#endif /* RUBY_THREAD_WIN32_H */ + diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/timev.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/timev.h new file mode 100644 index 0000000..3947477 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/timev.h @@ -0,0 +1,42 @@ +#ifndef RUBY_TIMEV_H +#define RUBY_TIMEV_H + +PACKED_STRUCT_UNALIGNED(struct vtm { + VALUE year; /* 2000 for example. Integer. */ + VALUE subsecx; /* 0 <= subsecx < TIME_SCALE. possibly Rational. */ + VALUE utc_offset; /* -3600 as -01:00 for example. possibly Rational. */ + const char *zone; /* "JST", "EST", "EDT", etc. */ + uint16_t yday:9; /* 1..366 */ + uint8_t mon:4; /* 1..12 */ + uint8_t mday:5; /* 1..31 */ + uint8_t hour:5; /* 0..23 */ + uint8_t min:6; /* 0..59 */ + uint8_t sec:6; /* 0..60 */ + uint8_t wday:3; /* 0:Sunday, 1:Monday, ..., 6:Saturday 7:init */ + uint8_t isdst:2; /* 0:StandardTime 1:DayLightSavingTime 3:init */ +}); + +#define TIME_SCALE 1000000000 + +#ifndef TYPEOF_TIMEVAL_TV_SEC +# define TYPEOF_TIMEVAL_TV_SEC time_t +#endif +#ifndef TYPEOF_TIMEVAL_TV_USEC +# if INT_MAX >= 1000000 +# define TYPEOF_TIMEVAL_TV_USEC int +# else +# define TYPEOF_TIMEVAL_TV_USEC long +# endif +#endif + +#if SIZEOF_TIME_T == SIZEOF_LONG +typedef unsigned long unsigned_time_t; +#elif SIZEOF_TIME_T == SIZEOF_INT +typedef unsigned int unsigned_time_t; +#elif SIZEOF_TIME_T == SIZEOF_LONG_LONG +typedef unsigned LONG_LONG unsigned_time_t; +#else +# error cannot find integer type which size is same as time_t. +#endif + +#endif diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/transcode_data.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/transcode_data.h new file mode 100644 index 0000000..3c357d4 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/transcode_data.h @@ -0,0 +1,139 @@ +/********************************************************************** + + transcode_data.h - + + $Author: nobu $ + created at: Mon 10 Dec 2007 14:01:47 JST 2007 + + Copyright (C) 2007 Martin Duerst + +**********************************************************************/ + +#include "ruby/ruby.h" + +#ifndef RUBY_TRANSCODE_DATA_H +#define RUBY_TRANSCODE_DATA_H 1 + +RUBY_SYMBOL_EXPORT_BEGIN + +#define WORDINDEX_SHIFT_BITS 2 +#define WORDINDEX2INFO(widx) ((widx) << WORDINDEX_SHIFT_BITS) +#define INFO2WORDINDEX(info) ((info) >> WORDINDEX_SHIFT_BITS) +#define BYTE_LOOKUP_BASE(bl) ((bl)[0]) +#define BYTE_LOOKUP_INFO(bl) ((bl)[1]) + +#define PType (unsigned int) + +#define NOMAP (PType 0x01) /* direct map */ +#define ONEbt (0x02) /* one byte payload */ +#define TWObt (0x03) /* two bytes payload */ +#define THREEbt (0x05) /* three bytes payload */ +#define FOURbt (0x06) /* four bytes payload, UTF-8 only, macros start at getBT0 */ +#define INVALID (PType 0x07) /* invalid byte sequence */ +#define UNDEF (PType 0x09) /* legal but undefined */ +#define ZERObt (PType 0x0A) /* zero bytes of payload, i.e. 
remove */ +#define FUNii (PType 0x0B) /* function from info to info */ +#define FUNsi (PType 0x0D) /* function from start to info */ +#define FUNio (PType 0x0E) /* function from info to output */ +#define FUNso (PType 0x0F) /* function from start to output */ +#define STR1 (PType 0x11) /* string 4 <= len <= 259 bytes: 1byte length + content */ +#define GB4bt (PType 0x12) /* GB18030 four bytes payload */ +#define FUNsio (PType 0x13) /* function from start and info to output */ + +#define STR1_LENGTH(byte_addr) (unsigned int)(*(byte_addr) + 4) +#define STR1_BYTEINDEX(w) ((w) >> 6) +#define makeSTR1(bi) (((bi) << 6) | STR1) +#define makeSTR1LEN(len) ((len)-4) + +#define o1(b1) (PType((((unsigned char)(b1))<<8)|ONEbt)) +#define o2(b1,b2) (PType((((unsigned char)(b1))<<8)|\ + (((unsigned char)(b2))<<16)|\ + TWObt)) +#define o3(b1,b2,b3) (PType(((((unsigned char)(b1))<<8)|\ + (((unsigned char)(b2))<<16)|\ + (((unsigned int)(unsigned char)(b3))<<24)|\ + THREEbt)&\ + 0xffffffffU)) +#define o4(b0,b1,b2,b3) (PType(((((unsigned char)(b1))<<8)|\ + (((unsigned char)(b2))<<16)|\ + (((unsigned int)(unsigned char)(b3))<<24)|\ + ((((unsigned char)(b0))&0x07)<<5)|\ + FOURbt)&\ + 0xffffffffU)) +#define g4(b0,b1,b2,b3) (PType(((((unsigned char)(b0))<<8)|\ + (((unsigned char)(b2))<<16)|\ + ((((unsigned char)(b1))&0x0f)<<24)|\ + ((((unsigned int)(unsigned char)(b3))&0x0f)<<28)|\ + GB4bt)&\ + 0xffffffffU)) +#define funsio(diff) (PType((((unsigned int)(diff))<<8)|FUNsio)) + +#define getBT1(a) ((unsigned char)((a)>> 8)) +#define getBT2(a) ((unsigned char)((a)>>16)) +#define getBT3(a) ((unsigned char)((a)>>24)) +#define getBT0(a) (((unsigned char)((a)>> 5)&0x07)|0xF0) /* for UTF-8 only!!! */ + +#define getGB4bt0(a) ((unsigned char)((a)>> 8)) +#define getGB4bt1(a) (((unsigned char)((a)>>24)&0x0F)|0x30) +#define getGB4bt2(a) ((unsigned char)((a)>>16)) +#define getGB4bt3(a) (((unsigned char)((a)>>28)&0x0F)|0x30) + +#define o2FUNii(b1,b2) (PType((((unsigned char)(b1))<<8)|(((unsigned char)(b2))<<16)|FUNii)) + +/* do we need these??? maybe not, can be done with simple tables */ +#define ONETRAIL /* legal but undefined if one more trailing UTF-8 */ +#define TWOTRAIL /* legal but undefined if two more trailing UTF-8 */ +#define THREETRAIL /* legal but undefined if three more trailing UTF-8 */ + +typedef enum { + asciicompat_converter, /* ASCII-compatible -> ASCII-compatible */ + asciicompat_decoder, /* ASCII-incompatible -> ASCII-compatible */ + asciicompat_encoder /* ASCII-compatible -> ASCII-incompatible */ + /* ASCII-incompatible -> ASCII-incompatible is intentionally omitted. 
*/ +} rb_transcoder_asciicompat_type_t; + +typedef struct rb_transcoder rb_transcoder; + +/* static structure, one per supported encoding pair */ +struct rb_transcoder { + const char *src_encoding; + const char *dst_encoding; + unsigned int conv_tree_start; + const unsigned char *byte_array; + unsigned int byte_array_length; + const unsigned int *word_array; + unsigned int word_array_length; + int word_size; + int input_unit_length; + int max_input; + int max_output; + rb_transcoder_asciicompat_type_t asciicompat_type; + size_t state_size; + int (*state_init_func)(void*); /* ret==0:success ret!=0:failure(errno) */ + int (*state_fini_func)(void*); /* ret==0:success ret!=0:failure(errno) */ + VALUE (*func_ii)(void*, VALUE); /* info -> info */ + VALUE (*func_si)(void*, const unsigned char*, size_t); /* start -> info */ + ssize_t (*func_io)(void*, VALUE, const unsigned char*, size_t); /* info -> output */ + ssize_t (*func_so)(void*, const unsigned char*, size_t, unsigned char*, size_t); /* start -> output */ + ssize_t (*finish_func)(void*, unsigned char*, size_t); /* -> output */ + ssize_t (*resetsize_func)(void*); /* -> len */ + ssize_t (*resetstate_func)(void*, unsigned char*, size_t); /* -> output */ + ssize_t (*func_sio)(void*, const unsigned char*, size_t, VALUE, unsigned char*, size_t); /* start -> output */ +}; + +void rb_declare_transcoder(const char *enc1, const char *enc2, const char *lib); +void rb_register_transcoder(const rb_transcoder *); + +/* + * To get rid of collision of initializer symbols in statically-linked encodings + * and transcoders + */ +#if defined(EXTSTATIC) && EXTSTATIC +# define TRANS_INIT(name) void Init_trans_ ## name(void) +#else +# define TRANS_INIT(name) void Init_ ## name(void) +#endif + +RUBY_SYMBOL_EXPORT_END + +#endif /* RUBY_TRANSCODE_DATA_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/version.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/version.h new file mode 100644 index 0000000..4240022 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/version.h @@ -0,0 +1,73 @@ +#define RUBY_VERSION "2.5.4" +#define RUBY_RELEASE_DATE "2019-03-13" +#define RUBY_PATCHLEVEL 155 + +#define RUBY_RELEASE_YEAR 2019 +#define RUBY_RELEASE_MONTH 3 +#define RUBY_RELEASE_DAY 13 + +#include "ruby/version.h" + +#ifndef TOKEN_PASTE +#define TOKEN_PASTE(x,y) x##y +#endif +#define ONLY_ONE_DIGIT(x) TOKEN_PASTE(10,x) < 1000 +#define WITH_ZERO_PADDING(x) TOKEN_PASTE(0,x) +#define RUBY_BIRTH_YEAR_STR STRINGIZE(RUBY_BIRTH_YEAR) +#define RUBY_RELEASE_YEAR_STR STRINGIZE(RUBY_RELEASE_YEAR) +#if ONLY_ONE_DIGIT(RUBY_RELEASE_MONTH) +#define RUBY_RELEASE_MONTH_STR STRINGIZE(WITH_ZERO_PADDING(RUBY_RELEASE_MONTH)) +#else +#define RUBY_RELEASE_MONTH_STR STRINGIZE(RUBY_RELEASE_MONTH) +#endif +#if ONLY_ONE_DIGIT(RUBY_RELEASE_DAY) +#define RUBY_RELEASE_DAY_STR STRINGIZE(WITH_ZERO_PADDING(RUBY_RELEASE_DAY)) +#else +#define RUBY_RELEASE_DAY_STR STRINGIZE(RUBY_RELEASE_DAY) +#endif + +#if !defined RUBY_LIB_VERSION && defined RUBY_LIB_VERSION_STYLE +# if RUBY_LIB_VERSION_STYLE == 3 +# define RUBY_LIB_VERSION STRINGIZE(RUBY_API_VERSION_MAJOR)"."STRINGIZE(RUBY_API_VERSION_MINOR)"."STRINGIZE(RUBY_API_VERSION_TEENY) +# elif RUBY_LIB_VERSION_STYLE == 2 +# define RUBY_LIB_VERSION STRINGIZE(RUBY_API_VERSION_MAJOR)"."STRINGIZE(RUBY_API_VERSION_MINOR) +# endif +#endif + +#if RUBY_PATCHLEVEL == -1 +#define RUBY_PATCHLEVEL_STR "dev" +#else +#define RUBY_PATCHLEVEL_STR "p"STRINGIZE(RUBY_PATCHLEVEL) +#endif + +#ifndef RUBY_REVISION +# include "revision.h" +#endif +#ifndef 
RUBY_REVISION +# define RUBY_REVISION 0 +#endif + +#if RUBY_REVISION +# if RUBY_PATCHLEVEL == -1 +# ifndef RUBY_BRANCH_NAME +# define RUBY_BRANCH_NAME "trunk" +# endif +# define RUBY_REVISION_STR " "RUBY_BRANCH_NAME" "STRINGIZE(RUBY_REVISION) +# else +# define RUBY_REVISION_STR " revision "STRINGIZE(RUBY_REVISION) +# endif +#else +# define RUBY_REVISION_STR "" +#endif + +# define RUBY_DESCRIPTION \ + "ruby "RUBY_VERSION \ + RUBY_PATCHLEVEL_STR \ + " ("RUBY_RELEASE_DATE \ + RUBY_REVISION_STR") " \ + "["RUBY_PLATFORM"]" +# define RUBY_COPYRIGHT \ + "ruby - Copyright (C) " \ + RUBY_BIRTH_YEAR_STR"-" \ + RUBY_RELEASE_YEAR_STR" " \ + RUBY_AUTHOR diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm.inc new file mode 100644 index 0000000..0a3fa21 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm.inc @@ -0,0 +1,3667 @@ +/* -*-c-*- *********************************************************/ +/*******************************************************************/ +/*******************************************************************/ +/** + This file is VM main loop. + + ---- + This file is auto generated by insns2vm.rb + DO NOT TOUCH! + + If you want to fix something, you must edit 'insns.def' + */ + + +INSN_ENTRY(nop){START_OF_ORIGINAL_INSN(nop); +{ + + + DEBUG_ENTER_INSN("nop"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + #define CURRENT_INSN_nop 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_nop_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(nop)); +{ +#line 40 "insns.def" + /* none */ + +#line 32 "vm.inc" +#undef CURRENT_INSN_nop +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(nop);}}} +INSN_ENTRY(getlocal){START_OF_ORIGINAL_INSN(getlocal); +{ + VALUE val; + rb_num_t level = (rb_num_t)GET_OPERAND(2); + lindex_t idx = (lindex_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("getlocal"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + #define CURRENT_INSN_getlocal 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getlocal_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getlocal)); + COLLECT_USAGE_OPERAND(BIN(getlocal), 0, idx); + COLLECT_USAGE_OPERAND(BIN(getlocal), 1, level); +{ +#line 60 "insns.def" + val = *(vm_get_ep(GET_EP(), level) - idx); + RB_DEBUG_COUNTER_INC(lvar_get); + (void)RB_DEBUG_COUNTER_INC_IF(lvar_get_dynamic, level > 0); + +#line 60 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_getlocal +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getlocal);}}} +INSN_ENTRY(setlocal){START_OF_ORIGINAL_INSN(setlocal); +{ + rb_num_t level = (rb_num_t)GET_OPERAND(2); + lindex_t idx = (lindex_t)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("setlocal"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_setlocal 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setlocal_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setlocal)); + COLLECT_USAGE_OPERAND(BIN(setlocal), 0, idx); + COLLECT_USAGE_OPERAND(BIN(setlocal), 1, level); +{ +#line 78 "insns.def" + vm_env_write(vm_get_ep(GET_EP(), level), -(int)idx, val); + RB_DEBUG_COUNTER_INC(lvar_set); + (void)RB_DEBUG_COUNTER_INC_IF(lvar_set_dynamic, level > 0); + +#line 90 "vm.inc" +#undef CURRENT_INSN_setlocal +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setlocal);}}} +INSN_ENTRY(getblockparam){START_OF_ORIGINAL_INSN(getblockparam); +{ + VALUE val; + 
rb_num_t level = (rb_num_t)GET_OPERAND(2); + lindex_t idx = (lindex_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("getblockparam"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + #define CURRENT_INSN_getblockparam 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getblockparam_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getblockparam)); + COLLECT_USAGE_OPERAND(BIN(getblockparam), 0, idx); + COLLECT_USAGE_OPERAND(BIN(getblockparam), 1, level); +{ +#line 94 "insns.def" + const VALUE *ep = vm_get_ep(GET_EP(), level); + VM_ASSERT(VM_ENV_LOCAL_P(ep)); + + if (!VM_ENV_FLAGS(ep, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM)) { + val = rb_vm_bh_to_procval(ec, VM_ENV_BLOCK_HANDLER(ep)); + vm_env_write(ep, -(int)idx, val); + VM_ENV_FLAGS_SET(ep, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM); + } + else { + val = *(ep - idx); + RB_DEBUG_COUNTER_INC(lvar_get); + (void)RB_DEBUG_COUNTER_INC_IF(lvar_get_dynamic, level > 0); + } + +#line 128 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_getblockparam +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getblockparam);}}} +INSN_ENTRY(setblockparam){START_OF_ORIGINAL_INSN(setblockparam); +{ + rb_num_t level = (rb_num_t)GET_OPERAND(2); + lindex_t idx = (lindex_t)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("setblockparam"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_setblockparam 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setblockparam_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setblockparam)); + COLLECT_USAGE_OPERAND(BIN(setblockparam), 0, idx); + COLLECT_USAGE_OPERAND(BIN(setblockparam), 1, level); +{ +#line 120 "insns.def" + const VALUE *ep = vm_get_ep(GET_EP(), level); + VM_ASSERT(VM_ENV_LOCAL_P(ep)); + + vm_env_write(ep, -(int)idx, val); + RB_DEBUG_COUNTER_INC(lvar_set); + (void)RB_DEBUG_COUNTER_INC_IF(lvar_set_dynamic, level > 0); + + VM_ENV_FLAGS_SET(ep, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM); + +#line 163 "vm.inc" +#undef CURRENT_INSN_setblockparam +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setblockparam);}}} +INSN_ENTRY(getspecial){START_OF_ORIGINAL_INSN(getspecial); +{ + VALUE val; + rb_num_t type = (rb_num_t)GET_OPERAND(2); + rb_num_t key = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("getspecial"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + #define CURRENT_INSN_getspecial 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getspecial_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getspecial)); + COLLECT_USAGE_OPERAND(BIN(getspecial), 0, key); + COLLECT_USAGE_OPERAND(BIN(getspecial), 1, type); +{ +#line 141 "insns.def" + val = vm_getspecial(ec, GET_LEP(), key, type); + +#line 189 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_getspecial +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getspecial);}}} +INSN_ENTRY(setspecial){START_OF_ORIGINAL_INSN(setspecial); +{ + rb_num_t key = (rb_num_t)GET_OPERAND(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("setspecial"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_setspecial 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setspecial_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setspecial)); + COLLECT_USAGE_OPERAND(BIN(setspecial), 0, key); +{ +#line 155 "insns.def" + lep_svar_set(ec, GET_LEP(), key, obj); + +#line 215 "vm.inc" +#undef CURRENT_INSN_setspecial 
+#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setspecial);}}} +INSN_ENTRY(getinstancevariable){START_OF_ORIGINAL_INSN(getinstancevariable); +{ + VALUE val; + IC ic = (IC)GET_OPERAND(2); + ID id = (ID)GET_OPERAND(1); + + DEBUG_ENTER_INSN("getinstancevariable"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + #define CURRENT_INSN_getinstancevariable 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getinstancevariable_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getinstancevariable)); + COLLECT_USAGE_OPERAND(BIN(getinstancevariable), 0, id); + COLLECT_USAGE_OPERAND(BIN(getinstancevariable), 1, ic); +{ +#line 169 "insns.def" + val = vm_getinstancevariable(GET_SELF(), id, ic); + +#line 241 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_getinstancevariable +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getinstancevariable);}}} +INSN_ENTRY(setinstancevariable){START_OF_ORIGINAL_INSN(setinstancevariable); +{ + IC ic = (IC)GET_OPERAND(2); + ID id = (ID)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("setinstancevariable"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_setinstancevariable 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setinstancevariable_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setinstancevariable)); + COLLECT_USAGE_OPERAND(BIN(setinstancevariable), 0, id); + COLLECT_USAGE_OPERAND(BIN(setinstancevariable), 1, ic); +{ +#line 183 "insns.def" + vm_setinstancevariable(GET_SELF(), id, val, ic); + +#line 269 "vm.inc" +#undef CURRENT_INSN_setinstancevariable +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setinstancevariable);}}} +INSN_ENTRY(getclassvariable){START_OF_ORIGINAL_INSN(getclassvariable); +{ + VALUE val; + ID id = (ID)GET_OPERAND(1); + + DEBUG_ENTER_INSN("getclassvariable"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_getclassvariable 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getclassvariable_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getclassvariable)); + COLLECT_USAGE_OPERAND(BIN(getclassvariable), 0, id); +{ +#line 197 "insns.def" + val = rb_cvar_get(vm_get_cvar_base(rb_vm_get_cref(GET_EP()), GET_CFP()), id); + +#line 293 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_getclassvariable +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getclassvariable);}}} +INSN_ENTRY(setclassvariable){START_OF_ORIGINAL_INSN(setclassvariable); +{ + ID id = (ID)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("setclassvariable"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_setclassvariable 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setclassvariable_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setclassvariable)); + COLLECT_USAGE_OPERAND(BIN(setclassvariable), 0, id); +{ +#line 211 "insns.def" + vm_ensure_not_refinement_module(GET_SELF()); + rb_cvar_set(vm_get_cvar_base(rb_vm_get_cref(GET_EP()), GET_CFP()), id, val); + +#line 320 "vm.inc" +#undef CURRENT_INSN_setclassvariable +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setclassvariable);}}} +INSN_ENTRY(getconstant){START_OF_ORIGINAL_INSN(getconstant); +{ + VALUE val; + ID id = (ID)GET_OPERAND(1); + VALUE klass = TOPN(0); + DEBUG_ENTER_INSN("getconstant"); + ADD_PC(1+1); + 
PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_getconstant 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getconstant_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getconstant)); + COLLECT_USAGE_OPERAND(BIN(getconstant), 0, id); +{ +#line 233 "insns.def" + val = vm_get_ev_const(ec, klass, id, 0); + +#line 345 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_getconstant +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getconstant);}}} +INSN_ENTRY(setconstant){START_OF_ORIGINAL_INSN(setconstant); +{ + ID id = (ID)GET_OPERAND(1); + VALUE val = TOPN(1); + VALUE cbase = TOPN(0); + DEBUG_ENTER_INSN("setconstant"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_setconstant 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setconstant_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setconstant)); + COLLECT_USAGE_OPERAND(BIN(setconstant), 0, id); +{ +#line 255 "insns.def" + vm_check_if_namespace(cbase); + vm_ensure_not_refinement_module(GET_SELF()); + rb_const_set(cbase, id, val); + +#line 374 "vm.inc" +#undef CURRENT_INSN_setconstant +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setconstant);}}} +INSN_ENTRY(getglobal){START_OF_ORIGINAL_INSN(getglobal); +{ + VALUE val; + GENTRY entry = (GENTRY)GET_OPERAND(1); + + DEBUG_ENTER_INSN("getglobal"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_getglobal 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getglobal_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getglobal)); + COLLECT_USAGE_OPERAND(BIN(getglobal), 0, entry); +{ +#line 271 "insns.def" + val = GET_GLOBAL((VALUE)entry); + +#line 398 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_getglobal +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getglobal);}}} +INSN_ENTRY(setglobal){START_OF_ORIGINAL_INSN(setglobal); +{ + GENTRY entry = (GENTRY)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("setglobal"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_setglobal 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setglobal_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setglobal)); + COLLECT_USAGE_OPERAND(BIN(setglobal), 0, entry); +{ +#line 285 "insns.def" + SET_GLOBAL((VALUE)entry, val); + +#line 424 "vm.inc" +#undef CURRENT_INSN_setglobal +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setglobal);}}} +INSN_ENTRY(putnil){START_OF_ORIGINAL_INSN(putnil); +{ + VALUE val; + + + DEBUG_ENTER_INSN("putnil"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + #define CURRENT_INSN_putnil 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_putnil_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(putnil)); +{ +#line 304 "insns.def" + val = Qnil; + +#line 447 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_putnil +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(putnil);}}} +INSN_ENTRY(putself){START_OF_ORIGINAL_INSN(putself); +{ + VALUE val; + + + DEBUG_ENTER_INSN("putself"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + #define CURRENT_INSN_putself 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_putself_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(putself)); +{ +#line 318 "insns.def" + val = 
GET_SELF(); + +#line 472 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_putself +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(putself);}}} +INSN_ENTRY(putobject){START_OF_ORIGINAL_INSN(putobject); +{ + VALUE val = (VALUE)GET_OPERAND(1); + + DEBUG_ENTER_INSN("putobject"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_putobject 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_putobject_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(putobject)); + COLLECT_USAGE_OPERAND(BIN(putobject), 0, val); +{ +#line 334 "insns.def" + /* */ + +#line 497 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_putobject +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(putobject);}}} +INSN_ENTRY(putspecialobject){START_OF_ORIGINAL_INSN(putspecialobject); +{ + VALUE val; + rb_num_t value_type = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("putspecialobject"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_putspecialobject 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_putspecialobject_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(putspecialobject)); + COLLECT_USAGE_OPERAND(BIN(putspecialobject), 0, value_type); +{ +#line 349 "insns.def" + enum vm_special_object_type type; + + type = (enum vm_special_object_type)value_type; + val = vm_get_special_object(GET_EP(), type); + +#line 526 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_putspecialobject +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(putspecialobject);}}} +INSN_ENTRY(putiseq){START_OF_ORIGINAL_INSN(putiseq); +{ + VALUE ret; + ISEQ iseq = (ISEQ)GET_OPERAND(1); + + DEBUG_ENTER_INSN("putiseq"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_putiseq 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_putiseq_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(putiseq)); + COLLECT_USAGE_OPERAND(BIN(putiseq), 0, iseq); +{ +#line 366 "insns.def" + ret = (VALUE)iseq; + +#line 552 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(ret); +#undef CURRENT_INSN_putiseq +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(putiseq);}}} +INSN_ENTRY(putstring){START_OF_ORIGINAL_INSN(putstring); +{ + VALUE val; + VALUE str = (VALUE)GET_OPERAND(1); + + DEBUG_ENTER_INSN("putstring"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_putstring 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_putstring_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(putstring)); + COLLECT_USAGE_OPERAND(BIN(putstring), 0, str); +{ +#line 380 "insns.def" + val = rb_str_resurrect(str); + +#line 578 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_putstring +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(putstring);}}} +INSN_ENTRY(concatstrings){START_OF_ORIGINAL_INSN(concatstrings); +{ + VALUE val; + rb_num_t num = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("concatstrings"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_concatstrings 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_concatstrings_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(concatstrings)); + COLLECT_USAGE_OPERAND(BIN(concatstrings), 0, num); +{ +#line 394 "insns.def" + val = 
rb_str_concat_literals(num, STACK_ADDR_FROM_TOP(num)); + POPN(num); + +#line 605 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_concatstrings +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(concatstrings);}}} +INSN_ENTRY(tostring){START_OF_ORIGINAL_INSN(tostring); +{ + + VALUE val = TOPN(1); + VALUE str = TOPN(0); + DEBUG_ENTER_INSN("tostring"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_tostring 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_tostring_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(tostring)); +{ +#line 409 "insns.def" + VALUE rb_obj_as_string_result(VALUE str, VALUE obj); + val = rb_obj_as_string_result(str, val); + +#line 632 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_tostring +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(tostring);}}} +INSN_ENTRY(freezestring){START_OF_ORIGINAL_INSN(freezestring); +{ + VALUE debug_info = (VALUE)GET_OPERAND(1); + VALUE str = TOPN(0); + DEBUG_ENTER_INSN("freezestring"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_freezestring 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_freezestring_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(freezestring)); + COLLECT_USAGE_OPERAND(BIN(freezestring), 0, debug_info); +{ +#line 424 "insns.def" + vm_freezestring(str, debug_info); + +#line 658 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(str); +#undef CURRENT_INSN_freezestring +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(freezestring);}}} +INSN_ENTRY(toregexp){START_OF_ORIGINAL_INSN(toregexp); +{ + VALUE val; + rb_num_t cnt = (rb_num_t)GET_OPERAND(2); + rb_num_t opt = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("toregexp"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + #define CURRENT_INSN_toregexp 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_toregexp_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(toregexp)); + COLLECT_USAGE_OPERAND(BIN(toregexp), 0, opt); + COLLECT_USAGE_OPERAND(BIN(toregexp), 1, cnt); +{ +#line 440 "insns.def" + VALUE rb_reg_new_ary(VALUE ary, int options); + VALUE rb_ary_tmp_new_from_values(VALUE, long, const VALUE *); + const VALUE ary = rb_ary_tmp_new_from_values(0, cnt, STACK_ADDR_FROM_TOP(cnt)); + POPN(cnt); + val = rb_reg_new_ary(ary, (int)opt); + rb_ary_clear(ary); + +#line 691 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_toregexp +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(toregexp);}}} +INSN_ENTRY(intern){START_OF_ORIGINAL_INSN(intern); +{ + VALUE sym; + + VALUE str = TOPN(0); + DEBUG_ENTER_INSN("intern"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_intern 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_intern_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(intern)); +{ +#line 459 "insns.def" + sym = rb_str_intern(str); + +#line 717 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(sym); +#undef CURRENT_INSN_intern +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(intern);}}} +INSN_ENTRY(newarray){START_OF_ORIGINAL_INSN(newarray); +{ + VALUE val; + rb_num_t num = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("newarray"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_newarray 1 + 
#define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_newarray_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(newarray)); + COLLECT_USAGE_OPERAND(BIN(newarray), 0, num); +{ +#line 473 "insns.def" + val = rb_ary_new4(num, STACK_ADDR_FROM_TOP(num)); + POPN(num); + +#line 744 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_newarray +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(newarray);}}} +INSN_ENTRY(duparray){START_OF_ORIGINAL_INSN(duparray); +{ + VALUE val; + VALUE ary = (VALUE)GET_OPERAND(1); + + DEBUG_ENTER_INSN("duparray"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_duparray 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_duparray_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(duparray)); + COLLECT_USAGE_OPERAND(BIN(duparray), 0, ary); +{ +#line 488 "insns.def" + val = rb_ary_resurrect(ary); + +#line 770 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_duparray +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(duparray);}}} +INSN_ENTRY(expandarray){START_OF_ORIGINAL_INSN(expandarray); +{ + rb_num_t flag = (rb_num_t)GET_OPERAND(2); + rb_num_t num = (rb_num_t)GET_OPERAND(1); + VALUE ary = TOPN(0); + DEBUG_ENTER_INSN("expandarray"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_expandarray 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_expandarray_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(expandarray)); + COLLECT_USAGE_OPERAND(BIN(expandarray), 0, num); + COLLECT_USAGE_OPERAND(BIN(expandarray), 1, flag); +{ +#line 516 "insns.def" + vm_expandarray(GET_CFP(), ary, num, (int)flag); + +#line 798 "vm.inc" +#undef CURRENT_INSN_expandarray +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(expandarray);}}} +INSN_ENTRY(concatarray){START_OF_ORIGINAL_INSN(concatarray); +{ + VALUE ary; + + VALUE ary1 = TOPN(1); + VALUE ary2 = TOPN(0); + DEBUG_ENTER_INSN("concatarray"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_concatarray 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_concatarray_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(concatarray)); +{ +#line 530 "insns.def" + ary = vm_concat_array(ary1, ary2); + +#line 823 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(ary); +#undef CURRENT_INSN_concatarray +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(concatarray);}}} +INSN_ENTRY(splatarray){START_OF_ORIGINAL_INSN(splatarray); +{ + VALUE obj; + VALUE flag = (VALUE)GET_OPERAND(1); + VALUE ary = TOPN(0); + DEBUG_ENTER_INSN("splatarray"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_splatarray 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_splatarray_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(splatarray)); + COLLECT_USAGE_OPERAND(BIN(splatarray), 0, flag); +{ +#line 544 "insns.def" + obj = vm_splat_array(flag, ary); + +#line 850 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(obj); +#undef CURRENT_INSN_splatarray +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(splatarray);}}} +INSN_ENTRY(newhash){START_OF_ORIGINAL_INSN(newhash); +{ + VALUE val; + rb_num_t num = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("newhash"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_newhash 
1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_newhash_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(newhash)); + COLLECT_USAGE_OPERAND(BIN(newhash), 0, num); +{ +#line 559 "insns.def" + RUBY_DTRACE_CREATE_HOOK(HASH, num); + + val = rb_hash_new_with_size(num / 2); + + if (num) { + rb_hash_bulk_insert(num, STACK_ADDR_FROM_TOP(num), val); + } + POPN(num); + +#line 883 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_newhash +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(newhash);}}} +INSN_ENTRY(newrange){START_OF_ORIGINAL_INSN(newrange); +{ + VALUE val; + rb_num_t flag = (rb_num_t)GET_OPERAND(1); + VALUE low = TOPN(1); + VALUE high = TOPN(0); + DEBUG_ENTER_INSN("newrange"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_newrange 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_newrange_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(newrange)); + COLLECT_USAGE_OPERAND(BIN(newrange), 0, flag); +{ +#line 580 "insns.def" + val = rb_range_new(low, high, (int)flag); + +#line 911 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_newrange +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(newrange);}}} +INSN_ENTRY(pop){START_OF_ORIGINAL_INSN(pop); +{ + + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("pop"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_pop 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_pop_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(pop)); +{ +#line 598 "insns.def" + (void)val; + /* none */ + +#line 937 "vm.inc" +#undef CURRENT_INSN_pop +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(pop);}}} +INSN_ENTRY(dup){START_OF_ORIGINAL_INSN(dup); +{ + VALUE val2; + VALUE val1; + + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("dup"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_dup 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_dup_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(dup)); +{ +#line 613 "insns.def" + val1 = val2 = val; + +#line 962 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 2); + PUSH(val1); + PUSH(val2); +#undef CURRENT_INSN_dup +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(dup);}}} +INSN_ENTRY(dupn){START_OF_ORIGINAL_INSN(dupn); +{ + rb_num_t n = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("dupn"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_dupn 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_dupn_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(dupn)); + COLLECT_USAGE_OPERAND(BIN(dupn), 0, n); +{ +#line 627 "insns.def" + void *dst = GET_SP(); + void *src = STACK_ADDR_FROM_TOP(n); + + INC_SP(n); /* alloca */ + MEMCPY(dst, src, VALUE, n); + +#line 992 "vm.inc" +#undef CURRENT_INSN_dupn +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(dupn);}}} +INSN_ENTRY(swap){START_OF_ORIGINAL_INSN(swap); +{ + + VALUE val = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("swap"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_swap 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_swap_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(swap)); +{ +#line 646 "insns.def" + /* none */ + +#line 1016 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 
2); + PUSH(obj); + PUSH(val); +#undef CURRENT_INSN_swap +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(swap);}}} +INSN_ENTRY(reverse){START_OF_ORIGINAL_INSN(reverse); +{ + rb_num_t n = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("reverse"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_reverse 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_reverse_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(reverse)); + COLLECT_USAGE_OPERAND(BIN(reverse), 0, n); +{ +#line 660 "insns.def" + rb_num_t i; + VALUE *sp = STACK_ADDR_FROM_TOP(n); + + for (i=0; ibody->iseq_encoded, GET_SP(), + class_iseq->body->local_table_size, + class_iseq->body->stack_max); + RESTORE_REGS(); + NEXT_INSN(); + +#line 1307 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_defineclass +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(defineclass);}}} +INSN_ENTRY(send){START_OF_ORIGINAL_INSN(send); +{ + VALUE val; + ISEQ blockiseq = (ISEQ)GET_OPERAND(3); + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + + DEBUG_ENTER_INSN("send"); + ADD_PC(1+3); + PREFETCH(GET_PC()); + #define CURRENT_INSN_send 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_send_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(send)); + COLLECT_USAGE_OPERAND(BIN(send), 0, ci); + COLLECT_USAGE_OPERAND(BIN(send), 1, cc); + COLLECT_USAGE_OPERAND(BIN(send), 2, blockiseq); +{ +#line 846 "insns.def" + struct rb_calling_info calling; + + vm_caller_setup_arg_block(ec, reg_cfp, &calling, ci, blockiseq, FALSE); + vm_search_method(ci, cc, calling.recv = TOPN(calling.argc = ci->orig_argc)); + CALL_METHOD(&calling, ci, cc); + +#line 1341 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_send +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(send);}}} +INSN_ENTRY(opt_str_freeze){START_OF_ORIGINAL_INSN(opt_str_freeze); +{ + VALUE val; + VALUE str = (VALUE)GET_OPERAND(1); + + DEBUG_ENTER_INSN("opt_str_freeze"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_opt_str_freeze 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_str_freeze_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_str_freeze)); + COLLECT_USAGE_OPERAND(BIN(opt_str_freeze), 0, str); +{ +#line 859 "insns.def" + if (BASIC_OP_UNREDEFINED_P(BOP_FREEZE, STRING_REDEFINED_OP_FLAG)) { + val = str; + } + else { + val = rb_funcall(rb_str_resurrect(str), idFreeze, 0); + } + +#line 1372 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_str_freeze +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_str_freeze);}}} +INSN_ENTRY(opt_str_uminus){START_OF_ORIGINAL_INSN(opt_str_uminus); +{ + VALUE val; + VALUE str = (VALUE)GET_OPERAND(1); + + DEBUG_ENTER_INSN("opt_str_uminus"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_opt_str_uminus 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_str_uminus_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_str_uminus)); + COLLECT_USAGE_OPERAND(BIN(opt_str_uminus), 0, str); +{ +#line 873 "insns.def" + if (BASIC_OP_UNREDEFINED_P(BOP_UMINUS, STRING_REDEFINED_OP_FLAG)) { + val = str; + } + else { + val = rb_funcall(rb_str_resurrect(str), idUMinus, 0); + } + +#line 1403 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + 
PUSH(val); +#undef CURRENT_INSN_opt_str_uminus +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_str_uminus);}}} +INSN_ENTRY(opt_newarray_max){START_OF_ORIGINAL_INSN(opt_newarray_max); +{ + VALUE val; + rb_num_t num = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("opt_newarray_max"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_opt_newarray_max 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_newarray_max_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_newarray_max)); + COLLECT_USAGE_OPERAND(BIN(opt_newarray_max), 0, num); +{ +#line 887 "insns.def" + val = vm_opt_newarray_max(num, STACK_ADDR_FROM_TOP(num)); + POPN(num); + +#line 1430 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_newarray_max +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_newarray_max);}}} +INSN_ENTRY(opt_newarray_min){START_OF_ORIGINAL_INSN(opt_newarray_min); +{ + VALUE val; + rb_num_t num = (rb_num_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("opt_newarray_min"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_opt_newarray_min 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_newarray_min_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_newarray_min)); + COLLECT_USAGE_OPERAND(BIN(opt_newarray_min), 0, num); +{ +#line 897 "insns.def" + val = vm_opt_newarray_min(num, STACK_ADDR_FROM_TOP(num)); + POPN(num); + +#line 1457 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_newarray_min +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_newarray_min);}}} +INSN_ENTRY(opt_send_without_block){START_OF_ORIGINAL_INSN(opt_send_without_block); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + + DEBUG_ENTER_INSN("opt_send_without_block"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + #define CURRENT_INSN_opt_send_without_block 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_send_without_block_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_send_without_block)); + COLLECT_USAGE_OPERAND(BIN(opt_send_without_block), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_send_without_block), 1, cc); +{ +#line 912 "insns.def" + struct rb_calling_info calling; + calling.block_handler = VM_BLOCK_HANDLER_NONE; + vm_search_method(ci, cc, calling.recv = TOPN(calling.argc = ci->orig_argc)); + CALL_METHOD(&calling, ci, cc); + +#line 1488 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_send_without_block +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_send_without_block);}}} +INSN_ENTRY(invokesuper){START_OF_ORIGINAL_INSN(invokesuper); +{ + VALUE val; + ISEQ blockiseq = (ISEQ)GET_OPERAND(3); + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + + DEBUG_ENTER_INSN("invokesuper"); + ADD_PC(1+3); + PREFETCH(GET_PC()); + #define CURRENT_INSN_invokesuper 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_invokesuper_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(invokesuper)); + COLLECT_USAGE_OPERAND(BIN(invokesuper), 0, ci); + COLLECT_USAGE_OPERAND(BIN(invokesuper), 1, cc); + COLLECT_USAGE_OPERAND(BIN(invokesuper), 2, blockiseq); +{ +#line 929 "insns.def" + struct rb_calling_info calling; + calling.argc = ci->orig_argc; + + 
vm_caller_setup_arg_block(ec, reg_cfp, &calling, ci, blockiseq, TRUE); + calling.recv = GET_SELF(); + vm_search_super_method(ec, GET_CFP(), &calling, ci, cc); + CALL_METHOD(&calling, ci, cc); + +#line 1524 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_invokesuper +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(invokesuper);}}} +INSN_ENTRY(invokeblock){START_OF_ORIGINAL_INSN(invokeblock); +{ + VALUE val; + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + + DEBUG_ENTER_INSN("invokeblock"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_invokeblock 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_invokeblock_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(invokeblock)); + COLLECT_USAGE_OPERAND(BIN(invokeblock), 0, ci); +{ +#line 949 "insns.def" + struct rb_calling_info calling; + calling.argc = ci->orig_argc; + calling.block_handler = VM_BLOCK_HANDLER_NONE; + calling.recv = GET_SELF(); + + val = vm_invoke_block(ec, GET_CFP(), &calling, ci); + if (val == Qundef) { + RESTORE_REGS(); + NEXT_INSN(); + } + +#line 1559 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_invokeblock +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(invokeblock);}}} +INSN_ENTRY(leave){START_OF_ORIGINAL_INSN(leave); +{ + + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("leave"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_leave 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_leave_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(leave)); +{ +#line 972 "insns.def" + if (OPT_CHECKED_RUN) { + const VALUE *const bp = vm_base_ptr(reg_cfp); + if (reg_cfp->sp != bp) { + vm_stack_consistency_error(ec, reg_cfp, bp); + } + } + + RUBY_VM_CHECK_INTS(ec); + + if (vm_pop_frame(ec, GET_CFP(), GET_EP())) { +#if OPT_CALL_THREADED_CODE + rb_ec_thread_ptr(ec)->retval = val; + return 0; +#else + return val; +#endif + } + else { + RESTORE_REGS(); + } + +#line 1603 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_leave +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(leave);}}} +INSN_ENTRY(throw){START_OF_ORIGINAL_INSN(throw); +{ + VALUE val; + rb_num_t throw_state = (rb_num_t)GET_OPERAND(1); + VALUE throwobj = TOPN(0); + DEBUG_ENTER_INSN("throw"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_throw 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_throw_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(throw)); + COLLECT_USAGE_OPERAND(BIN(throw), 0, throw_state); +{ +#line 1009 "insns.def" + RUBY_VM_CHECK_INTS(ec); + val = vm_throw(ec, GET_CFP(), throw_state, throwobj); + THROW_EXCEPTION(val); + /* unreachable */ + +#line 1633 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_throw +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(throw);}}} +INSN_ENTRY(jump){START_OF_ORIGINAL_INSN(jump); +{ + OFFSET dst = (OFFSET)GET_OPERAND(1); + + DEBUG_ENTER_INSN("jump"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_jump 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_jump_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(jump)); + COLLECT_USAGE_OPERAND(BIN(jump), 0, dst); +{ +#line 1030 "insns.def" + RUBY_VM_CHECK_INTS(ec); + JUMP(dst); + +#line 1659 "vm.inc" +#undef CURRENT_INSN_jump 
+#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(jump);}}} +INSN_ENTRY(branchif){START_OF_ORIGINAL_INSN(branchif); +{ + OFFSET dst = (OFFSET)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("branchif"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_branchif 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_branchif_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(branchif)); + COLLECT_USAGE_OPERAND(BIN(branchif), 0, dst); +{ +#line 1045 "insns.def" + if (RTEST(val)) { + RUBY_VM_CHECK_INTS(ec); + JUMP(dst); + } + +#line 1686 "vm.inc" +#undef CURRENT_INSN_branchif +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(branchif);}}} +INSN_ENTRY(branchunless){START_OF_ORIGINAL_INSN(branchunless); +{ + OFFSET dst = (OFFSET)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("branchunless"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_branchunless 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_branchunless_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(branchunless)); + COLLECT_USAGE_OPERAND(BIN(branchunless), 0, dst); +{ +#line 1062 "insns.def" + if (!RTEST(val)) { + RUBY_VM_CHECK_INTS(ec); + JUMP(dst); + } + +#line 1713 "vm.inc" +#undef CURRENT_INSN_branchunless +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(branchunless);}}} +INSN_ENTRY(branchnil){START_OF_ORIGINAL_INSN(branchnil); +{ + OFFSET dst = (OFFSET)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("branchnil"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_branchnil 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_branchnil_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(branchnil)); + COLLECT_USAGE_OPERAND(BIN(branchnil), 0, dst); +{ +#line 1079 "insns.def" + if (NIL_P(val)) { + RUBY_VM_CHECK_INTS(ec); + JUMP(dst); + } + +#line 1740 "vm.inc" +#undef CURRENT_INSN_branchnil +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(branchnil);}}} +INSN_ENTRY(branchiftype){START_OF_ORIGINAL_INSN(branchiftype); +{ + OFFSET dst = (OFFSET)GET_OPERAND(2); + rb_num_t type = (rb_num_t)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("branchiftype"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_branchiftype 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_branchiftype_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(branchiftype)); + COLLECT_USAGE_OPERAND(BIN(branchiftype), 0, type); + COLLECT_USAGE_OPERAND(BIN(branchiftype), 1, dst); +{ +#line 1096 "insns.def" + if (TYPE(val) == (int)type) { + RUBY_VM_CHECK_INTS(ec); + JUMP(dst); + } + +#line 1769 "vm.inc" +#undef CURRENT_INSN_branchiftype +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(branchiftype);}}} +INSN_ENTRY(getinlinecache){START_OF_ORIGINAL_INSN(getinlinecache); +{ + VALUE val; + IC ic = (IC)GET_OPERAND(2); + OFFSET dst = (OFFSET)GET_OPERAND(1); + + DEBUG_ENTER_INSN("getinlinecache"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + #define CURRENT_INSN_getinlinecache 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getinlinecache_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getinlinecache)); + COLLECT_USAGE_OPERAND(BIN(getinlinecache), 0, dst); + COLLECT_USAGE_OPERAND(BIN(getinlinecache), 1, ic); +{ +#line 1118 "insns.def" + val = vm_ic_hit_p(ic, GET_EP()); + if (val != Qnil) { + 
JUMP(dst); + } + +#line 1798 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_getinlinecache +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getinlinecache);}}} +INSN_ENTRY(setinlinecache){START_OF_ORIGINAL_INSN(setinlinecache); +{ + IC ic = (IC)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("setinlinecache"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_setinlinecache 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setinlinecache_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setinlinecache)); + COLLECT_USAGE_OPERAND(BIN(setinlinecache), 0, ic); +{ +#line 1135 "insns.def" + vm_ic_update(ic, val, GET_EP()); + +#line 1824 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_setinlinecache +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setinlinecache);}}} +INSN_ENTRY(once){START_OF_ORIGINAL_INSN(once); +{ + VALUE val; + IC ic = (IC)GET_OPERAND(2); + ISEQ iseq = (ISEQ)GET_OPERAND(1); + + DEBUG_ENTER_INSN("once"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + #define CURRENT_INSN_once 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_once_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(once)); + COLLECT_USAGE_OPERAND(BIN(once), 0, iseq); + COLLECT_USAGE_OPERAND(BIN(once), 1, ic); +{ +#line 1149 "insns.def" + val = vm_once_dispatch(ec, iseq, ic); + +#line 1852 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_once +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(once);}}} +INSN_ENTRY(opt_case_dispatch){START_OF_ORIGINAL_INSN(opt_case_dispatch); +{ + OFFSET else_offset = (OFFSET)GET_OPERAND(2); + CDHASH hash = (CDHASH)GET_OPERAND(1); + VALUE key = TOPN(0); + DEBUG_ENTER_INSN("opt_case_dispatch"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_opt_case_dispatch 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_case_dispatch_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_case_dispatch)); + COLLECT_USAGE_OPERAND(BIN(opt_case_dispatch), 0, hash); + COLLECT_USAGE_OPERAND(BIN(opt_case_dispatch), 1, else_offset); +{ +#line 1163 "insns.def" + OFFSET dst = vm_case_dispatch(hash, else_offset, key); + + if (dst) { + JUMP(dst); + } + +#line 1884 "vm.inc" +#undef CURRENT_INSN_opt_case_dispatch +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_case_dispatch);}}} +INSN_ENTRY(opt_plus){START_OF_ORIGINAL_INSN(opt_plus); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_plus"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_plus 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_plus_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_plus)); + COLLECT_USAGE_OPERAND(BIN(opt_plus), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_plus), 1, cc); +{ +#line 1183 "insns.def" + val = vm_opt_plus(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 1919 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_plus +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_plus);}}} 
+INSN_ENTRY(opt_minus){START_OF_ORIGINAL_INSN(opt_minus); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_minus"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_minus 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_minus_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_minus)); + COLLECT_USAGE_OPERAND(BIN(opt_minus), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_minus), 1, cc); +{ +#line 1204 "insns.def" + val = vm_opt_minus(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 1956 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_minus +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_minus);}}} +INSN_ENTRY(opt_mult){START_OF_ORIGINAL_INSN(opt_mult); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_mult"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_mult 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_mult_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_mult)); + COLLECT_USAGE_OPERAND(BIN(opt_mult), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_mult), 1, cc); +{ +#line 1225 "insns.def" + val = vm_opt_mult(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 1993 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_mult +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_mult);}}} +INSN_ENTRY(opt_div){START_OF_ORIGINAL_INSN(opt_div); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_div"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_div 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_div_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_div)); + COLLECT_USAGE_OPERAND(BIN(opt_div), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_div), 1, cc); +{ +#line 1246 "insns.def" + val = vm_opt_div(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2030 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_div +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_div);}}} +INSN_ENTRY(opt_mod){START_OF_ORIGINAL_INSN(opt_mod); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_mod"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_mod 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_mod_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_mod)); + COLLECT_USAGE_OPERAND(BIN(opt_mod), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_mod), 1, cc); +{ +#line 1267 "insns.def" + val = vm_opt_mod(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2067 "vm.inc" + 
CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_mod +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_mod);}}} +INSN_ENTRY(opt_eq){START_OF_ORIGINAL_INSN(opt_eq); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_eq"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_eq 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_eq_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_eq)); + COLLECT_USAGE_OPERAND(BIN(opt_eq), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_eq), 1, cc); +{ +#line 1288 "insns.def" + val = opt_eq_func(recv, obj, ci, cc); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2104 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_eq +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_eq);}}} +INSN_ENTRY(opt_neq){START_OF_ORIGINAL_INSN(opt_neq); +{ + VALUE val; + CALL_CACHE cc_eq = (CALL_CACHE)GET_OPERAND(4); + CALL_INFO ci_eq = (CALL_INFO)GET_OPERAND(3); + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_neq"); + ADD_PC(1+4); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_neq 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_neq_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_neq)); + COLLECT_USAGE_OPERAND(BIN(opt_neq), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_neq), 1, cc); + COLLECT_USAGE_OPERAND(BIN(opt_neq), 2, ci_eq); + COLLECT_USAGE_OPERAND(BIN(opt_neq), 3, cc_eq); +{ +#line 1309 "insns.def" + val = vm_opt_neq(ci, cc, ci_eq, cc_eq, recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2145 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_neq +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_neq);}}} +INSN_ENTRY(opt_lt){START_OF_ORIGINAL_INSN(opt_lt); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_lt"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_lt 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_lt_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_lt)); + COLLECT_USAGE_OPERAND(BIN(opt_lt), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_lt), 1, cc); +{ +#line 1330 "insns.def" + val = vm_opt_lt(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2182 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_lt +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_lt);}}} +INSN_ENTRY(opt_le){START_OF_ORIGINAL_INSN(opt_le); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_le"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_le 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_le_##lab + #define LABEL_IS_SC(lab) 
LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_le)); + COLLECT_USAGE_OPERAND(BIN(opt_le), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_le), 1, cc); +{ +#line 1351 "insns.def" + val = vm_opt_le(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2219 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_le +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_le);}}} +INSN_ENTRY(opt_gt){START_OF_ORIGINAL_INSN(opt_gt); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_gt"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_gt 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_gt_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_gt)); + COLLECT_USAGE_OPERAND(BIN(opt_gt), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_gt), 1, cc); +{ +#line 1372 "insns.def" + val = vm_opt_gt(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2256 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_gt +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_gt);}}} +INSN_ENTRY(opt_ge){START_OF_ORIGINAL_INSN(opt_ge); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_ge"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_ge 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_ge_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_ge)); + COLLECT_USAGE_OPERAND(BIN(opt_ge), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_ge), 1, cc); +{ +#line 1393 "insns.def" + val = vm_opt_ge(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2293 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_ge +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_ge);}}} +INSN_ENTRY(opt_ltlt){START_OF_ORIGINAL_INSN(opt_ltlt); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_ltlt"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_ltlt 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_ltlt_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_ltlt)); + COLLECT_USAGE_OPERAND(BIN(opt_ltlt), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_ltlt), 1, cc); +{ +#line 1414 "insns.def" + val = vm_opt_ltlt(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2330 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_ltlt +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_ltlt);}}} +INSN_ENTRY(opt_aref){START_OF_ORIGINAL_INSN(opt_aref); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_aref"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_aref 
1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_aref_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_aref)); + COLLECT_USAGE_OPERAND(BIN(opt_aref), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_aref), 1, cc); +{ +#line 1435 "insns.def" + val = vm_opt_aref(recv, obj); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + CALL_SIMPLE_METHOD(recv); + } + +#line 2367 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_aref +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_aref);}}} +INSN_ENTRY(opt_aset){START_OF_ORIGINAL_INSN(opt_aset); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(2); + VALUE obj = TOPN(1); + VALUE set = TOPN(0); + DEBUG_ENTER_INSN("opt_aset"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(3); + #define CURRENT_INSN_opt_aset 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_aset_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_aset)); + COLLECT_USAGE_OPERAND(BIN(opt_aset), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_aset), 1, cc); +{ +#line 1456 "insns.def" + val = vm_opt_aset(recv, obj, set); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(obj); + PUSH(set); + CALL_SIMPLE_METHOD(recv); + } + +#line 2406 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_aset +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_aset);}}} +INSN_ENTRY(opt_aset_with){START_OF_ORIGINAL_INSN(opt_aset_with); +{ + VALUE key = (VALUE)GET_OPERAND(3); + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("opt_aset_with"); + ADD_PC(1+3); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_aset_with 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_aset_with_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_aset_with)); + COLLECT_USAGE_OPERAND(BIN(opt_aset_with), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_aset_with), 1, cc); + COLLECT_USAGE_OPERAND(BIN(opt_aset_with), 2, key); +{ +#line 1478 "insns.def" + VALUE tmp = vm_opt_aset_with(recv, key, val); + + if (tmp != Qundef) { + val = tmp; + } + else { + /* other */ + PUSH(recv); + PUSH(rb_str_resurrect(key)); + PUSH(val); + CALL_SIMPLE_METHOD(recv); + } + +#line 2448 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_aset_with +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_aset_with);}}} +INSN_ENTRY(opt_aref_with){START_OF_ORIGINAL_INSN(opt_aref_with); +{ + VALUE val; + VALUE key = (VALUE)GET_OPERAND(3); + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(0); + DEBUG_ENTER_INSN("opt_aref_with"); + ADD_PC(1+3); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_opt_aref_with 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_aref_with_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_aref_with)); + COLLECT_USAGE_OPERAND(BIN(opt_aref_with), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_aref_with), 1, cc); + COLLECT_USAGE_OPERAND(BIN(opt_aref_with), 2, key); +{ +#line 1503 "insns.def" + val = vm_opt_aref_with(recv, key); + + if (val == Qundef) { + /* other */ + PUSH(recv); + PUSH(rb_str_resurrect(key)); + 
CALL_SIMPLE_METHOD(recv); + } + +#line 2486 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_aref_with +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_aref_with);}}} +INSN_ENTRY(opt_length){START_OF_ORIGINAL_INSN(opt_length); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(0); + DEBUG_ENTER_INSN("opt_length"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_opt_length 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_length_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_length)); + COLLECT_USAGE_OPERAND(BIN(opt_length), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_length), 1, cc); +{ +#line 1524 "insns.def" + val = vm_opt_length(recv, BOP_LENGTH); + + if (val == Qundef) { + /* other */ + PUSH(recv); + CALL_SIMPLE_METHOD(recv); + } + +#line 2521 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_length +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_length);}}} +INSN_ENTRY(opt_size){START_OF_ORIGINAL_INSN(opt_size); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(0); + DEBUG_ENTER_INSN("opt_size"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_opt_size 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_size_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_size)); + COLLECT_USAGE_OPERAND(BIN(opt_size), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_size), 1, cc); +{ +#line 1544 "insns.def" + val = vm_opt_length(recv, BOP_SIZE); + + if (val == Qundef) { + /* other */ + PUSH(recv); + CALL_SIMPLE_METHOD(recv); + } + +#line 2556 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_size +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_size);}}} +INSN_ENTRY(opt_empty_p){START_OF_ORIGINAL_INSN(opt_empty_p); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(0); + DEBUG_ENTER_INSN("opt_empty_p"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_opt_empty_p 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_empty_p_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_empty_p)); + COLLECT_USAGE_OPERAND(BIN(opt_empty_p), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_empty_p), 1, cc); +{ +#line 1564 "insns.def" + val = vm_opt_empty_p(recv); + + if (val == Qundef) { + /* other */ + PUSH(recv); + CALL_SIMPLE_METHOD(recv); + } + +#line 2591 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_empty_p +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_empty_p);}}} +INSN_ENTRY(opt_succ){START_OF_ORIGINAL_INSN(opt_succ); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(0); + DEBUG_ENTER_INSN("opt_succ"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_opt_succ 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_succ_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_succ)); + COLLECT_USAGE_OPERAND(BIN(opt_succ), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_succ), 1, cc); +{ +#line 1584 "insns.def" 
+ val = vm_opt_succ(recv); + + if (val == Qundef) { + /* other */ + PUSH(recv); + CALL_SIMPLE_METHOD(recv); + } + +#line 2626 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_succ +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_succ);}}} +INSN_ENTRY(opt_not){START_OF_ORIGINAL_INSN(opt_not); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE recv = TOPN(0); + DEBUG_ENTER_INSN("opt_not"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_opt_not 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_not_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_not)); + COLLECT_USAGE_OPERAND(BIN(opt_not), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_not), 1, cc); +{ +#line 1604 "insns.def" + val = vm_opt_not(ci, cc, recv); + + if (val == Qundef) { + /* other */ + PUSH(recv); + CALL_SIMPLE_METHOD(recv); + } + +#line 2661 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_not +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_not);}}} +INSN_ENTRY(opt_regexpmatch1){START_OF_ORIGINAL_INSN(opt_regexpmatch1); +{ + VALUE val; + VALUE recv = (VALUE)GET_OPERAND(1); + VALUE obj = TOPN(0); + DEBUG_ENTER_INSN("opt_regexpmatch1"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_opt_regexpmatch1 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_regexpmatch1_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_regexpmatch1)); + COLLECT_USAGE_OPERAND(BIN(opt_regexpmatch1), 0, recv); +{ +#line 1625 "insns.def" + val = vm_opt_regexpmatch1(recv, obj); + +#line 2688 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_regexpmatch1 +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_regexpmatch1);}}} +INSN_ENTRY(opt_regexpmatch2){START_OF_ORIGINAL_INSN(opt_regexpmatch2); +{ + VALUE val; + CALL_CACHE cc = (CALL_CACHE)GET_OPERAND(2); + CALL_INFO ci = (CALL_INFO)GET_OPERAND(1); + VALUE obj2 = TOPN(1); + VALUE obj1 = TOPN(0); + DEBUG_ENTER_INSN("opt_regexpmatch2"); + ADD_PC(1+2); + PREFETCH(GET_PC()); + POPN(2); + #define CURRENT_INSN_opt_regexpmatch2 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_regexpmatch2_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_regexpmatch2)); + COLLECT_USAGE_OPERAND(BIN(opt_regexpmatch2), 0, ci); + COLLECT_USAGE_OPERAND(BIN(opt_regexpmatch2), 1, cc); +{ +#line 1639 "insns.def" + val = vm_opt_regexpmatch2(obj2, obj1); + + if (val == Qundef) { + /* other */ + PUSH(obj2); + PUSH(obj1); + CALL_SIMPLE_METHOD(obj2); + } + +#line 2725 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef CURRENT_INSN_opt_regexpmatch2 +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_regexpmatch2);}}} +INSN_ENTRY(opt_call_c_function){START_OF_ORIGINAL_INSN(opt_call_c_function); +{ + rb_insn_func_t funcptr = (rb_insn_func_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("opt_call_c_function"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_opt_call_c_function 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_opt_call_c_function_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(opt_call_c_function)); + COLLECT_USAGE_OPERAND(BIN(opt_call_c_function), 0, funcptr); +{ +#line 1660 "insns.def" + reg_cfp = 
(funcptr)(ec, reg_cfp); + + if (reg_cfp == 0) { + VALUE err = ec->errinfo; + ec->errinfo = Qnil; + THROW_EXCEPTION(err); + } + + RESTORE_REGS(); + NEXT_INSN(); + +#line 2759 "vm.inc" +#undef CURRENT_INSN_opt_call_c_function +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(opt_call_c_function);}}} +INSN_ENTRY(bitblt){START_OF_ORIGINAL_INSN(bitblt); +{ + VALUE ret; + + + DEBUG_ENTER_INSN("bitblt"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + #define CURRENT_INSN_bitblt 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_bitblt_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(bitblt)); +{ +#line 1683 "insns.def" + ret = rb_str_new2("a bit of bacon, lettuce and tomato"); + +#line 2782 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(ret); +#undef CURRENT_INSN_bitblt +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(bitblt);}}} +INSN_ENTRY(answer){START_OF_ORIGINAL_INSN(answer); +{ + VALUE ret; + + + DEBUG_ENTER_INSN("answer"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + #define CURRENT_INSN_answer 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_answer_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(answer)); +{ +#line 1697 "insns.def" + ret = INT2FIX(42); + +#line 2807 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(ret); +#undef CURRENT_INSN_answer +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(answer);}}} +INSN_ENTRY(getlocal_OP__WC__0){START_OF_ORIGINAL_INSN(getlocal_OP__WC__0); +{ + VALUE val; + #define level 0 + lindex_t idx = (lindex_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("getlocal_OP__WC__0"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_getlocal_OP__WC__0 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getlocal_OP__WC__0_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getlocal_OP__WC__0)); + COLLECT_USAGE_OPERAND(BIN(getlocal_OP__WC__0), 0, idx); +{ +#line 60 "insns.def" + val = *(vm_get_ep(GET_EP(), level) - idx); + RB_DEBUG_COUNTER_INC(lvar_get); + (void)RB_DEBUG_COUNTER_INC_IF(lvar_get_dynamic, level > 0); + +#line 2836 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef level +#undef CURRENT_INSN_getlocal_OP__WC__0 +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getlocal_OP__WC__0);}}} +INSN_ENTRY(getlocal_OP__WC__1){START_OF_ORIGINAL_INSN(getlocal_OP__WC__1); +{ + VALUE val; + #define level 1 + lindex_t idx = (lindex_t)GET_OPERAND(1); + + DEBUG_ENTER_INSN("getlocal_OP__WC__1"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + #define CURRENT_INSN_getlocal_OP__WC__1 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_getlocal_OP__WC__1_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(getlocal_OP__WC__1)); + COLLECT_USAGE_OPERAND(BIN(getlocal_OP__WC__1), 0, idx); +{ +#line 60 "insns.def" + val = *(vm_get_ep(GET_EP(), level) - idx); + RB_DEBUG_COUNTER_INC(lvar_get); + (void)RB_DEBUG_COUNTER_INC_IF(lvar_get_dynamic, level > 0); + +#line 2866 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef level +#undef CURRENT_INSN_getlocal_OP__WC__1 +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(getlocal_OP__WC__1);}}} +INSN_ENTRY(setlocal_OP__WC__0){START_OF_ORIGINAL_INSN(setlocal_OP__WC__0); +{ + #define level 0 + lindex_t idx = (lindex_t)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("setlocal_OP__WC__0"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + 
POPN(1); + #define CURRENT_INSN_setlocal_OP__WC__0 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setlocal_OP__WC__0_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setlocal_OP__WC__0)); + COLLECT_USAGE_OPERAND(BIN(setlocal_OP__WC__0), 0, idx); +{ +#line 78 "insns.def" + vm_env_write(vm_get_ep(GET_EP(), level), -(int)idx, val); + RB_DEBUG_COUNTER_INC(lvar_set); + (void)RB_DEBUG_COUNTER_INC_IF(lvar_set_dynamic, level > 0); + +#line 2896 "vm.inc" +#undef level +#undef CURRENT_INSN_setlocal_OP__WC__0 +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setlocal_OP__WC__0);}}} +INSN_ENTRY(setlocal_OP__WC__1){START_OF_ORIGINAL_INSN(setlocal_OP__WC__1); +{ + #define level 1 + lindex_t idx = (lindex_t)GET_OPERAND(1); + VALUE val = TOPN(0); + DEBUG_ENTER_INSN("setlocal_OP__WC__1"); + ADD_PC(1+1); + PREFETCH(GET_PC()); + POPN(1); + #define CURRENT_INSN_setlocal_OP__WC__1 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_setlocal_OP__WC__1_##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(setlocal_OP__WC__1)); + COLLECT_USAGE_OPERAND(BIN(setlocal_OP__WC__1), 0, idx); +{ +#line 78 "insns.def" + vm_env_write(vm_get_ep(GET_EP(), level), -(int)idx, val); + RB_DEBUG_COUNTER_INC(lvar_set); + (void)RB_DEBUG_COUNTER_INC_IF(lvar_set_dynamic, level > 0); + +#line 2924 "vm.inc" +#undef level +#undef CURRENT_INSN_setlocal_OP__WC__1 +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(setlocal_OP__WC__1);}}} +INSN_ENTRY(putobject_OP_INT2FIX_O_0_C_){START_OF_ORIGINAL_INSN(putobject_OP_INT2FIX_O_0_C_); +{ + #define val INT2FIX(0) + + + DEBUG_ENTER_INSN("putobject_OP_INT2FIX_O_0_C_"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + #define CURRENT_INSN_putobject_OP_INT2FIX_O_0_C_ 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_putobject_OP_INT2FIX_O_0_C__##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(putobject_OP_INT2FIX_O_0_C_)); +{ +#line 334 "insns.def" + /* */ + +#line 2948 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef val +#undef CURRENT_INSN_putobject_OP_INT2FIX_O_0_C_ +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(putobject_OP_INT2FIX_O_0_C_);}}} +INSN_ENTRY(putobject_OP_INT2FIX_O_1_C_){START_OF_ORIGINAL_INSN(putobject_OP_INT2FIX_O_1_C_); +{ + #define val INT2FIX(1) + + + DEBUG_ENTER_INSN("putobject_OP_INT2FIX_O_1_C_"); + ADD_PC(1+0); + PREFETCH(GET_PC()); + #define CURRENT_INSN_putobject_OP_INT2FIX_O_1_C_ 1 + #define INSN_IS_SC() 0 + #define INSN_LABEL(lab) LABEL_putobject_OP_INT2FIX_O_1_C__##lab + #define LABEL_IS_SC(lab) LABEL_##lab##_##t + COLLECT_USAGE_INSN(BIN(putobject_OP_INT2FIX_O_1_C_)); +{ +#line 334 "insns.def" + /* */ + +#line 2974 "vm.inc" + CHECK_VM_STACK_OVERFLOW_FOR_INSN(VM_REG_CFP, 1); + PUSH(val); +#undef val +#undef CURRENT_INSN_putobject_OP_INT2FIX_O_1_C_ +#undef INSN_IS_SC +#undef INSN_LABEL +#undef LABEL_IS_SC + END_INSN(putobject_OP_INT2FIX_O_1_C_);}}} +INSN_ENTRY(trace_nop){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(nop); + + END_INSN(trace_nop);}}} +INSN_ENTRY(trace_getlocal){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getlocal); + + END_INSN(trace_getlocal);}}} +INSN_ENTRY(trace_setlocal){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setlocal); + + END_INSN(trace_setlocal);}}} +INSN_ENTRY(trace_getblockparam){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getblockparam); + + 
END_INSN(trace_getblockparam);}}} +INSN_ENTRY(trace_setblockparam){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setblockparam); + + END_INSN(trace_setblockparam);}}} +INSN_ENTRY(trace_getspecial){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getspecial); + + END_INSN(trace_getspecial);}}} +INSN_ENTRY(trace_setspecial){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setspecial); + + END_INSN(trace_setspecial);}}} +INSN_ENTRY(trace_getinstancevariable){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getinstancevariable); + + END_INSN(trace_getinstancevariable);}}} +INSN_ENTRY(trace_setinstancevariable){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setinstancevariable); + + END_INSN(trace_setinstancevariable);}}} +INSN_ENTRY(trace_getclassvariable){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getclassvariable); + + END_INSN(trace_getclassvariable);}}} +INSN_ENTRY(trace_setclassvariable){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setclassvariable); + + END_INSN(trace_setclassvariable);}}} +INSN_ENTRY(trace_getconstant){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getconstant); + + END_INSN(trace_getconstant);}}} +INSN_ENTRY(trace_setconstant){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setconstant); + + END_INSN(trace_setconstant);}}} +INSN_ENTRY(trace_getglobal){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getglobal); + + END_INSN(trace_getglobal);}}} +INSN_ENTRY(trace_setglobal){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setglobal); + + END_INSN(trace_setglobal);}}} +INSN_ENTRY(trace_putnil){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(putnil); + + END_INSN(trace_putnil);}}} +INSN_ENTRY(trace_putself){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(putself); + + END_INSN(trace_putself);}}} +INSN_ENTRY(trace_putobject){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(putobject); + + END_INSN(trace_putobject);}}} +INSN_ENTRY(trace_putspecialobject){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(putspecialobject); + + END_INSN(trace_putspecialobject);}}} +INSN_ENTRY(trace_putiseq){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(putiseq); + + END_INSN(trace_putiseq);}}} +INSN_ENTRY(trace_putstring){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(putstring); + + END_INSN(trace_putstring);}}} +INSN_ENTRY(trace_concatstrings){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(concatstrings); + + END_INSN(trace_concatstrings);}}} +INSN_ENTRY(trace_tostring){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(tostring); + + END_INSN(trace_tostring);}}} +INSN_ENTRY(trace_freezestring){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(freezestring); + + END_INSN(trace_freezestring);}}} +INSN_ENTRY(trace_toregexp){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(toregexp); + + END_INSN(trace_toregexp);}}} +INSN_ENTRY(trace_intern){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(intern); + + END_INSN(trace_intern);}}} +INSN_ENTRY(trace_newarray){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(newarray); + + END_INSN(trace_newarray);}}} +INSN_ENTRY(trace_duparray){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + 
DISPATCH_ORIGINAL_INSN(duparray); + + END_INSN(trace_duparray);}}} +INSN_ENTRY(trace_expandarray){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(expandarray); + + END_INSN(trace_expandarray);}}} +INSN_ENTRY(trace_concatarray){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(concatarray); + + END_INSN(trace_concatarray);}}} +INSN_ENTRY(trace_splatarray){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(splatarray); + + END_INSN(trace_splatarray);}}} +INSN_ENTRY(trace_newhash){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(newhash); + + END_INSN(trace_newhash);}}} +INSN_ENTRY(trace_newrange){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(newrange); + + END_INSN(trace_newrange);}}} +INSN_ENTRY(trace_pop){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(pop); + + END_INSN(trace_pop);}}} +INSN_ENTRY(trace_dup){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(dup); + + END_INSN(trace_dup);}}} +INSN_ENTRY(trace_dupn){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(dupn); + + END_INSN(trace_dupn);}}} +INSN_ENTRY(trace_swap){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(swap); + + END_INSN(trace_swap);}}} +INSN_ENTRY(trace_reverse){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(reverse); + + END_INSN(trace_reverse);}}} +INSN_ENTRY(trace_reput){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(reput); + + END_INSN(trace_reput);}}} +INSN_ENTRY(trace_topn){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(topn); + + END_INSN(trace_topn);}}} +INSN_ENTRY(trace_setn){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setn); + + END_INSN(trace_setn);}}} +INSN_ENTRY(trace_adjuststack){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(adjuststack); + + END_INSN(trace_adjuststack);}}} +INSN_ENTRY(trace_defined){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(defined); + + END_INSN(trace_defined);}}} +INSN_ENTRY(trace_checkmatch){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(checkmatch); + + END_INSN(trace_checkmatch);}}} +INSN_ENTRY(trace_checkkeyword){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(checkkeyword); + + END_INSN(trace_checkkeyword);}}} +INSN_ENTRY(trace_tracecoverage){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(tracecoverage); + + END_INSN(trace_tracecoverage);}}} +INSN_ENTRY(trace_defineclass){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(defineclass); + + END_INSN(trace_defineclass);}}} +INSN_ENTRY(trace_send){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(send); + + END_INSN(trace_send);}}} +INSN_ENTRY(trace_opt_str_freeze){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_str_freeze); + + END_INSN(trace_opt_str_freeze);}}} +INSN_ENTRY(trace_opt_str_uminus){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_str_uminus); + + END_INSN(trace_opt_str_uminus);}}} +INSN_ENTRY(trace_opt_newarray_max){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_newarray_max); + + END_INSN(trace_opt_newarray_max);}}} +INSN_ENTRY(trace_opt_newarray_min){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_newarray_min); + + END_INSN(trace_opt_newarray_min);}}} +INSN_ENTRY(trace_opt_send_without_block){ +{ +{ + 
vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_send_without_block); + + END_INSN(trace_opt_send_without_block);}}} +INSN_ENTRY(trace_invokesuper){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(invokesuper); + + END_INSN(trace_invokesuper);}}} +INSN_ENTRY(trace_invokeblock){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(invokeblock); + + END_INSN(trace_invokeblock);}}} +INSN_ENTRY(trace_leave){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(leave); + + END_INSN(trace_leave);}}} +INSN_ENTRY(trace_throw){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(throw); + + END_INSN(trace_throw);}}} +INSN_ENTRY(trace_jump){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(jump); + + END_INSN(trace_jump);}}} +INSN_ENTRY(trace_branchif){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(branchif); + + END_INSN(trace_branchif);}}} +INSN_ENTRY(trace_branchunless){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(branchunless); + + END_INSN(trace_branchunless);}}} +INSN_ENTRY(trace_branchnil){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(branchnil); + + END_INSN(trace_branchnil);}}} +INSN_ENTRY(trace_branchiftype){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(branchiftype); + + END_INSN(trace_branchiftype);}}} +INSN_ENTRY(trace_getinlinecache){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getinlinecache); + + END_INSN(trace_getinlinecache);}}} +INSN_ENTRY(trace_setinlinecache){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setinlinecache); + + END_INSN(trace_setinlinecache);}}} +INSN_ENTRY(trace_once){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(once); + + END_INSN(trace_once);}}} +INSN_ENTRY(trace_opt_case_dispatch){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_case_dispatch); + + END_INSN(trace_opt_case_dispatch);}}} +INSN_ENTRY(trace_opt_plus){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_plus); + + END_INSN(trace_opt_plus);}}} +INSN_ENTRY(trace_opt_minus){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_minus); + + END_INSN(trace_opt_minus);}}} +INSN_ENTRY(trace_opt_mult){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_mult); + + END_INSN(trace_opt_mult);}}} +INSN_ENTRY(trace_opt_div){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_div); + + END_INSN(trace_opt_div);}}} +INSN_ENTRY(trace_opt_mod){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_mod); + + END_INSN(trace_opt_mod);}}} +INSN_ENTRY(trace_opt_eq){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_eq); + + END_INSN(trace_opt_eq);}}} +INSN_ENTRY(trace_opt_neq){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_neq); + + END_INSN(trace_opt_neq);}}} +INSN_ENTRY(trace_opt_lt){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_lt); + + END_INSN(trace_opt_lt);}}} +INSN_ENTRY(trace_opt_le){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_le); + + END_INSN(trace_opt_le);}}} +INSN_ENTRY(trace_opt_gt){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_gt); + + END_INSN(trace_opt_gt);}}} +INSN_ENTRY(trace_opt_ge){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_ge); + + END_INSN(trace_opt_ge);}}} 
+INSN_ENTRY(trace_opt_ltlt){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_ltlt); + + END_INSN(trace_opt_ltlt);}}} +INSN_ENTRY(trace_opt_aref){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_aref); + + END_INSN(trace_opt_aref);}}} +INSN_ENTRY(trace_opt_aset){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_aset); + + END_INSN(trace_opt_aset);}}} +INSN_ENTRY(trace_opt_aset_with){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_aset_with); + + END_INSN(trace_opt_aset_with);}}} +INSN_ENTRY(trace_opt_aref_with){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_aref_with); + + END_INSN(trace_opt_aref_with);}}} +INSN_ENTRY(trace_opt_length){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_length); + + END_INSN(trace_opt_length);}}} +INSN_ENTRY(trace_opt_size){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_size); + + END_INSN(trace_opt_size);}}} +INSN_ENTRY(trace_opt_empty_p){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_empty_p); + + END_INSN(trace_opt_empty_p);}}} +INSN_ENTRY(trace_opt_succ){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_succ); + + END_INSN(trace_opt_succ);}}} +INSN_ENTRY(trace_opt_not){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_not); + + END_INSN(trace_opt_not);}}} +INSN_ENTRY(trace_opt_regexpmatch1){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_regexpmatch1); + + END_INSN(trace_opt_regexpmatch1);}}} +INSN_ENTRY(trace_opt_regexpmatch2){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_regexpmatch2); + + END_INSN(trace_opt_regexpmatch2);}}} +INSN_ENTRY(trace_opt_call_c_function){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(opt_call_c_function); + + END_INSN(trace_opt_call_c_function);}}} +INSN_ENTRY(trace_bitblt){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(bitblt); + + END_INSN(trace_bitblt);}}} +INSN_ENTRY(trace_answer){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(answer); + + END_INSN(trace_answer);}}} +INSN_ENTRY(trace_getlocal_OP__WC__0){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getlocal_OP__WC__0); + + END_INSN(trace_getlocal_OP__WC__0);}}} +INSN_ENTRY(trace_getlocal_OP__WC__1){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(getlocal_OP__WC__1); + + END_INSN(trace_getlocal_OP__WC__1);}}} +INSN_ENTRY(trace_setlocal_OP__WC__0){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setlocal_OP__WC__0); + + END_INSN(trace_setlocal_OP__WC__0);}}} +INSN_ENTRY(trace_setlocal_OP__WC__1){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(setlocal_OP__WC__1); + + END_INSN(trace_setlocal_OP__WC__1);}}} +INSN_ENTRY(trace_putobject_OP_INT2FIX_O_0_C_){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(putobject_OP_INT2FIX_O_0_C_); + + END_INSN(trace_putobject_OP_INT2FIX_O_0_C_);}}} +INSN_ENTRY(trace_putobject_OP_INT2FIX_O_1_C_){ +{ +{ + vm_trace(ec, GET_CFP(), GET_PC()); + DISPATCH_ORIGINAL_INSN(putobject_OP_INT2FIX_O_1_C_); + + END_INSN(trace_putobject_OP_INT2FIX_O_1_C_);}}} diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_call_iseq_optimized.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_call_iseq_optimized.inc new file mode 100644 index 0000000..6f97ca7 --- /dev/null +++ 
b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_call_iseq_optimized.inc @@ -0,0 +1,213 @@ +/* -*- c -*- */ +#if 1 /* enable or disable this optimization */ + +/* DO NOT EDIT THIS FILE DIRECTLY + * + * This file is generated by tool/mk_call_iseq_optimized.rb + */ + +static VALUE +vm_call_iseq_setup_normal_0start_0params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 0); +} + +static VALUE +vm_call_iseq_setup_normal_0start_0params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 1); +} + +static VALUE +vm_call_iseq_setup_normal_0start_0params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 2); +} + +static VALUE +vm_call_iseq_setup_normal_0start_0params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 3); +} + +static VALUE +vm_call_iseq_setup_normal_0start_0params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 4); +} + +static VALUE +vm_call_iseq_setup_normal_0start_0params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 5); +} + +static VALUE +vm_call_iseq_setup_normal_0start_1params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 0); +} + +static VALUE +vm_call_iseq_setup_normal_0start_1params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 1); +} + +static VALUE +vm_call_iseq_setup_normal_0start_1params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 2); +} + +static VALUE +vm_call_iseq_setup_normal_0start_1params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 3); +} + +static VALUE +vm_call_iseq_setup_normal_0start_1params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 4); +} + +static VALUE +vm_call_iseq_setup_normal_0start_1params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info 
*calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 5); +} + +static VALUE +vm_call_iseq_setup_normal_0start_2params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 0); +} + +static VALUE +vm_call_iseq_setup_normal_0start_2params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 1); +} + +static VALUE +vm_call_iseq_setup_normal_0start_2params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 2); +} + +static VALUE +vm_call_iseq_setup_normal_0start_2params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 3); +} + +static VALUE +vm_call_iseq_setup_normal_0start_2params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 4); +} + +static VALUE +vm_call_iseq_setup_normal_0start_2params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 5); +} + +static VALUE +vm_call_iseq_setup_normal_0start_3params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 0); +} + +static VALUE +vm_call_iseq_setup_normal_0start_3params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 1); +} + +static VALUE +vm_call_iseq_setup_normal_0start_3params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 2); +} + +static VALUE +vm_call_iseq_setup_normal_0start_3params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 3); +} + +static VALUE +vm_call_iseq_setup_normal_0start_3params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 4); +} + +static VALUE +vm_call_iseq_setup_normal_0start_3params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + return vm_call_iseq_setup_normal(ec, cfp, calling, 
ci, cc, 0, 3, 5); +} + +/* vm_call_iseq_handlers[param][local] */ +static const vm_call_handler vm_call_iseq_handlers[][6] = { +{vm_call_iseq_setup_normal_0start_0params_0locals, + vm_call_iseq_setup_normal_0start_0params_1locals, + vm_call_iseq_setup_normal_0start_0params_2locals, + vm_call_iseq_setup_normal_0start_0params_3locals, + vm_call_iseq_setup_normal_0start_0params_4locals, + vm_call_iseq_setup_normal_0start_0params_5locals}, +{vm_call_iseq_setup_normal_0start_1params_0locals, + vm_call_iseq_setup_normal_0start_1params_1locals, + vm_call_iseq_setup_normal_0start_1params_2locals, + vm_call_iseq_setup_normal_0start_1params_3locals, + vm_call_iseq_setup_normal_0start_1params_4locals, + vm_call_iseq_setup_normal_0start_1params_5locals}, +{vm_call_iseq_setup_normal_0start_2params_0locals, + vm_call_iseq_setup_normal_0start_2params_1locals, + vm_call_iseq_setup_normal_0start_2params_2locals, + vm_call_iseq_setup_normal_0start_2params_3locals, + vm_call_iseq_setup_normal_0start_2params_4locals, + vm_call_iseq_setup_normal_0start_2params_5locals}, +{vm_call_iseq_setup_normal_0start_3params_0locals, + vm_call_iseq_setup_normal_0start_3params_1locals, + vm_call_iseq_setup_normal_0start_3params_2locals, + vm_call_iseq_setup_normal_0start_3params_3locals, + vm_call_iseq_setup_normal_0start_3params_4locals, + vm_call_iseq_setup_normal_0start_3params_5locals} +}; + +static inline vm_call_handler +vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, const int local_size) +{ + if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) { + return &vm_call_iseq_setup_tailcall_0start; + } + else if (0) { /* to disable optimize */ + return &vm_call_iseq_setup_normal_0start; + } + else { + if (param_size <= 3 && + local_size <= 5) { + VM_ASSERT(local_size >= 0); + return vm_call_iseq_handlers[param_size][local_size]; + } + return &vm_call_iseq_setup_normal_0start; + } +} + +#else + + +static inline vm_call_handler +vm_call_iseq_setup_func(const struct rb_call_info *ci, struct rb_call_cache *cc) +{ + if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) { + return &vm_call_iseq_setup_tailcall_0start; + } + else { + return &vm_call_iseq_setup_normal_0start; + } +} +#endif diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_core.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_core.h new file mode 100644 index 0000000..967b79d --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_core.h @@ -0,0 +1,1768 @@ +/********************************************************************** + + vm_core.h - + + $Author: nagachika $ + created at: 04/01/01 19:41:38 JST + + Copyright (C) 2004-2007 Koichi Sasada + +**********************************************************************/ + +#ifndef RUBY_VM_CORE_H +#define RUBY_VM_CORE_H + +/* + * Enable check mode. + * 1: enable local assertions. 
+ */ +#ifndef VM_CHECK_MODE +#define VM_CHECK_MODE 0 +#endif + +/** + * VM Debug Level + * + * debug level: + * 0: no debug output + * 1: show instruction name + * 2: show stack frame when control stack frame is changed + * 3: show stack status + * 4: show register + * 5: + * 10: gc check + */ + +#ifndef VMDEBUG +#define VMDEBUG 0 +#endif + +#if 0 +#undef VMDEBUG +#define VMDEBUG 3 +#endif + +#include "ruby_assert.h" + +#if VM_CHECK_MODE > 0 +#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr) + +#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable") + +#else +#define VM_ASSERT(expr) ((void)0) +#define VM_UNREACHABLE(func) UNREACHABLE +#endif + +#define RUBY_VM_THREAD_MODEL 2 + +#include "ruby/ruby.h" +#include "ruby/st.h" + +#include "node.h" +#include "vm_debug.h" +#include "vm_opts.h" +#include "id.h" +#include "method.h" +#include "ruby_atomic.h" +#include "ccan/list/list.h" + +#include "ruby/thread_native.h" +#if defined(_WIN32) +#include "thread_win32.h" +#elif defined(HAVE_PTHREAD_H) +#include "thread_pthread.h" +#endif + +#include <setjmp.h> +#include <signal.h> + +#ifndef NSIG +# define NSIG (_SIGMAX + 1) /* For QNX */ +#endif + +#define RUBY_NSIG NSIG + +#ifdef HAVE_STDARG_PROTOTYPES +#include <stdarg.h> +#define va_init_list(a,b) va_start((a),(b)) +#else +#include <varargs.h> +#define va_init_list(a,b) va_start((a)) +#endif + +#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__) +#define USE_SIGALTSTACK +#endif + +/*****************/ +/* configuration */ +/*****************/ + +/* gcc ver. check */ +#if defined(__GNUC__) && __GNUC__ >= 2 + +#if OPT_TOKEN_THREADED_CODE +#if OPT_DIRECT_THREADED_CODE +#undef OPT_DIRECT_THREADED_CODE +#endif +#endif + +#else /* defined(__GNUC__) && __GNUC__ >= 2 */ + +/* disable threaded code options */ +#if OPT_DIRECT_THREADED_CODE +#undef OPT_DIRECT_THREADED_CODE +#endif +#if OPT_TOKEN_THREADED_CODE +#undef OPT_TOKEN_THREADED_CODE +#endif +#endif + +/* call threaded code */ +#if OPT_CALL_THREADED_CODE +#if OPT_DIRECT_THREADED_CODE +#undef OPT_DIRECT_THREADED_CODE +#endif /* OPT_DIRECT_THREADED_CODE */ +#if OPT_STACK_CACHING +#undef OPT_STACK_CACHING +#endif /* OPT_STACK_CACHING */ +#endif /* OPT_CALL_THREADED_CODE */ + +typedef unsigned long rb_num_t; + +enum ruby_tag_type { + RUBY_TAG_NONE = 0x0, + RUBY_TAG_RETURN = 0x1, + RUBY_TAG_BREAK = 0x2, + RUBY_TAG_NEXT = 0x3, + RUBY_TAG_RETRY = 0x4, + RUBY_TAG_REDO = 0x5, + RUBY_TAG_RAISE = 0x6, + RUBY_TAG_THROW = 0x7, + RUBY_TAG_FATAL = 0x8, + RUBY_TAG_MASK = 0xf +}; + +#define TAG_NONE RUBY_TAG_NONE +#define TAG_RETURN RUBY_TAG_RETURN +#define TAG_BREAK RUBY_TAG_BREAK +#define TAG_NEXT RUBY_TAG_NEXT +#define TAG_RETRY RUBY_TAG_RETRY +#define TAG_REDO RUBY_TAG_REDO +#define TAG_RAISE RUBY_TAG_RAISE +#define TAG_THROW RUBY_TAG_THROW +#define TAG_FATAL RUBY_TAG_FATAL +#define TAG_MASK RUBY_TAG_MASK + +enum ruby_vm_throw_flags { + VM_THROW_NO_ESCAPE_FLAG = 0x8000, + VM_THROW_LEVEL_SHIFT = 16, + VM_THROW_STATE_MASK = 0xff +}; + +/* forward declarations */ +struct rb_thread_struct; +struct rb_control_frame_struct; + +/* iseq data type */ +typedef struct rb_compile_option_struct rb_compile_option_t; + +struct iseq_inline_cache_entry { + rb_serial_t ic_serial; + const rb_cref_t *ic_cref; + union { + size_t index; + VALUE value; + } ic_value; +}; + +union iseq_inline_storage_entry { + struct { + struct rb_thread_struct *running_thread; + VALUE value; + } once; + struct iseq_inline_cache_entry cache; +}; + +enum method_missing_reason { + MISSING_NOENTRY = 0x00, + MISSING_PRIVATE = 
0x01, + MISSING_PROTECTED = 0x02, + MISSING_FCALL = 0x04, + MISSING_VCALL = 0x08, + MISSING_SUPER = 0x10, + MISSING_MISSING = 0x20, + MISSING_NONE = 0x40 +}; + +struct rb_call_info { + /* fixed at compile time */ + ID mid; + unsigned int flag; + int orig_argc; +}; + +struct rb_call_info_kw_arg { + int keyword_len; + VALUE keywords[1]; +}; + +struct rb_call_info_with_kwarg { + struct rb_call_info ci; + struct rb_call_info_kw_arg *kw_arg; +}; + +struct rb_calling_info { + VALUE block_handler; + VALUE recv; + int argc; +}; + +struct rb_call_cache; +struct rb_execution_context_struct; +typedef VALUE (*vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc); + +struct rb_call_cache { + /* inline cache: keys */ + rb_serial_t method_state; + rb_serial_t class_serial; + + /* inline cache: values */ + const rb_callable_method_entry_t *me; + + vm_call_handler call; + + union { + unsigned int index; /* used by ivar */ + enum method_missing_reason method_missing_reason; /* used by method_missing */ + int inc_sp; /* used by cfunc */ + } aux; +}; + +#if 1 +#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj) +#else +#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj) +#endif +#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type)) + +typedef struct rb_iseq_location_struct { + VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */ + VALUE base_label; /* String */ + VALUE label; /* String */ + VALUE first_lineno; /* TODO: may be unsigned short */ + rb_code_range_t code_range; +} rb_iseq_location_t; + +#define PATHOBJ_PATH 0 +#define PATHOBJ_REALPATH 1 + +static inline VALUE +pathobj_path(VALUE pathobj) +{ + if (RB_TYPE_P(pathobj, T_STRING)) { + return pathobj; + } + else { + VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY)); + return RARRAY_AREF(pathobj, PATHOBJ_PATH); + } +} + +static inline VALUE +pathobj_realpath(VALUE pathobj) +{ + if (RB_TYPE_P(pathobj, T_STRING)) { + return pathobj; + } + else { + VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY)); + return RARRAY_AREF(pathobj, PATHOBJ_REALPATH); + } +} + +struct rb_iseq_constant_body { + enum iseq_type { + ISEQ_TYPE_TOP, + ISEQ_TYPE_METHOD, + ISEQ_TYPE_BLOCK, + ISEQ_TYPE_CLASS, + ISEQ_TYPE_RESCUE, + ISEQ_TYPE_ENSURE, + ISEQ_TYPE_EVAL, + ISEQ_TYPE_MAIN, + ISEQ_TYPE_DEFINED_GUARD + } type; /* instruction sequence type */ + + unsigned int iseq_size; + const VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */ + + /** + * parameter information + * + * def m(a1, a2, ..., aM, # mandatory + * b1=(...), b2=(...), ..., bN=(...), # optional + * *c, # rest + * d1, d2, ..., dO, # post + * e1:(...), e2:(...), ..., eK:(...), # keyword + * **f, # keyword_rest + * &g) # block + * => + * + * lead_num = M + * opt_num = N + * rest_start = M+N + * post_start = M+N+(*1) + * post_num = O + * keyword_num = K + * block_start = M+N+(*1)+O+K + * keyword_bits = M+N+(*1)+O+K+(&1) + * size = M+N+O+(*1)+K+(&1)+(**1) // parameter size. 
+ */ + + struct { + struct { + unsigned int has_lead : 1; + unsigned int has_opt : 1; + unsigned int has_rest : 1; + unsigned int has_post : 1; + unsigned int has_kw : 1; + unsigned int has_kwrest : 1; + unsigned int has_block : 1; + + unsigned int ambiguous_param0 : 1; /* {|a|} */ + } flags; + + unsigned int size; + + int lead_num; + int opt_num; + int rest_start; + int post_start; + int post_num; + int block_start; + + const VALUE *opt_table; /* (opt_num + 1) entries. */ + /* opt_num and opt_table: + * + * def foo o1=e1, o2=e2, ..., oN=eN + * #=> + * # prologue code + * A1: e1 + * A2: e2 + * ... + * AN: eN + * AL: body + * opt_num = N + * opt_table = [A1, A2, ..., AN, AL] + */ + + const struct rb_iseq_param_keyword { + int num; + int required_num; + int bits_start; + int rest_start; + const ID *table; + const VALUE *default_values; + } *keyword; + } param; + + rb_iseq_location_t location; + + /* insn info, must be freed */ + const struct iseq_insn_info_entry *insns_info; + + const ID *local_table; /* must free */ + + /* catch table */ + const struct iseq_catch_table *catch_table; + + /* for child iseq */ + const struct rb_iseq_struct *parent_iseq; + struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */ + + union iseq_inline_storage_entry *is_entries; + struct rb_call_info *ci_entries; /* struct rb_call_info ci_entries[ci_size]; + * struct rb_call_info_with_kwarg cikw_entries[ci_kw_size]; + * So that: + * struct rb_call_info_with_kwarg *cikw_entries = &body->ci_entries[ci_size]; + */ + struct rb_call_cache *cc_entries; /* size is ci_size = ci_kw_size */ + + VALUE mark_ary; /* Array: includes operands which should be GC marked */ + + unsigned int local_table_size; + unsigned int is_size; + unsigned int ci_size; + unsigned int ci_kw_size; + unsigned int insns_info_size; + unsigned int stack_max; /* for stack overflow check */ +}; + +/* T_IMEMO/iseq */ +/* typedef rb_iseq_t is in method.h */ +struct rb_iseq_struct { + VALUE flags; + VALUE reserved1; + struct rb_iseq_constant_body *body; + + union { /* 4, 5 words */ + struct iseq_compile_data *compile_data; /* used at compile time */ + + struct { + VALUE obj; + int index; + } loader; + + rb_event_flag_t trace_events; + } aux; +}; + +#ifndef USE_LAZY_LOAD +#define USE_LAZY_LOAD 0 +#endif + +#if USE_LAZY_LOAD +const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq); +#endif + +static inline const rb_iseq_t * +rb_iseq_check(const rb_iseq_t *iseq) +{ +#if USE_LAZY_LOAD + if (iseq->body == NULL) { + rb_iseq_complete((rb_iseq_t *)iseq); + } +#endif + return iseq; +} + +enum ruby_special_exceptions { + ruby_error_reenter, + ruby_error_nomemory, + ruby_error_sysstack, + ruby_error_stackfatal, + ruby_error_stream_closed, + ruby_special_error_count +}; + +enum ruby_basic_operators { + BOP_PLUS, + BOP_MINUS, + BOP_MULT, + BOP_DIV, + BOP_MOD, + BOP_EQ, + BOP_EQQ, + BOP_LT, + BOP_LE, + BOP_LTLT, + BOP_AREF, + BOP_ASET, + BOP_LENGTH, + BOP_SIZE, + BOP_EMPTY_P, + BOP_SUCC, + BOP_GT, + BOP_GE, + BOP_NOT, + BOP_NEQ, + BOP_MATCH, + BOP_FREEZE, + BOP_UMINUS, + BOP_MAX, + BOP_MIN, + + BOP_LAST_ +}; + +#define GetVMPtr(obj, ptr) \ + GetCoreDataFromValue((obj), rb_vm_t, (ptr)) + +struct rb_vm_struct; +typedef void rb_vm_at_exit_func(struct rb_vm_struct*); + +typedef struct rb_at_exit_list { + rb_vm_at_exit_func *func; + struct rb_at_exit_list *next; +} rb_at_exit_list; + +struct rb_objspace; +struct rb_objspace *rb_objspace_alloc(void); +void rb_objspace_free(struct rb_objspace *); + +typedef struct rb_hook_list_struct { + struct 
rb_event_hook_struct *hooks; + rb_event_flag_t events; + int need_clean; +} rb_hook_list_t; + +typedef struct rb_vm_struct { + VALUE self; + + rb_global_vm_lock_t gvl; + rb_nativethread_lock_t thread_destruct_lock; + + struct rb_thread_struct *main_thread; + struct rb_thread_struct *running_thread; + + rb_serial_t fork_gen; + struct list_head waiting_fds; /* <=> struct waiting_fd */ + struct list_head living_threads; + size_t living_thread_num; + VALUE thgroup_default; + + unsigned int running: 1; + unsigned int thread_abort_on_exception: 1; + unsigned int thread_report_on_exception: 1; + int trace_running; + volatile int sleeper; + + /* object management */ + VALUE mark_object_ary; + const VALUE special_exceptions[ruby_special_error_count]; + + /* load */ + VALUE top_self; + VALUE load_path; + VALUE load_path_snapshot; + VALUE load_path_check_cache; + VALUE expanded_load_path; + VALUE loaded_features; + VALUE loaded_features_snapshot; + struct st_table *loaded_features_index; + struct st_table *loading_table; + + /* signal */ + struct { + VALUE cmd[RUBY_NSIG]; + unsigned char safe[RUBY_NSIG]; + } trap_list; + + /* hook */ + rb_hook_list_t event_hooks; + + /* relation table of ensure - rollback for callcc */ + struct st_table *ensure_rollback_table; + + /* postponed_job */ + struct rb_postponed_job_struct *postponed_job_buffer; + int postponed_job_index; + + int src_encoding_index; + + VALUE verbose, debug, orig_progname, progname; + VALUE coverages; + int coverage_mode; + + VALUE defined_module_hash; + + struct rb_objspace *objspace; + + rb_at_exit_list *at_exit; + + VALUE *defined_strings; + st_table *frozen_strings; + + /* params */ + struct { /* size in byte */ + size_t thread_vm_stack_size; + size_t thread_machine_stack_size; + size_t fiber_vm_stack_size; + size_t fiber_machine_stack_size; + } default_params; + + short redefined_flag[BOP_LAST_]; +} rb_vm_t; + +/* default values */ + +#define RUBY_VM_SIZE_ALIGN 4096 + +#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */ +#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */ +#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */ +#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */ + +#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */ +#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */ +#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */ +#if defined(__powerpc64__) +#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */ +#else +#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */ +#endif + +/* optimize insn */ +#define INTEGER_REDEFINED_OP_FLAG (1 << 0) +#define FLOAT_REDEFINED_OP_FLAG (1 << 1) +#define STRING_REDEFINED_OP_FLAG (1 << 2) +#define ARRAY_REDEFINED_OP_FLAG (1 << 3) +#define HASH_REDEFINED_OP_FLAG (1 << 4) +/* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */ +#define SYMBOL_REDEFINED_OP_FLAG (1 << 6) +#define TIME_REDEFINED_OP_FLAG (1 << 7) +#define REGEXP_REDEFINED_OP_FLAG (1 << 8) +#define NIL_REDEFINED_OP_FLAG (1 << 9) +#define TRUE_REDEFINED_OP_FLAG (1 << 10) +#define FALSE_REDEFINED_OP_FLAG (1 << 11) + +#define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0)) + +#ifndef VM_DEBUG_BP_CHECK +#define VM_DEBUG_BP_CHECK 0 +#endif + 
+#ifndef VM_DEBUG_VERIFY_METHOD_CACHE +#define VM_DEBUG_VERIFY_METHOD_CACHE (VM_DEBUG_MODE != 0) +#endif + +struct rb_captured_block { + VALUE self; + const VALUE *ep; + union { + const rb_iseq_t *iseq; + const struct vm_ifunc *ifunc; + VALUE val; + } code; +}; + +enum rb_block_handler_type { + block_handler_type_iseq, + block_handler_type_ifunc, + block_handler_type_symbol, + block_handler_type_proc +}; + +enum rb_block_type { + block_type_iseq, + block_type_ifunc, + block_type_symbol, + block_type_proc +}; + +struct rb_block { + union { + struct rb_captured_block captured; + VALUE symbol; + VALUE proc; + } as; + enum rb_block_type type; +}; + +typedef struct rb_control_frame_struct { + const VALUE *pc; /* cfp[0] */ + VALUE *sp; /* cfp[1] */ + const rb_iseq_t *iseq; /* cfp[2] */ + VALUE self; /* cfp[3] / block[0] */ + const VALUE *ep; /* cfp[4] / block[1] */ + const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc */ + +#if VM_DEBUG_BP_CHECK + VALUE *bp_check; /* cfp[6] */ +#endif +} rb_control_frame_t; + +extern const rb_data_type_t ruby_threadptr_data_type; + +static inline struct rb_thread_struct * +rb_thread_ptr(VALUE thval) +{ + return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type); +} + +enum rb_thread_status { + THREAD_RUNNABLE, + THREAD_STOPPED, + THREAD_STOPPED_FOREVER, + THREAD_KILLED +}; + +typedef RUBY_JMP_BUF rb_jmpbuf_t; + +/* + the members which are written in EC_PUSH_TAG() should be placed at + the beginning and the end, so that entire region is accessible. +*/ +struct rb_vm_tag { + VALUE tag; + VALUE retval; + rb_jmpbuf_t buf; + struct rb_vm_tag *prev; + enum ruby_tag_type state; +}; + +STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0); +STATIC_ASSERT(rb_vm_tag_buf_end, + offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) < + sizeof(struct rb_vm_tag)); + +struct rb_vm_protect_tag { + struct rb_vm_protect_tag *prev; +}; + +struct rb_unblock_callback { + rb_unblock_function_t *func; + void *arg; +}; + +struct rb_mutex_struct; + +typedef struct rb_thread_list_struct{ + struct rb_thread_list_struct *next; + struct rb_thread_struct *th; +} rb_thread_list_t; + +typedef struct rb_ensure_entry { + VALUE marker; + VALUE (*e_proc)(ANYARGS); + VALUE data2; +} rb_ensure_entry_t; + +typedef struct rb_ensure_list { + struct rb_ensure_list *next; + struct rb_ensure_entry entry; +} rb_ensure_list_t; + +typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3]; + +typedef struct rb_fiber_struct rb_fiber_t; + +typedef struct rb_execution_context_struct { + /* execution information */ + VALUE *vm_stack; /* must free, must mark */ + size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */ + rb_control_frame_t *cfp; + + struct rb_vm_tag *tag; + struct rb_vm_protect_tag *protect_tag; + int safe_level; + int raised_flag; + + /* interrupt flags */ + rb_atomic_t interrupt_flag; + unsigned long interrupt_mask; + + rb_fiber_t *fiber_ptr; + struct rb_thread_struct *thread_ptr; + + /* storage (ec (fiber) local) */ + st_table *local_storage; + VALUE local_storage_recursive_hash; + VALUE local_storage_recursive_hash_for_trace; + + /* eval env */ + const VALUE *root_lep; + VALUE root_svar; + + /* ensure & callcc */ + rb_ensure_list_t *ensure_list; + + /* trace information */ + struct rb_trace_arg_struct *trace_arg; + + /* temporary places */ + VALUE errinfo; + VALUE passed_block_handler; /* for rb_iterate */ + const rb_callable_method_entry_t *passed_bmethod_me; /* for bmethod */ + enum method_missing_reason 
method_missing_reason; + + /* for GC */ + struct { + VALUE *stack_start; + VALUE *stack_end; + size_t stack_maxsize; +#ifdef __ia64 + VALUE *register_stack_start; + VALUE *register_stack_end; + size_t register_stack_maxsize; +#endif + jmp_buf regs; + } machine; +} rb_execution_context_t; + +void ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size); + +typedef struct rb_thread_struct { + struct list_node vmlt_node; + VALUE self; + rb_vm_t *vm; + + rb_execution_context_t *ec; + + VALUE last_status; /* $? */ + + /* for cfunc */ + struct rb_calling_info *calling; + + /* for load(true) */ + VALUE top_self; + VALUE top_wrapper; + + /* thread control */ + rb_nativethread_id_t thread_id; +#ifdef NON_SCALAR_THREAD_ID + rb_thread_id_string_t thread_id_string; +#endif + enum rb_thread_status status; + int to_kill; + int priority; + + native_thread_data_t native_thread_data; + void *blocking_region_buffer; + + VALUE thgroup; + VALUE value; + + /* temporary place of retval on OPT_CALL_THREADED_CODE */ +#if OPT_CALL_THREADED_CODE + VALUE retval; +#endif + + /* async errinfo queue */ + VALUE pending_interrupt_queue; + VALUE pending_interrupt_mask_stack; + int pending_interrupt_queue_checked; + + /* interrupt management */ + rb_nativethread_lock_t interrupt_lock; + struct rb_unblock_callback unblock; + VALUE locking_mutex; + struct rb_mutex_struct *keeping_mutexes; + + rb_thread_list_t *join_list; + + VALUE first_proc; + VALUE first_args; + VALUE (*first_func)(ANYARGS); + + /* statistics data for profiler */ + VALUE stat_insn_usage; + + /* fiber */ + rb_fiber_t *root_fiber; + rb_jmpbuf_t root_jmpbuf; + + /* misc */ + unsigned int abort_on_exception: 1; + unsigned int report_on_exception: 1; +#ifdef USE_SIGALTSTACK + void *altstack; +#endif + uint32_t running_time_us; /* 12500..800000 */ + VALUE name; +} rb_thread_t; + +typedef enum { + VM_DEFINECLASS_TYPE_CLASS = 0x00, + VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01, + VM_DEFINECLASS_TYPE_MODULE = 0x02, + /* 0x03..0x06 is reserved */ + VM_DEFINECLASS_TYPE_MASK = 0x07 +} rb_vm_defineclass_type_t; + +#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK) +#define VM_DEFINECLASS_FLAG_SCOPED 0x08 +#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10 +#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED) +#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \ + ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS) + +/* iseq.c */ +RUBY_SYMBOL_EXPORT_BEGIN + +/* node -> iseq */ +rb_iseq_t *rb_iseq_new (const NODE *node, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type); +rb_iseq_t *rb_iseq_new_top (const NODE *node, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent); +rb_iseq_t *rb_iseq_new_main (const NODE *node, VALUE path, VALUE realpath, const rb_iseq_t *parent); +rb_iseq_t *rb_iseq_new_with_opt(const NODE *node, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, + const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*); + +/* src -> iseq */ +rb_iseq_t *rb_iseq_compile(VALUE src, VALUE file, VALUE line); +rb_iseq_t *rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, const struct rb_block *base_block); +rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE realpath, VALUE line, const struct rb_block *base_block, VALUE opt); + +VALUE rb_iseq_disasm(const rb_iseq_t *iseq); +int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child); +const char *ruby_node_name(int node); + +VALUE 
rb_iseq_coverage(const rb_iseq_t *iseq); + +RUBY_EXTERN VALUE rb_cISeq; +RUBY_EXTERN VALUE rb_cRubyVM; +RUBY_EXTERN VALUE rb_mRubyVMFrozenCore; +RUBY_SYMBOL_EXPORT_END + +#define GetProcPtr(obj, ptr) \ + GetCoreDataFromValue((obj), rb_proc_t, (ptr)) + +typedef struct { + const struct rb_block block; + int8_t safe_level; /* 0..1 */ + int8_t is_from_method; /* bool */ + int8_t is_lambda; /* bool */ +} rb_proc_t; + +typedef struct { + VALUE flags; /* imemo header */ + const rb_iseq_t *iseq; + const VALUE *ep; + const VALUE *env; + unsigned int env_size; +} rb_env_t; + +extern const rb_data_type_t ruby_binding_data_type; + +#define GetBindingPtr(obj, ptr) \ + GetCoreDataFromValue((obj), rb_binding_t, (ptr)) + +typedef struct { + const struct rb_block block; + const VALUE pathobj; + unsigned short first_lineno; +} rb_binding_t; + +/* used by compile time and send insn */ + +enum vm_check_match_type { + VM_CHECKMATCH_TYPE_WHEN = 1, + VM_CHECKMATCH_TYPE_CASE = 2, + VM_CHECKMATCH_TYPE_RESCUE = 3 +}; + +#define VM_CHECKMATCH_TYPE_MASK 0x03 +#define VM_CHECKMATCH_ARRAY 0x04 + +enum vm_call_flag_bits { + VM_CALL_ARGS_SPLAT_bit, /* m(*args) */ + VM_CALL_ARGS_BLOCKARG_bit, /* m(&block) */ + VM_CALL_ARGS_BLOCKARG_BLOCKPARAM_bit, /* m(&block) and block is a passed block parameter */ + VM_CALL_FCALL_bit, /* m(...) */ + VM_CALL_VCALL_bit, /* m */ + VM_CALL_ARGS_SIMPLE_bit, /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */ + VM_CALL_BLOCKISEQ_bit, /* has blockiseq */ + VM_CALL_KWARG_bit, /* has kwarg */ + VM_CALL_KW_SPLAT_bit, /* m(**opts) */ + VM_CALL_TAILCALL_bit, /* located at tail position */ + VM_CALL_SUPER_bit, /* super */ + VM_CALL_OPT_SEND_bit, /* internal flag */ + VM_CALL__END +}; + +#define VM_CALL_ARGS_SPLAT (0x01 << VM_CALL_ARGS_SPLAT_bit) +#define VM_CALL_ARGS_BLOCKARG (0x01 << VM_CALL_ARGS_BLOCKARG_bit) +#define VM_CALL_ARGS_BLOCKARG_BLOCKPARAM (0x01 << VM_CALL_ARGS_BLOCKARG_BLOCKPARAM_bit) +#define VM_CALL_FCALL (0x01 << VM_CALL_FCALL_bit) +#define VM_CALL_VCALL (0x01 << VM_CALL_VCALL_bit) +#define VM_CALL_ARGS_SIMPLE (0x01 << VM_CALL_ARGS_SIMPLE_bit) +#define VM_CALL_BLOCKISEQ (0x01 << VM_CALL_BLOCKISEQ_bit) +#define VM_CALL_KWARG (0x01 << VM_CALL_KWARG_bit) +#define VM_CALL_KW_SPLAT (0x01 << VM_CALL_KW_SPLAT_bit) +#define VM_CALL_TAILCALL (0x01 << VM_CALL_TAILCALL_bit) +#define VM_CALL_SUPER (0x01 << VM_CALL_SUPER_bit) +#define VM_CALL_OPT_SEND (0x01 << VM_CALL_OPT_SEND_bit) + +enum vm_special_object_type { + VM_SPECIAL_OBJECT_VMCORE = 1, + VM_SPECIAL_OBJECT_CBASE, + VM_SPECIAL_OBJECT_CONST_BASE +}; + +enum vm_svar_index { + VM_SVAR_LASTLINE = 0, /* $_ */ + VM_SVAR_BACKREF = 1, /* $~ */ + + VM_SVAR_EXTRA_START = 2, + VM_SVAR_FLIPFLOP_START = 2 /* flipflop */ +}; + +/* inline cache */ +typedef struct iseq_inline_cache_entry *IC; +typedef struct rb_call_info *CALL_INFO; +typedef struct rb_call_cache *CALL_CACHE; + +void rb_vm_change_state(void); + +typedef VALUE CDHASH; + +#ifndef FUNC_FASTCALL +#define FUNC_FASTCALL(x) x +#endif + +typedef rb_control_frame_t * + (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *); + +#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag)) +#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask)) + +#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01) +#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03) +#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01) + +enum { + /* Frame/Environment flag bits: + * MMMM MMMM MMMM MMMM ____ __FF FFFF EEEX (LSB) + * + * X : tag for GC marking (It seems as 
Fixnum) + * EEE : 3 bits Env flags + * FF..: 6 bits Frame flags + * MM..: 16 bits frame magic (to check frame corruption) + */ + + /* frame types */ + VM_FRAME_MAGIC_METHOD = 0x11110001, + VM_FRAME_MAGIC_BLOCK = 0x22220001, + VM_FRAME_MAGIC_CLASS = 0x33330001, + VM_FRAME_MAGIC_TOP = 0x44440001, + VM_FRAME_MAGIC_CFUNC = 0x55550001, + VM_FRAME_MAGIC_IFUNC = 0x66660001, + VM_FRAME_MAGIC_EVAL = 0x77770001, + VM_FRAME_MAGIC_RESCUE = 0x88880001, + VM_FRAME_MAGIC_DUMMY = 0x99990001, + + VM_FRAME_MAGIC_MASK = 0xffff0001, + + /* frame flag */ + VM_FRAME_FLAG_PASSED = 0x0010, + VM_FRAME_FLAG_FINISH = 0x0020, + VM_FRAME_FLAG_BMETHOD = 0x0040, + VM_FRAME_FLAG_CFRAME = 0x0080, + VM_FRAME_FLAG_LAMBDA = 0x0100, + VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200, + + /* env flag */ + VM_ENV_FLAG_LOCAL = 0x0002, + VM_ENV_FLAG_ESCAPED = 0x0004, + VM_ENV_FLAG_WB_REQUIRED = 0x0008 +}; + +#define VM_ENV_DATA_SIZE ( 3) + +#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */ +#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */ +#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */ +#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */ +#define VM_ENV_DATA_INDEX_ENV_PROC ( 2) /* ep[ 2] */ + +#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE) + +static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value); + +static inline void +VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag) +{ + VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS]; + VM_ASSERT(FIXNUM_P(flags)); + VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag); +} + +static inline void +VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag) +{ + VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS]; + VM_ASSERT(FIXNUM_P(flags)); + VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag); +} + +static inline unsigned long +VM_ENV_FLAGS(const VALUE *ep, long flag) +{ + VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS]; + VM_ASSERT(FIXNUM_P(flags)); + return flags & flag; +} + +static inline unsigned long +VM_FRAME_TYPE(const rb_control_frame_t *cfp) +{ + return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK); +} + +static inline int +VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp) +{ + return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0; +} + +static inline int +VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp) +{ + return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0; +} + +static inline int +VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp) +{ + return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0; +} + +static inline int +rb_obj_is_iseq(VALUE iseq) +{ + return imemo_type_p(iseq, imemo_iseq); +} + +#if VM_CHECK_MODE > 0 +#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq) +#endif + +static inline int +VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp) +{ + int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0; + VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p); + return cframe_p; +} + +static inline int +VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp) +{ + return !VM_FRAME_CFRAME_P(cfp); +} + +#define RUBYVM_CFUNC_FRAME_P(cfp) \ + (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC) + +#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep) +#define VM_BLOCK_HANDLER_NONE 0 + +static inline int +VM_ENV_LOCAL_P(const VALUE *ep) +{ + return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 
1 : 0; +} + +static inline const VALUE * +VM_ENV_PREV_EP(const VALUE *ep) +{ + VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0); + return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]); +} + +static inline VALUE +VM_ENV_BLOCK_HANDLER(const VALUE *ep) +{ + VM_ASSERT(VM_ENV_LOCAL_P(ep)); + return ep[VM_ENV_DATA_INDEX_SPECVAL]; +} + +#if VM_CHECK_MODE > 0 +int rb_vm_ep_in_heap_p(const VALUE *ep); +#endif + +static inline int +VM_ENV_ESCAPED_P(const VALUE *ep) +{ + VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)); + return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0; +} + +#if VM_CHECK_MODE > 0 +static inline int +vm_assert_env(VALUE obj) +{ + VM_ASSERT(imemo_type_p(obj, imemo_env)); + return 1; +} +#endif + +static inline VALUE +VM_ENV_ENVVAL(const VALUE *ep) +{ + VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; + VM_ASSERT(VM_ENV_ESCAPED_P(ep)); + VM_ASSERT(vm_assert_env(envval)); + return envval; +} + +static inline const rb_env_t * +VM_ENV_ENVVAL_PTR(const VALUE *ep) +{ + return (const rb_env_t *)VM_ENV_ENVVAL(ep); +} + +static inline VALUE +VM_ENV_PROCVAL(const VALUE *ep) +{ + VM_ASSERT(VM_ENV_ESCAPED_P(ep)); + VM_ASSERT(VM_ENV_LOCAL_P(ep)); + VM_ASSERT(VM_ENV_BLOCK_HANDLER(ep) != VM_BLOCK_HANDLER_NONE); + + return ep[VM_ENV_DATA_INDEX_ENV_PROC]; +} + +static inline const rb_env_t * +vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq) +{ + rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq); + env->env_size = env_size; + env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env; + return env; +} + +static inline void +VM_FORCE_WRITE(const VALUE *ptr, VALUE v) +{ + *((VALUE *)ptr) = v; +} + +static inline void +VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value) +{ + VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value)); + VM_FORCE_WRITE(ptr, special_const_value); +} + +static inline void +VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v) +{ + VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0); + VM_FORCE_WRITE(&ep[index], v); +} + +const VALUE *rb_vm_ep_local_ep(const VALUE *ep); +const VALUE *rb_vm_proc_local_ep(VALUE proc); +void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep); +void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src); + +VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp); + +#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1) +#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1) + +#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \ + ((void *)(ecfp) > (void *)(cfp)) + +static inline const rb_control_frame_t * +RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec) +{ + return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size); +} + +static inline int +RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp) +{ + return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec)); +} + +static inline int +VM_BH_ISEQ_BLOCK_P(VALUE block_handler) +{ + if ((block_handler & 0x03) == 0x01) { +#if VM_CHECK_MODE > 0 + struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03); + VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq)); +#endif + return 1; + } + else { + return 0; + } +} + +static inline VALUE +VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured) +{ + VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01); + VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler)); + return block_handler; +} + 
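Editor's note: the VM_TAGGED_PTR_SET / VM_TAGGED_PTR_REF and VM_BH_* helpers above encode a captured-block pointer and a 2-bit type tag in a single word: 0x01 marks an iseq block handler (the ifunc variant, tagged 0x03, follows below), and the tag is masked off with ~0x03 when the pointer is recovered. A minimal standalone sketch of that tagged-pointer idea is given here for orientation; the names demo_block, demo_handler_t, tag_ptr and untag_ptr are hypothetical stand-ins, not the real VALUE / struct rb_captured_block types from this header.

    /* Standalone sketch (not part of the vendored header) of the 2-bit
     * tagged-pointer scheme used by VM_TAGGED_PTR_SET / VM_TAGGED_PTR_REF. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_block { int dummy; };          /* stand-in for rb_captured_block */
    typedef uintptr_t demo_handler_t;          /* stand-in for VALUE */

    /* Pack a pointer and a low-bit tag into one word; the pointer must be
     * at least 4-byte aligned so the low two bits are free to hold the tag. */
    static demo_handler_t tag_ptr(const struct demo_block *p, uintptr_t tag)
    {
        assert(((uintptr_t)p & 0x03) == 0);    /* alignment leaves room for the tag */
        return (uintptr_t)p | tag;
    }

    static struct demo_block *untag_ptr(demo_handler_t h)
    {
        return (struct demo_block *)(h & ~(uintptr_t)0x03);  /* like VM_TAGGED_PTR_REF(h, 0x03) */
    }

    int main(void)
    {
        static struct demo_block blk;
        demo_handler_t iseq_h  = tag_ptr(&blk, 0x01);  /* analogous to an iseq block handler */
        demo_handler_t ifunc_h = tag_ptr(&blk, 0x03);  /* analogous to an ifunc block handler */

        printf("iseq?  %d\n", (iseq_h  & 0x03) == 0x01);        /* 1 */
        printf("ifunc? %d\n", (ifunc_h & 0x03) == 0x03);        /* 1 */
        printf("round trip ok? %d\n", untag_ptr(iseq_h) == &blk); /* 1 */
        return 0;
    }

The scheme works only because the tagged structures are at least 4-byte aligned, which is why the low two bits are available to carry the handler type and why VM_BLOCK_HANDLER_NONE (0) can never collide with a tagged pointer.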
+static inline const struct rb_captured_block * +VM_BH_TO_ISEQ_BLOCK(VALUE block_handler) +{ + struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03); + VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler)); + return captured; +} + +static inline int +VM_BH_IFUNC_P(VALUE block_handler) +{ + if ((block_handler & 0x03) == 0x03) { +#if VM_CHECK_MODE > 0 + struct rb_captured_block *captured = (void *)(block_handler & ~0x03); + VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc)); +#endif + return 1; + } + else { + return 0; + } +} + +static inline VALUE +VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured) +{ + VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03); + VM_ASSERT(VM_BH_IFUNC_P(block_handler)); + return block_handler; +} + +static inline const struct rb_captured_block * +VM_BH_TO_IFUNC_BLOCK(VALUE block_handler) +{ + struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03); + VM_ASSERT(VM_BH_IFUNC_P(block_handler)); + return captured; +} + +static inline const struct rb_captured_block * +VM_BH_TO_CAPT_BLOCK(VALUE block_handler) +{ + struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03); + VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler)); + return captured; +} + +static inline enum rb_block_handler_type +vm_block_handler_type(VALUE block_handler) +{ + if (VM_BH_ISEQ_BLOCK_P(block_handler)) { + return block_handler_type_iseq; + } + else if (VM_BH_IFUNC_P(block_handler)) { + return block_handler_type_ifunc; + } + else if (SYMBOL_P(block_handler)) { + return block_handler_type_symbol; + } + else { + VM_ASSERT(rb_obj_is_proc(block_handler)); + return block_handler_type_proc; + } +} + +static inline void +vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler)) +{ + VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE || + (vm_block_handler_type(block_handler), 1)); +} + +static inline enum rb_block_type +vm_block_type(const struct rb_block *block) +{ +#if VM_CHECK_MODE > 0 + switch (block->type) { + case block_type_iseq: + VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq)); + break; + case block_type_ifunc: + VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc)); + break; + case block_type_symbol: + VM_ASSERT(SYMBOL_P(block->as.symbol)); + break; + case block_type_proc: + VM_ASSERT(rb_obj_is_proc(block->as.proc)); + break; + } +#endif + return block->type; +} + +static inline void +vm_block_type_set(const struct rb_block *block, enum rb_block_type type) +{ + struct rb_block *mb = (struct rb_block *)block; + mb->type = type; +} + +static inline const struct rb_block * +vm_proc_block(VALUE procval) +{ + VM_ASSERT(rb_obj_is_proc(procval)); + return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block; +} + +static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block); +static inline const VALUE *vm_block_ep(const struct rb_block *block); + +static inline const rb_iseq_t * +vm_proc_iseq(VALUE procval) +{ + return vm_block_iseq(vm_proc_block(procval)); +} + +static inline const VALUE * +vm_proc_ep(VALUE procval) +{ + return vm_block_ep(vm_proc_block(procval)); +} + +static inline const rb_iseq_t * +vm_block_iseq(const struct rb_block *block) +{ + switch (vm_block_type(block)) { + case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq); + case block_type_proc: return vm_proc_iseq(block->as.proc); + case block_type_ifunc: + case block_type_symbol: return NULL; + } + VM_UNREACHABLE(vm_block_iseq); + return NULL; +} + +static inline const VALUE * 
+vm_block_ep(const struct rb_block *block) +{ + switch (vm_block_type(block)) { + case block_type_iseq: + case block_type_ifunc: return block->as.captured.ep; + case block_type_proc: return vm_proc_ep(block->as.proc); + case block_type_symbol: return NULL; + } + VM_UNREACHABLE(vm_block_ep); + return NULL; +} + +static inline VALUE +vm_block_self(const struct rb_block *block) +{ + switch (vm_block_type(block)) { + case block_type_iseq: + case block_type_ifunc: + return block->as.captured.self; + case block_type_proc: + return vm_block_self(vm_proc_block(block->as.proc)); + case block_type_symbol: + return Qundef; + } + VM_UNREACHABLE(vm_block_self); + return Qundef; +} + +static inline VALUE +VM_BH_TO_SYMBOL(VALUE block_handler) +{ + VM_ASSERT(SYMBOL_P(block_handler)); + return block_handler; +} + +static inline VALUE +VM_BH_FROM_SYMBOL(VALUE symbol) +{ + VM_ASSERT(SYMBOL_P(symbol)); + return symbol; +} + +static inline VALUE +VM_BH_TO_PROC(VALUE block_handler) +{ + VM_ASSERT(rb_obj_is_proc(block_handler)); + return block_handler; +} + +static inline VALUE +VM_BH_FROM_PROC(VALUE procval) +{ + VM_ASSERT(rb_obj_is_proc(procval)); + return procval; +} + +/* VM related object allocate functions */ +VALUE rb_thread_alloc(VALUE klass); +VALUE rb_proc_alloc(VALUE klass); +VALUE rb_binding_alloc(VALUE klass); + +/* for debug */ +extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp); +extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc); +extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp); + +#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp) +#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp)) +void rb_vm_bugreport(const void *); +NORETURN(void rb_bug_context(const void *, const char *fmt, ...)); + +/* functions about thread/vm execution */ +RUBY_SYMBOL_EXPORT_BEGIN +VALUE rb_iseq_eval(const rb_iseq_t *iseq); +VALUE rb_iseq_eval_main(const rb_iseq_t *iseq); +VALUE rb_iseq_path(const rb_iseq_t *iseq); +VALUE rb_iseq_realpath(const rb_iseq_t *iseq); +RUBY_SYMBOL_EXPORT_END + +VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath); +void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath); + +int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp); +void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause); + +VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, VALUE block_handler); + +VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda); +static inline VALUE +rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass) +{ + return rb_vm_make_proc_lambda(ec, captured, klass, 0); +} + +static inline VALUE +rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass) +{ + return rb_vm_make_proc_lambda(ec, captured, klass, 1); +} + +VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp); +VALUE rb_vm_env_local_variables(const rb_env_t *env); +const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env); +const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars); +void rb_vm_inc_const_missing_count(void); +void 
rb_vm_gvl_destroy(rb_vm_t *vm); +VALUE rb_vm_call(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc, + const VALUE *argv, const rb_callable_method_entry_t *me); +void rb_vm_pop_frame(rb_execution_context_t *ec); + +void rb_thread_start_timer_thread(void); +void rb_thread_stop_timer_thread(void); +void rb_thread_reset_timer_thread(void); +void rb_thread_wakeup_timer_thread(void); + +static inline void +rb_vm_living_threads_init(rb_vm_t *vm) +{ + list_head_init(&vm->waiting_fds); + list_head_init(&vm->living_threads); + vm->living_thread_num = 0; +} + +static inline void +rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th) +{ + list_add_tail(&vm->living_threads, &th->vmlt_node); + vm->living_thread_num++; +} + +static inline void +rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th) +{ + list_del(&th->vmlt_node); + vm->living_thread_num--; +} + +typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE); +rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp); +rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp); +int rb_vm_get_sourceline(const rb_control_frame_t *); +VALUE rb_name_err_mesg_new(VALUE mesg, VALUE recv, VALUE method); +void rb_vm_stack_to_heap(rb_execution_context_t *ec); +void ruby_thread_init_stack(rb_thread_t *th); +int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp); +void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp); +VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler); + +void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg); + +#define rb_vm_register_special_exception(sp, e, m) \ + rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m))) + +void rb_gc_mark_machine_stack(const rb_execution_context_t *ec); + +void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr); + +const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp); + +#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack] + +#define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */ +#define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \ + (!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \ + !RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \ + ((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp)) +#define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \ + if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */ +#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \ + WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow() +#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \ + WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow() + +VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr); + +/* for thread */ + +#if RUBY_VM_THREAD_MODEL == 2 +RUBY_SYMBOL_EXPORT_BEGIN + +extern rb_vm_t *ruby_current_vm_ptr; +extern rb_execution_context_t *ruby_current_execution_context_ptr; +extern rb_event_flag_t ruby_vm_event_flags; +extern rb_event_flag_t ruby_vm_event_enabled_flags; + +RUBY_SYMBOL_EXPORT_END + +#define GET_VM() rb_current_vm() +#define GET_THREAD() rb_current_thread() +#define GET_EC() rb_current_execution_context() + +static inline 
rb_thread_t * +rb_ec_thread_ptr(const rb_execution_context_t *ec) +{ + return ec->thread_ptr; +} + +static inline rb_vm_t * +rb_ec_vm_ptr(const rb_execution_context_t *ec) +{ + const rb_thread_t *th = rb_ec_thread_ptr(ec); + if (th) { + return th->vm; + } + else { + return NULL; + } +} + +static inline rb_execution_context_t * +rb_current_execution_context(void) +{ + return ruby_current_execution_context_ptr; +} + +static inline rb_thread_t * +rb_current_thread(void) +{ + const rb_execution_context_t *ec = GET_EC(); + return rb_ec_thread_ptr(ec); +} + +static inline rb_vm_t * +rb_current_vm(void) +{ + VM_ASSERT(ruby_current_vm_ptr == NULL || + ruby_current_execution_context_ptr == NULL || + rb_ec_thread_ptr(GET_EC()) == NULL || + rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr); + return ruby_current_vm_ptr; +} + +#define rb_thread_set_current_raw(th) (void)(ruby_current_execution_context_ptr = (th)->ec) +#define rb_thread_set_current(th) do { \ + if ((th)->vm->running_thread != (th)) { \ + (th)->running_time_us = 0; \ + } \ + rb_thread_set_current_raw(th); \ + (th)->vm->running_thread = (th); \ +} while (0) + +#else +#error "unsupported thread model" +#endif + +enum { + TIMER_INTERRUPT_MASK = 0x01, + PENDING_INTERRUPT_MASK = 0x02, + POSTPONED_JOB_INTERRUPT_MASK = 0x04, + TRAP_INTERRUPT_MASK = 0x08 +}; + +#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK) +#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK) +#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK) +#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK) +#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \ + (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK)) +#define RUBY_VM_INTERRUPTED_ANY(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask) + +VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt); +int rb_signal_buff_size(void); +void rb_signal_exec(rb_thread_t *th, int sig); +void rb_threadptr_check_signal(rb_thread_t *mth); +void rb_threadptr_signal_raise(rb_thread_t *th, int sig); +void rb_threadptr_signal_exit(rb_thread_t *th); +void rb_threadptr_execute_interrupts(rb_thread_t *, int); +void rb_threadptr_interrupt(rb_thread_t *th); +void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th); +void rb_threadptr_pending_interrupt_clear(rb_thread_t *th); +void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v); +void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo); +void rb_execution_context_mark(const rb_execution_context_t *ec); +void rb_fiber_close(rb_fiber_t *fib); +void Init_native_thread(rb_thread_t *th); + +#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec) +static inline void +rb_vm_check_ints(rb_execution_context_t *ec) +{ + VM_ASSERT(ec == GET_EC()); + if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) { + rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0); + } +} + +/* tracer */ +struct rb_trace_arg_struct { + rb_event_flag_t event; + rb_execution_context_t *ec; + const rb_control_frame_t *cfp; + VALUE self; + ID id; + ID called_id; + VALUE klass; + VALUE data; + + int klass_solved; + + /* calc from cfp */ + int lineno; + VALUE path; +}; + +void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, int pop_p); + +#define EXEC_EVENT_HOOK_ORIG(ec_, flag_, vm_flags_, self_, id_, called_id_, klass_, data_, pop_p_) do { \ + const rb_event_flag_t flag_arg_ = (flag_); \ + if 
(UNLIKELY(vm_flags_ & (flag_arg_))) { \ + /* defer evaluating the other arguments */ \ + rb_exec_event_hook_orig(ec_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \ + } \ +} while (0) + +static inline void +rb_exec_event_hook_orig(rb_execution_context_t *ec, const rb_event_flag_t flag, + VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p) +{ + struct rb_trace_arg_struct trace_arg; + + VM_ASSERT(rb_ec_vm_ptr(ec)->event_hooks.events == ruby_vm_event_flags); + VM_ASSERT(rb_ec_vm_ptr(ec)->event_hooks.events & flag); + + trace_arg.event = flag; + trace_arg.ec = ec; + trace_arg.cfp = ec->cfp; + trace_arg.self = self; + trace_arg.id = id; + trace_arg.called_id = called_id; + trace_arg.klass = klass; + trace_arg.data = data; + trace_arg.path = Qundef; + trace_arg.klass_solved = 0; + rb_exec_event_hooks(&trace_arg, pop_p); +} + +#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \ + EXEC_EVENT_HOOK_ORIG(ec_, flag_, ruby_vm_event_flags, self_, id_, called_id_, klass_, data_, 0) + +#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \ + EXEC_EVENT_HOOK_ORIG(ec_, flag_, ruby_vm_event_flags, self_, id_, called_id_, klass_, data_, 1) + +RUBY_SYMBOL_EXPORT_BEGIN + +int rb_thread_check_trap_pending(void); + +/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */ +#define RUBY_EVENT_COVERAGE_LINE 0x010000 +#define RUBY_EVENT_COVERAGE_BRANCH 0x020000 + +extern VALUE rb_get_coverages(void); +extern void rb_set_coverages(VALUE, int, VALUE); +extern void rb_reset_coverages(void); + +void rb_postponed_job_flush(rb_vm_t *vm); + +RUBY_SYMBOL_EXPORT_END + +#endif /* RUBY_VM_CORE_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_debug.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_debug.h new file mode 100644 index 0000000..ff0fcff --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_debug.h @@ -0,0 +1,37 @@ +/********************************************************************** + + vm_debug.h - YARV Debug function interface + + $Author: nobu $ + created at: 04/08/25 02:33:49 JST + + Copyright (C) 2004-2007 Koichi Sasada + +**********************************************************************/ + +#ifndef RUBY_DEBUG_H +#define RUBY_DEBUG_H + +#include "ruby/ruby.h" +#include "node.h" + +RUBY_SYMBOL_EXPORT_BEGIN + +#define dpv(h,v) ruby_debug_print_value(-1, 0, (h), (v)) +#define dp(v) ruby_debug_print_value(-1, 0, "", (v)) +#define dpi(i) ruby_debug_print_id(-1, 0, "", (i)) +#define dpn(n) ruby_debug_print_node(-1, 0, "", (n)) + +#define bp() ruby_debug_breakpoint() + +VALUE ruby_debug_print_value(int level, int debug_level, const char *header, VALUE v); +ID ruby_debug_print_id(int level, int debug_level, const char *header, ID id); +NODE *ruby_debug_print_node(int level, int debug_level, const char *header, const NODE *node); +int ruby_debug_print_indent(int level, int debug_level, int indent_level); +void ruby_debug_breakpoint(void); +void ruby_debug_gc_check_func(void); +void ruby_set_debug_option(const char *str); + +RUBY_SYMBOL_EXPORT_END + +#endif /* RUBY_DEBUG_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_exec.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_exec.h new file mode 100644 index 0000000..b535ec8 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_exec.h @@ -0,0 +1,192 @@ +/********************************************************************** + + vm.h - + + $Author: nagachika $ + created at: 04/01/01 16:56:59 
JST + + Copyright (C) 2004-2007 Koichi Sasada + +**********************************************************************/ + +#ifndef RUBY_VM_EXEC_H +#define RUBY_VM_EXEC_H + +typedef long OFFSET; +typedef unsigned long lindex_t; +typedef VALUE GENTRY; +typedef rb_iseq_t *ISEQ; + +#ifdef __GCC__ +/* TODO: machine dependent prefetch instruction */ +#define PREFETCH(pc) +#else +#define PREFETCH(pc) +#endif + +#if VMDEBUG > 0 +#define debugs printf +#define DEBUG_ENTER_INSN(insn) \ + rb_vmdebug_debug_print_pre(ec, GET_CFP(), GET_PC()); + +#if OPT_STACK_CACHING +#define SC_REGS() , reg_a, reg_b +#else +#define SC_REGS() +#endif + +#define DEBUG_END_INSN() \ + rb_vmdebug_debug_print_post(ec, GET_CFP() SC_REGS()); + +#else + +#define debugs +#define DEBUG_ENTER_INSN(insn) +#define DEBUG_END_INSN() +#endif + +#define throwdebug if(0)printf +/* #define throwdebug printf */ + +/************************************************/ +#if defined(DISPATCH_XXX) +error ! +/************************************************/ +#elif OPT_CALL_THREADED_CODE + +#define LABEL(x) insn_func_##x +#define ELABEL(x) +#define LABEL_PTR(x) &LABEL(x) + +#define INSN_ENTRY(insn) \ + static rb_control_frame_t * \ + FUNC_FASTCALL(LABEL(insn))(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp) { + +#define END_INSN(insn) return reg_cfp;} + +#define NEXT_INSN() return reg_cfp; + +#define START_OF_ORIGINAL_INSN(x) /* ignore */ +#define DISPATCH_ORIGINAL_INSN(x) return LABEL(x)(ec, reg_cfp); + +/************************************************/ +#elif OPT_TOKEN_THREADED_CODE || OPT_DIRECT_THREADED_CODE +/* threaded code with gcc */ + +#define LABEL(x) INSN_LABEL_##x +#define ELABEL(x) INSN_ELABEL_##x +#define LABEL_PTR(x) &&LABEL(x) + +#define INSN_ENTRY_SIG(insn) \ + if (0) fprintf(stderr, "exec: %s@(%d, %d)@%s:%d\n", #insn, \ + (int)(reg_pc - reg_cfp->iseq->body->iseq_encoded), \ + (int)(reg_cfp->pc - reg_cfp->iseq->body->iseq_encoded), \ + RSTRING_PTR(rb_iseq_path(reg_cfp->iseq)), \ + (int)(rb_iseq_line_no(reg_cfp->iseq, reg_pc - reg_cfp->iseq->body->iseq_encoded))); + +#define INSN_DISPATCH_SIG(insn) + +#define INSN_ENTRY(insn) \ + LABEL(insn): \ + INSN_ENTRY_SIG(insn); \ + +/* dispatcher */ +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && __GNUC__ == 3 +#define DISPATCH_ARCH_DEPEND_WAY(addr) \ + __asm__ __volatile__("jmp *%0;\t# -- inserted by vm.h\t[length = 2]" : : "r" (addr)) + +#else +#define DISPATCH_ARCH_DEPEND_WAY(addr) \ + /* do nothing */ +#endif + +/**********************************/ +#if OPT_DIRECT_THREADED_CODE + +/* for GCC 3.4.x */ +#define TC_DISPATCH(insn) \ + INSN_DISPATCH_SIG(insn); \ + goto *(void const *)GET_CURRENT_INSN(); \ + ; + +#else +/* token threaded code */ + +#define TC_DISPATCH(insn) \ + DISPATCH_ARCH_DEPEND_WAY(insns_address_table[GET_CURRENT_INSN()]); \ + INSN_DISPATCH_SIG(insn); \ + goto *insns_address_table[GET_CURRENT_INSN()]; \ + rb_bug("tc error"); + + +#endif /* DISPATCH_DIRECT_THREADED_CODE */ + +#define END_INSN(insn) \ + DEBUG_END_INSN(); \ + TC_DISPATCH(insn); + +#define INSN_DISPATCH() \ + TC_DISPATCH(__START__) \ + { + +#define END_INSNS_DISPATCH() \ + rb_bug("unknown insn: %"PRIdVALUE, GET_CURRENT_INSN()); \ + } /* end of while loop */ \ + +#define NEXT_INSN() TC_DISPATCH(__NEXT_INSN__) + +#define START_OF_ORIGINAL_INSN(x) start_of_##x: +#define DISPATCH_ORIGINAL_INSN(x) goto start_of_##x; + +/************************************************/ +#else /* no threaded code */ +/* most common method */ + +#define INSN_ENTRY(insn) \ +case BIN(insn): + 
+#define END_INSN(insn) \ + DEBUG_END_INSN(); \ + break; + +#define INSN_DISPATCH() \ + while (1) { \ + switch (GET_CURRENT_INSN()) { + +#define END_INSNS_DISPATCH() \ +default: \ + SDR(); \ + rb_bug("unknown insn: %ld", GET_CURRENT_INSN()); \ + } /* end of switch */ \ + } /* end of while loop */ \ + +#define NEXT_INSN() goto first + +#define START_OF_ORIGINAL_INSN(x) start_of_##x: +#define DISPATCH_ORIGINAL_INSN(x) goto start_of_##x; + +#endif + +#define VM_SP_CNT(ec, sp) ((sp) - (ec)->vm_stack) + +#if OPT_CALL_THREADED_CODE +#define THROW_EXCEPTION(exc) do { \ + ec->errinfo = (VALUE)(exc); \ + return 0; \ +} while (0) +#else +#define THROW_EXCEPTION(exc) return (VALUE)(exc) +#endif + +#define SCREG(r) (reg_##r) + +#define VM_DEBUG_STACKOVERFLOW 0 + +#if VM_DEBUG_STACKOVERFLOW +#define CHECK_VM_STACK_OVERFLOW_FOR_INSN(cfp, margin) \ + WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stack_overflow_for_insn() +#else +#define CHECK_VM_STACK_OVERFLOW_FOR_INSN(cfp, margin) +#endif + +#endif /* RUBY_VM_EXEC_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_insnhelper.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_insnhelper.h new file mode 100644 index 0000000..9b79d8c --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_insnhelper.h @@ -0,0 +1,255 @@ +/********************************************************************** + + insnhelper.h - helper macros to implement each instructions + + $Author: ko1 $ + created at: 04/01/01 15:50:34 JST + + Copyright (C) 2004-2007 Koichi Sasada + +**********************************************************************/ + +#ifndef RUBY_INSNHELPER_H +#define RUBY_INSNHELPER_H + +RUBY_SYMBOL_EXPORT_BEGIN + +extern VALUE ruby_vm_const_missing_count; +extern rb_serial_t ruby_vm_global_method_state; +extern rb_serial_t ruby_vm_global_constant_state; +extern rb_serial_t ruby_vm_class_serial; + +RUBY_SYMBOL_EXPORT_END + +#if VM_COLLECT_USAGE_DETAILS +#define COLLECT_USAGE_INSN(insn) vm_collect_usage_insn(insn) +#define COLLECT_USAGE_OPERAND(insn, n, op) vm_collect_usage_operand((insn), (n), ((VALUE)(op))) + +#define COLLECT_USAGE_REGISTER(reg, s) vm_collect_usage_register((reg), (s)) +#else +#define COLLECT_USAGE_INSN(insn) /* none */ +#define COLLECT_USAGE_OPERAND(insn, n, op) /* none */ +#define COLLECT_USAGE_REGISTER(reg, s) /* none */ +#endif + +/**********************************************************/ +/* deal with stack */ +/**********************************************************/ + +#define PUSH(x) (SET_SV(x), INC_SP(1)) +#define TOPN(n) (*(GET_SP()-(n)-1)) +#define POPN(n) (DEC_SP(n)) +#define POP() (DEC_SP(1)) +#define STACK_ADDR_FROM_TOP(n) (GET_SP()-(n)) + +#define GET_TOS() (tos) /* dummy */ + +/**********************************************************/ +/* deal with registers */ +/**********************************************************/ + +#define VM_REG_CFP (reg_cfp) +#define VM_REG_PC (VM_REG_CFP->pc) +#define VM_REG_SP (VM_REG_CFP->sp) +#define VM_REG_EP (VM_REG_CFP->ep) + +#define RESTORE_REGS() do { \ + VM_REG_CFP = ec->cfp; \ +} while (0) + +#define REG_A reg_a +#define REG_B reg_b + +enum vm_regan_regtype { + VM_REGAN_PC = 0, + VM_REGAN_SP = 1, + VM_REGAN_EP = 2, + VM_REGAN_CFP = 3, + VM_REGAN_SELF = 4, + VM_REGAN_ISEQ = 5, +}; +enum vm_regan_acttype { + VM_REGAN_ACT_GET = 0, + VM_REGAN_ACT_SET = 1, +}; + +#if VM_COLLECT_USAGE_DETAILS +#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) \ + (COLLECT_USAGE_REGISTER((VM_REGAN_##a), (VM_REGAN_ACT_##b)), (v)) +#else +#define COLLECT_USAGE_REGISTER_HELPER(a, b, 
v) (v) +#endif + +/* PC */ +#define GET_PC() (COLLECT_USAGE_REGISTER_HELPER(PC, GET, VM_REG_PC)) +#define SET_PC(x) (VM_REG_PC = (COLLECT_USAGE_REGISTER_HELPER(PC, SET, (x)))) +#define GET_CURRENT_INSN() (*GET_PC()) +#define GET_OPERAND(n) (GET_PC()[(n)]) +#define ADD_PC(n) (SET_PC(VM_REG_PC + (n))) +#define JUMP(dst) (SET_PC(VM_REG_PC + (dst))) + +/* frame pointer, environment pointer */ +#define GET_CFP() (COLLECT_USAGE_REGISTER_HELPER(CFP, GET, VM_REG_CFP)) +#define GET_EP() (COLLECT_USAGE_REGISTER_HELPER(EP, GET, VM_REG_EP)) +#define SET_EP(x) (VM_REG_EP = (COLLECT_USAGE_REGISTER_HELPER(EP, SET, (x)))) +#define GET_LEP() (VM_EP_LEP(GET_EP())) + +/* SP */ +#define GET_SP() (COLLECT_USAGE_REGISTER_HELPER(SP, GET, VM_REG_SP)) +#define SET_SP(x) (VM_REG_SP = (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x)))) +#define INC_SP(x) (VM_REG_SP += (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x)))) +#define DEC_SP(x) (VM_REG_SP -= (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x)))) +#define SET_SV(x) (*GET_SP() = (x)) + /* set current stack value as x */ + +/* instruction sequence C struct */ +#define GET_ISEQ() (GET_CFP()->iseq) + +/**********************************************************/ +/* deal with variables */ +/**********************************************************/ + +#define GET_PREV_EP(ep) ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03)) + +#define GET_GLOBAL(entry) rb_gvar_get((struct rb_global_entry*)(entry)) +#define SET_GLOBAL(entry, val) rb_gvar_set((struct rb_global_entry*)(entry), (val)) + +#define GET_CONST_INLINE_CACHE(dst) ((IC) * (GET_PC() + (dst) + 2)) + +/**********************************************************/ +/* deal with values */ +/**********************************************************/ + +#define GET_SELF() (COLLECT_USAGE_REGISTER_HELPER(SELF, GET, GET_CFP()->self)) + +/**********************************************************/ +/* deal with control flow 2: method/iterator */ +/**********************************************************/ + +#define CALL_METHOD(calling, ci, cc) do { \ + VALUE v = (*(cc)->call)(ec, GET_CFP(), (calling), (ci), (cc)); \ + if (v == Qundef) { \ + RESTORE_REGS(); \ + NEXT_INSN(); \ + } \ + else { \ + val = v; \ + } \ +} while (0) + +/* set fastpath when cached method is *NOT* protected + * because inline method cache does not care about receiver. 
+ */ + +#ifndef OPT_CALL_FASTPATH +#define OPT_CALL_FASTPATH 1 +#endif + +#if OPT_CALL_FASTPATH +#define CI_SET_FASTPATH(cc, func, enabled) do { \ + if (LIKELY(enabled)) ((cc)->call = (func)); \ +} while (0) +#else +#define CI_SET_FASTPATH(ci, func, enabled) /* do nothing */ +#endif + +#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL]) + +/**********************************************************/ +/* deal with control flow 3: exception */ +/**********************************************************/ + + +/**********************************************************/ +/* others */ +/**********************************************************/ + +/* optimize insn */ +#define FIXNUM_2_P(a, b) ((a) & (b) & 1) +#if USE_FLONUM +#define FLONUM_2_P(a, b) (((((a)^2) | ((b)^2)) & 3) == 0) /* (FLONUM_P(a) && FLONUM_P(b)) */ +#else +#define FLONUM_2_P(a, b) 0 +#endif +#define FLOAT_HEAP_P(x) (!SPECIAL_CONST_P(x) && RBASIC_CLASS(x) == rb_cFloat) +#define FLOAT_INSTANCE_P(x) (FLONUM_P(x) || FLOAT_HEAP_P(x)) + +#ifndef USE_IC_FOR_SPECIALIZED_METHOD +#define USE_IC_FOR_SPECIALIZED_METHOD 1 +#endif + +#define CALL_SIMPLE_METHOD(recv_) do { \ + struct rb_calling_info calling; \ + calling.block_handler = VM_BLOCK_HANDLER_NONE; \ + calling.argc = ci->orig_argc; \ + vm_search_method(ci, cc, calling.recv = (recv_)); \ + CALL_METHOD(&calling, ci, cc); \ +} while (0) + +#define NEXT_CLASS_SERIAL() (++ruby_vm_class_serial) +#define GET_GLOBAL_METHOD_STATE() (ruby_vm_global_method_state) +#define INC_GLOBAL_METHOD_STATE() (++ruby_vm_global_method_state) +#define GET_GLOBAL_CONSTANT_STATE() (ruby_vm_global_constant_state) +#define INC_GLOBAL_CONSTANT_STATE() (++ruby_vm_global_constant_state) + +static VALUE make_no_method_exception(VALUE exc, VALUE format, VALUE obj, + int argc, const VALUE *argv, int priv); + +static inline struct vm_throw_data * +THROW_DATA_NEW(VALUE val, const rb_control_frame_t *cf, VALUE st) +{ + return (struct vm_throw_data *)rb_imemo_new(imemo_throw_data, val, (VALUE)cf, st, 0); +} + +static inline VALUE +THROW_DATA_VAL(const struct vm_throw_data *obj) +{ + VM_ASSERT(THROW_DATA_P(obj)); + return obj->throw_obj; +} + +static inline const rb_control_frame_t * +THROW_DATA_CATCH_FRAME(const struct vm_throw_data *obj) +{ + VM_ASSERT(THROW_DATA_P(obj)); + return obj->catch_frame; +} + +static inline int +THROW_DATA_STATE(const struct vm_throw_data *obj) +{ + VM_ASSERT(THROW_DATA_P(obj)); + return (int)obj->throw_state; +} + +static inline int +THROW_DATA_CONSUMED_P(const struct vm_throw_data *obj) +{ + VM_ASSERT(THROW_DATA_P(obj)); + return obj->flags & THROW_DATA_CONSUMED; +} + +static inline void +THROW_DATA_CATCH_FRAME_SET(struct vm_throw_data *obj, const rb_control_frame_t *cfp) +{ + VM_ASSERT(THROW_DATA_P(obj)); + obj->catch_frame = cfp; +} + +static inline void +THROW_DATA_STATE_SET(struct vm_throw_data *obj, int st) +{ + VM_ASSERT(THROW_DATA_P(obj)); + obj->throw_state = (VALUE)st; +} + +static inline void +THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj) +{ + if (THROW_DATA_P(obj) && + THROW_DATA_STATE(obj) == TAG_BREAK) { + obj->flags |= THROW_DATA_CONSUMED; + } +} + +#endif /* RUBY_INSNHELPER_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_opts.h b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_opts.h new file mode 100644 index 0000000..ee7febb --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vm_opts.h @@ -0,0 +1,56 @@ +/*-*-c-*-*/ +/********************************************************************** + + vm_opts.h - VM optimize option + 
+ $Author: ko1 $ + + Copyright (C) 2004-2007 Koichi Sasada + +**********************************************************************/ + + +#ifndef RUBY_VM_OPTS_H +#define RUBY_VM_OPTS_H + +/* Compile options. + * You can change these options at runtime by VM::CompileOption. + * Following definitions are default values. + */ + +#define OPT_TAILCALL_OPTIMIZATION 0 +#define OPT_PEEPHOLE_OPTIMIZATION 1 +#define OPT_SPECIALISED_INSTRUCTION 1 +#define OPT_INLINE_CONST_CACHE 1 +#define OPT_FROZEN_STRING_LITERAL 0 +#define OPT_DEBUG_FROZEN_STRING_LITERAL 0 + +/* Build Options. + * You can't change these options at runtime. + */ + +/* C compiler dependent */ +#define OPT_DIRECT_THREADED_CODE 1 +#define OPT_TOKEN_THREADED_CODE 0 +#define OPT_CALL_THREADED_CODE 0 + +/* VM running option */ +#define OPT_CHECKED_RUN 1 +#define OPT_INLINE_METHOD_CACHE 1 +#define OPT_GLOBAL_METHOD_CACHE 1 +#define OPT_BLOCKINLINING 0 + +/* architecture independent, affects generated code */ +#define OPT_OPERANDS_UNIFICATION 1 +#define OPT_INSTRUCTIONS_UNIFICATION 0 +#define OPT_UNIFY_ALL_COMBINATION 0 +#define OPT_STACK_CACHING 0 + +/* misc */ +#define SUPPORT_JOKE 0 + +#ifndef VM_COLLECT_USAGE_DETAILS +#define VM_COLLECT_USAGE_DETAILS 0 +#endif + +#endif /* RUBY_VM_OPTS_H */ diff --git a/lib/debase/ruby_core_source/ruby-2.5.4-p155/vmtc.inc b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vmtc.inc new file mode 100644 index 0000000..f9bfc03 --- /dev/null +++ b/lib/debase/ruby_core_source/ruby-2.5.4-p155/vmtc.inc @@ -0,0 +1,214 @@ +/* -*-c-*- *********************************************************/ +/*******************************************************************/ +/*******************************************************************/ +/** + This file is for threaded code. + + ---- + This file is auto generated by insns2vm.rb + DO NOT TOUCH! 
+ + If you want to fix something, you must edit 'template/vmtc.inc.tmpl' + or insns2vm.rb + */ + +static const void *const insns_address_table[] = { + LABEL_PTR(nop), + LABEL_PTR(getlocal), + LABEL_PTR(setlocal), + LABEL_PTR(getblockparam), + LABEL_PTR(setblockparam), + LABEL_PTR(getspecial), + LABEL_PTR(setspecial), + LABEL_PTR(getinstancevariable), + LABEL_PTR(setinstancevariable), + LABEL_PTR(getclassvariable), + LABEL_PTR(setclassvariable), + LABEL_PTR(getconstant), + LABEL_PTR(setconstant), + LABEL_PTR(getglobal), + LABEL_PTR(setglobal), + LABEL_PTR(putnil), + LABEL_PTR(putself), + LABEL_PTR(putobject), + LABEL_PTR(putspecialobject), + LABEL_PTR(putiseq), + LABEL_PTR(putstring), + LABEL_PTR(concatstrings), + LABEL_PTR(tostring), + LABEL_PTR(freezestring), + LABEL_PTR(toregexp), + LABEL_PTR(intern), + LABEL_PTR(newarray), + LABEL_PTR(duparray), + LABEL_PTR(expandarray), + LABEL_PTR(concatarray), + LABEL_PTR(splatarray), + LABEL_PTR(newhash), + LABEL_PTR(newrange), + LABEL_PTR(pop), + LABEL_PTR(dup), + LABEL_PTR(dupn), + LABEL_PTR(swap), + LABEL_PTR(reverse), + LABEL_PTR(reput), + LABEL_PTR(topn), + LABEL_PTR(setn), + LABEL_PTR(adjuststack), + LABEL_PTR(defined), + LABEL_PTR(checkmatch), + LABEL_PTR(checkkeyword), + LABEL_PTR(tracecoverage), + LABEL_PTR(defineclass), + LABEL_PTR(send), + LABEL_PTR(opt_str_freeze), + LABEL_PTR(opt_str_uminus), + LABEL_PTR(opt_newarray_max), + LABEL_PTR(opt_newarray_min), + LABEL_PTR(opt_send_without_block), + LABEL_PTR(invokesuper), + LABEL_PTR(invokeblock), + LABEL_PTR(leave), + LABEL_PTR(throw), + LABEL_PTR(jump), + LABEL_PTR(branchif), + LABEL_PTR(branchunless), + LABEL_PTR(branchnil), + LABEL_PTR(branchiftype), + LABEL_PTR(getinlinecache), + LABEL_PTR(setinlinecache), + LABEL_PTR(once), + LABEL_PTR(opt_case_dispatch), + LABEL_PTR(opt_plus), + LABEL_PTR(opt_minus), + LABEL_PTR(opt_mult), + LABEL_PTR(opt_div), + LABEL_PTR(opt_mod), + LABEL_PTR(opt_eq), + LABEL_PTR(opt_neq), + LABEL_PTR(opt_lt), + LABEL_PTR(opt_le), + LABEL_PTR(opt_gt), + LABEL_PTR(opt_ge), + LABEL_PTR(opt_ltlt), + LABEL_PTR(opt_aref), + LABEL_PTR(opt_aset), + LABEL_PTR(opt_aset_with), + LABEL_PTR(opt_aref_with), + LABEL_PTR(opt_length), + LABEL_PTR(opt_size), + LABEL_PTR(opt_empty_p), + LABEL_PTR(opt_succ), + LABEL_PTR(opt_not), + LABEL_PTR(opt_regexpmatch1), + LABEL_PTR(opt_regexpmatch2), + LABEL_PTR(opt_call_c_function), + LABEL_PTR(bitblt), + LABEL_PTR(answer), + LABEL_PTR(getlocal_OP__WC__0), + LABEL_PTR(getlocal_OP__WC__1), + LABEL_PTR(setlocal_OP__WC__0), + LABEL_PTR(setlocal_OP__WC__1), + LABEL_PTR(putobject_OP_INT2FIX_O_0_C_), + LABEL_PTR(putobject_OP_INT2FIX_O_1_C_), + LABEL_PTR(trace_nop), + LABEL_PTR(trace_getlocal), + LABEL_PTR(trace_setlocal), + LABEL_PTR(trace_getblockparam), + LABEL_PTR(trace_setblockparam), + LABEL_PTR(trace_getspecial), + LABEL_PTR(trace_setspecial), + LABEL_PTR(trace_getinstancevariable), + LABEL_PTR(trace_setinstancevariable), + LABEL_PTR(trace_getclassvariable), + LABEL_PTR(trace_setclassvariable), + LABEL_PTR(trace_getconstant), + LABEL_PTR(trace_setconstant), + LABEL_PTR(trace_getglobal), + LABEL_PTR(trace_setglobal), + LABEL_PTR(trace_putnil), + LABEL_PTR(trace_putself), + LABEL_PTR(trace_putobject), + LABEL_PTR(trace_putspecialobject), + LABEL_PTR(trace_putiseq), + LABEL_PTR(trace_putstring), + LABEL_PTR(trace_concatstrings), + LABEL_PTR(trace_tostring), + LABEL_PTR(trace_freezestring), + LABEL_PTR(trace_toregexp), + LABEL_PTR(trace_intern), + LABEL_PTR(trace_newarray), + LABEL_PTR(trace_duparray), + LABEL_PTR(trace_expandarray), + 
LABEL_PTR(trace_concatarray), + LABEL_PTR(trace_splatarray), + LABEL_PTR(trace_newhash), + LABEL_PTR(trace_newrange), + LABEL_PTR(trace_pop), + LABEL_PTR(trace_dup), + LABEL_PTR(trace_dupn), + LABEL_PTR(trace_swap), + LABEL_PTR(trace_reverse), + LABEL_PTR(trace_reput), + LABEL_PTR(trace_topn), + LABEL_PTR(trace_setn), + LABEL_PTR(trace_adjuststack), + LABEL_PTR(trace_defined), + LABEL_PTR(trace_checkmatch), + LABEL_PTR(trace_checkkeyword), + LABEL_PTR(trace_tracecoverage), + LABEL_PTR(trace_defineclass), + LABEL_PTR(trace_send), + LABEL_PTR(trace_opt_str_freeze), + LABEL_PTR(trace_opt_str_uminus), + LABEL_PTR(trace_opt_newarray_max), + LABEL_PTR(trace_opt_newarray_min), + LABEL_PTR(trace_opt_send_without_block), + LABEL_PTR(trace_invokesuper), + LABEL_PTR(trace_invokeblock), + LABEL_PTR(trace_leave), + LABEL_PTR(trace_throw), + LABEL_PTR(trace_jump), + LABEL_PTR(trace_branchif), + LABEL_PTR(trace_branchunless), + LABEL_PTR(trace_branchnil), + LABEL_PTR(trace_branchiftype), + LABEL_PTR(trace_getinlinecache), + LABEL_PTR(trace_setinlinecache), + LABEL_PTR(trace_once), + LABEL_PTR(trace_opt_case_dispatch), + LABEL_PTR(trace_opt_plus), + LABEL_PTR(trace_opt_minus), + LABEL_PTR(trace_opt_mult), + LABEL_PTR(trace_opt_div), + LABEL_PTR(trace_opt_mod), + LABEL_PTR(trace_opt_eq), + LABEL_PTR(trace_opt_neq), + LABEL_PTR(trace_opt_lt), + LABEL_PTR(trace_opt_le), + LABEL_PTR(trace_opt_gt), + LABEL_PTR(trace_opt_ge), + LABEL_PTR(trace_opt_ltlt), + LABEL_PTR(trace_opt_aref), + LABEL_PTR(trace_opt_aset), + LABEL_PTR(trace_opt_aset_with), + LABEL_PTR(trace_opt_aref_with), + LABEL_PTR(trace_opt_length), + LABEL_PTR(trace_opt_size), + LABEL_PTR(trace_opt_empty_p), + LABEL_PTR(trace_opt_succ), + LABEL_PTR(trace_opt_not), + LABEL_PTR(trace_opt_regexpmatch1), + LABEL_PTR(trace_opt_regexpmatch2), + LABEL_PTR(trace_opt_call_c_function), + LABEL_PTR(trace_bitblt), + LABEL_PTR(trace_answer), + LABEL_PTR(trace_getlocal_OP__WC__0), + LABEL_PTR(trace_getlocal_OP__WC__1), + LABEL_PTR(trace_setlocal_OP__WC__0), + LABEL_PTR(trace_setlocal_OP__WC__1), + LABEL_PTR(trace_putobject_OP_INT2FIX_O_0_C_), + LABEL_PTR(trace_putobject_OP_INT2FIX_O_1_C_), +}; + +ASSERT_VM_INSTRUCTION_SIZE(insns_address_table);
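Editor's note: this label-address table is only meaningful in the token/direct-threaded configurations of vm_exec.h above, where LABEL_PTR(x) expands to &&INSN_LABEL_x (GCC's label-as-value extension) and TC_DISPATCH() transfers control with a computed goto instead of returning to a central switch; ASSERT_VM_INSTRUCTION_SIZE presumably checks that the table stays in step with the instruction set. Below is a minimal standalone sketch of that dispatch style, compilable with GCC or Clang; demo_op, run and the tiny stack machine are hypothetical illustrations, not the generated Ruby interpreter.

    /* Standalone sketch (not part of the vendored source) of threaded-code
     * dispatch driven by a label-address table like insns_address_table.
     * Requires the GNU C "&&label" / "goto *" extensions (GCC, Clang). */
    #include <stdio.h>

    enum demo_op { OP_PUSH1, OP_ADD, OP_PRINT, OP_HALT };

    static void run(const enum demo_op *pc)
    {
        /* One label per instruction; this mirrors insns_address_table. */
        static const void *const table[] = {
            &&op_push1, &&op_add, &&op_print, &&op_halt
        };
        long stack[16], *sp = stack;

    #define DISPATCH() goto *table[*pc++]   /* analogous to TC_DISPATCH() */

        DISPATCH();

    op_push1:  *sp++ = 1;                 DISPATCH();
    op_add:    sp--; sp[-1] += sp[0];     DISPATCH();
    op_print:  printf("%ld\n", sp[-1]);   DISPATCH();
    op_halt:   return;
    #undef DISPATCH
    }

    int main(void)
    {
        const enum demo_op prog[] = { OP_PUSH1, OP_PUSH1, OP_ADD, OP_PRINT, OP_HALT };
        run(prog);   /* prints 2 */
        return 0;
    }

Compared with the switch-based fallback earlier in vm_exec.h, each handler here ends by jumping directly to the next instruction's handler rather than breaking back to a dispatch loop, which is the performance motivation for keeping a table of instruction label addresses in the first place.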