| project (string) | commit_id (string) | target (int64) | func (string) | cwe (string) | big_vul_idx (string) | idx (int64) | hash (string) | size (float64) | message (string) | dataset (string) |
|---|---|---|---|---|---|---|---|---|---|---|
| linux | 9ad36309e2719a884f946678e0296be10f0bb4c1 | 1 |

```c
static int route4_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, void **arg, u32 flags,
struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter __rcu **fp;
struct route4_filter *fold, *f1, *pfp, *f = NULL;
struct route4_bucket *b;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_ROUTE4_MAX + 1];
unsigned int h, th;
int err;
bool new = true;
if (opt == NULL)
return handle ? -EINVAL : 0;
err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
route4_policy, NULL);
if (err < 0)
return err;
fold = *arg;
if (fold && handle && fold->handle != handle)
return -EINVAL;
err = -ENOBUFS;
f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
if (!f)
goto errout;
err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
if (err < 0)
goto errout;
if (fold) {
f->id = fold->id;
f->iif = fold->iif;
f->res = fold->res;
f->handle = fold->handle;
f->tp = fold->tp;
f->bkt = fold->bkt;
new = false;
}
err = route4_set_parms(net, tp, base, f, handle, head, tb,
tca[TCA_RATE], new, flags, extack);
if (err < 0)
goto errout;
h = from_hash(f->handle >> 16);
fp = &f->bkt->ht[h];
for (pfp = rtnl_dereference(*fp);
(f1 = rtnl_dereference(*fp)) != NULL;
fp = &f1->next)
if (f->handle < f1->handle)
break;
tcf_block_netif_keep_dst(tp->chain->block);
rcu_assign_pointer(f->next, f1);
rcu_assign_pointer(*fp, f);
if (fold && fold->handle && f->handle != fold->handle) {
th = to_hash(fold->handle);
h = from_hash(fold->handle >> 16);
b = rtnl_dereference(head->table[th]);
if (b) {
fp = &b->ht[h];
for (pfp = rtnl_dereference(*fp); pfp;
fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
if (pfp == fold) {
rcu_assign_pointer(*fp, fold->next);
break;
}
}
}
}
route4_reset_fastmap(head);
*arg = f;
if (fold) {
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
tcf_queue_work(&fold->rwork, route4_delete_filter_work);
}
return 0;
errout:
if (f)
tcf_exts_destroy(&f->exts);
kfree(f);
return err;
}
```

| null | null | 197,329 | 57022267833203831104909751798504283544 | 95 |

net_sched: cls_route: remove from list when handle is 0
When a route filter is replaced and the old filter has a 0 handle, the old
one won't be removed from the hashtable, while it will still be freed.
The test was there since before commit 1109c00547fc ("net: sched: RCU
cls_route"), when a new filter was not allocated when there was an old one.
The old filter was reused and the reinserting would only be necessary if an
old filter was replaced. That was still wrong for the same case where the
old handle was 0.
Remove the old filter from the list independently from its handle value.
This fixes CVE-2022-2588, also reported as ZDI-CAN-17440.
Reported-by: Zhenpeng Lin <[email protected]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]>
Reviewed-by: Kamal Mostafa <[email protected]>
Cc: <[email protected]>
Acked-by: Jamal Hadi Salim <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
| other |
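The cls_route message above states the fix directly: the old filter must be unlinked from its bucket regardless of its handle value. A minimal sketch of that change against route4_change(), hedged in that the exact upstream diff may differ in detail:

```cpp
/* Vulnerable gate: skipped when the old handle was 0, so a replaced filter
 * could be freed while still reachable from b->ht[h] (CVE-2022-2588). */
if (fold && fold->handle && f->handle != fold->handle) {
    /* ... unlink fold from its bucket ... */
}

/* Sketch of the fix: unlink whenever a filter is being replaced. */
if (fold) {
    /* ... same unlink walk over b->ht[h] ... */
}
```
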
| tensorflow | 68867bf01239d9e1048f98cbad185bf4761bedd3 | 1 |

```cpp
Status AutoParallel::Initialize(const GrapplerItem& item) {
num_gpus_ = GetNumAvailableGPUs();
LOG(INFO) << "Number of GPUs: " << num_gpus_;
item_ = &item;
graph_ = item.graph;
LOG(INFO) << "Original graph size: " << graph_.node_size();
if (item.fetch.empty()) {
return Status(error::INVALID_ARGUMENT, "No fetch nodes provided.");
}
if (item.MainVariables().empty()) {
return Status(error::INVALID_ARGUMENT, "No variables provided.");
}
for (const auto& init : item.init_ops) {
VLOG(1) << "Init node: " << init;
}
for (const auto& fetch : item.fetch) {
VLOG(1) << "Fetch node: " << fetch;
}
for (const auto& var : item.MainVariables()) {
VLOG(2) << "Variable: " << var->name();
}
const std::set<string> apply_gradients_ops = {"ApplyGradientDescent",
"ApplyProximalGradientDescent",
"ApplyAdadelta",
"ApplyAdagrad",
"ApplyProximalAdagrad",
"ApplyAdagradDA",
"ApplyFtrl",
"ApplyMomentum",
"ApplyAdam",
"ApplyRMSProp",
"ApplyCenteredRMSProp"};
for (int i = 0; i < graph_.node_size(); i++) {
all_nodes_.insert(
std::make_pair(graph_.node(i).name(), graph_.mutable_node(i)));
if (apply_gradients_ops.find(graph_.node(i).op()) !=
apply_gradients_ops.end()) {
apply_gradients_nodes_.insert(graph_.node(i).name());
VLOG(2) << "Apply gradients node: " << graph_.node(i).name();
}
}
auto div_const_node = AddNodeDivConst();
all_nodes_.insert(std::make_pair(div_const_node->name(), div_const_node));
std::map<string, int> gradient_pos = {{"ApplyGradientDescent", 2},
{"ApplyProximalGradientDescent", 4},
{"ApplyAdadelta", 6},
{"ApplyAdagrad", 3},
{"ApplyProximalAdagrad", 5},
{"ApplyAdagradDA", 3},
{"ApplyFtrl", 3},
{"ApplyMomentum", 3},
{"ApplyAdam", 9},
{"ApplyRMSProp", 7},
{"ApplyCenteredRMSProp", 8}};
for (const auto& apply_gradient_node_name : apply_gradients_nodes_) {
auto apply_gradients_op = all_nodes_[apply_gradient_node_name]->op();
auto apply_gradients_node = all_nodes_[apply_gradient_node_name];
auto div_node = AddNodeDiv(
apply_gradient_node_name,
apply_gradients_node->input(gradient_pos[apply_gradients_op]),
div_const_node->name());
all_nodes_.insert(std::make_pair(div_node->name(), div_node));
*apply_gradients_node->mutable_input(gradient_pos[apply_gradients_op]) =
div_node->name();
}
LOG(INFO) << "Graph size after adding div nodes: " << all_nodes_.size();
std::vector<const NodeDef*> train_nodes;
TF_RETURN_IF_ERROR(ComputeTransitiveFanin(graph_, item.fetch, &train_nodes));
LOG(INFO) << "Number of training nodes: " << train_nodes.size();
const NodeDef* dequeue_node;
for (const auto& train_node : train_nodes) {
if (IsDequeueOp(*train_node)) {
dequeue_node = train_node;
break;
}
}
std::vector<const NodeDef*> input_nodes;
if (dequeue_node) {
LOG(INFO) << "Dequeue node: " << dequeue_node->name();
TF_RETURN_IF_ERROR(ComputeTransitiveFanin(graph_, {dequeue_node->name()},
{}, &input_nodes));
}
LOG(INFO) << "Number of input nodes: " << input_nodes.size();
std::set<string> dont_replicate_nodes;
for (const auto& variable : item.MainVariables()) {
dont_replicate_nodes.insert(variable->name());
}
for (const auto& init : item.init_ops) {
dont_replicate_nodes.insert(NodeName(init));
}
// Don't replicate all input nodes, except the dequeue node.
for (const auto& input_node : input_nodes) {
if (input_node->name() != dequeue_node->name()) {
dont_replicate_nodes.insert(input_node->name());
}
}
for (const auto& node : train_nodes) {
if (dont_replicate_nodes.find(node->name()) == dont_replicate_nodes.end()) {
replica_nodes_.insert(node->name());
}
}
LOG(INFO) << "Number of replica nodes: " << replica_nodes_.size();
for (const auto& node : all_nodes_) {
if (replica_nodes_.find(node.first) == replica_nodes_.end()) {
shared_nodes_.insert(node.first);
}
}
LOG(INFO) << "Number of shared nodes: " << shared_nodes_.size();
return Status::OK();
}
```

| null | null | 197,359 | 44781454719041051172233993396995938942 | 125 |

Prevent uninitialized variable use in grappler.
PiperOrigin-RevId: 399702928
Change-Id: Id7e75451fbff297692dfb687f60ea04b25c96b24
| other |
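Per the commit message, the flaw in Initialize() above is an uninitialized local: when no dequeue op exists among the training nodes, `dequeue_node` is never assigned, yet `if (dequeue_node)` and `dequeue_node->name()` still read it. A hedged sketch of the hardening (the upstream patch may differ in wording):

```cpp
const NodeDef* dequeue_node = nullptr;  // was declared without an initializer
for (const auto& train_node : train_nodes) {
  if (IsDequeueOp(*train_node)) {
    dequeue_node = train_node;
    break;
  }
}
// Later uses must tolerate the "no dequeue op" case explicitly:
if (dequeue_node != nullptr && input_node->name() != dequeue_node->name()) {
  dont_replicate_nodes.insert(input_node->name());
}
```
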
| lighttpd1.4 | 971773f1fae600074b46ef64f3ca1f76c227985f | 1 |

```c
static handler_t wstunnel_handler_setup (request_st * const r, plugin_data * const p) {
handler_ctx *hctx = r->plugin_ctx[p->id];
int hybivers;
hctx->errh = r->conf.errh;/*(for mod_wstunnel-specific DEBUG_* macros)*/
hctx->conf = p->conf; /*(copies struct)*/
hybivers = wstunnel_check_request(r, hctx);
if (hybivers < 0) return HANDLER_FINISHED;
hctx->hybivers = hybivers;
if (0 == hybivers) {
DEBUG_LOG_INFO("WebSocket Version = %s", "hybi-00");
}
else {
DEBUG_LOG_INFO("WebSocket Version = %d", hybivers);
}
hctx->gw.opts.backend = BACKEND_PROXY; /*(act proxy-like)*/
hctx->gw.opts.pdata = hctx;
hctx->gw.opts.parse = wstunnel_recv_parse;
hctx->gw.stdin_append = wstunnel_stdin_append;
hctx->gw.create_env = wstunnel_create_env;
hctx->gw.handler_ctx_free = wstunnel_handler_ctx_free;
hctx->gw.backend_error = wstunnel_backend_error;
hctx->gw.response = chunk_buffer_acquire();
hctx->frame.state = MOD_WEBSOCKET_FRAME_STATE_INIT;
hctx->frame.ctl.siz = 0;
hctx->frame.payload = chunk_buffer_acquire();
unsigned int binary = hctx->conf.frame_type; /*(0 = "text"; 1 = "binary")*/
if (!binary) {
const buffer *vb =
http_header_request_get(r, HTTP_HEADER_OTHER, CONST_STR_LEN("Sec-WebSocket-Protocol"));
if (NULL != vb) {
for (const char *s = vb->ptr; *s; ++s) {
while (*s==' '||*s=='\t'||*s=='\r'||*s=='\n') ++s;
if (buffer_eq_icase_ssn(s, CONST_STR_LEN("binary"))) {
s += sizeof("binary")-1;
while (*s==' '||*s=='\t'||*s=='\r'||*s=='\n') ++s;
if (*s==','||*s=='\0') {
hctx->subproto = 1;
binary = 1;
break;
}
}
else if (buffer_eq_icase_ssn(s, CONST_STR_LEN("base64"))) {
s += sizeof("base64")-1;
while (*s==' '||*s=='\t'||*s=='\r'||*s=='\n') ++s;
if (*s==','||*s=='\0') {
hctx->subproto = -1;
break;
}
}
s = strchr(s, ',');
if (NULL == s) break;
}
}
}
if (binary) {
DEBUG_LOG_INFO("%s", "will recv binary data from backend");
hctx->frame.type = MOD_WEBSOCKET_FRAME_TYPE_BIN;
hctx->frame.type_before = MOD_WEBSOCKET_FRAME_TYPE_BIN;
hctx->frame.type_backend = MOD_WEBSOCKET_FRAME_TYPE_BIN;
}
else {
DEBUG_LOG_INFO("%s", "will recv text data from backend");
hctx->frame.type = MOD_WEBSOCKET_FRAME_TYPE_TEXT;
hctx->frame.type_before = MOD_WEBSOCKET_FRAME_TYPE_TEXT;
hctx->frame.type_backend = MOD_WEBSOCKET_FRAME_TYPE_TEXT;
}
return HANDLER_GO_ON;
}
```

| null | null | 197,373 | 109983880448084962630397912027710757424 | 73 |

[mod_wstunnel] fix crash with bad hybivers (fixes #3165)
(thx Michał Dardas)
x-ref:
"mod_wstunnel null pointer dereference"
https://redmine.lighttpd.net/issues/3165
| other |
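The x-ref names the crash class (null pointer dereference) but not the mechanism. In the vulnerable wstunnel_handler_setup() above, a bad handshake makes wstunnel_check_request() fail before `hctx->gw.response` and `hctx->frame.payload` are acquired, so later teardown can free NULL buffers. One plausible fix shape, stated as an assumption rather than the verbatim upstream patch, is to acquire the buffers before any early return:

```cpp
/* Assumption about the fix shape: allocate before the hybivers check so
 * the handler-ctx free path never sees NULL buffers. */
hctx->gw.response   = chunk_buffer_acquire();
hctx->frame.payload = chunk_buffer_acquire();

hybivers = wstunnel_check_request(r, hctx);
if (hybivers < 0) return HANDLER_FINISHED;
hctx->hybivers = hybivers;
```
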
| tensorflow | 4071d8e2f6c45c1955a811fee757ca2adbe462c1 | 1 |

```cpp
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& dims = context->input(1);
if (TensorShapeUtils::IsScalar(input.shape())) {
context->set_output(0, input);
} else {
const int input_dims = input.dims();
OP_REQUIRES(context, TensorShapeUtils::IsVector(dims.shape()),
errors::InvalidArgument("'dims' must be 1-dimension, not ",
dims.dims()));
OP_REQUIRES(
context, input_dims == dims.dim_size(0),
errors::InvalidArgument(
"'dims' must have the same number of values as 'input' has "
"dimensions. 'input' has ",
input_dims, "'dims' has ", dims.dim_size(0), " values"));
OP_REQUIRES(context, input_dims <= 8,
errors::Unimplemented(
"reverse is not implemented for tensors of rank > 8."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
#define HANDLE_REVERSE(NDIMS) \
case NDIMS: \
HandleReverseCase<Device, T, NDIMS>(context, dims.vec<bool>(), output); \
return;
switch (input_dims) {
HANDLE_REVERSE(0);
HANDLE_REVERSE(1);
HANDLE_REVERSE(2);
HANDLE_REVERSE(3);
HANDLE_REVERSE(4);
HANDLE_REVERSE(5);
HANDLE_REVERSE(6);
HANDLE_REVERSE(7);
HANDLE_REVERSE(8);
}
#undef HANDLE_REVERSE
}
}
```

| null | null | 197,395 | 269562292786484769082922681948698599079 | 45 |

Fix FPE issue with `tf.raw_ops.Reverse`.
PiperOrigin-RevId: 371176973
Change-Id: Ic6d483bfc95313ec2299c2d1c956cfe96c96626c
| other |
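The message names the bug class only: an FPE, i.e. integer division by zero inside the rank-specialized reverse kernels. One plausible guard, offered as an assumption and not the verbatim upstream patch, returns degenerate inputs untouched before dispatching on rank:

```cpp
// Assumption: an empty tensor can drive a zero-sized dimension into the
// kernels' stride arithmetic; hand it back unchanged, like a scalar.
if (TensorShapeUtils::IsScalar(input.shape()) || input.NumElements() == 0) {
  context->set_output(0, input);
  return;
}
```
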
| tensorflow | 9e82dce6e6bd1f36a57e08fa85af213e2b2f2622 | 1 |

```cpp
void RestoreTensor(OpKernelContext* context,
checkpoint::TensorSliceReader::OpenTableFunction open_func,
int preferred_shard, bool restore_slice, int restore_index) {
const Tensor& file_pattern_t = context->input(0);
{
const int64_t size = file_pattern_t.NumElements();
OP_REQUIRES(
context, size == 1,
errors::InvalidArgument(
"Input 0 (file_pattern) must be a string scalar; got a tensor of ",
size, "elements"));
}
const string& file_pattern = file_pattern_t.flat<tstring>()(0);
const Tensor& tensor_name_t = context->input(1);
const string& tensor_name = tensor_name_t.flat<tstring>()(restore_index);
// If we cannot find a cached reader we will allocate our own.
std::unique_ptr<checkpoint::TensorSliceReader> allocated_reader;
const checkpoint::TensorSliceReader* reader = nullptr;
if (context->slice_reader_cache()) {
reader = context->slice_reader_cache()->GetReader(file_pattern, open_func,
preferred_shard);
}
if (!reader) {
allocated_reader.reset(new checkpoint::TensorSliceReader(
file_pattern, open_func, preferred_shard));
reader = allocated_reader.get();
}
OP_REQUIRES_OK(context, CHECK_NOTNULL(reader)->status());
// Get the shape and type from the save file.
DataType type;
TensorShape saved_shape;
OP_REQUIRES(
context, reader->HasTensor(tensor_name, &saved_shape, &type),
errors::NotFound("Tensor name \"", tensor_name,
"\" not found in checkpoint files ", file_pattern));
OP_REQUIRES(
context, type == context->expected_output_dtype(restore_index),
errors::InvalidArgument("Expected to restore a tensor of type ",
DataTypeString(context->expected_output_dtype(0)),
", got a tensor of type ", DataTypeString(type),
" instead: tensor_name = ", tensor_name));
// Shape of the output and slice to load.
TensorShape output_shape(saved_shape);
TensorSlice slice_to_load(saved_shape.dims());
if (restore_slice) {
const tstring& shape_spec =
context->input(2).flat<tstring>()(restore_index);
if (!shape_spec.empty()) {
TensorShape parsed_shape;
OP_REQUIRES_OK(context, checkpoint::ParseShapeAndSlice(
shape_spec, &parsed_shape, &slice_to_load,
&output_shape));
OP_REQUIRES(
context, parsed_shape.IsSameSize(saved_shape),
errors::InvalidArgument(
"Shape in shape_and_slice spec does not match the shape in the "
"save file: ",
parsed_shape.DebugString(),
", save file shape: ", saved_shape.DebugString()));
}
}
Tensor* t = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(restore_index, output_shape, &t));
if (output_shape.num_elements() == 0) return;
#define READER_COPY(T) \
case DataTypeToEnum<T>::value: \
OP_REQUIRES(context, \
reader->CopySliceData(tensor_name, slice_to_load, \
t->flat<T>().data()), \
errors::InvalidArgument("Error copying slice data")); \
break;
switch (type) {
TF_CALL_SAVE_RESTORE_TYPES(READER_COPY)
default:
context->SetStatus(errors::Unimplemented(
"Restoring data type ", DataTypeString(type), " not yet supported"));
}
#undef READER_COPY
}
```

| null | null | 197,466 | 35516253093365065269122878247898074447 | 90 |

Fix NPE in restoring code.
PiperOrigin-RevId: 388303253
Change-Id: Ia8c68568cb854bca538909a182b31a618d68ce55
| other |
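In RestoreTensor() above, input 0 is checked to be a scalar, but input 1 is indexed at `restore_index` with no element-count validation, which matches the NPE the message reports. A hedged sketch of the missing check (assumed fix shape; the real error text may differ):

```cpp
const Tensor& tensor_name_t = context->input(1);
// Assumed validation: index restore_index must actually exist.
OP_REQUIRES(
    context, tensor_name_t.NumElements() > restore_index,
    errors::InvalidArgument("Input 1 (tensor names) must have at least ",
                            restore_index + 1, " elements"));
const string& tensor_name = tensor_name_t.flat<tstring>()(restore_index);
```
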
| FFmpeg | c953baa084607dd1d84c3bfcce3cf6a87c3e6e05 | 1 |

```c
static int build_open_gop_key_points(AVStream *st)
{
int k;
int sample_id = 0;
uint32_t cra_index;
MOVStreamContext *sc = st->priv_data;
if (st->codecpar->codec_id != AV_CODEC_ID_HEVC || !sc->sync_group_count)
return 0;
/* Build an unrolled index of the samples */
sc->sample_offsets_count = 0;
for (uint32_t i = 0; i < sc->ctts_count; i++)
sc->sample_offsets_count += sc->ctts_data[i].count;
av_freep(&sc->sample_offsets);
sc->sample_offsets = av_calloc(sc->sample_offsets_count, sizeof(*sc->sample_offsets));
if (!sc->sample_offsets)
return AVERROR(ENOMEM);
k = 0;
for (uint32_t i = 0; i < sc->ctts_count; i++)
for (int j = 0; j < sc->ctts_data[i].count; j++)
sc->sample_offsets[k++] = sc->ctts_data[i].duration;
/* The following HEVC NAL type reveal the use of open GOP sync points
* (TODO: BLA types may also be concerned) */
cra_index = get_sgpd_sync_index(sc, HEVC_NAL_CRA_NUT); /* Clean Random Access */
if (!cra_index)
return 0;
/* Build a list of open-GOP key samples */
sc->open_key_samples_count = 0;
for (uint32_t i = 0; i < sc->sync_group_count; i++)
if (sc->sync_group[i].index == cra_index)
sc->open_key_samples_count += sc->sync_group[i].count;
av_freep(&sc->open_key_samples);
sc->open_key_samples = av_calloc(sc->open_key_samples_count, sizeof(*sc->open_key_samples));
if (!sc->open_key_samples)
return AVERROR(ENOMEM);
k = 0;
for (uint32_t i = 0; i < sc->sync_group_count; i++) {
const MOVSbgp *sg = &sc->sync_group[i];
if (sg->index == cra_index)
for (uint32_t j = 0; j < sg->count; j++)
sc->open_key_samples[k++] = sample_id;
sample_id += sg->count;
}
/* Identify the minimal time step between samples */
sc->min_sample_duration = UINT_MAX;
for (uint32_t i = 0; i < sc->stts_count; i++)
sc->min_sample_duration = FFMIN(sc->min_sample_duration, sc->stts_data[i].duration);
return 0;
}
```

| null | null | 197,473 | 201473938080232102307587175073350073803 | 54 |

avformat/mov: Check count sums in build_open_gop_key_points()
Fixes: ffmpeg.md
Fixes: Out of array access
Fixes: CVE-2022-2566
Found-by: Andy Nguyen <[email protected]>
Found-by: 3pvd <[email protected]>
Reviewed-by: Andy Nguyen <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
| other |
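The commit title says what was missing: the per-entry counts are summed into allocation sizes with no overflow check, so crafted table data can wrap the sums and make the fill loops write past the calloc'd arrays. A hedged sketch of the guard for the first sum (the sync_group sum needs the same treatment; the exact upstream bound may differ):

```cpp
sc->sample_offsets_count = 0;
for (uint32_t i = 0; i < sc->ctts_count; i++) {
    if (sc->ctts_data[i].count > INT_MAX - sc->sample_offsets_count)
        return AVERROR_PATCHWELCOME;  /* refuse sums that would wrap */
    sc->sample_offsets_count += sc->ctts_data[i].count;
}
```
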
| gpac | dc7de8d3d604426c7a6e628d90cb9fb88e7b4c2c | 1 |

```c
GF_Err BD_DecMFFieldVec(GF_BifsDecoder * codec, GF_BitStream *bs, GF_Node *node, GF_FieldInfo *field, Bool is_mem_com)
{
GF_Err e;
u32 NbBits, nbFields;
u32 i;
GF_ChildNodeItem *last;
u8 qp_local, qp_on, initial_qp;
GF_FieldInfo sffield;
memset(&sffield, 0, sizeof(GF_FieldInfo));
sffield.fieldIndex = field->fieldIndex;
sffield.fieldType = gf_sg_vrml_get_sf_type(field->fieldType);
sffield.NDTtype = field->NDTtype;
sffield.name = field->name;
initial_qp = qp_local = qp_on = 0;
//vector description - alloc the MF size before
NbBits = gf_bs_read_int(bs, 5);
nbFields = gf_bs_read_int(bs, NbBits);
if (codec->ActiveQP) {
initial_qp = 1;
/*this is for QP 14*/
gf_bifs_dec_qp14_set_length(codec, nbFields);
}
if (field->fieldType != GF_SG_VRML_MFNODE) {
e = gf_sg_vrml_mf_alloc(field->far_ptr, field->fieldType, nbFields);
if (e) return e;
for (i=0; i<nbFields; i++) {
e = gf_sg_vrml_mf_get_item(field->far_ptr, field->fieldType, & sffield.far_ptr, i);
if (e) return e;
e = gf_bifs_dec_sf_field(codec, bs, node, &sffield, GF_FALSE);
if (e) return e;
}
} else {
last = NULL;
for (i=0; i<nbFields; i++) {
GF_Node *new_node = gf_bifs_dec_node(codec, bs, field->NDTtype);
if (new_node) {
e = gf_node_register(new_node, is_mem_com ? NULL : node);
if (e) return e;
if (node) {
/*special case for QP, register as the current QP*/
if (gf_node_get_tag(new_node) == TAG_MPEG4_QuantizationParameter) {
qp_local = ((M_QuantizationParameter *)new_node)->isLocal;
/*we have a QP in the same scope, remove previous
NB: we assume this is the right behavior, the spec doesn't say
whether QP is cumulative or not*/
if (qp_on) gf_bifs_dec_qp_remove(codec, GF_FALSE);
e = gf_bifs_dec_qp_set(codec, new_node);
if (e) return e;
qp_on = 1;
if (qp_local) qp_local = 2;
if (codec->force_keep_qp) {
e = gf_node_list_add_child_last(field->far_ptr, new_node, &last);
if (e) return e;
} else {
gf_node_register(new_node, NULL);
gf_node_unregister(new_node, node);
}
} else {
e = gf_node_list_add_child_last(field->far_ptr, new_node, &last);
if (e) return e;
}
}
/*proto coding*/
else if (codec->pCurrentProto) {
/*TO DO: what happens if this is a QP node on the interface ?*/
e = gf_node_list_add_child_last( (GF_ChildNodeItem **)field->far_ptr, new_node, &last);
if (e) return e;
}
} else {
return codec->LastError ? codec->LastError : GF_NON_COMPLIANT_BITSTREAM;
}
}
/*according to the spec, the QP applies to the current node itself, not just children.
If IsLocal is TRUE remove the node*/
if (qp_on && qp_local) {
if (qp_local == 2) {
// qp_local = 1;
} else {
//ask to get rid of QP and reactivate if we had a QP when entering the node
gf_bifs_dec_qp_remove(codec, initial_qp);
// qp_local = 0;
}
}
}
/*finally delete the QP if any (local or not) as we get out of this node*/
if (qp_on) gf_bifs_dec_qp_remove(codec, GF_TRUE);
return GF_OK;
}
```

| null | null | 197,499 | 299257728605197431750731122978204720459 | 96 |

fixed #2212
| other |
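The one-line message ("fixed #2212") gives no mechanism, but in BD_DecMFFieldVec() above `nbFields` is an up-to-31-bit count read straight from the bitstream and handed to the allocator. A defensive sketch, offered as an assumption about the fix rather than the actual diff:

```cpp
NbBits = gf_bs_read_int(bs, 5);
nbFields = gf_bs_read_int(bs, NbBits);
/* Each field consumes at least one bit, so a count exceeding the number
 * of remaining bits cannot come from a compliant stream. */
if (nbFields > 8 * gf_bs_available(bs))
    return GF_NON_COMPLIANT_BITSTREAM;
```
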
| curl | 914aaab9153764ef8fa4178215b8ad89d3ac263a | 1 |

```c
static CURLUcode hostname_check(struct Curl_URL *u, char *hostname)
{
size_t len;
size_t hlen = strlen(hostname);
if(hostname[0] == '[') {
const char *l = "0123456789abcdefABCDEF:.";
if(hlen < 4) /* '[::]' is the shortest possible valid string */
return CURLUE_BAD_IPV6;
hostname++;
hlen -= 2;
if(hostname[hlen] != ']')
return CURLUE_BAD_IPV6;
/* only valid letters are ok */
len = strspn(hostname, l);
if(hlen != len) {
hlen = len;
if(hostname[len] == '%') {
/* this could now be '%[zone id]' */
char zoneid[16];
int i = 0;
char *h = &hostname[len + 1];
/* pass '25' if present and is a url encoded percent sign */
if(!strncmp(h, "25", 2) && h[2] && (h[2] != ']'))
h += 2;
while(*h && (*h != ']') && (i < 15))
zoneid[i++] = *h++;
if(!i || (']' != *h))
/* impossible to reach? */
return CURLUE_MALFORMED_INPUT;
zoneid[i] = 0;
u->zoneid = strdup(zoneid);
if(!u->zoneid)
return CURLUE_OUT_OF_MEMORY;
hostname[len] = ']'; /* insert end bracket */
hostname[len + 1] = 0; /* terminate the hostname */
}
else
return CURLUE_BAD_IPV6;
/* hostname is fine */
}
#ifdef ENABLE_IPV6
{
char dest[16]; /* fits a binary IPv6 address */
char norm[MAX_IPADR_LEN];
hostname[hlen] = 0; /* end the address there */
if(1 != Curl_inet_pton(AF_INET6, hostname, dest))
return CURLUE_BAD_IPV6;
/* check if it can be done shorter */
if(Curl_inet_ntop(AF_INET6, dest, norm, sizeof(norm)) &&
(strlen(norm) < hlen)) {
strcpy(hostname, norm);
hlen = strlen(norm);
hostname[hlen + 1] = 0;
}
hostname[hlen] = ']'; /* restore ending bracket */
}
#endif
}
else {
/* letters from the second string are not ok */
len = strcspn(hostname, " \r\n");
if(hlen != len)
/* hostname with bad content */
return CURLUE_BAD_HOSTNAME;
}
if(!hostname[0])
return CURLUE_NO_HOST;
return CURLUE_OK;
}
```

| null | null | 197,503 | 335913449235086315587446947934344013072 | 73 |

urlapi: reject percent-decoding host name into separator bytes
CVE-2022-27780
Reported-by: Axel Chong
Bug: https://curl.se/docs/CVE-2022-27780.html
Closes #8826
| other |
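The advisory title describes the hole: hostname_check() rejects only space, CR and LF, so a host name percent-decoded elsewhere can still carry separator bytes and splice into a different request target. A hedged sketch of the tightened check (the byte set chosen upstream may differ):

```cpp
/* reject URL separator bytes that percent-decoding could have smuggled
 * into the host name (exact reject set is an assumption) */
len = strcspn(hostname, " \r\n\t/:#?!@");
if (hlen != len)
  return CURLUE_BAD_HOSTNAME;
```
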
| libjpeg | 187035b9726710b4fe11d565c7808975c930895d | 1 |

```cpp
void HierarchicalBitmapRequester::PrepareForDecoding(void)
{
#if ACCUSOFT_CODE
UBYTE i;
BuildCommon();
if (m_ppDecodingMCU == NULL) {
m_ppDecodingMCU = (struct Line **)m_pEnviron->AllocMem(sizeof(struct Line *) * m_ucCount*8);
memset(m_ppDecodingMCU,0,sizeof(struct Line *) * m_ucCount * 8);
}
if (m_ppUpsampler == NULL) {
m_ppUpsampler = (class UpsamplerBase **)m_pEnviron->AllocMem(sizeof(class UpsamplerBase *) * m_ucCount);
memset(m_ppUpsampler,0,sizeof(class Upsampler *) * m_ucCount);
for(i = 0;i < m_ucCount;i++) {
class Component *comp = m_pFrame->ComponentOf(i);
UBYTE sx = comp->SubXOf();
UBYTE sy = comp->SubYOf();
if (sx > 1 || sy > 1) {
m_ppUpsampler[i] = UpsamplerBase::CreateUpsampler(m_pEnviron,sx,sy,
m_ulPixelWidth,m_ulPixelHeight,
m_pFrame->TablesOf()->isChromaCentered());
m_bSubsampling = true;
}
}
}
if (m_pLargestScale)
m_pLargestScale->PrepareForDecoding();
#endif
}
```

| null | null | 197,511 | 268108991876913793224174807817149913216 | 35 |

The code now checks for consistency of the MCU sizes across
hierarchical levels, and fails in case they are different.
| other |
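The message states the new invariant (MCU sizes must agree across hierarchical levels) without showing it. A sketch of such a check; the accessor names `McuWidthOf`/`McuHeightOf` are illustrative assumptions, not this library's actual API:

```cpp
// Illustrative only: abort when any component's MCU geometry on this
// level disagrees with the largest scale.
for (i = 0; i < m_ucCount; i++) {
  if (McuWidthOf(i) != m_pLargestScale->McuWidthOf(i) ||      // assumed accessor
      McuHeightOf(i) != m_pLargestScale->McuHeightOf(i))      // assumed accessor
    JPG_THROW(MALFORMED_STREAM, "HierarchicalBitmapRequester::PrepareForDecoding",
              "MCU sizes differ between hierarchical levels");
}
```
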
| glewlwyd | 0efd112bb62f566877750ad62ee828bff579b4e2 | 1 |

```c
static json_t * check_attestation_fido_u2f(json_t * j_params, unsigned char * credential_id, size_t credential_id_len, unsigned char * cert_x, size_t cert_x_len, unsigned char * cert_y, size_t cert_y_len, cbor_item_t * att_stmt, unsigned char * rpid_hash, size_t rpid_hash_len, const unsigned char * client_data) {
json_t * j_error = json_array(), * j_return;
cbor_item_t * key = NULL, * x5c = NULL, * sig = NULL, * att_cert = NULL;
int i, ret;
char * message = NULL;
gnutls_pubkey_t pubkey = NULL;
gnutls_x509_crt_t cert = NULL;
gnutls_datum_t cert_dat, data, signature, cert_issued_by;
unsigned char data_signed[200], client_data_hash[32], cert_export[32], cert_export_b64[64];
size_t data_signed_offset = 0, client_data_hash_len = 32, cert_export_len = 32, cert_export_b64_len = 0;
if (j_error != NULL) {
do {
if (gnutls_x509_crt_init(&cert)) {
json_array_append_new(j_error, json_string("check_attestation_fido_u2f - Error gnutls_x509_crt_init"));
break;
}
if (gnutls_pubkey_init(&pubkey)) {
json_array_append_new(j_error, json_string("check_attestation_fido_u2f - Error gnutls_pubkey_init"));
break;
}
// Step 1
if (att_stmt == NULL || !cbor_isa_map(att_stmt) || cbor_map_size(att_stmt) != 2) {
json_array_append_new(j_error, json_string("CBOR map value 'attStmt' invalid format"));
break;
}
for (i=0; i<2; i++) {
key = cbor_map_handle(att_stmt)[i].key;
if (cbor_isa_string(key)) {
if (0 == o_strncmp((const char *)cbor_string_handle(key), "x5c", MIN(o_strlen("x5c"), cbor_string_length(key)))) {
x5c = cbor_map_handle(att_stmt)[i].value;
} else if (0 == o_strncmp((const char *)cbor_string_handle(key), "sig", MIN(o_strlen("sig"), cbor_string_length(key)))) {
sig = cbor_map_handle(att_stmt)[i].value;
} else {
message = msprintf("attStmt map element %d key is not valid: '%.*s'", i, cbor_string_length(key), cbor_string_handle(key));
json_array_append_new(j_error, json_string(message));
o_free(message);
break;
}
} else {
message = msprintf("attStmt map element %d key is not a string", i);
json_array_append_new(j_error, json_string(message));
o_free(message);
break;
}
}
if (x5c == NULL || !cbor_isa_array(x5c) || cbor_array_size(x5c) != 1) {
json_array_append_new(j_error, json_string("CBOR map value 'x5c' invalid format"));
break;
}
att_cert = cbor_array_get(x5c, 0);
cert_dat.data = cbor_bytestring_handle(att_cert);
cert_dat.size = cbor_bytestring_length(att_cert);
if ((ret = gnutls_x509_crt_import(cert, &cert_dat, GNUTLS_X509_FMT_DER)) < 0) {
json_array_append_new(j_error, json_string("Error importing x509 certificate"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Error gnutls_pcert_import_x509_raw: %d", ret);
break;
}
if (json_object_get(j_params, "root-ca-list") != json_null() && validate_certificate_from_root(j_params, cert, x5c) != G_OK) {
json_array_append_new(j_error, json_string("Unrecognized certificate authority"));
if (gnutls_x509_crt_get_issuer_dn2(cert, &cert_issued_by) >= 0) {
message = msprintf("Unrecognized certificate autohority: %.*s", cert_issued_by.size, cert_issued_by.data);
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - %s", message);
o_free(message);
gnutls_free(cert_issued_by.data);
} else {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Unrecognized certificate autohority (unable to get issuer dn)");
}
break;
}
if ((ret = gnutls_pubkey_import_x509(pubkey, cert, 0)) < 0) {
json_array_append_new(j_error, json_string("Error importing x509 certificate"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Error gnutls_pubkey_import_x509: %d", ret);
break;
}
if ((ret = gnutls_x509_crt_get_key_id(cert, GNUTLS_KEYID_USE_SHA256, cert_export, &cert_export_len)) < 0) {
json_array_append_new(j_error, json_string("Error exporting x509 certificate"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Error gnutls_x509_crt_get_key_id: %d", ret);
break;
}
if (!o_base64_encode(cert_export, cert_export_len, cert_export_b64, &cert_export_b64_len)) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Error o_base64_encode cert_export");
break;
}
if (!generate_digest_raw(digest_SHA256, client_data, o_strlen((char *)client_data), client_data_hash, &client_data_hash_len)) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_fido_u2f - Error generate_digest_raw client_data");
break;
}
if (sig == NULL || !cbor_isa_bytestring(sig)) {
json_array_append_new(j_error, json_string("Error sig is not a bytestring"));
break;
}
// Build bytestring to verify signature
data_signed[0] = 0x0;
data_signed_offset = 1;
memcpy(data_signed+data_signed_offset, rpid_hash, rpid_hash_len);
data_signed_offset += rpid_hash_len;
memcpy(data_signed+data_signed_offset, client_data_hash, client_data_hash_len);
data_signed_offset+=client_data_hash_len;
memcpy(data_signed+data_signed_offset, credential_id, credential_id_len);
data_signed_offset+=credential_id_len;
data_signed[data_signed_offset] = 0x04;
data_signed_offset++;
memcpy(data_signed+data_signed_offset, cert_x, cert_x_len);
data_signed_offset+=cert_x_len;
memcpy(data_signed+data_signed_offset, cert_y, cert_y_len);
data_signed_offset+=cert_y_len;
// Let's verify sig over data_signed
data.data = data_signed;
data.size = data_signed_offset;
signature.data = cbor_bytestring_handle(sig);
signature.size = cbor_bytestring_length(sig);
if (gnutls_pubkey_verify_data2(pubkey, GNUTLS_SIGN_ECDSA_SHA256, 0, &data, &signature)) {
json_array_append_new(j_error, json_string("Invalid signature"));
}
} while (0);
if (json_array_size(j_error)) {
j_return = json_pack("{sisO}", "result", G_ERROR_PARAM, "error", j_error);
} else {
j_return = json_pack("{sis{ss%}}", "result", G_OK, "data", "certificate", cert_export_b64, cert_export_b64_len);
}
json_decref(j_error);
gnutls_pubkey_deinit(pubkey);
gnutls_x509_crt_deinit(cert);
if (att_cert != NULL) {
cbor_decref(&att_cert);
}
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_fido_u2f - Error allocating resources for j_error");
j_return = json_pack("{si}", "result", G_ERROR);
}
return j_return;
}
```

| null | null | 197,517 | 200217168569920456522529557748799680683 | 150 |

Fix fido2 signature validation bug
| other |
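The message says only "Fix fido2 signature validation bug". In the function above, the signed message is assembled into a fixed `data_signed[200]` while `credential_id`, `cert_x` and `cert_y` have attacker-influenced lengths, so the memcpy chain can overflow before the signature is ever checked. A hedged sketch of a bounds check (assumed fix shape, not the verbatim patch):

```cpp
/* Assumed fix shape: the concatenation must fit data_signed[200]. */
if (1 + rpid_hash_len + client_data_hash_len + credential_id_len +
    1 + cert_x_len + cert_y_len > sizeof(data_signed)) {
  json_array_append_new(j_error, json_string("Invalid signature data length"));
  break;
}
```
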
| tensorflow | 098e7762d909bac47ce1dbabe6dfd06294cb9d58 | 1 |

```cpp
void Compute(OpKernelContext* ctx) override {
const Tensor& gradient = ctx->input(0);
const Tensor& input = ctx->input(1);
Tensor* input_backprop = nullptr;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, input.shape(), &input_backprop));
OP_REQUIRES(
ctx, axis_ >= -1,
errors::InvalidArgument("Axis must be at least -1. Found ", axis_));
OP_REQUIRES(ctx, (axis_ == -1 || axis_ < input.shape().dims()),
errors::InvalidArgument(
"Axis should be -1 or 0 or a positive value less than ",
input.shape().dims(), "but given axis value was ", axis_));
OP_REQUIRES(
ctx, input.IsSameSize(gradient),
errors::InvalidArgument("gradient and input must be the same size"));
const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_);
const Tensor& input_min_tensor = ctx->input(2);
OP_REQUIRES(ctx,
input_min_tensor.dims() == 0 || input_min_tensor.dims() == 1,
errors::InvalidArgument(
"Input min tensor must have dimension 1. Recieved ",
input_min_tensor.dims(), "."));
const Tensor& input_max_tensor = ctx->input(3);
OP_REQUIRES(ctx,
input_max_tensor.dims() == 0 || input_max_tensor.dims() == 1,
errors::InvalidArgument(
"Input max tensor must have dimension 1. Recieved ",
input_max_tensor.dims(), "."));
if (axis_ != -1) {
OP_REQUIRES(
ctx, input_min_tensor.dim_size(0) == depth,
errors::InvalidArgument("min has incorrect size, expected ", depth,
" was ", input_min_tensor.dim_size(0)));
OP_REQUIRES(
ctx, input_max_tensor.dim_size(0) == depth,
errors::InvalidArgument("max has incorrect size, expected ", depth,
" was ", input_max_tensor.dim_size(0)));
}
TensorShape min_max_shape(input_min_tensor.shape());
Tensor* input_min_backprop;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(1, min_max_shape, &input_min_backprop));
Tensor* input_max_backprop;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(2, min_max_shape, &input_max_backprop));
if (axis_ == -1) {
functor::QuantizeAndDequantizeOneScaleGradientFunctor<Device, T> f;
f(ctx->eigen_device<Device>(), gradient.template flat<T>(),
input.template flat<T>(), input_min_tensor.scalar<T>(),
input_max_tensor.scalar<T>(), input_backprop->template flat<T>(),
input_min_backprop->template scalar<T>(),
input_max_backprop->template scalar<T>());
} else {
functor::QuantizeAndDequantizePerChannelGradientFunctor<Device, T> f;
f(ctx->eigen_device<Device>(),
gradient.template flat_inner_outer_dims<T, 3>(axis_ - 1),
input.template flat_inner_outer_dims<T, 3>(axis_ - 1),
&input_min_tensor, &input_max_tensor,
input_backprop->template flat_inner_outer_dims<T, 3>(axis_ - 1),
input_min_backprop->template flat<T>(),
input_max_backprop->template flat<T>());
}
}
```

| null | null | 197,518 | 323622655184970473925088678540103856747 | 68 |

Fix tf.raw_ops.QuantizeAndDequantizeV4Grad vulnerability with invalid input_min or input_max.
Check that argument is actually a scalar before treating it as such.
PiperOrigin-RevId: 445198280
| other |
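Here the message confirms the fix idea: the earlier checks accept rank 0 or 1 for `input_min`/`input_max`, yet the `axis_ == -1` branch calls `.scalar<T>()` on both. A sketch of the confirmed check (error wording is illustrative):

```cpp
if (axis_ == -1) {
  OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(input_min_tensor.shape()),
              errors::InvalidArgument("input_min must be a scalar if axis is -1"));
  OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(input_max_tensor.shape()),
              errors::InvalidArgument("input_max must be a scalar if axis is -1"));
}
```
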
| wolfMQTT | 84d4b53122e0fa0280c7872350b89d5777dabbb2 | 1 |

```c
static int MqttClient_WaitType(MqttClient *client, void *packet_obj,
byte wait_type, word16 wait_packet_id, int timeout_ms)
{
int rc;
word16 packet_id;
MqttPacketType packet_type;
#ifdef WOLFMQTT_MULTITHREAD
MqttPendResp *pendResp;
int readLocked;
#endif
MqttMsgStat* mms_stat;
int waitMatchFound;
if (client == NULL || packet_obj == NULL) {
return MQTT_CODE_ERROR_BAD_ARG;
}
/* all packet type structures must have MqttMsgStat at top */
mms_stat = (MqttMsgStat*)packet_obj;
wait_again:
/* initialize variables */
packet_id = 0;
packet_type = MQTT_PACKET_TYPE_RESERVED;
#ifdef WOLFMQTT_MULTITHREAD
pendResp = NULL;
readLocked = 0;
#endif
waitMatchFound = 0;
#ifdef WOLFMQTT_DEBUG_CLIENT
PRINTF("MqttClient_WaitType: Type %s (%d), ID %d",
MqttPacket_TypeDesc((MqttPacketType)wait_type),
wait_type, wait_packet_id);
#endif
switch ((int)*mms_stat)
{
case MQTT_MSG_BEGIN:
{
#ifdef WOLFMQTT_MULTITHREAD
/* Lock recv socket mutex */
rc = wm_SemLock(&client->lockRecv);
if (rc != 0) {
PRINTF("MqttClient_WaitType: recv lock error!");
return rc;
}
readLocked = 1;
#endif
/* reset the packet state */
client->packet.stat = MQTT_PK_BEGIN;
}
FALL_THROUGH;
#ifdef WOLFMQTT_V5
case MQTT_MSG_AUTH:
#endif
case MQTT_MSG_WAIT:
{
#ifdef WOLFMQTT_MULTITHREAD
/* Check to see if packet type and id have already completed */
pendResp = NULL;
rc = wm_SemLock(&client->lockClient);
if (rc == 0) {
if (MqttClient_RespList_Find(client, (MqttPacketType)wait_type,
wait_packet_id, &pendResp)) {
if (pendResp->packetDone) {
/* pending response is already done, so return */
rc = pendResp->packet_ret;
#ifdef WOLFMQTT_DEBUG_CLIENT
PRINTF("PendResp already Done %p: Rc %d", pendResp, rc);
#endif
MqttClient_RespList_Remove(client, pendResp);
wm_SemUnlock(&client->lockClient);
wm_SemUnlock(&client->lockRecv);
return rc;
}
}
wm_SemUnlock(&client->lockClient);
}
else {
break; /* error */
}
#endif /* WOLFMQTT_MULTITHREAD */
*mms_stat = MQTT_MSG_WAIT;
/* Wait for packet */
rc = MqttPacket_Read(client, client->rx_buf, client->rx_buf_len,
timeout_ms);
/* handle failure */
if (rc <= 0) {
break;
}
/* capture length read */
client->packet.buf_len = rc;
/* Decode Packet - get type and id */
rc = MqttClient_DecodePacket(client, client->rx_buf,
client->packet.buf_len, NULL, &packet_type, NULL, &packet_id);
if (rc < 0) {
break;
}
#ifdef WOLFMQTT_DEBUG_CLIENT
PRINTF("Read Packet: Len %d, Type %d, ID %d",
client->packet.buf_len, packet_type, packet_id);
#endif
*mms_stat = MQTT_MSG_READ;
}
FALL_THROUGH;
case MQTT_MSG_READ:
case MQTT_MSG_READ_PAYLOAD:
{
MqttPacketType use_packet_type;
void* use_packet_obj;
#ifdef WOLFMQTT_MULTITHREAD
readLocked = 1; /* if in this state read is locked */
#endif
/* read payload state only happens for publish messages */
if (*mms_stat == MQTT_MSG_READ_PAYLOAD) {
packet_type = MQTT_PACKET_TYPE_PUBLISH;
}
/* Determine if we received data for this request */
if ((wait_type == MQTT_PACKET_TYPE_ANY ||
wait_type == packet_type ||
MqttIsPubRespPacket(packet_type) == MqttIsPubRespPacket(wait_type)) &&
(wait_packet_id == 0 || wait_packet_id == packet_id))
{
use_packet_obj = packet_obj;
waitMatchFound = 1;
}
else {
/* use generic packet object */
use_packet_obj = &client->msg;
}
use_packet_type = packet_type;
#ifdef WOLFMQTT_MULTITHREAD
/* Check to see if we have a pending response for this packet */
pendResp = NULL;
rc = wm_SemLock(&client->lockClient);
if (rc == 0) {
if (MqttClient_RespList_Find(client, packet_type, packet_id,
&pendResp)) {
/* we found packet match this incoming read packet */
pendResp->packetProcessing = 1;
use_packet_obj = pendResp->packet_obj;
use_packet_type = pendResp->packet_type;
/* req from another thread... not a match */
waitMatchFound = 0;
}
wm_SemUnlock(&client->lockClient);
}
else {
break; /* error */
}
#endif /* WOLFMQTT_MULTITHREAD */
/* Perform packet handling for publish callback and QoS */
rc = MqttClient_HandlePacket(client, use_packet_type,
use_packet_obj, timeout_ms);
#ifdef WOLFMQTT_NONBLOCK
if (rc == MQTT_CODE_CONTINUE) {
/* we have received some data, so keep the recv
mutex lock active and return */
return rc;
}
#endif
/* handle success case */
if (rc >= 0) {
rc = MQTT_CODE_SUCCESS;
}
#ifdef WOLFMQTT_MULTITHREAD
if (pendResp) {
/* Mark pending response entry done */
if (wm_SemLock(&client->lockClient) == 0) {
pendResp->packetDone = 1;
pendResp->packet_ret = rc;
#ifdef WOLFMQTT_DEBUG_CLIENT
PRINTF("PendResp Done %p", pendResp);
#endif
pendResp = NULL;
wm_SemUnlock(&client->lockClient);
}
}
#endif /* WOLFMQTT_MULTITHREAD */
break;
}
case MQTT_MSG_WRITE:
case MQTT_MSG_WRITE_PAYLOAD:
default:
{
#ifdef WOLFMQTT_DEBUG_CLIENT
PRINTF("MqttClient_WaitType: Invalid state %d!", *mms_stat);
#endif
rc = MQTT_CODE_ERROR_STAT;
break;
}
} /* switch (*mms_stat) */
#ifdef WOLFMQTT_NONBLOCK
if (rc != MQTT_CODE_CONTINUE)
#endif
{
/* reset state */
*mms_stat = MQTT_MSG_BEGIN;
}
#ifdef WOLFMQTT_MULTITHREAD
if (readLocked) {
wm_SemUnlock(&client->lockRecv);
}
#endif
if (rc < 0) {
#ifdef WOLFMQTT_DEBUG_CLIENT
PRINTF("MqttClient_WaitType: Failure: %s (%d)",
MqttClient_ReturnCodeToString(rc), rc);
#endif
return rc;
}
if (!waitMatchFound) {
/* if we get here, then we are still waiting for a packet */
goto wait_again;
}
return rc;
}
```

| null | null | 197,565 | 199649762322354424417738575772569260931 | 241 |

Fix wolfmqtt-fuzzer: Null-dereference WRITE in MqttProps_Free
| other |
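The fuzzer title points at MqttProps_Free() writing through a bad property list; the wait loop above shows how an error path can hand a partially initialized packet object to cleanup. A generic hedged guard for that crash class (illustrative only, not the upstream diff):

```cpp
int MqttProps_Free(MqttProp *head)
{
    if (head == NULL) {  /* tolerate packets whose props were never set up */
        return MQTT_CODE_SUCCESS;
    }
    /* ... walk the list, releasing each node ... */
    return MQTT_CODE_SUCCESS;
}
```
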
| libde265 | e83f3798dd904aa579425c53020c67e03735138d | 1 |

```cpp
de265_error slice_segment_header::read(bitreader* br, decoder_context* ctx,
bool* continueDecoding)
{
*continueDecoding = false;
reset();
// set defaults
dependent_slice_segment_flag = 0;
// read bitstream
first_slice_segment_in_pic_flag = get_bits(br,1);
if (ctx->get_RapPicFlag()) { // TODO: is this still correct ? Should we drop RapPicFlag ?
no_output_of_prior_pics_flag = get_bits(br,1);
}
slice_pic_parameter_set_id = get_uvlc(br);
if (slice_pic_parameter_set_id > DE265_MAX_PPS_SETS ||
slice_pic_parameter_set_id == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_NONEXISTING_PPS_REFERENCED, false);
return DE265_OK;
}
if (!ctx->has_pps(slice_pic_parameter_set_id)) {
ctx->add_warning(DE265_WARNING_NONEXISTING_PPS_REFERENCED, false);
return DE265_OK;
}
pps = ctx->get_shared_pps(slice_pic_parameter_set_id);
const seq_parameter_set* sps = pps->sps.get();
if (!sps->sps_read) {
ctx->add_warning(DE265_WARNING_NONEXISTING_SPS_REFERENCED, false);
*continueDecoding = false;
return DE265_OK;
}
if (!first_slice_segment_in_pic_flag) {
if (pps->dependent_slice_segments_enabled_flag) {
dependent_slice_segment_flag = get_bits(br,1);
} else {
dependent_slice_segment_flag = 0;
}
int slice_segment_address = get_bits(br, ceil_log2(sps->PicSizeInCtbsY));
if (dependent_slice_segment_flag) {
if (slice_segment_address == 0) {
*continueDecoding = false;
ctx->add_warning(DE265_WARNING_DEPENDENT_SLICE_WITH_ADDRESS_ZERO, false);
return DE265_OK;
}
if (ctx->previous_slice_header == NULL) {
return DE265_ERROR_NO_INITIAL_SLICE_HEADER;
}
*this = *ctx->previous_slice_header;
first_slice_segment_in_pic_flag = 0;
dependent_slice_segment_flag = 1;
}
this->slice_segment_address = slice_segment_address;
} else {
dependent_slice_segment_flag = 0;
slice_segment_address = 0;
}
if (slice_segment_address < 0 ||
slice_segment_address >= sps->PicSizeInCtbsY) {
ctx->add_warning(DE265_WARNING_SLICE_SEGMENT_ADDRESS_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
//printf("SLICE %d (%d)\n",slice_segment_address, sps->PicSizeInCtbsY);
if (!dependent_slice_segment_flag) {
for (int i=0; i<pps->num_extra_slice_header_bits; i++) {
//slice_reserved_undetermined_flag[i]
skip_bits(br,1);
}
slice_type = get_uvlc(br);
if (slice_type > 2 ||
slice_type == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
*continueDecoding = false;
return DE265_OK;
}
if (pps->output_flag_present_flag) {
pic_output_flag = get_bits(br,1);
}
else {
pic_output_flag = 1;
}
if (sps->separate_colour_plane_flag == 1) {
colour_plane_id = get_bits(br,2);
}
slice_pic_order_cnt_lsb = 0;
short_term_ref_pic_set_sps_flag = 0;
int NumLtPics = 0;
if (ctx->get_nal_unit_type() != NAL_UNIT_IDR_W_RADL &&
ctx->get_nal_unit_type() != NAL_UNIT_IDR_N_LP) {
slice_pic_order_cnt_lsb = get_bits(br, sps->log2_max_pic_order_cnt_lsb);
short_term_ref_pic_set_sps_flag = get_bits(br,1);
if (!short_term_ref_pic_set_sps_flag) {
read_short_term_ref_pic_set(ctx, sps,
br, &slice_ref_pic_set,
sps->num_short_term_ref_pic_sets(),
sps->ref_pic_sets,
true);
CurrRpsIdx = sps->num_short_term_ref_pic_sets();
CurrRps = slice_ref_pic_set;
}
else {
int nBits = ceil_log2(sps->num_short_term_ref_pic_sets());
if (nBits>0) short_term_ref_pic_set_idx = get_bits(br,nBits);
else short_term_ref_pic_set_idx = 0;
if (short_term_ref_pic_set_idx >= sps->num_short_term_ref_pic_sets()) {
ctx->add_warning(DE265_WARNING_SHORT_TERM_REF_PIC_SET_OUT_OF_RANGE, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
CurrRpsIdx = short_term_ref_pic_set_idx;
CurrRps = sps->ref_pic_sets[CurrRpsIdx];
}
// --- long-term MC ---
if (sps->long_term_ref_pics_present_flag) {
if (sps->num_long_term_ref_pics_sps > 0) {
num_long_term_sps = get_uvlc(br);
if (num_long_term_sps == UVLC_ERROR) {
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
}
else {
num_long_term_sps = 0;
}
num_long_term_pics= get_uvlc(br);
if (num_long_term_pics == UVLC_ERROR) {
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
// check maximum number of reference frames
if (num_long_term_sps +
num_long_term_pics +
CurrRps.NumNegativePics +
CurrRps.NumPositivePics
> sps->sps_max_dec_pic_buffering[sps->sps_max_sub_layers-1])
{
ctx->add_warning(DE265_WARNING_MAX_NUM_REF_PICS_EXCEEDED, false);
*continueDecoding = false;
return DE265_OK;
}
for (int i=0; i<num_long_term_sps + num_long_term_pics; i++) {
if (i < num_long_term_sps) {
int nBits = ceil_log2(sps->num_long_term_ref_pics_sps);
lt_idx_sps[i] = get_bits(br, nBits);
// check that the referenced lt-reference really exists
if (lt_idx_sps[i] >= sps->num_long_term_ref_pics_sps) {
ctx->add_warning(DE265_NON_EXISTING_LT_REFERENCE_CANDIDATE_IN_SLICE_HEADER, false);
*continueDecoding = false;
return DE265_OK;
}
// delta_poc_msb_present_flag[i] = 0; // TODO ?
ctx->PocLsbLt[i] = sps->lt_ref_pic_poc_lsb_sps[ lt_idx_sps[i] ];
ctx->UsedByCurrPicLt[i] = sps->used_by_curr_pic_lt_sps_flag[ lt_idx_sps[i] ];
}
else {
int nBits = sps->log2_max_pic_order_cnt_lsb;
poc_lsb_lt[i] = get_bits(br, nBits);
used_by_curr_pic_lt_flag[i] = get_bits(br,1);
ctx->PocLsbLt[i] = poc_lsb_lt[i];
ctx->UsedByCurrPicLt[i] = used_by_curr_pic_lt_flag[i];
}
if (ctx->UsedByCurrPicLt[i]) {
NumLtPics++;
}
delta_poc_msb_present_flag[i] = get_bits(br,1);
if (delta_poc_msb_present_flag[i]) {
delta_poc_msb_cycle_lt[i] = get_uvlc(br);
if (delta_poc_msb_cycle_lt[i]==UVLC_ERROR) {
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
}
else {
delta_poc_msb_cycle_lt[i] = 0;
}
if (i==0 || i==num_long_term_sps) {
ctx->DeltaPocMsbCycleLt[i] = delta_poc_msb_cycle_lt[i];
}
else {
ctx->DeltaPocMsbCycleLt[i] = (delta_poc_msb_cycle_lt[i] +
ctx->DeltaPocMsbCycleLt[i-1]);
}
}
}
else {
num_long_term_sps = 0;
num_long_term_pics= 0;
}
if (sps->sps_temporal_mvp_enabled_flag) {
slice_temporal_mvp_enabled_flag = get_bits(br,1);
}
else {
slice_temporal_mvp_enabled_flag = 0;
}
}
else {
slice_pic_order_cnt_lsb = 0;
num_long_term_sps = 0;
num_long_term_pics= 0;
}
// --- SAO ---
if (sps->sample_adaptive_offset_enabled_flag) {
slice_sao_luma_flag = get_bits(br,1);
if (sps->ChromaArrayType != CHROMA_MONO) {
slice_sao_chroma_flag = get_bits(br,1);
}
else {
slice_sao_chroma_flag = 0;
}
}
else {
slice_sao_luma_flag = 0;
slice_sao_chroma_flag = 0;
}
num_ref_idx_l0_active = 0;
num_ref_idx_l1_active = 0;
if (slice_type == SLICE_TYPE_P ||
slice_type == SLICE_TYPE_B) {
num_ref_idx_active_override_flag = get_bits(br,1);
if (num_ref_idx_active_override_flag) {
num_ref_idx_l0_active = get_uvlc(br);
if (num_ref_idx_l0_active == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
num_ref_idx_l0_active++;
if (slice_type == SLICE_TYPE_B) {
num_ref_idx_l1_active = get_uvlc(br);
if (num_ref_idx_l1_active == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
num_ref_idx_l1_active++;
}
}
else {
num_ref_idx_l0_active = pps->num_ref_idx_l0_default_active;
num_ref_idx_l1_active = pps->num_ref_idx_l1_default_active;
}
if (num_ref_idx_l0_active > 16) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; }
if (num_ref_idx_l1_active > 16) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; }
NumPocTotalCurr = CurrRps.NumPocTotalCurr_shortterm_only + NumLtPics;
if (pps->lists_modification_present_flag && NumPocTotalCurr > 1) {
int nBits = ceil_log2(NumPocTotalCurr);
ref_pic_list_modification_flag_l0 = get_bits(br,1);
if (ref_pic_list_modification_flag_l0) {
for (int i=0;i<num_ref_idx_l0_active;i++) {
list_entry_l0[i] = get_bits(br, nBits);
}
}
if (slice_type == SLICE_TYPE_B) {
ref_pic_list_modification_flag_l1 = get_bits(br,1);
if (ref_pic_list_modification_flag_l1) {
for (int i=0;i<num_ref_idx_l1_active;i++) {
list_entry_l1[i] = get_bits(br, nBits);
}
}
}
else {
ref_pic_list_modification_flag_l1 = 0;
}
}
else {
ref_pic_list_modification_flag_l0 = 0;
ref_pic_list_modification_flag_l1 = 0;
}
if (slice_type == SLICE_TYPE_B) {
mvd_l1_zero_flag = get_bits(br,1);
}
if (pps->cabac_init_present_flag) {
cabac_init_flag = get_bits(br,1);
}
else {
cabac_init_flag = 0;
}
if (slice_temporal_mvp_enabled_flag) {
if (slice_type == SLICE_TYPE_B)
collocated_from_l0_flag = get_bits(br,1);
else
collocated_from_l0_flag = 1;
if (( collocated_from_l0_flag && num_ref_idx_l0_active > 1) ||
(!collocated_from_l0_flag && num_ref_idx_l1_active > 1)) {
collocated_ref_idx = get_uvlc(br);
if (collocated_ref_idx == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
}
else {
collocated_ref_idx = 0;
}
// check whether collocated_ref_idx points to a valid index
if (( collocated_from_l0_flag && collocated_ref_idx >= num_ref_idx_l0_active) ||
(!collocated_from_l0_flag && collocated_ref_idx >= num_ref_idx_l1_active)) {
ctx->add_warning(DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
}
if ((pps->weighted_pred_flag && slice_type == SLICE_TYPE_P) ||
(pps->weighted_bipred_flag && slice_type == SLICE_TYPE_B)) {
if (!read_pred_weight_table(br,this,ctx))
{
ctx->add_warning(DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
}
five_minus_max_num_merge_cand = get_uvlc(br);
if (five_minus_max_num_merge_cand == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
MaxNumMergeCand = 5-five_minus_max_num_merge_cand;
}
slice_qp_delta = get_svlc(br);
if (slice_qp_delta == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
//logtrace(LogSlice,"slice_qp_delta: %d\n",shdr->slice_qp_delta);
if (pps->pps_slice_chroma_qp_offsets_present_flag) {
slice_cb_qp_offset = get_svlc(br);
if (slice_cb_qp_offset == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
slice_cr_qp_offset = get_svlc(br);
if (slice_cr_qp_offset == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
}
else {
slice_cb_qp_offset = 0;
slice_cr_qp_offset = 0;
}
if (pps->range_extension.chroma_qp_offset_list_enabled_flag) {
cu_chroma_qp_offset_enabled_flag = get_bits(br,1);
}
if (pps->deblocking_filter_override_enabled_flag) {
deblocking_filter_override_flag = get_bits(br,1);
}
else {
deblocking_filter_override_flag = 0;
}
slice_beta_offset = pps->beta_offset;
slice_tc_offset = pps->tc_offset;
if (deblocking_filter_override_flag) {
slice_deblocking_filter_disabled_flag = get_bits(br,1);
if (!slice_deblocking_filter_disabled_flag) {
slice_beta_offset = get_svlc(br);
if (slice_beta_offset == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
slice_beta_offset *= 2;
slice_tc_offset = get_svlc(br);
if (slice_tc_offset == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
slice_tc_offset *= 2;
}
}
else {
slice_deblocking_filter_disabled_flag = pps->pic_disable_deblocking_filter_flag;
}
if (pps->pps_loop_filter_across_slices_enabled_flag &&
(slice_sao_luma_flag || slice_sao_chroma_flag ||
!slice_deblocking_filter_disabled_flag )) {
slice_loop_filter_across_slices_enabled_flag = get_bits(br,1);
}
else {
slice_loop_filter_across_slices_enabled_flag =
pps->pps_loop_filter_across_slices_enabled_flag;
}
}
if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag ) {
num_entry_point_offsets = get_uvlc(br);
if (num_entry_point_offsets == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
if (pps->entropy_coding_sync_enabled_flag) {
// check num_entry_points for valid range
int firstCTBRow = slice_segment_address / sps->PicWidthInCtbsY;
int lastCTBRow = firstCTBRow + num_entry_point_offsets;
if (lastCTBRow >= sps->PicHeightInCtbsY) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
}
if (pps->tiles_enabled_flag) {
if (num_entry_point_offsets > pps->num_tile_columns * pps->num_tile_rows) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
}
entry_point_offset.resize( num_entry_point_offsets );
if (num_entry_point_offsets > 0) {
offset_len = get_uvlc(br);
if (offset_len == UVLC_ERROR) {
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
offset_len++;
if (offset_len > 32) {
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
for (int i=0; i<num_entry_point_offsets; i++) {
{
entry_point_offset[i] = get_bits(br,offset_len)+1;
}
if (i>0) {
entry_point_offset[i] += entry_point_offset[i-1];
}
}
}
}
else {
num_entry_point_offsets = 0;
}
if (pps->slice_segment_header_extension_present_flag) {
slice_segment_header_extension_length = get_uvlc(br);
if (slice_segment_header_extension_length == UVLC_ERROR ||
slice_segment_header_extension_length > 1000) { // TODO: safety check against too large values
ctx->add_warning(DE265_WARNING_SLICEHEADER_INVALID, false);
return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}
for (int i=0; i<slice_segment_header_extension_length; i++) {
//slice_segment_header_extension_data_byte[i]
get_bits(br,8);
}
}
compute_derived_values(pps.get());
*continueDecoding = true;
return DE265_OK;
}
```

| null | null | 197,574 | 48645466230561722425504217043798688139 | 524 |

fix check for valid PPS idx (#298)
| other |
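The fix title ("fix check for valid PPS idx") matches an off-by-one visible near the top of read(): with `DE265_MAX_PPS_SETS` table entries, index `DE265_MAX_PPS_SETS` itself is already out of range, so the guard must use `>=`. Sketch of the corrected check:

```cpp
if (slice_pic_parameter_set_id >= DE265_MAX_PPS_SETS ||
    slice_pic_parameter_set_id == UVLC_ERROR) {
  ctx->add_warning(DE265_WARNING_NONEXISTING_PPS_REFERENCED, false);
  return DE265_OK;
}
```
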
| radare2 | fc285cecb8469f0262db0170bf6dd7c01d9b8ed5 | 1 |

```c
static RList *oneshotall_buffer(RBin *bin, RBuffer *b) {
RList *list = r_list_newf (free);
RBinXtrData *meta = get_the_meta (bin, b);
r_list_append (list, meta);
return list;
}
```

| null | null | 197,579 | 140388220319988183510409398909926632593 | 6 |

Fix #20354
| other |
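The message is just "Fix #20354", but the six-line function makes the hazard visible: get_the_meta() can fail, and a NULL meta is appended to the returned list for callers to dereference. A hedged sketch (assumed fix shape):

```cpp
static RList *oneshotall_buffer(RBin *bin, RBuffer *b) {
	RBinXtrData *meta = get_the_meta (bin, b);
	if (!meta) {
		return NULL; /* don't publish a list that holds a NULL entry */
	}
	RList *list = r_list_newf (free);
	r_list_append (list, meta);
	return list;
}
```
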
| ZLMediaKit | 7d8b212a3c3368bc2f6507cb74664fc419eb9327 | 1 |

```cpp
void RtmpProtocol::handle_chunk(RtmpPacket::Ptr packet) {
auto &chunk_data = *packet;
switch (chunk_data.type_id) {
case MSG_ACK: {
if (chunk_data.buffer.size() < 4) {
throw std::runtime_error("MSG_ACK: Not enough data");
}
//auto bytePeerRecv = load_be32(&chunk_data.buffer[0]);
//TraceL << "MSG_ACK:" << bytePeerRecv;
break;
}
case MSG_SET_CHUNK: {
if (chunk_data.buffer.size() < 4) {
throw std::runtime_error("MSG_SET_CHUNK :Not enough data");
}
_chunk_size_in = load_be32(&chunk_data.buffer[0]);
TraceL << "MSG_SET_CHUNK:" << _chunk_size_in;
break;
}
case MSG_USER_CONTROL: {
//user control message
if (chunk_data.buffer.size() < 2) {
throw std::runtime_error("MSG_USER_CONTROL: Not enough data.");
}
uint16_t event_type = load_be16(&chunk_data.buffer[0]);
chunk_data.buffer.erase(0, 2);
switch (event_type) {
case CONTROL_PING_REQUEST: {
if (chunk_data.buffer.size() < 4) {
throw std::runtime_error("CONTROL_PING_REQUEST: Not enough data.");
}
uint32_t timeStamp = load_be32(&chunk_data.buffer[0]);
//TraceL << "CONTROL_PING_REQUEST:" << time_stamp;
sendUserControl(CONTROL_PING_RESPONSE, timeStamp);
break;
}
case CONTROL_PING_RESPONSE: {
if (chunk_data.buffer.size() < 4) {
throw std::runtime_error("CONTROL_PING_RESPONSE: Not enough data.");
}
//uint32_t time_stamp = load_be32(&chunk_data.buffer[0]);
//TraceL << "CONTROL_PING_RESPONSE:" << time_stamp;
break;
}
case CONTROL_STREAM_BEGIN: {
//start playback
if (chunk_data.buffer.size() < 4) {
WarnL << "CONTROL_STREAM_BEGIN: Not enough data:" << chunk_data.buffer.size();
break;
}
uint32_t stream_index = load_be32(&chunk_data.buffer[0]);
onStreamBegin(stream_index);
TraceL << "CONTROL_STREAM_BEGIN:" << stream_index;
break;
}
case CONTROL_STREAM_EOF: {
//pause
if (chunk_data.buffer.size() < 4) {
throw std::runtime_error("CONTROL_STREAM_EOF: Not enough data.");
}
uint32_t stream_index = load_be32(&chunk_data.buffer[0]);
onStreamEof(stream_index);
TraceL << "CONTROL_STREAM_EOF:" << stream_index;
break;
}
case CONTROL_STREAM_DRY: {
//stop playback
if (chunk_data.buffer.size() < 4) {
throw std::runtime_error("CONTROL_STREAM_DRY: Not enough data.");
}
uint32_t stream_index = load_be32(&chunk_data.buffer[0]);
onStreamDry(stream_index);
TraceL << "CONTROL_STREAM_DRY:" << stream_index;
break;
}
default: /*WarnL << "unhandled user control:" << event_type; */ break;
}
break;
}
case MSG_WIN_SIZE: {
_windows_size = load_be32(&chunk_data.buffer[0]);
TraceL << "MSG_WIN_SIZE:" << _windows_size;
break;
}
case MSG_SET_PEER_BW: {
_bandwidth = load_be32(&chunk_data.buffer[0]);
_band_limit_type = chunk_data.buffer[4];
TraceL << "MSG_SET_PEER_BW:" << _windows_size;
break;
}
case MSG_AGGREGATE: {
auto ptr = (uint8_t *) chunk_data.buffer.data();
auto ptr_tail = ptr + chunk_data.buffer.size();
uint32_t latest_ts, timestamp;
timestamp = chunk_data.time_stamp;
bool first_message = true;
while (ptr + 8 + 3 < ptr_tail) {
auto type = *ptr;
ptr += 1;
auto size = load_be24(ptr);
ptr += 3;
auto ts = load_be24(ptr);
ptr += 3;
ts |= (*ptr << 24);
ptr += 1;
ptr += 3;
//following FFmpeg, copy 4 extra bytes
size += 4;
if (ptr + size > ptr_tail) {
break;
}
if (!first_message) {
timestamp += ts - latest_ts;
}
first_message = false;
latest_ts = ts;
auto sub_packet_ptr = RtmpPacket::create();
auto &sub_packet = *sub_packet_ptr;
sub_packet.buffer.assign((char *)ptr, size);
sub_packet.type_id = type;
sub_packet.body_size = size;
sub_packet.time_stamp = timestamp;
sub_packet.stream_index = chunk_data.stream_index;
sub_packet.chunk_id = chunk_data.chunk_id;
handle_chunk(std::move(sub_packet_ptr));
ptr += size;
}
break;
}
default: onRtmpChunk(std::move(packet)); break;
}
}
```

| null | null | 197,592 | 35629967477238943936739294697312947509 | 143 |

Fix RTMP bug where a peer reporting a too-small window size caused recursive looping: #1839
| other |
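The commit message blames a peer-announced window size that is too small, which drives the ACK logic into recursion; note also that MSG_WIN_SIZE, unlike the neighboring cases, reads four bytes without a length check. A hedged sketch of both guards; the minimum window value is an assumption:

```cpp
case MSG_WIN_SIZE: {
    if (chunk_data.buffer.size() < 4) {
        throw std::runtime_error("MSG_WIN_SIZE: Not enough data");
    }
    // Clamp so a tiny window cannot force an ACK per byte (assumed floor).
    _windows_size = std::max(load_be32(&chunk_data.buffer[0]), 32 * 1024u);
    TraceL << "MSG_WIN_SIZE:" << _windows_size;
    break;
}
```
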
| njs | ad48705bf1f04b4221a5f5b07715ac48b3160d53 | 1 |

```c
njs_function_frame_save(njs_vm_t *vm, njs_frame_t *frame, u_char *pc)
{
size_t value_count, n;
njs_value_t *start, *end, *p, **new, *value, **local;
njs_function_t *function;
njs_native_frame_t *active, *native;
*frame = *vm->active_frame;
frame->previous_active_frame = NULL;
native = &frame->native;
active = &vm->active_frame->native;
value_count = njs_function_frame_value_count(active);
function = active->function;
new = (njs_value_t **) ((u_char *) native + NJS_FRAME_SIZE);
value = (njs_value_t *) (new + value_count
+ function->u.lambda->temp);
native->arguments = value;
native->arguments_offset = value + (function->args_offset - 1);
native->local = new + njs_function_frame_args_count(active);
native->temp = new + value_count;
native->pc = pc;
start = njs_function_frame_values(active, &end);
p = native->arguments;
while (start < end) {
*p = *start++;
*new++ = p++;
}
/* Move all arguments. */
p = native->arguments;
local = native->local + function->args_offset;
for (n = 0; n < function->args_count; n++) {
if (!njs_is_valid(p)) {
njs_set_undefined(p);
}
*local++ = p++;
}
return NJS_OK;
}
```

| null | null | 197,593 | 59057408530075801979165560392552080100 | 51 |

Fixed frame allocation from an awaited frame.
njs_function_frame_save() is used to save the awaited frame when an
"await" instruction is encountered. The saving was done as a memcpy() of
the existing runtime frame.
njs_function_frame_alloc() is used to allocate a new function frame; this
function tries to use spare preallocated memory from the previous frame
first. Previously, this function might result in a "use-after-free" when
invoked from a restored frame saved with njs_function_frame_save(),
because njs_function_frame_save() left pointers to the spare memory of
the original frame, which may already be freed when the saved frame is
restored.
The fix is to erase fields for the spare memory from the saved frame.
This closes #469 issue on Github.
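A minimal sketch of the described fix, assuming the spare region is tracked by free/free_size fields on the native frame (the field names are an assumption, not confirmed by this record):
#include <cstddef>

// Hedged sketch; the struct below is a stand-in for njs_native_frame_t and
// the field names are assumptions made for illustration.
struct njs_native_frame_sketch {
    unsigned char *free;       // start of the spare preallocated region
    std::size_t    free_size;  // bytes left in that region
};

// After the memcpy() of the live frame into the saved frame, erase the
// spare-memory fields so a later njs_function_frame_alloc() cannot hand out
// memory that belongs to, and dies with, the original frame.
void erase_spare_memory(njs_native_frame_sketch *saved) {
    saved->free = nullptr;
    saved->free_size = 0;
}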
|
other
|
njs
|
36f04a3178fcb6da8513cc3dbf35215c2a581b3f
| 1
|
njs_string_prototype_replace(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
njs_index_t unused)
{
u_char *r;
size_t length, search_length, ret_length, size;
int64_t pos;
njs_int_t ret;
njs_value_t *this, *search, *replace;
njs_value_t search_lvalue, replace_lvalue, replacer, retval,
arguments[3];
const u_char *p;
njs_function_t *func_replace;
njs_string_prop_t string, s, ret_string;
static const njs_value_t replace_key =
njs_wellknown_symbol(NJS_SYMBOL_REPLACE);
this = njs_argument(args, 0);
if (njs_slow_path(njs_is_null_or_undefined(this))) {
njs_type_error(vm, "cannot convert \"%s\"to object",
njs_type_string(this->type));
return NJS_ERROR;
}
search = njs_lvalue_arg(&search_lvalue, args, nargs, 1);
replace = njs_lvalue_arg(&replace_lvalue, args, nargs, 2);
if (!njs_is_null_or_undefined(search)) {
ret = njs_value_method(vm, search, njs_value_arg(&replace_key),
&replacer);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
if (njs_is_defined(&replacer)) {
arguments[0] = *this;
arguments[1] = *replace;
return njs_function_call(vm, njs_function(&replacer), search,
arguments, 2, &vm->retval);
}
}
ret = njs_value_to_string(vm, this, this);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = njs_value_to_string(vm, search, search);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
func_replace = njs_is_function(replace) ? njs_function(replace) : NULL;
if (func_replace == NULL) {
ret = njs_value_to_string(vm, replace, replace);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
}
length = njs_string_prop(&string, this);
search_length = njs_string_prop(&s, search);
pos = njs_string_index_of(&string, &s, 0);
if (pos < 0) {
vm->retval = *this;
return NJS_OK;
}
if (func_replace == NULL) {
ret = njs_string_get_substitution(vm, search, this, pos, NULL, 0, NULL,
replace, &retval);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
} else {
arguments[0] = *search;
njs_set_number(&arguments[1], pos);
arguments[2] = *this;
ret = njs_function_call(vm, func_replace,
njs_value_arg(&njs_value_undefined),
arguments, 3, &retval);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = njs_value_to_string(vm, &retval, &retval);
if (njs_slow_path(ret != NJS_OK)) {
return NJS_ERROR;
}
}
if (length == string.size) {
p = string.start + pos;
} else {
/* UTF-8 string. */
p = njs_string_offset(string.start, string.start + string.size, pos);
}
ret_length = njs_string_prop(&ret_string, &retval);
size = string.size + ret_string.size - s.size;
length += ret_length - search_length;
r = njs_string_alloc(vm, &vm->retval, size, length);
if (njs_slow_path(r == NULL)) {
return NJS_ERROR;
}
r = njs_cpymem(r, string.start, p - string.start);
r = njs_cpymem(r, ret_string.start, ret_string.size);
memcpy(r, p + s.size, string.size - s.size - (p - string.start));
return NJS_OK;
}
| null | null | 197,608
|
313418285314830712713325262947868404424
| 122
|
Fixed String.prototype.replace() with byte strings.
This closes #522 issue on Github.
|
other
|
curl
|
058f98dc3fe595f21dc26a5b9b1699e519ba5705
| 1
|
static void hashkey(struct connectdata *conn, char *buf,
size_t len, /* something like 128 is fine */
const char **hostp)
{
const char *hostname;
long port = conn->remote_port;
#ifndef CURL_DISABLE_PROXY
if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
hostname = conn->http_proxy.host.name;
port = conn->port;
}
else
#endif
if(conn->bits.conn_to_host)
hostname = conn->conn_to_host.name;
else
hostname = conn->host.name;
if(hostp)
/* report back which name we used */
*hostp = hostname;
/* put the number first so that the hostname gets cut off if too long */
msnprintf(buf, len, "%ld%s", port, hostname);
Curl_strntolower(buf, buf, len);
}
| null | null | 197,610
|
141759203618228167043873707310570221475
| 27
|
conncache: include the zone id in the "bundle" hashkey
Make connections to two separate IPv6 zone ids create separate
connections.
Reported-by: Harry Sintonen
Bug: https://curl.se/docs/CVE-2022-27775.html
Closes #8747
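A minimal sketch of what the fix implies: mixing the IPv6 zone id into the bundle key so two hosts that differ only in zone id land in separate bundles. Treating scope_id as the field that carries the zone id is an assumption here.
#include <cstddef>
#include <cstdio>

// Hedged sketch of a zone-aware bundle key; not curl's actual patch.
void hashkey_sketch(char *buf, std::size_t len, long port,
                    unsigned int scope_id, const char *hostname) {
    // numbers first so an overly long hostname is what gets truncated
    std::snprintf(buf, len, "%u/%ld/%s", scope_id, port, hostname);
}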
|
other
|
open-vm-tools
|
70a74758bfe0042c27f15ce590fb21a2bc54d745
| 1
|
Proto_SecurityCheckRequest(ServiceConnection *conn,
ProtoRequest *req)
{
VGAuthError err;
gboolean isSecure = ServiceNetworkIsConnectionPrivateSuperUser(conn);
switch (req->reqType) {
/*
    * This comes over the public connection; always let it through.
*/
case PROTO_REQUEST_SESSION_REQ:
err = VGAUTH_E_OK;
break;
/*
* No security issues with Connect or QueryMappedCerts
*/
case PROTO_REQUEST_CONN:
case PROTO_REQUEST_QUERYMAPPEDALIASES:
err = VGAUTH_E_OK;
break;
/*
* These request can come over any user connection; always let
* them through if they are coming from root or the owner of
* the certstore being changed.
*/
case PROTO_REQUEST_ADDALIAS:
case PROTO_REQUEST_REMOVEALIAS:
case PROTO_REQUEST_QUERYALIASES:
case PROTO_REQUEST_CREATETICKET:
if (isSecure) {
err = VGAUTH_E_OK;
} else {
const gchar *connOwner = conn->userName;
const gchar *reqUser = NULL;
if (req->reqType == PROTO_REQUEST_ADDALIAS) {
reqUser = req->reqData.addAlias.userName;
} else if (req->reqType == PROTO_REQUEST_REMOVEALIAS) {
reqUser = req->reqData.removeAlias.userName;
} else if (req->reqType == PROTO_REQUEST_QUERYALIASES) {
reqUser = req->reqData.queryAliases.userName;
} else if (req->reqType == PROTO_REQUEST_CREATETICKET) {
reqUser = req->reqData.createTicket.userName;
} else {
ASSERT(0);
}
if (Usercheck_CompareByName(connOwner, reqUser)) {
err = VGAUTH_E_OK;
} else {
Audit_Event(FALSE,
SU_(proto.attack, "Possible security attack! Request type %d has a "
"userName (%s) which doesn't match the pipe owner (%s)!"),
req->reqType, reqUser, connOwner);
Warning("%s: Possible security attack! Request type %d has a "
"userName (%s) which doesn't match the pipe owner (%s)!\n",
__FUNCTION__, req->reqType, reqUser, connOwner);
err = VGAUTH_E_PERMISSION_DENIED;
}
}
break;
/*
* These requests must come through a super-user owned private
* connection.
*/
case PROTO_REQUEST_VALIDATETICKET:
err = (isSecure) ? VGAUTH_E_OK : VGAUTH_E_PERMISSION_DENIED;
break;
case PROTO_REQUEST_VALIDATE_SAML_BEARER_TOKEN:
/*
* CAF wants to be able to validate as any user.
*/
err = VGAUTH_E_OK;
break;
case PROTO_REQUEST_REVOKETICKET:
/*
* We want to allow just SUPERUSER and the ticket's owner to do the
* Revoke. But returning VGAUTH_E_PERMISSION_DENIED is also a hint
* to an attacker that the ticket is valid. So rather than
* blow it off, we just ignore security at this layer,
* and let the request fall through to ServiceRevokeTicket(),
* which will turn a security issue into a no-op.
*/
err = VGAUTH_E_OK;
break;
default:
Warning("%s: Unrecognized request type '%d'\n",
__FUNCTION__, req->reqType);
err = VGAUTH_E_PERMISSION_DENIED;
break;
}
return err;
}
| null | null | 197,611
|
197467841338532217297599212611526262605
| 94
|
Properly check authorization on incoming guestOps requests.
Fix public pipe request checks. Only a SessionRequest type should
be accepted on the public pipe.
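A minimal sketch of the stated policy, with stand-in types; isPublic is a hypothetical flag for however the service marks the public listen pipe:
// Hedged sketch: on the public pipe, only the session request may pass;
// everything else is denied before any per-request handling runs.
enum ReqTypeSketch { REQ_SESSION, REQ_CONN, REQ_ADDALIAS };
enum VGAuthErrSketch { ERR_OK, ERR_PERMISSION_DENIED };
struct ConnSketch { bool isPublic; };  // hypothetical public-pipe marker

VGAuthErrSketch CheckPublicPipe(const ConnSketch &conn, ReqTypeSketch req) {
    if (conn.isPublic && req != REQ_SESSION) {
        return ERR_PERMISSION_DENIED;
    }
    return ERR_OK;
}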
|
other
|
tensorflow
|
abcced051cb1bd8fb05046ac3b6023a7ebcc4578
| 1
|
Status TensorSliceReader::GetTensor(
const string& name, std::unique_ptr<tensorflow::Tensor>* out_tensor) const {
DataType type;
TensorShape shape;
TensorSlice slice;
{
mutex_lock l(mu_);
const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name);
if (tss == nullptr) {
return errors::NotFound(name, " not found in checkpoint file");
}
if (tss->Slices().size() > 1) {
// TODO(sherrym): Support multi-slice checkpoints.
return errors::Unimplemented("Sliced checkpoints are not supported");
}
type = tss->type();
shape = tss->shape();
slice = tss->Slices().begin()->second.slice;
}
std::unique_ptr<tensorflow::Tensor> t(new tensorflow::Tensor(type, shape));
bool success = false;
#define READER_COPY(dt) \
case dt: \
success = CopySliceData(name, slice, \
t->flat<EnumToDataType<dt>::Type>().data()); \
break;
switch (type) {
READER_COPY(DT_FLOAT);
READER_COPY(DT_DOUBLE);
READER_COPY(DT_INT32);
READER_COPY(DT_UINT8);
READER_COPY(DT_INT16);
READER_COPY(DT_INT8);
READER_COPY(DT_INT64);
READER_COPY(DT_STRING);
default:
return errors::Unimplemented("Data type not supported");
}
#undef READER_COPY
if (!success) {
return errors::NotFound(name, " not found in checkpoint file");
}
std::swap(*out_tensor, t);
return Status::OK();
}
| null | null | 197,615
|
82915190253600026971804388780694146918
| 52
|
Prevent crashes when loading tensor slices with unsupported types.
Also fix the `Tensor(const TensorShape&)` constructor swapping the LOG(FATAL)
messages for the unset and unsupported types.
PiperOrigin-RevId: 392695027
Change-Id: I4beda7db950db951d273e3259a7c8534ece49354
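A minimal sketch of the reordering the message describes: validate the dtype before constructing the Tensor, so unsupported types return an error instead of reaching a fatal log. Names here are illustrative stand-ins, not TensorFlow's API.
// Hedged sketch: check the dtype first, allocate second.
enum DataTypeSketch { DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16,
                      DT_INT8, DT_INT64, DT_STRING, DT_OTHER };

bool IsSupportedSliceType(DataTypeSketch type) {
    switch (type) {
        case DT_FLOAT: case DT_DOUBLE: case DT_INT32: case DT_UINT8:
        case DT_INT16: case DT_INT8: case DT_INT64: case DT_STRING:
            return true;
        default:
            return false;  // caller returns Unimplemented, never allocates
    }
}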
|
other
|
tensorflow
|
e84c975313e8e8e38bb2ea118196369c45c51378
| 1
|
void Compute(OpKernelContext* const context) override {
// node_id_range
const Tensor* node_id_range_t;
OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
const auto node_id_range = node_id_range_t->vec<int32>();
const int32_t node_id_first = node_id_range(0); // inclusive
const int32_t node_id_last = node_id_range(1); // exclusive
const Tensor* stats_summary_indices_t;
OP_REQUIRES_OK(context, context->input("stats_summary_indices",
&stats_summary_indices_t));
const auto stats_summary_indices = stats_summary_indices_t->matrix<int32>();
const int32_t num_sparse_entries = stats_summary_indices_t->dim_size(0);
const Tensor* stats_summary_values_t;
OP_REQUIRES_OK(context, context->input("stats_summary_values",
&stats_summary_values_t));
const auto stats_summary_values = stats_summary_values_t->vec<float>();
const Tensor* stats_summary_shape_t;
OP_REQUIRES_OK(
context, context->input("stats_summary_shape", &stats_summary_shape_t));
const auto stats_summary_shape = stats_summary_shape_t->vec<int32>();
const int32_t num_buckets = stats_summary_shape(2) - 1;
const int32_t stats_dims = stats_summary_shape(3);
const Tensor* l1_t;
OP_REQUIRES_OK(context, context->input("l1", &l1_t));
const auto l1 = l1_t->scalar<float>()();
const Tensor* l2_t;
OP_REQUIRES_OK(context, context->input("l2", &l2_t));
const auto l2 = l2_t->scalar<float>()();
const Tensor* tree_complexity_t;
OP_REQUIRES_OK(context,
context->input("tree_complexity", &tree_complexity_t));
const auto tree_complexity = tree_complexity_t->scalar<float>()();
const Tensor* min_node_weight_t;
OP_REQUIRES_OK(context,
context->input("min_node_weight", &min_node_weight_t));
const auto min_node_weight = min_node_weight_t->scalar<float>()();
std::vector<int32> output_node_ids;
std::vector<float> output_gains;
std::vector<int32> output_feature_dimensions;
std::vector<int32> output_thresholds;
std::vector<float> output_left_node_contribs;
std::vector<float> output_right_node_contribs;
std::vector<string> output_split_types;
FeatureMap f_map;
int32_t previous_node_id = -1;
for (int idx = 0; idx < num_sparse_entries; ++idx) {
int32_t node_id = stats_summary_indices(idx, 0);
if (node_id != previous_node_id) {
process_node(f_map, &output_node_ids, &output_gains,
&output_feature_dimensions, &output_thresholds,
&output_left_node_contribs, &output_right_node_contribs,
&output_split_types, previous_node_id, min_node_weight, l1,
l2, num_buckets);
f_map.clear();
}
previous_node_id = node_id;
DCHECK_LE(node_id_first, node_id);
DCHECK_LT(node_id, node_id_last);
const int32_t feature_dim = stats_summary_indices(idx, 1);
const int32_t bucket_id = stats_summary_indices(idx, 2);
const int32_t stat_dim = stats_summary_indices(idx, 3);
std::pair<FeatureMapIterator, bool> const& f_insert_result = f_map.insert(
FeatureMapIterator::value_type(feature_dim, BucketMap()));
auto& b_map = f_insert_result.first->second;
std::pair<BucketMapIterator, bool> const& b_insert_result =
b_map.insert(BucketMapIterator::value_type(
bucket_id, std::vector<float>(stats_dims)));
auto& stats = b_insert_result.first->second;
stats[stat_dim] = stats_summary_values(idx);
} // for node_id
// process the last node id
process_node(f_map, &output_node_ids, &output_gains,
&output_feature_dimensions, &output_thresholds,
&output_left_node_contribs, &output_right_node_contribs,
&output_split_types, previous_node_id, min_node_weight, l1, l2,
num_buckets);
const int num_nodes = output_node_ids.size();
// output_node_ids
Tensor* output_node_ids_t = nullptr;
OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes},
&output_node_ids_t));
auto output_node_ids_vec = output_node_ids_t->vec<int32>();
// output_gains
Tensor* output_gains_t;
OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes},
&output_gains_t));
auto output_gains_vec = output_gains_t->vec<float>();
// output_feature_dimensions
Tensor* output_feature_dimension_t;
OP_REQUIRES_OK(context,
context->allocate_output("feature_dimensions", {num_nodes},
&output_feature_dimension_t));
auto output_feature_dimensions_vec =
output_feature_dimension_t->vec<int32>();
// output_thresholds
Tensor* output_thresholds_t;
OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes},
&output_thresholds_t));
auto output_thresholds_vec = output_thresholds_t->vec<int32>();
// output_left_node_contribs
Tensor* output_left_node_contribs_t;
OP_REQUIRES_OK(
context, context->allocate_output("left_node_contribs", {num_nodes, 1},
&output_left_node_contribs_t));
auto output_left_node_contribs_matrix =
output_left_node_contribs_t->matrix<float>();
// output_right_node_contribs
Tensor* output_right_node_contribs_t;
OP_REQUIRES_OK(
context, context->allocate_output("right_node_contribs", {num_nodes, 1},
&output_right_node_contribs_t));
auto output_right_node_contribs_matrix =
output_right_node_contribs_t->matrix<float>();
// split type
Tensor* output_split_types_t;
OP_REQUIRES_OK(
context, context->allocate_output("split_with_default_directions",
{num_nodes}, &output_split_types_t));
auto output_split_types_vec = output_split_types_t->vec<tstring>();
// Sets output tensors from vectors.
for (int i = 0; i < num_nodes; ++i) {
output_node_ids_vec(i) = output_node_ids[i];
// Adjust the gains to penalize by tree complexity.
output_gains_vec(i) = output_gains[i] - tree_complexity;
output_feature_dimensions_vec(i) = output_feature_dimensions[i];
output_thresholds_vec(i) = output_thresholds[i];
// TODO(crawles): change this for multi-class.
output_left_node_contribs_matrix(i, 0) = output_left_node_contribs[i];
output_right_node_contribs_matrix(i, 0) = output_right_node_contribs[i];
output_split_types_vec(i) = output_split_types[i];
}
}
| null | null | 197,621
|
114788244564354532287660481522170220382
| 150
|
In tf.raw_ops.BoostedTreesSparseCalculateBestFeatureSplit, limit stat_dim in stats_summary_indices to under stats_dims in stats_summary_shape
PiperOrigin-RevId: 387171191
Change-Id: I83ca8a75b22aa78c037e8b98779da6cced16bfaa
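The message pins down the exact check; a minimal sketch of the bound test that would sit in the per-entry loop before stats[stat_dim] is written (the real kernel would express it as an OP_REQUIRES):
// Hedged sketch of the added bound check on the client-controlled index.
bool StatDimInRange(int stat_dim, int stats_dims) {
    return stat_dim >= 0 && stat_dim < stats_dims;
}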
|
other
|
njs
|
6a40a85ff239497c6458c7dbef18f6a2736fe992
| 1
|
njs_promise_perform_then(njs_vm_t *vm, njs_value_t *value,
njs_value_t *fulfilled, njs_value_t *rejected,
njs_promise_capability_t *capability)
{
njs_int_t ret;
njs_value_t arguments[2];
njs_promise_t *promise;
njs_function_t *function;
njs_promise_data_t *data;
njs_promise_reaction_t *fulfilled_reaction, *rejected_reaction;
if (!njs_is_function(fulfilled)) {
fulfilled = njs_value_arg(&njs_value_undefined);
}
if (!njs_is_function(rejected)) {
rejected = njs_value_arg(&njs_value_undefined);
}
promise = njs_promise(value);
data = njs_data(&promise->value);
fulfilled_reaction = njs_mp_alloc(vm->mem_pool,
sizeof(njs_promise_reaction_t));
if (njs_slow_path(fulfilled_reaction == NULL)) {
njs_memory_error(vm);
return NJS_ERROR;
}
fulfilled_reaction->capability = capability;
fulfilled_reaction->handler = *fulfilled;
fulfilled_reaction->type = NJS_PROMISE_FULFILL;
rejected_reaction = njs_mp_alloc(vm->mem_pool,
sizeof(njs_promise_reaction_t));
if (njs_slow_path(rejected_reaction == NULL)) {
njs_memory_error(vm);
return NJS_ERROR;
}
rejected_reaction->capability = capability;
rejected_reaction->handler = *rejected;
rejected_reaction->type = NJS_PROMISE_REJECTED;
if (data->state == NJS_PROMISE_PENDING) {
njs_queue_insert_tail(&data->fulfill_queue, &fulfilled_reaction->link);
njs_queue_insert_tail(&data->reject_queue, &rejected_reaction->link);
} else {
function = njs_promise_create_function(vm,
sizeof(njs_promise_context_t));
function->u.native = njs_promise_reaction_job;
if (data->state == NJS_PROMISE_REJECTED) {
njs_set_data(&arguments[0], rejected_reaction, 0);
ret = njs_promise_host_rejection_tracker(vm, promise,
NJS_PROMISE_HANDLE);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
} else {
njs_set_data(&arguments[0], fulfilled_reaction, 0);
}
arguments[1] = data->result;
ret = njs_promise_add_event(vm, function, arguments, 2);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
}
data->is_handled = 1;
if (capability == NULL) {
njs_vm_retval_set(vm, &njs_value_undefined);
} else {
njs_vm_retval_set(vm, &capability->promise);
}
return NJS_OK;
}
| null | null | 197,632
|
155587680052916618369463188511595578744
| 85
|
Fixed type confusion bug while resolving promises.
Previously, the internal function njs_promise_perform_then(), which
implements PerformPromiseThen(), expected its first argument to always be
a promise instance. This assumption might be invalid because the
functions corresponding to Promise.prototype.then() and
Promise.resolve() incorrectly verified their arguments.
Specifically, the functions recognized their first argument as a promise
if it was an object which either was a Promise or had a Promise object in
its prototype chain. The latter condition is not correct because internal
slots are not inherited according to the spec.
This closes #447 issue in Github.
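A minimal sketch of the corrected argument check, with stand-in types; the key point from the message is that the promise internal slot must live on the object itself, since internal slots are not inherited:
// Hedged sketch; the tag models the internal slot, which prototype-chain
// lookups never touch, so inherited "Promise" objects are rejected.
enum ValueTagSketch { TAG_PROMISE, TAG_OTHER };
struct ValueSketch { ValueTagSketch tag; };

bool is_promise_instance(const ValueSketch *v) {
    return v != nullptr && v->tag == TAG_PROMISE;
}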
|
other
|
njs
|
eafe4c7a326b163612f10861392622b5da5b1792
| 1
|
njs_object_iterate_reverse(njs_vm_t *vm, njs_iterator_args_t *args,
njs_iterator_handler_t handler)
{
double idx;
int64_t i, from, to, length;
njs_int_t ret;
njs_array_t *array, *keys;
njs_value_t *entry, *value, prop, character, string_obj;
const u_char *p, *end, *pos;
njs_string_prop_t string_prop;
njs_object_value_t *object;
value = args->value;
from = args->from;
to = args->to;
if (njs_is_array(value)) {
array = njs_array(value);
from += 1;
while (from-- > to) {
if (njs_slow_path(!array->object.fast_array)) {
goto process_object;
}
if (njs_fast_path(from < array->length
&& njs_is_valid(&array->start[from])))
{
ret = handler(vm, args, &array->start[from], from);
} else {
entry = njs_value_arg(&njs_value_invalid);
ret = njs_value_property_i64(vm, value, from, &prop);
if (njs_slow_path(ret != NJS_DECLINED)) {
if (ret == NJS_ERROR) {
return NJS_ERROR;
}
entry = ∝
}
ret = handler(vm, args, entry, from);
}
if (njs_slow_path(ret != NJS_OK)) {
if (ret == NJS_DONE) {
return NJS_DONE;
}
return NJS_ERROR;
}
}
return NJS_OK;
}
if (njs_is_string(value) || njs_is_object_string(value)) {
if (njs_is_string(value)) {
object = njs_object_value_alloc(vm, NJS_OBJ_TYPE_STRING, 0, value);
if (njs_slow_path(object == NULL)) {
return NJS_ERROR;
}
njs_set_object_value(&string_obj, object);
args->value = &string_obj;
}
else {
value = njs_object_value(value);
}
length = njs_string_prop(&string_prop, value);
end = string_prop.start + string_prop.size;
if ((size_t) length == string_prop.size) {
/* Byte or ASCII string. */
p = string_prop.start + from;
i = from + 1;
while (i-- > to) {
/* This cannot fail. */
(void) njs_string_new(vm, &character, p, 1, 1);
ret = handler(vm, args, &character, i);
if (njs_slow_path(ret != NJS_OK)) {
if (ret == NJS_DONE) {
return NJS_DONE;
}
return NJS_ERROR;
}
p--;
}
} else {
/* UTF-8 string. */
p = njs_string_offset(string_prop.start, end, from);
p = njs_utf8_next(p, end);
i = from + 1;
while (i-- > to) {
pos = njs_utf8_prev(p);
/* This cannot fail. */
                (void) njs_string_new(vm, &character, pos, p - pos, 1);
ret = handler(vm, args, &character, i);
if (njs_slow_path(ret != NJS_OK)) {
if (ret == NJS_DONE) {
return NJS_DONE;
}
return NJS_ERROR;
}
p = pos;
}
}
return NJS_OK;
}
if (!njs_is_object(value)) {
return NJS_OK;
}
process_object:
if (!njs_fast_object(from - to)) {
keys = njs_array_indices(vm, value);
if (njs_slow_path(keys == NULL)) {
return NJS_ERROR;
}
i = keys->length;
while (i > 0) {
idx = njs_string_to_index(&keys->start[--i]);
if (idx < to || idx > from) {
continue;
}
ret = njs_iterator_object_handler(vm, handler, args,
&keys->start[i], idx);
if (njs_slow_path(ret != NJS_OK)) {
njs_array_destroy(vm, keys);
return ret;
}
}
njs_array_destroy(vm, keys);
return NJS_OK;
}
i = from + 1;
while (i-- > to) {
ret = njs_iterator_object_handler(vm, handler, args, NULL, i);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
}
return NJS_OK;
}
| null | null | 197,666
|
55404641652654760762273779055017294350
| 174
|
Fixed Array.prototype.lastIndexOf() with unicode string as "this".
Previously, when lastIndexOf() was called with a unicode string as the
"this" argument and a negative "fromIndex" argument, a null-pointer
dereference might occur because njs_string_offset() was called with an
invalid index value, whereas njs_string_offset() should always be called
with a valid index argument.
The fix is to verify that the from index is valid.
This closes #482 issue on Github.
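A minimal sketch of the kind of normalization the fix calls for, done before any string-offset computation; the exact placement in njs is an assumption:
#include <cstdint>

// Hedged sketch: normalize fromIndex so njs_string_offset() is only ever
// reached with a valid index.
int64_t clamp_from_index(int64_t from, int64_t length) {
    if (from >= length) {
        return length - 1;  // start the reverse scan at the last element
    }
    if (from < 0) {
        from += length;     // negative values count back from the end
    }
    return from;            // may still be < 0: caller reports "not found"
}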
|
other
|
linux
|
6c342ce2239c182c2428ce5a44cb32330434ae6e
| 1
|
static void mctp_serial_close(struct tty_struct *tty)
{
struct mctp_serial *dev = tty->disc_data;
int idx = dev->idx;
unregister_netdev(dev->netdev);
cancel_work_sync(&dev->tx_work);
ida_free(&mctp_serial_ida, idx);
}
| null | null | 197,718
|
38292062647437607669587167983842602858
| 9
|
mctp: serial: Cancel pending work from ndo_uninit handler
We cannot do the cancel_work_sync after the unregister_netdev, as
the dev pointer is no longer valid, causing a uaf on ldisc unregister
(or device close).
Instead, do the cancel_work_sync from the ndo_uninit op, where the dev
still exists, but the queue has stopped.
Fixes: 7bd9890f3d74 ("mctp: serial: cancel tx work on ldisc close")
Reported-by: Luo Likang <[email protected]>
Tested-by: Luo Likang <[email protected]>
Signed-off-by: Jeremy Kerr <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
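A minimal sketch of the ordering fix, with stand-in types for the kernel's; the point is that the work is flushed from the ndo_uninit handler, which unregister_netdev() invokes while dev is still valid:
// Hedged sketch; kernel types and helpers are modeled, not real.
struct work_struct_sketch {};
struct mctp_serial_sketch { work_struct_sketch tx_work; };

void cancel_work_sync_sketch(work_struct_sketch *) {}  // stands in for the API

void mctp_serial_uninit_sketch(mctp_serial_sketch *dev) {
    // The queue is already stopped here and dev has not been freed yet,
    // so flushing the tx work is safe; doing it after unregister_netdev()
    // was the use-after-free.
    cancel_work_sync_sketch(&dev->tx_work);
}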
|
other
|
tensorflow
|
be7a4de6adfbd303ce08be4332554dff70362612
| 1
|
void Compute(OpKernelContext* context) override {
// Read ragged_splits inputs.
OpInputList ragged_nested_splits_in;
OP_REQUIRES_OK(context, context->input_list("rt_nested_splits",
&ragged_nested_splits_in));
const int ragged_nested_splits_len = ragged_nested_splits_in.size();
RaggedTensorVariant batched_ragged_input;
// Read ragged_values input.
batched_ragged_input.set_values(context->input(ragged_nested_splits_len));
batched_ragged_input.mutable_nested_splits()->reserve(
ragged_nested_splits_len);
for (int i = 0; i < ragged_nested_splits_len; i++) {
batched_ragged_input.append_splits(ragged_nested_splits_in[i]);
}
if (!batched_input_) {
// Encode as a Scalar Variant Tensor.
Tensor* encoded_scalar;
OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({}),
&encoded_scalar));
encoded_scalar->scalar<Variant>()() = std::move(batched_ragged_input);
return;
}
// Unbatch the Ragged Tensor and encode the components.
std::vector<RaggedTensorVariant> unbatched_ragged_input;
auto batched_splits_top_vec =
batched_ragged_input.splits(0).vec<SPLIT_TYPE>();
int num_components = batched_splits_top_vec.size() - 1;
OP_REQUIRES(context, num_components >= 0,
errors::Internal("Invalid split argument."));
OP_REQUIRES_OK(context, UnbatchRaggedZerothDim<VALUE_TYPE, SPLIT_TYPE>(
batched_ragged_input, &unbatched_ragged_input));
// Bundle the encoded scalar Variant Tensors into a rank-1 Variant Tensor.
Tensor* encoded_vector;
int output_size = unbatched_ragged_input.size();
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({output_size}),
&encoded_vector));
auto encoded_vector_t = encoded_vector->vec<Variant>();
for (int i = 0; i < output_size; i++) {
encoded_vector_t(i) = unbatched_ragged_input[i];
}
}
| null | null | 197,719
|
209500931068648342443345704454092620756
| 45
|
Ensure non-empty rt_nested_splits in tf.raw_ops.RaggedTensorToVariant
PiperOrigin-RevId: 387664237
Change-Id: Ia1700c34b5610873d63561abc86e23b46ead93b3
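A minimal sketch of the added precondition; in the kernel above it would guard the unbatching path, which unconditionally reads splits(0):
// Hedged sketch: with batched input, an empty rt_nested_splits list leaves
// no zeroth dimension to unbatch along, so reject it up front.
bool NestedSplitsOk(bool batched_input, int ragged_nested_splits_len) {
    return !batched_input || ragged_nested_splits_len > 0;
}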
|
other
|
qdecoder
|
ce7c8a7ac450a823a11b06508ef1eb7441241f81
| 1
|
size_t _q_urldecode(char *str)
{
if (str == NULL) {
return 0;
}
char *pEncPt, *pBinPt = str;
for (pEncPt = str; *pEncPt != '\0'; pEncPt++) {
switch (*pEncPt) {
case '+': {
*pBinPt++ = ' ';
break;
}
case '%': {
*pBinPt++ = _q_x2c(*(pEncPt + 1), *(pEncPt + 2));
pEncPt += 2;
break;
}
default: {
*pBinPt++ = *pEncPt;
break;
}
}
}
*pBinPt = '\0';
return (pBinPt - str);
}
| null | null | 197,741
|
188552871240633623427606115202458159863
| 28
|
security update: add check on improperly encoded input
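A minimal sketch of the check the message refers to: only decode %XX when both following characters are hex digits, otherwise keep the bytes literally instead of reading past the terminator. The hex helper below stands in for _q_x2c():
#include <cctype>
#include <cstddef>

// Hedged sketch of a decoder that validates "%XX" escapes before use.
std::size_t urldecode_sketch(char *str) {
    char *src = str, *dst = str;
    for (; *src != '\0'; src++) {
        if (*src == '+') {
            *dst++ = ' ';
        } else if (*src == '%' && isxdigit((unsigned char)src[1])
                                && isxdigit((unsigned char)src[2])) {
            // hypothetical hex-pair decoder standing in for _q_x2c()
            auto hex = [](char c) {
                return (c <= '9') ? c - '0' : (c | 0x20) - 'a' + 10;
            };
            *dst++ = (char)((hex(src[1]) << 4) | hex(src[2]));
            src += 2;
        } else {
            *dst++ = *src;  // pass through malformed or plain bytes
        }
    }
    *dst = '\0';
    return (std::size_t)(dst - str);
}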
|
other
|
tensorflow
|
c79ba87153ee343401dbe9d1954d7f79e521eb14
| 1
|
Status TransposeShapeFn(InferenceContext* c) {
ShapeHandle input = c->input(0);
ShapeHandle perm_shape = c->input(1);
const Tensor* perm = c->input_tensor(1);
DimensionHandle perm_elems = c->NumElements(perm_shape);
// If we don't have rank information on the input or value information on
// perm we can't return any shape information, otherwise we have enough
// information to at least find the rank of the output.
if (!c->RankKnown(input) && !c->ValueKnown(perm_elems) && perm == nullptr) {
c->set_output(0, c->UnknownShape());
return Status::OK();
}
// Find our value of the rank.
int64_t rank;
if (c->RankKnown(input)) {
rank = c->Rank(input);
} else if (c->ValueKnown(perm_elems)) {
rank = c->Value(perm_elems);
} else {
rank = perm->NumElements();
}
if (!c->RankKnown(input) && rank < 2) {
// A permutation array containing a single element is ambiguous. It could
// indicate either a scalar or a 1-dimensional array, both of which the
// transpose op returns unchanged.
c->set_output(0, input);
return Status::OK();
}
std::vector<DimensionHandle> dims;
dims.resize(rank);
TF_RETURN_IF_ERROR(c->WithRank(input, rank, &input));
// Ensure that perm is a vector and has rank elements.
TF_RETURN_IF_ERROR(c->WithRank(perm_shape, 1, &perm_shape));
TF_RETURN_IF_ERROR(c->WithValue(perm_elems, rank, &perm_elems));
// If we know the rank of the input and the value of perm, we can return
// all shape information, otherwise we can only return rank information,
// but no information for the dimensions.
if (perm != nullptr) {
std::vector<int64_t> data;
if (perm->dtype() == DT_INT32) {
data = AsInt64<int32>(perm, rank);
} else {
data = AsInt64<int64_t>(perm, rank);
}
for (int32_t i = 0; i < rank; ++i) {
int64_t in_idx = data[i];
if (in_idx >= rank) {
return errors::InvalidArgument("perm dim ", in_idx,
" is out of range of input rank ", rank);
}
dims[i] = c->Dim(input, in_idx);
}
} else {
for (int i = 0; i < rank; ++i) {
dims[i] = c->UnknownDim();
}
}
c->set_output(0, c->MakeShape(dims));
return Status::OK();
}
| null | null | 197,748
|
34055993943311259251029225987542874775
| 65
|
Make Transpose's shape inference function validate that negative `perm` values are within the tensor's rank.
PiperOrigin-RevId: 403252853
Change-Id: Ia6b31b45b237312668bb31c2c3b3c7bbce2d2610
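A minimal sketch of the corrected range test; the original code above rejects in_idx >= rank but lets negative values index dims[] out of bounds:
// Hedged sketch: a perm entry is valid only inside [0, rank).
bool PermIndexValid(long long in_idx, long long rank) {
    return in_idx >= 0 && in_idx < rank;
}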
|
other
|
tensorflow
|
bb6a0383ed553c286f87ca88c207f6774d5c4a8f
| 1
|
TfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params,
const TfLiteTensor* indices, TfLiteTensor* output) {
switch (params->type) {
case kTfLiteFloat32:
return GatherNd<float, IndicesT>(params, indices, output);
case kTfLiteUInt8:
return GatherNd<uint8_t, IndicesT>(params, indices, output);
case kTfLiteInt8:
return GatherNd<int8_t, IndicesT>(params, indices, output);
case kTfLiteInt16:
return GatherNd<int16_t, IndicesT>(params, indices, output);
case kTfLiteInt32:
return GatherNd<int32_t, IndicesT>(params, indices, output);
case kTfLiteInt64:
return GatherNd<int64_t, IndicesT>(params, indices, output);
case kTfLiteString:
return GatherNdString<IndicesT>(params, indices, output);
default:
context->ReportError(context,
"Params type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(params->type));
return kTfLiteError;
}
}
| null | null | 197,760
|
79501722422646953902317860019376579160
| 24
|
Prevent heap OOB read in TFLite's `gather_nd.cc`.
Passing negative indices is illegal, but a check was missing, so they resulted in OOB accesses.
PiperOrigin-RevId: 387208551
Change-Id: I6b7a8a62d3e7c13a16d81619e5bc23ae2cdbc7fd
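A minimal sketch of the missing validation, assuming the flattened indices buffer holds rows of index_rank components that address the leading axes of params:
#include <cstddef>
#include <vector>

// Hedged sketch: every index component must lie in [0, dim) for its axis
// before GatherNd dereferences params data. Assumes index_rank > 0 and that
// params_dims holds at least index_rank leading dimensions.
template <typename IndicesT>
bool IndicesInRange(const std::vector<IndicesT> &indices,
                    const std::vector<int> &params_dims, int index_rank) {
    for (std::size_t i = 0; i < indices.size(); ++i) {
        const int dim = params_dims[i % index_rank];  // axis for this slot
        if (indices[i] < 0 || indices[i] >= dim) {
            return false;  // negative or OOB index: return kTfLiteError
        }
    }
    return true;
}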
|
other
|
qemu
|
f9a70e79391f6d7c2a912d785239ee8effc1922d
| 1
|
static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len)
{
int i;
uint16_t limit;
VncDisplay *vd = vs->vd;
if (data[0] > 3) {
update_displaychangelistener(&vd->dcl, VNC_REFRESH_INTERVAL_BASE);
}
switch (data[0]) {
case VNC_MSG_CLIENT_SET_PIXEL_FORMAT:
if (len == 1)
return 20;
set_pixel_format(vs, read_u8(data, 4), read_u8(data, 5),
read_u8(data, 6), read_u8(data, 7),
read_u16(data, 8), read_u16(data, 10),
read_u16(data, 12), read_u8(data, 14),
read_u8(data, 15), read_u8(data, 16));
break;
case VNC_MSG_CLIENT_SET_ENCODINGS:
if (len == 1)
return 4;
if (len == 4) {
limit = read_u16(data, 2);
if (limit > 0)
return 4 + (limit * 4);
} else
limit = read_u16(data, 2);
for (i = 0; i < limit; i++) {
int32_t val = read_s32(data, 4 + (i * 4));
memcpy(data + 4 + (i * 4), &val, sizeof(val));
}
set_encodings(vs, (int32_t *)(data + 4), limit);
break;
case VNC_MSG_CLIENT_FRAMEBUFFER_UPDATE_REQUEST:
if (len == 1)
return 10;
framebuffer_update_request(vs,
read_u8(data, 1), read_u16(data, 2), read_u16(data, 4),
read_u16(data, 6), read_u16(data, 8));
break;
case VNC_MSG_CLIENT_KEY_EVENT:
if (len == 1)
return 8;
key_event(vs, read_u8(data, 1), read_u32(data, 4));
break;
case VNC_MSG_CLIENT_POINTER_EVENT:
if (len == 1)
return 6;
pointer_event(vs, read_u8(data, 1), read_u16(data, 2), read_u16(data, 4));
break;
case VNC_MSG_CLIENT_CUT_TEXT:
if (len == 1)
return 8;
if (len == 8) {
uint32_t dlen = read_u32(data, 4);
if (dlen > 0)
return 8 + dlen;
}
client_cut_text(vs, read_u32(data, 4), data + 8);
break;
case VNC_MSG_CLIENT_QEMU:
if (len == 1)
return 2;
switch (read_u8(data, 1)) {
case VNC_MSG_CLIENT_QEMU_EXT_KEY_EVENT:
if (len == 2)
return 12;
ext_key_event(vs, read_u16(data, 2),
read_u32(data, 4), read_u32(data, 8));
break;
case VNC_MSG_CLIENT_QEMU_AUDIO:
if (len == 2)
return 4;
switch (read_u16 (data, 2)) {
case VNC_MSG_CLIENT_QEMU_AUDIO_ENABLE:
audio_add(vs);
break;
case VNC_MSG_CLIENT_QEMU_AUDIO_DISABLE:
audio_del(vs);
break;
case VNC_MSG_CLIENT_QEMU_AUDIO_SET_FORMAT:
if (len == 4)
return 10;
switch (read_u8(data, 4)) {
case 0: vs->as.fmt = AUD_FMT_U8; break;
case 1: vs->as.fmt = AUD_FMT_S8; break;
case 2: vs->as.fmt = AUD_FMT_U16; break;
case 3: vs->as.fmt = AUD_FMT_S16; break;
case 4: vs->as.fmt = AUD_FMT_U32; break;
case 5: vs->as.fmt = AUD_FMT_S32; break;
default:
printf("Invalid audio format %d\n", read_u8(data, 4));
vnc_client_error(vs);
break;
}
vs->as.nchannels = read_u8(data, 5);
if (vs->as.nchannels != 1 && vs->as.nchannels != 2) {
printf("Invalid audio channel coount %d\n",
read_u8(data, 5));
vnc_client_error(vs);
break;
}
vs->as.freq = read_u32(data, 6);
break;
default:
printf ("Invalid audio message %d\n", read_u8(data, 4));
vnc_client_error(vs);
break;
}
break;
default:
printf("Msg: %d\n", read_u16(data, 0));
vnc_client_error(vs);
break;
}
break;
default:
printf("Msg: %d\n", data[0]);
vnc_client_error(vs);
break;
}
vnc_read_when(vs, protocol_client_msg, 1);
return 0;
}
| null | null | 197,796
|
134241989503304135961732860410446769152
| 140
|
ui/vnc: limit client_cut_text msg payload size
Currently a malicious client could define a payload
size of 2^32 - 1 bytes and send up to that amount of
data to the vnc server. The server would allocate
that much memory, which could easily create an
out-of-memory condition.
This patch limits the payload size to 1MB max.
Please note that client_cut_text messages are currently
silently ignored.
Signed-off-by: Peter Lieven <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>
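A minimal sketch of the cap the patch describes, in the shape of the length negotiation above; constants and return convention are illustrative:
#include <cstdint>

// Hedged sketch: refuse client_cut_text payloads above 1MB instead of
// trusting a client-controlled 32-bit length.
constexpr uint32_t kMaxCutTextLen = 1u << 20;  // 1MB

// Returns how many bytes to wait for; sets *error when the client must be
// disconnected (the caller would run vnc_client_error()).
uint32_t cut_text_expected_len(uint32_t dlen, bool *error) {
    *error = dlen > kMaxCutTextLen;
    if (*error || dlen == 0) {
        return 0;
    }
    return 8 + dlen;
}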
|
other
|
tensorflow
|
368af875869a204b4ac552b9ddda59f6a46a56ec
| 1
|
bool TensorSliceReader::CopySliceData(const string& name,
const TensorSlice& slice, T* data) const {
std::vector<std::pair<TensorSlice, string>> details;
const TensorSliceSet* tss;
{
mutex_lock l(mu_);
tss = FindTensorSlice(name, slice, &details);
if (!tss && !all_shards_loaded_) {
VLOG(1) << "Did not find slice in preferred shard, loading all shards."
<< name << ": " << slice.DebugString();
LoadAllShards();
tss = FindTensorSlice(name, slice, &details);
}
if (!tss) {
// No such tensor
return false;
}
}
// We have the data -- copy it over.
string value;
for (const auto& x : details) {
const TensorSlice& slice_s = x.first;
const string& fname = x.second;
int idx = gtl::FindWithDefault(fname_to_index_, fname, -1);
CHECK_GE(idx, 0) << "Failed to find the index for filename " << fname;
// We read a record in the corresponding sstable
const string key = EncodeTensorNameSlice(name, slice_s);
if (!sss_[idx]->Get(key, &value)) {
VLOG(1) << "Failed to seek to the record for tensor " << name
<< ", slice " << slice_s.DebugString()
<< ": computed key = " << key;
return false;
}
SavedTensorSlices sts;
if (!ParseProtoUnlimited(&sts, value)) {
VLOG(1) << "Failed to parse the record for tensor " << name << ", slice "
<< slice_s.DebugString() << ": computed key = " << key;
return false;
}
CopyDataFromTensorSliceToTensorSlice(
tss->shape(), slice_s, slice,
checkpoint::TensorProtoData<T>(sts.data().data()), data);
}
return true;
}
| null | null | 197,801
|
125021583934003130290771039701895727492
| 45
|
Avoid buffer overflow when loading tensors with insufficient data from checkpoints.
`CopyDataFromTensorSliceToTensorSlice` does not (and cannot conveniently)
provide any bounds checking on its own, so the size is instead checked prior
to passing unvalidated data to that function.
PiperOrigin-RevId: 392971286
Change-Id: If2073b36d4d5eedd386329f56729395fd7effee1
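A minimal sketch of where the size check sits conceptually; how TensorFlow computes the expected element count from the slice shape is not shown in this record, so the helper below is only a stand-in:
// Hedged sketch: verify the parsed record carries at least the number of
// elements the slice shape requires, before calling
// CopyDataFromTensorSliceToTensorSlice, which cannot bounds-check itself.
bool SliceDataSizeOk(long long elements_in_record, long long elements_needed) {
    return elements_in_record >= elements_needed;
}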
|
other
|
curl
|
8dfc93e573ca740544a2d79ebb0ed786592c65c3
| 1
|
Curl_cookie_add(struct Curl_easy *data,
/*
* The 'data' pointer here may be NULL at times, and thus
                 * must only be used very carefully for things that can deal
                 * with data being NULL, such as infof() and similar.
*/
struct CookieInfo *c,
bool httpheader, /* TRUE if HTTP header-style line */
bool noexpire, /* if TRUE, skip remove_expired() */
char *lineptr, /* first character of the line */
const char *domain, /* default domain */
const char *path, /* full path used when this cookie is set,
used to get default path for the cookie
unless set */
bool secure) /* TRUE if connection is over secure origin */
{
struct Cookie *clist;
struct Cookie *co;
struct Cookie *lastc = NULL;
struct Cookie *replace_co = NULL;
struct Cookie *replace_clist = NULL;
time_t now = time(NULL);
bool replace_old = FALSE;
bool badcookie = FALSE; /* cookies are good by default. mmmmm yummy */
size_t myhash;
#ifdef CURL_DISABLE_VERBOSE_STRINGS
(void)data;
#endif
DEBUGASSERT(MAX_SET_COOKIE_AMOUNT <= 255); /* counter is an unsigned char */
if(data->req.setcookies >= MAX_SET_COOKIE_AMOUNT)
return NULL;
/* First, alloc and init a new struct for it */
co = calloc(1, sizeof(struct Cookie));
if(!co)
return NULL; /* bail out if we're this low on memory */
if(httpheader) {
    /* This line was read off an HTTP header */
char name[MAX_NAME];
char what[MAX_NAME];
const char *ptr;
const char *semiptr;
size_t linelength = strlen(lineptr);
if(linelength > MAX_COOKIE_LINE) {
/* discard overly long lines at once */
free(co);
return NULL;
}
semiptr = strchr(lineptr, ';'); /* first, find a semicolon */
while(*lineptr && ISBLANK(*lineptr))
lineptr++;
ptr = lineptr;
do {
/* we have a <what>=<this> pair or a stand-alone word here */
name[0] = what[0] = 0; /* init the buffers */
if(1 <= sscanf(ptr, "%" MAX_NAME_TXT "[^;\r\n=] =%"
MAX_NAME_TXT "[^;\r\n]",
name, what)) {
/*
* Use strstore() below to properly deal with received cookie
* headers that have the same string property set more than once,
* and then we use the last one.
*/
const char *whatptr;
bool done = FALSE;
bool sep;
size_t len = strlen(what);
size_t nlen = strlen(name);
const char *endofn = &ptr[ nlen ];
/*
* Check for too long individual name or contents, or too long
* combination of name + contents. Chrome and Firefox support 4095 or
* 4096 bytes combo
*/
if(nlen >= (MAX_NAME-1) || len >= (MAX_NAME-1) ||
((nlen + len) > MAX_NAME)) {
freecookie(co);
infof(data, "oversized cookie dropped, name/val %zu + %zu bytes",
nlen, len);
return NULL;
}
/* name ends with a '=' ? */
sep = (*endofn == '=')?TRUE:FALSE;
if(nlen) {
endofn--; /* move to the last character */
if(ISBLANK(*endofn)) {
/* skip trailing spaces in name */
while(*endofn && ISBLANK(*endofn) && nlen) {
endofn--;
nlen--;
}
name[nlen] = 0; /* new end of name */
}
}
/* Strip off trailing whitespace from the 'what' */
while(len && ISBLANK(what[len-1])) {
what[len-1] = 0;
len--;
}
/* Skip leading whitespace from the 'what' */
whatptr = what;
while(*whatptr && ISBLANK(*whatptr))
whatptr++;
/*
* Check if we have a reserved prefix set before anything else, as we
* otherwise have to test for the prefix in both the cookie name and
* "the rest". Prefixes must start with '__' and end with a '-', so
* only test for names where that can possibly be true.
*/
if(nlen > 3 && name[0] == '_' && name[1] == '_') {
if(!strncmp("__Secure-", name, 9))
co->prefix |= COOKIE_PREFIX__SECURE;
else if(!strncmp("__Host-", name, 7))
co->prefix |= COOKIE_PREFIX__HOST;
}
if(!co->name) {
/* The very first name/value pair is the actual cookie name */
if(!sep) {
/* Bad name/value pair. */
badcookie = TRUE;
break;
}
co->name = strdup(name);
co->value = strdup(whatptr);
done = TRUE;
if(!co->name || !co->value) {
badcookie = TRUE;
break;
}
}
else if(!len) {
/*
* this was a "<name>=" with no content, and we must allow
* 'secure' and 'httponly' specified this weirdly
*/
done = TRUE;
/*
* secure cookies are only allowed to be set when the connection is
* using a secure protocol, or when the cookie is being set by
* reading from file
*/
if(strcasecompare("secure", name)) {
if(secure || !c->running) {
co->secure = TRUE;
}
else {
badcookie = TRUE;
break;
}
}
else if(strcasecompare("httponly", name))
co->httponly = TRUE;
else if(sep)
/* there was a '=' so we're not done parsing this field */
done = FALSE;
}
if(done)
;
else if(strcasecompare("path", name)) {
strstore(&co->path, whatptr);
if(!co->path) {
badcookie = TRUE; /* out of memory bad */
break;
}
free(co->spath); /* if this is set again */
co->spath = sanitize_cookie_path(co->path);
if(!co->spath) {
badcookie = TRUE; /* out of memory bad */
break;
}
}
else if(strcasecompare("domain", name) && whatptr[0]) {
bool is_ip;
/*
* Now, we make sure that our host is within the given domain, or
* the given domain is not valid and thus cannot be set.
*/
if('.' == whatptr[0])
whatptr++; /* ignore preceding dot */
#ifndef USE_LIBPSL
/*
* Without PSL we don't know when the incoming cookie is set on a
* TLD or otherwise "protected" suffix. To reduce risk, we require a
* dot OR the exact host name being "localhost".
*/
if(bad_domain(whatptr))
domain = ":";
#endif
is_ip = Curl_host_is_ipnum(domain ? domain : whatptr);
if(!domain
|| (is_ip && !strcmp(whatptr, domain))
|| (!is_ip && tailmatch(whatptr, domain))) {
strstore(&co->domain, whatptr);
if(!co->domain) {
badcookie = TRUE;
break;
}
if(!is_ip)
co->tailmatch = TRUE; /* we always do that if the domain name was
given */
}
else {
/*
* We did not get a tailmatch and then the attempted set domain is
* not a domain to which the current host belongs. Mark as bad.
*/
badcookie = TRUE;
infof(data, "skipped cookie with bad tailmatch domain: %s",
whatptr);
}
}
else if(strcasecompare("version", name)) {
strstore(&co->version, whatptr);
if(!co->version) {
badcookie = TRUE;
break;
}
}
else if(strcasecompare("max-age", name)) {
/*
* Defined in RFC2109:
*
* Optional. The Max-Age attribute defines the lifetime of the
* cookie, in seconds. The delta-seconds value is a decimal non-
* negative integer. After delta-seconds seconds elapse, the
* client should discard the cookie. A value of zero means the
* cookie should be discarded immediately.
*/
strstore(&co->maxage, whatptr);
if(!co->maxage) {
badcookie = TRUE;
break;
}
}
else if(strcasecompare("expires", name)) {
strstore(&co->expirestr, whatptr);
if(!co->expirestr) {
badcookie = TRUE;
break;
}
}
/*
* Else, this is the second (or more) name we don't know about!
*/
}
else {
/* this is an "illegal" <what>=<this> pair */
}
if(!semiptr || !*semiptr) {
/* we already know there are no more cookies */
semiptr = NULL;
continue;
}
ptr = semiptr + 1;
while(*ptr && ISBLANK(*ptr))
ptr++;
semiptr = strchr(ptr, ';'); /* now, find the next semicolon */
if(!semiptr && *ptr)
/*
* There are no more semicolons, but there's a final name=value pair
* coming up
*/
semiptr = strchr(ptr, '\0');
} while(semiptr);
if(co->maxage) {
CURLofft offt;
offt = curlx_strtoofft((*co->maxage == '\"')?
&co->maxage[1]:&co->maxage[0], NULL, 10,
&co->expires);
if(offt == CURL_OFFT_FLOW)
/* overflow, used max value */
co->expires = CURL_OFF_T_MAX;
else if(!offt) {
if(!co->expires)
/* already expired */
co->expires = 1;
else if(CURL_OFF_T_MAX - now < co->expires)
/* would overflow */
co->expires = CURL_OFF_T_MAX;
else
co->expires += now;
}
}
else if(co->expirestr) {
/*
* Note that if the date couldn't get parsed for whatever reason, the
* cookie will be treated as a session cookie
*/
co->expires = Curl_getdate_capped(co->expirestr);
/*
* Session cookies have expires set to 0 so if we get that back from the
* date parser let's add a second to make it a non-session cookie
*/
if(co->expires == 0)
co->expires = 1;
else if(co->expires < 0)
co->expires = 0;
}
if(!badcookie && !co->domain) {
if(domain) {
/* no domain was given in the header line, set the default */
co->domain = strdup(domain);
if(!co->domain)
badcookie = TRUE;
}
}
if(!badcookie && !co->path && path) {
/*
* No path was given in the header line, set the default. Note that the
* passed-in path to this function MAY have a '?' and following part that
* MUST NOT be stored as part of the path.
*/
char *queryp = strchr(path, '?');
/*
* queryp is where the interesting part of the path ends, so now we
     * want to find the last
*/
char *endslash;
if(!queryp)
endslash = strrchr(path, '/');
else
endslash = memrchr(path, '/', (queryp - path));
if(endslash) {
size_t pathlen = (endslash-path + 1); /* include end slash */
co->path = malloc(pathlen + 1); /* one extra for the zero byte */
if(co->path) {
memcpy(co->path, path, pathlen);
co->path[pathlen] = 0; /* null-terminate */
co->spath = sanitize_cookie_path(co->path);
if(!co->spath)
badcookie = TRUE; /* out of memory bad */
}
else
badcookie = TRUE;
}
}
/*
   * If we didn't get a cookie name, or got a bad one, then this is an
   * illegal line, so bail out.
*/
if(badcookie || !co->name) {
freecookie(co);
return NULL;
}
data->req.setcookies++;
}
else {
/*
* This line is NOT a HTTP header style line, we do offer support for
* reading the odd netscape cookies-file format here
*/
char *ptr;
char *firstptr;
char *tok_buf = NULL;
int fields;
/*
* IE introduced HTTP-only cookies to prevent XSS attacks. Cookies marked
* with httpOnly after the domain name are not accessible from javascripts,
* but since curl does not operate at javascript level, we include them
* anyway. In Firefox's cookie files, these lines are preceded with
* #HttpOnly_ and then everything is as usual, so we skip 10 characters of
     * the line.
*/
if(strncmp(lineptr, "#HttpOnly_", 10) == 0) {
lineptr += 10;
co->httponly = TRUE;
}
if(lineptr[0]=='#') {
/* don't even try the comments */
free(co);
return NULL;
}
/* strip off the possible end-of-line characters */
ptr = strchr(lineptr, '\r');
if(ptr)
*ptr = 0; /* clear it */
ptr = strchr(lineptr, '\n');
if(ptr)
*ptr = 0; /* clear it */
firstptr = strtok_r(lineptr, "\t", &tok_buf); /* tokenize it on the TAB */
/*
* Now loop through the fields and init the struct we already have
* allocated
*/
for(ptr = firstptr, fields = 0; ptr && !badcookie;
ptr = strtok_r(NULL, "\t", &tok_buf), fields++) {
switch(fields) {
case 0:
if(ptr[0]=='.') /* skip preceding dots */
ptr++;
co->domain = strdup(ptr);
if(!co->domain)
badcookie = TRUE;
break;
case 1:
/*
* flag: A TRUE/FALSE value indicating if all machines within a given
* domain can access the variable. Set TRUE when the cookie says
* .domain.com and to false when the domain is complete www.domain.com
*/
co->tailmatch = strcasecompare(ptr, "TRUE")?TRUE:FALSE;
break;
case 2:
/* The file format allows the path field to remain not filled in */
if(strcmp("TRUE", ptr) && strcmp("FALSE", ptr)) {
/* only if the path doesn't look like a boolean option! */
co->path = strdup(ptr);
if(!co->path)
badcookie = TRUE;
else {
co->spath = sanitize_cookie_path(co->path);
if(!co->spath) {
badcookie = TRUE; /* out of memory bad */
}
}
break;
}
/* this doesn't look like a path, make one up! */
co->path = strdup("/");
if(!co->path)
badcookie = TRUE;
co->spath = strdup("/");
if(!co->spath)
badcookie = TRUE;
fields++; /* add a field and fall down to secure */
/* FALLTHROUGH */
case 3:
co->secure = FALSE;
if(strcasecompare(ptr, "TRUE")) {
if(secure || c->running)
co->secure = TRUE;
else
badcookie = TRUE;
}
break;
case 4:
if(curlx_strtoofft(ptr, NULL, 10, &co->expires))
badcookie = TRUE;
break;
case 5:
co->name = strdup(ptr);
if(!co->name)
badcookie = TRUE;
else {
/* For Netscape file format cookies we check prefix on the name */
if(strncasecompare("__Secure-", co->name, 9))
co->prefix |= COOKIE_PREFIX__SECURE;
else if(strncasecompare("__Host-", co->name, 7))
co->prefix |= COOKIE_PREFIX__HOST;
}
break;
case 6:
co->value = strdup(ptr);
if(!co->value)
badcookie = TRUE;
break;
}
}
if(6 == fields) {
/* we got a cookie with blank contents, fix it */
co->value = strdup("");
if(!co->value)
badcookie = TRUE;
else
fields++;
}
if(!badcookie && (7 != fields))
/* we did not find the sufficient number of fields */
badcookie = TRUE;
if(badcookie) {
freecookie(co);
return NULL;
}
}
if(co->prefix & COOKIE_PREFIX__SECURE) {
/* The __Secure- prefix only requires that the cookie be set secure */
if(!co->secure) {
freecookie(co);
return NULL;
}
}
if(co->prefix & COOKIE_PREFIX__HOST) {
/*
* The __Host- prefix requires the cookie to be secure, have a "/" path
* and not have a domain set.
*/
if(co->secure && co->path && strcmp(co->path, "/") == 0 && !co->tailmatch)
;
else {
freecookie(co);
return NULL;
}
}
if(!c->running && /* read from a file */
c->newsession && /* clean session cookies */
!co->expires) { /* this is a session cookie since it doesn't expire! */
freecookie(co);
return NULL;
}
co->livecookie = c->running;
co->creationtime = ++c->lastct;
/*
   * Now that we have parsed the incoming line, we must check whether this
   * supersedes an already existing cookie, which it may if the previous one
   * has the same domain and path as this one.
*/
/* at first, remove expired cookies */
if(!noexpire)
remove_expired(c);
#ifdef USE_LIBPSL
/*
* Check if the domain is a Public Suffix and if yes, ignore the cookie. We
* must also check that the data handle isn't NULL since the psl code will
* dereference it.
*/
if(data && (domain && co->domain && !Curl_host_is_ipnum(co->domain))) {
const psl_ctx_t *psl = Curl_psl_use(data);
int acceptable;
if(psl) {
acceptable = psl_is_cookie_domain_acceptable(psl, domain, co->domain);
Curl_psl_release(data);
}
else
acceptable = !bad_domain(domain);
if(!acceptable) {
infof(data, "cookie '%s' dropped, domain '%s' must not "
"set cookies for '%s'", co->name, domain, co->domain);
freecookie(co);
return NULL;
}
}
#endif
/* A non-secure cookie may not overlay an existing secure cookie. */
myhash = cookiehash(co->domain);
clist = c->cookies[myhash];
while(clist) {
if(strcasecompare(clist->name, co->name)) {
/* the names are identical */
bool matching_domains = FALSE;
if(clist->domain && co->domain) {
if(strcasecompare(clist->domain, co->domain))
/* The domains are identical */
matching_domains = TRUE;
}
else if(!clist->domain && !co->domain)
matching_domains = TRUE;
if(matching_domains && /* the domains were identical */
clist->spath && co->spath && /* both have paths */
clist->secure && !co->secure && !secure) {
size_t cllen;
const char *sep;
/*
* A non-secure cookie may not overlay an existing secure cookie.
* For an existing cookie "a" with path "/login", refuse a new
* cookie "a" with for example path "/login/en", while the path
* "/loginhelper" is ok.
*/
sep = strchr(clist->spath + 1, '/');
if(sep)
cllen = sep - clist->spath;
else
cllen = strlen(clist->spath);
if(strncasecompare(clist->spath, co->spath, cllen)) {
infof(data, "cookie '%s' for domain '%s' dropped, would "
"overlay an existing cookie", co->name, co->domain);
freecookie(co);
return NULL;
}
}
}
if(!replace_co && strcasecompare(clist->name, co->name)) {
/* the names are identical */
if(clist->domain && co->domain) {
if(strcasecompare(clist->domain, co->domain) &&
(clist->tailmatch == co->tailmatch))
/* The domains are identical */
replace_old = TRUE;
}
else if(!clist->domain && !co->domain)
replace_old = TRUE;
if(replace_old) {
/* the domains were identical */
if(clist->spath && co->spath) {
if(strcasecompare(clist->spath, co->spath))
replace_old = TRUE;
else
replace_old = FALSE;
}
else if(!clist->spath && !co->spath)
replace_old = TRUE;
else
replace_old = FALSE;
}
if(replace_old && !co->livecookie && clist->livecookie) {
/*
* Both cookies matched fine, except that the already present cookie is
* "live", which means it was set from a header, while the new one was
* read from a file and thus isn't "live". "live" cookies are preferred
* so the new cookie is freed.
*/
freecookie(co);
return NULL;
}
if(replace_old) {
replace_co = co;
replace_clist = clist;
}
}
lastc = clist;
clist = clist->next;
}
if(replace_co) {
co = replace_co;
clist = replace_clist;
co->next = clist->next; /* get the next-pointer first */
/* when replacing, creationtime is kept from old */
co->creationtime = clist->creationtime;
/* then free all the old pointers */
free(clist->name);
free(clist->value);
free(clist->domain);
free(clist->path);
free(clist->spath);
free(clist->expirestr);
free(clist->version);
free(clist->maxage);
*clist = *co; /* then store all the new data */
free(co); /* free the newly allocated memory */
co = clist;
}
if(c->running)
/* Only show this when NOT reading the cookies from a file */
infof(data, "%s cookie %s=\"%s\" for domain %s, path %s, "
"expire %" CURL_FORMAT_CURL_OFF_T,
replace_old?"Replaced":"Added", co->name, co->value,
co->domain, co->path, co->expires);
if(!replace_old) {
/* then make the last item point on this new one */
if(lastc)
lastc->next = co;
else
c->cookies[myhash] = co;
c->numcookies++; /* one more cookie in the jar */
}
/*
* Now that we've added a new cookie to the jar, update the expiration
* tracker in case it is the next one to expire.
*/
if(co->expires && (co->expires < c->next_expiration))
c->next_expiration = co->expires;
return co;
}
| null | null | 197,805
|
340011739869148140831706810976304098697
| 717
|
cookie: reject cookies with "control bytes"
Rejects 0x01 - 0x1f (except 0x09) plus 0x7f
Reported-by: Axel Chong
Bug: https://curl.se/docs/CVE-2022-35252.html
CVE-2022-35252
Closes #9381
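A minimal sketch close to the rule the message states, rejecting 0x01-0x1f (except 0x09) plus 0x7f; where exactly curl calls it (on the parsed name and value) is inferred, not shown here:
#include <cstring>

// Hedged sketch: true when the string contains a byte outside cookie-octet.
static bool invalid_octets_sketch(const char *p) {
    // every control byte that is not allowed (TAB, 0x09, stays legal)
    static const char badoctets[] =
        "\x01\x02\x03\x04\x05\x06\x07\x08\x0a\x0b\x0c\x0d\x0e\x0f"
        "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d"
        "\x1e\x1f\x7f";
    // strcspn stops at the first bad octet; stopping before the
    // terminator means one was found
    return p[strcspn(p, badoctets)] != '\0';
}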
|
other
|
mruby
|
47068ae07a5fa3aa9a1879cdfe98a9ce0f339299
| 1
|
mrb_f_send(mrb_state *mrb, mrb_value self)
{
mrb_sym name;
mrb_value block, *regs;
mrb_method_t m;
struct RClass *c;
mrb_callinfo *ci = mrb->c->ci;
int n = ci->n;
if (ci->cci > CINFO_NONE) {
funcall:;
const mrb_value *argv;
mrb_int argc;
mrb_get_args(mrb, "n*&", &name, &argv, &argc, &block);
return mrb_funcall_with_block(mrb, self, name, argc, argv, block);
}
regs = mrb->c->ci->stack+1;
if (n == 0) {
mrb_argnum_error(mrb, 0, 1, -1);
}
else if (n == 15) {
name = mrb_obj_to_sym(mrb, RARRAY_PTR(regs[0])[0]);
}
else {
name = mrb_obj_to_sym(mrb, regs[0]);
}
c = mrb_class(mrb, self);
m = mrb_method_search_vm(mrb, &c, name);
  if (MRB_METHOD_UNDEF_P(m)) {            /* call method_missing */
goto funcall;
}
ci->mid = name;
ci->u.target_class = c;
/* remove first symbol from arguments */
if (n == 15) { /* variable length arguments */
regs[0] = mrb_ary_subseq(mrb, regs[0], 1, RARRAY_LEN(regs[0]) - 1);
}
else { /* n > 0 */
for (int i=0; i<n; i++) {
regs[i] = regs[i+1];
}
regs[n] = regs[n+1]; /* copy kdict or block */
if (ci->nk > 0) {
regs[n+1] = regs[n+2]; /* copy block */
}
ci->n--;
}
if (MRB_METHOD_CFUNC_P(m)) {
if (MRB_METHOD_NOARG_P(m)) {
check_method_noarg(mrb, ci);
}
if (MRB_METHOD_PROC_P(m)) {
mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m));
}
return MRB_METHOD_CFUNC(m)(mrb, self);
}
return exec_irep(mrb, self, MRB_METHOD_PROC(m));
}
| null | null | 197,808
|
162694432190174106015089185108525789681
| 64
|
vm.c: packed arguments length may be zero for `send` method.
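A minimal sketch of the guard the message implies for the packed-arguments path (n == 15) in mrb_f_send() above; types are stand-ins:
#include <cstddef>

// Hedged sketch: an empty packed-argument array has no element 0 to read as
// the method name; the real fix raises mrb_argnum_error() in that case.
struct PackedArgsSketch { const void *const *ptr; std::size_t len; };

bool packed_send_has_name(const PackedArgsSketch &args) {
    return args.len > 0;
}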
|
other
|
gpac
|
c535bad50d5812d27ee5b22b54371bddec411514
| 1
|
static GF_Err BM_ParseGlobalQuantizer(GF_BifsDecoder *codec, GF_BitStream *bs, GF_List *com_list)
{
GF_Node *node;
GF_Command *com;
GF_CommandField *inf;
node = gf_bifs_dec_node(codec, bs, NDT_SFWorldNode);
if (!node) return GF_NON_COMPLIANT_BITSTREAM;
/*reset global QP*/
if (codec->scenegraph->global_qp) {
gf_node_unregister(codec->scenegraph->global_qp, NULL);
}
codec->ActiveQP = NULL;
codec->scenegraph->global_qp = NULL;
if (gf_node_get_tag(node) != TAG_MPEG4_QuantizationParameter) {
gf_node_unregister(node, NULL);
return GF_NON_COMPLIANT_BITSTREAM;
}
/*register global QP*/
codec->ActiveQP = (M_QuantizationParameter *) node;
codec->ActiveQP->isLocal = 0;
codec->scenegraph->global_qp = node;
/*register TWICE: once for the command, and for the scenegraph globalQP*/
node->sgprivate->num_instances = 2;
com = gf_sg_command_new(codec->current_graph, GF_SG_GLOBAL_QUANTIZER);
inf = gf_sg_command_field_new(com);
inf->new_node = node;
inf->field_ptr = &inf->new_node;
inf->fieldType = GF_SG_VRML_SFNODE;
gf_list_add(com_list, com);
return GF_OK;
}
| null | null | 197,824
|
117674170084737235817324369429005978526
| 36
|
fixed #2194
|
other
|
jerryscript
|
f3a420b672927037beb4508d7bdd68fb25d2caf6
| 1
|
lexer_expect_object_literal_id (parser_context_t *context_p, /**< context */
uint32_t ident_opts) /**< lexer_obj_ident_opts_t option bits */
{
lexer_skip_spaces (context_p);
if (context_p->source_p >= context_p->source_end_p)
{
parser_raise_error (context_p, PARSER_ERR_PROPERTY_IDENTIFIER_EXPECTED);
}
context_p->token.keyword_type = LEXER_EOS;
context_p->token.line = context_p->line;
context_p->token.column = context_p->column;
bool create_literal_object = false;
JERRY_ASSERT ((ident_opts & LEXER_OBJ_IDENT_CLASS_IDENTIFIER) || !(ident_opts & LEXER_OBJ_IDENT_CLASS_NO_STATIC));
#if JERRY_FUNCTION_TO_STRING
if (ident_opts & LEXER_OBJ_IDENT_SET_FUNCTION_START)
{
context_p->function_start_p = context_p->source_p;
}
#endif /* JERRY_FUNCTION_TO_STRING */
if (lexer_parse_identifier (context_p, LEXER_PARSE_NO_OPTS))
{
if (!(ident_opts & (LEXER_OBJ_IDENT_ONLY_IDENTIFIERS | LEXER_OBJ_IDENT_OBJECT_PATTERN)))
{
lexer_skip_spaces (context_p);
context_p->token.flags = (uint8_t) (context_p->token.flags | LEXER_NO_SKIP_SPACES);
if (context_p->source_p < context_p->source_end_p
#if JERRY_ESNEXT
&& context_p->source_p[0] != LIT_CHAR_COMMA && context_p->source_p[0] != LIT_CHAR_RIGHT_BRACE
&& context_p->source_p[0] != LIT_CHAR_LEFT_PAREN && context_p->source_p[0] != LIT_CHAR_SEMICOLON
&& context_p->source_p[0] != LIT_CHAR_EQUALS
#endif /* JERRY_ESNEXT */
&& context_p->source_p[0] != LIT_CHAR_COLON)
{
if (lexer_compare_literal_to_string (context_p, "get", 3))
{
context_p->token.type = LEXER_PROPERTY_GETTER;
return;
}
if (lexer_compare_literal_to_string (context_p, "set", 3))
{
context_p->token.type = LEXER_PROPERTY_SETTER;
return;
}
#if JERRY_ESNEXT
if (lexer_compare_literal_to_string (context_p, "async", 5))
{
context_p->token.type = LEXER_KEYW_ASYNC;
return;
}
if (ident_opts & LEXER_OBJ_IDENT_CLASS_NO_STATIC)
{
if (lexer_compare_literal_to_string (context_p, "static", 6))
{
context_p->token.type = LEXER_KEYW_STATIC;
}
return;
}
#endif /* JERRY_ESNEXT */
}
}
create_literal_object = true;
}
#if JERRY_ESNEXT
else if (ident_opts & LEXER_OBJ_IDENT_CLASS_PRIVATE)
{
parser_raise_error (context_p, PARSER_ERR_INVALID_CHARACTER);
}
#endif /* JERRY_ESNEXT */
else
{
switch (context_p->source_p[0])
{
case LIT_CHAR_DOUBLE_QUOTE:
case LIT_CHAR_SINGLE_QUOTE:
{
lexer_parse_string (context_p, LEXER_STRING_NO_OPTS);
create_literal_object = true;
break;
}
#if JERRY_ESNEXT
case LIT_CHAR_LEFT_SQUARE:
{
#if JERRY_FUNCTION_TO_STRING
const uint8_t *function_start_p = context_p->function_start_p;
#endif /* JERRY_FUNCTION_TO_STRING */
lexer_consume_next_character (context_p);
lexer_next_token (context_p);
parser_parse_expression (context_p, PARSE_EXPR_NO_COMMA);
if (context_p->token.type != LEXER_RIGHT_SQUARE)
{
parser_raise_error (context_p, PARSER_ERR_RIGHT_SQUARE_EXPECTED);
}
#if JERRY_FUNCTION_TO_STRING
context_p->function_start_p = function_start_p;
#endif /* JERRY_FUNCTION_TO_STRING */
return;
}
case LIT_CHAR_ASTERISK:
{
if (ident_opts & (LEXER_OBJ_IDENT_ONLY_IDENTIFIERS | LEXER_OBJ_IDENT_OBJECT_PATTERN))
{
break;
}
context_p->token.type = LEXER_MULTIPLY;
lexer_consume_next_character (context_p);
return;
}
case LIT_CHAR_HASHMARK:
{
if (ident_opts & LEXER_OBJ_IDENT_CLASS_IDENTIFIER)
{
context_p->token.type = LEXER_HASHMARK;
return;
}
break;
}
#endif /* JERRY_ESNEXT */
case LIT_CHAR_LEFT_BRACE:
{
if (ident_opts & (LEXER_OBJ_IDENT_CLASS_NO_STATIC | LEXER_OBJ_IDENT_CLASS_PRIVATE))
{
break;
}
context_p->token.type = LEXER_LEFT_BRACE;
lexer_consume_next_character (context_p);
return;
}
case LIT_CHAR_RIGHT_BRACE:
{
if (ident_opts & LEXER_OBJ_IDENT_ONLY_IDENTIFIERS)
{
break;
}
context_p->token.type = LEXER_RIGHT_BRACE;
lexer_consume_next_character (context_p);
return;
}
#if JERRY_ESNEXT
case LIT_CHAR_DOT:
{
if (!(context_p->source_p + 1 >= context_p->source_end_p || lit_char_is_decimal_digit (context_p->source_p[1])))
{
if ((ident_opts & ((uint32_t) ~(LEXER_OBJ_IDENT_OBJECT_PATTERN | LEXER_OBJ_IDENT_SET_FUNCTION_START)))
|| context_p->source_p + 2 >= context_p->source_end_p || context_p->source_p[1] != LIT_CHAR_DOT
|| context_p->source_p[2] != LIT_CHAR_DOT)
{
break;
}
context_p->token.type = LEXER_THREE_DOTS;
context_p->token.flags &= (uint8_t) ~LEXER_NO_SKIP_SPACES;
PARSER_PLUS_EQUAL_LC (context_p->column, 3);
context_p->source_p += 3;
return;
}
/* FALLTHRU */
}
#endif /* JERRY_ESNEXT */
default:
{
const uint8_t *char_p = context_p->source_p;
if (char_p[0] == LIT_CHAR_DOT)
{
char_p++;
}
if (char_p < context_p->source_end_p && char_p[0] >= LIT_CHAR_0 && char_p[0] <= LIT_CHAR_9)
{
lexer_parse_number (context_p);
if (!(ident_opts & LEXER_OBJ_IDENT_CLASS_IDENTIFIER))
{
lexer_construct_number_object (context_p, false, false);
}
return;
}
break;
}
}
}
if (create_literal_object)
{
#if JERRY_ESNEXT
if (ident_opts & LEXER_OBJ_IDENT_CLASS_IDENTIFIER)
{
return;
}
if (ident_opts & LEXER_OBJ_IDENT_CLASS_PRIVATE)
{
parser_resolve_private_identifier (context_p);
return;
}
#endif /* JERRY_ESNEXT */
lexer_construct_literal_object (context_p, &context_p->token.lit_location, LEXER_STRING_LITERAL);
return;
}
parser_raise_error (context_p, PARSER_ERR_PROPERTY_IDENTIFIER_EXPECTED);
} /* lexer_expect_object_literal_id */
| null | null | 197,825
|
18090551596235490242321914030640150439
| 221
|
Fix class static block opening brace parsing (#4942)
The next character should not be consumed after finding the static block opening brace.
This patch fixes #4916.
JerryScript-DCO-1.0-Signed-off-by: Martin Negyokru [email protected]
|
other
|
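Per the commit message, the character after the static block's opening brace must not be consumed, so the block parser still starts from the right position. One way to express the idea is a peek-don't-consume check; the sketch below uses invented names and is not JerryScript's lexer API.

#include <stdio.h>

static const char *cursor;

/* Recognize the block-opening brace without advancing past it, so the
   routine that parses the block body still starts on the brace. */
static int at_static_block(void)
{
    return *cursor == '{';
}

int main(void)
{
    cursor = "{ x = 1; }";
    if (at_static_block())
        printf("block parser sees: %s\n", cursor); /* cursor unchanged */
    return 0;
}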
tensorflow
|
7731e8dfbe4a56773be5dc94d631611211156659
| 1
|
bool IsConstantFoldable(
const Node* n,
const std::unordered_map<string, std::vector<PartialTensorShape>>*
shape_map,
const std::function<bool(const Node*)>& consider,
int64_t max_constant_size_in_bytes,
std::unordered_map<const Node*, std::vector<Tensor>>*
shape_replacement_map) {
if (n->IsConstant()) {
return true;
}
if (MaybeReplaceShapeOp(n, shape_map, shape_replacement_map)) {
return true;
}
if (n->op_def().is_stateful()) {
return false;
}
if (consider && !consider(n)) {
return false;
}
if (shape_map != nullptr) {
// We can skip the node if an output is known to be oversized.
auto shape_it = shape_map->find(n->name());
if (shape_it != shape_map->end()) {
for (int64_t i = 0; i < shape_it->second.size(); ++i) {
const auto& out_shape = shape_it->second[i];
if (out_shape.IsFullyDefined() &&
out_shape.num_elements() * DataTypeSize(n->output_type(i)) >
max_constant_size_in_bytes) {
return false;
}
}
}
}
if (n->IsControlFlow() || n->IsSend() || n->IsRecv()) {
return false;
}
// TODO(yuanbyu): For now disable these session handle operations.
if (n->IsGetSessionHandle() || n->IsGetSessionTensor() ||
n->IsDeleteSessionTensor()) {
return false;
}
if (n->IsSource()) {
return false;
}
if (n->IsSink()) {
return false;
}
if (n->IsFakeParam()) {
return false;
}
// Since constant-folding runs on the CPU, do not attempt to constant-fold
// operators that have no CPU kernel. Also implies that we will not
// constant-fold functions.
// TODO(phawkins): allow constant-folding for functions; functions may
// be arbitrarily expensive to execute.
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), n->def())) {
return false;
}
// Do not constant fold nodes which will be allocated by ScopedAllocator.
// This is because the constant-folding graph will not contain the
// `_ScopedAllocator` node, and that is necessary to be able to run a node
// that will use this allocator.
if (n->attrs().Find(kScopedAllocatorAttrName) != nullptr) {
VLOG(2) << "Skip node [" << n->DebugString()
<< "] for constant folding due to scoped allocator";
return false;
}
return true;
}
| null | null | 197,826
|
172373573712419656450812250984956481158
| 70
|
Don't constant-fold DT_RESOURCE constants.
PiperOrigin-RevId: 391803952
Change-Id: I0ea3ec31d3e7dfda0f03b4027a237f08d00a3091
|
other
|
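The commit message says constants of type DT_RESOURCE must not be constant-folded. A hedged sketch in plain C (TensorFlow itself is C++) of adding such a dtype guard to a foldability predicate; all names here are invented:

#include <stdbool.h>
#include <stdio.h>

enum dtype { DT_FLOAT, DT_INT32, DT_RESOURCE };

struct node { bool is_constant; enum dtype dtype; };

static bool is_constant_foldable(const struct node *n)
{
    if (n->dtype == DT_RESOURCE)
        return false;            /* resource handles are never folded */
    return n->is_constant;
}

int main(void)
{
    struct node f = { true, DT_FLOAT }, r = { true, DT_RESOURCE };
    printf("%d %d\n", is_constant_foldable(&f), is_constant_foldable(&r));
    return 0;                    /* prints: 1 0 */
}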
samba
|
d92dfb0dabf9cfccb86f2b1146d6c353af2e1435
| 1
|
static NTSTATUS ldapsrv_SearchRequest(struct ldapsrv_call *call)
{
struct ldap_SearchRequest *req = &call->request->r.SearchRequest;
struct ldap_Result *done;
struct ldapsrv_reply *done_r;
TALLOC_CTX *local_ctx;
struct ldapsrv_context *callback_ctx = NULL;
struct ldb_context *samdb = talloc_get_type(call->conn->ldb, struct ldb_context);
struct ldb_dn *basedn;
struct ldb_request *lreq;
struct ldb_control *search_control;
struct ldb_search_options_control *search_options;
struct ldb_control *extended_dn_control;
struct ldb_extended_dn_control *extended_dn_decoded = NULL;
struct ldb_control *notification_control = NULL;
enum ldb_scope scope = LDB_SCOPE_DEFAULT;
const char **attrs = NULL;
const char *scope_str, *errstr = NULL;
int result = -1;
int ldb_ret = -1;
unsigned int i;
int extended_type = 1;
DEBUG(10, ("SearchRequest"));
DEBUGADD(10, (" basedn: %s", req->basedn));
DEBUGADD(10, (" filter: %s\n", ldb_filter_from_tree(call, req->tree)));
local_ctx = talloc_new(call);
NT_STATUS_HAVE_NO_MEMORY(local_ctx);
basedn = ldb_dn_new(local_ctx, samdb, req->basedn);
NT_STATUS_HAVE_NO_MEMORY(basedn);
DEBUG(10, ("SearchRequest: basedn: [%s]\n", req->basedn));
DEBUG(10, ("SearchRequest: filter: [%s]\n", ldb_filter_from_tree(call, req->tree)));
switch (req->scope) {
case LDAP_SEARCH_SCOPE_BASE:
scope_str = "BASE";
scope = LDB_SCOPE_BASE;
break;
case LDAP_SEARCH_SCOPE_SINGLE:
scope_str = "ONE";
scope = LDB_SCOPE_ONELEVEL;
break;
case LDAP_SEARCH_SCOPE_SUB:
scope_str = "SUB";
scope = LDB_SCOPE_SUBTREE;
break;
default:
result = LDAP_PROTOCOL_ERROR;
map_ldb_error(local_ctx, LDB_ERR_PROTOCOL_ERROR, NULL,
&errstr);
errstr = talloc_asprintf(local_ctx,
"%s. Invalid scope", errstr);
goto reply;
}
DEBUG(10,("SearchRequest: scope: [%s]\n", scope_str));
if (req->num_attributes >= 1) {
attrs = talloc_array(local_ctx, const char *, req->num_attributes+1);
NT_STATUS_HAVE_NO_MEMORY(attrs);
for (i=0; i < req->num_attributes; i++) {
DEBUG(10,("SearchRequest: attrs: [%s]\n",req->attributes[i]));
attrs[i] = req->attributes[i];
}
attrs[i] = NULL;
}
DEBUG(5,("ldb_request %s dn=%s filter=%s\n",
scope_str, req->basedn, ldb_filter_from_tree(call, req->tree)));
callback_ctx = talloc_zero(local_ctx, struct ldapsrv_context);
NT_STATUS_HAVE_NO_MEMORY(callback_ctx);
callback_ctx->call = call;
callback_ctx->extended_type = extended_type;
callback_ctx->attributesonly = req->attributesonly;
ldb_ret = ldb_build_search_req_ex(&lreq, samdb, local_ctx,
basedn, scope,
req->tree, attrs,
call->request->controls,
callback_ctx,
ldap_server_search_callback,
NULL);
if (ldb_ret != LDB_SUCCESS) {
goto reply;
}
if (call->conn->global_catalog) {
search_control = ldb_request_get_control(lreq, LDB_CONTROL_SEARCH_OPTIONS_OID);
search_options = NULL;
if (search_control) {
search_options = talloc_get_type(search_control->data, struct ldb_search_options_control);
search_options->search_options |= LDB_SEARCH_OPTION_PHANTOM_ROOT;
} else {
search_options = talloc(lreq, struct ldb_search_options_control);
NT_STATUS_HAVE_NO_MEMORY(search_options);
search_options->search_options = LDB_SEARCH_OPTION_PHANTOM_ROOT;
ldb_request_add_control(lreq, LDB_CONTROL_SEARCH_OPTIONS_OID, false, search_options);
}
} else {
ldb_request_add_control(lreq, DSDB_CONTROL_NO_GLOBAL_CATALOG, false, NULL);
}
extended_dn_control = ldb_request_get_control(lreq, LDB_CONTROL_EXTENDED_DN_OID);
if (extended_dn_control) {
if (extended_dn_control->data) {
extended_dn_decoded = talloc_get_type(extended_dn_control->data, struct ldb_extended_dn_control);
extended_type = extended_dn_decoded->type;
} else {
extended_type = 0;
}
callback_ctx->extended_type = extended_type;
}
notification_control = ldb_request_get_control(lreq, LDB_CONTROL_NOTIFICATION_OID);
if (notification_control != NULL) {
const struct ldapsrv_call *pc = NULL;
size_t count = 0;
for (pc = call->conn->pending_calls; pc != NULL; pc = pc->next) {
count += 1;
}
if (count >= call->conn->limits.max_notifications) {
DEBUG(10,("SearchRequest: error MaxNotificationPerConn\n"));
result = map_ldb_error(local_ctx,
LDB_ERR_ADMIN_LIMIT_EXCEEDED,
"MaxNotificationPerConn reached",
&errstr);
goto reply;
}
/*
* For now we need to do periodic retries on our own.
* As the dsdb_notification module will return after each run.
*/
call->notification.busy = true;
}
{
const char *scheme = NULL;
switch (call->conn->referral_scheme) {
case LDAP_REFERRAL_SCHEME_LDAPS:
scheme = "ldaps";
break;
default:
scheme = "ldap";
}
ldb_ret = ldb_set_opaque(
samdb,
LDAP_REFERRAL_SCHEME_OPAQUE,
discard_const_p(char *, scheme));
if (ldb_ret != LDB_SUCCESS) {
goto reply;
}
}
{
time_t timeout = call->conn->limits.search_timeout;
if (timeout == 0
|| (req->timelimit != 0
&& req->timelimit < timeout))
{
timeout = req->timelimit;
}
ldb_set_timeout(samdb, lreq, timeout);
}
if (!call->conn->is_privileged) {
ldb_req_mark_untrusted(lreq);
}
LDB_REQ_SET_LOCATION(lreq);
ldb_ret = ldb_request(samdb, lreq);
if (ldb_ret != LDB_SUCCESS) {
goto reply;
}
ldb_ret = ldb_wait(lreq->handle, LDB_WAIT_ALL);
if (ldb_ret == LDB_SUCCESS) {
if (call->notification.busy) {
/* Move/Add it to the end */
DLIST_DEMOTE(call->conn->pending_calls, call);
call->notification.generation =
call->conn->service->notification.generation;
if (callback_ctx->count != 0) {
call->notification.generation += 1;
ldapsrv_notification_retry_setup(call->conn->service,
true);
}
talloc_free(local_ctx);
return NT_STATUS_OK;
}
}
reply:
DLIST_REMOVE(call->conn->pending_calls, call);
call->notification.busy = false;
done_r = ldapsrv_init_reply(call, LDAP_TAG_SearchResultDone);
NT_STATUS_HAVE_NO_MEMORY(done_r);
done = &done_r->msg->r.SearchResultDone;
done->dn = NULL;
done->referral = NULL;
if (result != -1) {
} else if (ldb_ret == LDB_SUCCESS) {
if (callback_ctx->controls) {
done_r->msg->controls = callback_ctx->controls;
talloc_steal(done_r->msg, callback_ctx->controls);
}
result = LDB_SUCCESS;
} else {
DEBUG(10,("SearchRequest: error\n"));
result = map_ldb_error(local_ctx, ldb_ret, ldb_errstring(samdb),
&errstr);
}
done->resultcode = result;
done->errormessage = (errstr?talloc_strdup(done_r, errstr):NULL);
talloc_free(local_ctx);
return ldapsrv_queue_reply_forced(call, done_r);
}
| null | null | 197,830
|
314918662591421001470003516678988238334
| 238
|
CVE-2021-3670 ldap_server: Remove duplicate print of LDAP search details
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14694
Signed-off-by: Andrew Bartlett <[email protected]>
Reviewed-by: Douglas Bagnall <[email protected]>
(cherry picked from commit 2b3af3b560c9617a233c131376c870fce146c002)
|
other
|
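The fix removes the duplicated DEBUG output of the base DN and filter, so ldb_filter_from_tree is not run twice on request-controlled data. A minimal C sketch of the format-once, log-once pattern; format_filter is an invented stand-in:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for an expensive formatter such as ldb_filter_from_tree. */
static char *format_filter(const char *raw)
{
    char *out = malloc(strlen(raw) + 1);
    if (out)
        strcpy(out, raw);
    return out;
}

int main(void)
{
    char *filter = format_filter("(objectClass=*)"); /* run exactly once */
    if (filter == NULL)
        return 1;
    printf("SearchRequest: filter: [%s]\n", filter); /* reuse the result */
    free(filter);
    return 0;
}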
radare2
|
3345147916b9bb3da225248d571cdbac690c0c4d
| 1
|
R_API bool r_io_bank_map_add_top(RIO *io, const ut32 bankid, const ut32 mapid) {
RIOBank *bank = r_io_bank_get (io, bankid);
RIOMap *map = r_io_map_get (io, mapid);
r_return_val_if_fail (io && bank && map, false);
RIOMapRef *mapref = _mapref_from_map (map);
if (!mapref) {
return false;
}
RIOSubMap *sm = r_io_submap_new (io, mapref);
if (!sm) {
free (mapref);
return false;
}
RRBNode *entry = _find_entry_submap_node (bank, sm);
if (!entry) {
// no intersection with any submap, so just insert
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
bank->last_used = NULL;
RIOSubMap *bd = (RIOSubMap *)entry->data;
if (r_io_submap_to (bd) == r_io_submap_to (sm) &&
r_io_submap_from (bd) >= r_io_submap_from (sm)) {
// _find_entry_submap_node guarantees that there is no submap
// prior to bd in the range of sm, so instead of deleting and inserting
// we can just memcpy
memcpy (bd, sm, sizeof (RIOSubMap));
free (sm);
r_list_append (bank->maprefs, mapref);
return true;
}
if (r_io_submap_from (bd) < r_io_submap_from (sm) &&
r_io_submap_to (sm) < r_io_submap_to (bd)) {
// split bd into 2 maps => bd and bdsm
RIOSubMap *bdsm = R_NEWCOPY (RIOSubMap, bd);
if (!bdsm) {
free (sm);
free (mapref);
return false;
}
r_io_submap_set_from (bdsm, r_io_submap_to (sm) + 1);
r_io_submap_set_to (bd, r_io_submap_from (sm) - 1);
// TODO: insert and check return value, before adjusting sm size
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (bdsm);
free (mapref);
return false;
}
if (!r_crbtree_insert (bank->submaps, bdsm, _find_sm_by_from_vaddr_cb, NULL)) {
r_crbtree_delete (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL);
free (sm);
free (bdsm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
// guaranteed intersection
if (r_io_submap_from (bd) < r_io_submap_from (sm)) {
r_io_submap_set_to (bd, r_io_submap_from (sm) - 1);
entry = r_rbnode_next (entry);
}
ut64 smto = r_io_submap_to (sm);
while (entry && r_io_submap_to (((RIOSubMap *)entry->data)) <= smto) {
//delete all submaps that are completely included in sm
RRBNode *next = r_rbnode_next (entry);
// this can be optimized, there is no need to do a search here
// XXX this is a workaround to avoid a UAF in Reproducer: iobank-crash
void *smfree = bank->submaps->free;
bank->submaps->free = NULL;
bool a = r_crbtree_delete (bank->submaps, entry->data, _find_sm_by_from_vaddr_cb, NULL);
bank->submaps->free = smfree;
if (!a) {
entry = NULL;
break;
}
entry = next;
}
if (entry && r_io_submap_from (((RIOSubMap *)entry->data)) <= r_io_submap_to (sm)) {
bd = (RIOSubMap *)entry->data;
r_io_submap_set_from (bd, r_io_submap_to (sm) + 1);
}
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
| null | null | 197,831
|
63723229772803148724831552912896980310
| 98
|
Properly fix the UAF in r_io_bank_map_add_top ##crash
* Associated with CVE-2022-0559
* Reported by alkyne Choi via huntr.dev
|
other
|
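The code above works around the use-after-free by clearing bank->submaps->free before r_crbtree_delete, so a payload that is still referenced is not freed during node removal. A small self-contained sketch of that save/clear/restore pattern with invented types:

#include <stdio.h>
#include <stdlib.h>

struct tree { void (*free_cb)(void *); void *data; };

/* Remove a node while its payload is still referenced elsewhere:
   clear the element-free callback for the deletion, then restore it. */
static void delete_keep_payload(struct tree *t)
{
    void (*saved)(void *) = t->free_cb;
    t->free_cb = NULL;      /* deletion must not free t->data */
    /* ... node removal would happen here ... */
    t->free_cb = saved;
}

int main(void)
{
    struct tree t = { free, malloc(8) };
    delete_keep_payload(&t);
    printf("payload still owned by caller: %p\n", t.data);
    free(t.data);           /* freed exactly once, by the real owner */
    return 0;
}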
zsh
|
c187154f47697cdbf822c2f9d714d570ed4a0fd1
| 1
|
parsecolorchar(zattr arg, int is_fg)
{
if (bv->fm[1] == '{') {
char *ep;
bv->fm += 2; /* skip over F{ */
if ((ep = strchr(bv->fm, '}'))) {
char oc = *ep, *col, *coll;
*ep = '\0';
/* expand the contents of the argument so you can use
* %v for example */
coll = col = promptexpand(bv->fm, 0, NULL, NULL, NULL);
*ep = oc;
arg = match_colour((const char **)&coll, is_fg, 0);
free(col);
bv->fm = ep;
} else {
arg = match_colour((const char **)&bv->fm, is_fg, 0);
if (*bv->fm != '}')
bv->fm--;
}
} else
arg = match_colour(NULL, 1, arg);
return arg;
}
| null | null | 197,850
|
20338572218079855309761187886154019823
| 24
|
security/41: Don't perform PROMPT_SUBST evaluation on %F/%K arguments
Mitigates CVE-2021-45444
|
other
|
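The mitigation described in the message is to stop passing the %F{...} argument through promptexpand and treat it as a literal colour specification. A minimal sketch; match_colour_literal is a hypothetical stand-in, not zsh's match_colour:

#include <stdio.h>
#include <string.h>

/* Parse the colour argument literally; no second expansion pass, so a
   value such as "%v" can no longer smuggle in prompt escapes. */
static int match_colour_literal(const char *arg)
{
    return strcmp(arg, "red") == 0 ? 1 : -1;
}

int main(void)
{
    printf("%d %d\n", match_colour_literal("red"),
           match_colour_literal("%v")); /* prints: 1 -1 */
    return 0;
}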
openscad
|
b81369dffc3f385257a9b1f5c271118a88671d6d
| 1
|
static std::string getComment(const std::string &fulltext, int line)
{
if (line < 1) return "";
// Locate line
unsigned int start = 0;
for (; start<fulltext.length() ; ++start) {
if (line <= 1) break;
if (fulltext[start] == '\n') line--;
}
int end = start + 1;
while (fulltext[end] != '\n') end++;
std::string comment = fulltext.substr(start, end - start);
// Locate comment
unsigned int startText = 0;
int noOfSemicolon = 0;
bool inString = false;
for (; startText < comment.length() - 1; ++startText) {
if (inString && comment.compare(startText, 2, "\\\"") == 0) {
startText++;
continue;
}
if (comment[startText] == '"') inString = !inString;
if (!inString) {
if (comment.compare(startText, 2, "//") == 0) break;
if (comment[startText] == ';' && noOfSemicolon > 0) return "";
if (comment[startText] == ';') noOfSemicolon++;
}
}
if (startText + 2 > comment.length()) return "";
std::string result = comment.substr(startText + 2);
return result;
}
| null | null | 197,891
|
313568077470000783177680974177297728787
| 38
|
Add file bounds check to comment parser
|
other
|
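The scan at `while (fulltext[end] != '\n') end++;` has no length bound, so a file whose last line lacks a newline is read past its end. A short sketch of the bounded scan the commit message calls for (names invented):

#include <stdio.h>
#include <string.h>

/* Stop at the buffer length as well as at '\n'. */
static size_t line_end(const char *text, size_t len, size_t start)
{
    size_t end = start;
    while (end < len && text[end] != '\n')
        end++;
    return end;
}

int main(void)
{
    const char *text = "cube(1); // size";  /* no trailing newline */
    printf("line is %zu bytes\n", line_end(text, strlen(text), 0));
    return 0;                               /* 16 bytes, no overread */
}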
tensorflow
|
eb921122119a6b6e470ee98b89e65d721663179d
| 1
|
TfLiteStatus Gather(const TfLiteGatherParams& params, const TfLiteTensor* input,
const TfLiteTensor* positions, TfLiteTensor* output) {
tflite::GatherParams op_params;
op_params.axis = params.axis;
op_params.batch_dims = params.batch_dims;
optimized_ops::Gather(op_params, GetTensorShape(input),
GetTensorData<InputT>(input), GetTensorShape(positions),
GetTensorData<PositionsT>(positions),
GetTensorShape(output), GetTensorData<InputT>(output));
return kTfLiteOk;
}
| null | null | 197,893
|
141143556549957882519288651479279376437
| 11
|
Prevent heap OOB read in TFLite's `gather.cc`.
Passing negative indices is illegal, but a check was missing, which resulted in OOB accesses.
PiperOrigin-RevId: 387231300
Change-Id: I3111b54b2f232638d795be17efc46abe4ede6bf8
|
other
|
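The missing check named in the message is index validation: every gathered position must lie in [0, axis_size) before it is used to read input data. A hedged sketch in plain C (TFLite is C++; names invented):

#include <stdio.h>

/* Validate every index against [0, axis_size) before reading. */
static int gather_checked(const int *data, int axis_size,
                          const int *idx, int idx_len, int *out)
{
    for (int i = 0; i < idx_len; i++) {
        if (idx[i] < 0 || idx[i] >= axis_size)
            return -1;              /* would otherwise be an OOB read */
        out[i] = data[idx[i]];
    }
    return 0;
}

int main(void)
{
    int data[4] = { 10, 20, 30, 40 }, out[2];
    int ok[2] = { 1, 3 }, bad[2] = { -1, 2 };
    printf("%d ", gather_checked(data, 4, ok, 2, out));   /* 0 */
    printf("%d\n", gather_checked(data, 4, bad, 2, out)); /* -1 */
    return 0;
}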
tensorflow
|
96f364a1ca3009f98980021c4b32be5fdcca33a1
| 1
|
void Compute(OpKernelContext* ctx) override {
const Tensor& gradient = ctx->input(0);
const Tensor& input = ctx->input(1);
Tensor* input_backprop = nullptr;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, input.shape(), &input_backprop));
OP_REQUIRES(
ctx, input.IsSameSize(gradient),
errors::InvalidArgument("gradient and input must be the same size"));
const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_);
const Tensor& input_min_tensor = ctx->input(2);
OP_REQUIRES(ctx,
input_min_tensor.dims() == 0 || input_min_tensor.dims() == 1,
errors::InvalidArgument(
"Input min tensor must have dimension 1. Recieved ",
input_min_tensor.dims(), "."));
const Tensor& input_max_tensor = ctx->input(3);
OP_REQUIRES(ctx,
input_max_tensor.dims() == 0 || input_max_tensor.dims() == 1,
errors::InvalidArgument(
"Input max tensor must have dimension 1. Recieved ",
input_max_tensor.dims(), "."));
if (axis_ != -1) {
OP_REQUIRES(
ctx, input_min_tensor.dim_size(0) == depth,
errors::InvalidArgument("min has incorrect size, expected ", depth,
" was ", input_min_tensor.dim_size(0)));
OP_REQUIRES(
ctx, input_max_tensor.dim_size(0) == depth,
errors::InvalidArgument("max has incorrect size, expected ", depth,
" was ", input_max_tensor.dim_size(0)));
}
TensorShape min_max_shape(input_min_tensor.shape());
Tensor* input_min_backprop;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(1, min_max_shape, &input_min_backprop));
Tensor* input_max_backprop;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(2, min_max_shape, &input_max_backprop));
if (axis_ == -1) {
functor::QuantizeAndDequantizeOneScaleGradientFunctor<Device, T> f;
f(ctx->eigen_device<Device>(), gradient.template flat<T>(),
input.template flat<T>(), input_min_tensor.scalar<T>(),
input_max_tensor.scalar<T>(), input_backprop->template flat<T>(),
input_min_backprop->template scalar<T>(),
input_max_backprop->template scalar<T>());
} else {
functor::QuantizeAndDequantizePerChannelGradientFunctor<Device, T> f;
f(ctx->eigen_device<Device>(),
gradient.template flat_inner_outer_dims<T, 3>(axis_ - 1),
input.template flat_inner_outer_dims<T, 3>(axis_ - 1),
&input_min_tensor, &input_max_tensor,
input_backprop->template flat_inner_outer_dims<T, 3>(axis_ - 1),
input_min_backprop->template flat<T>(),
input_max_backprop->template flat<T>());
}
}
| null | null | 197,898
|
310199121109835568765569218593347618755
| 61
|
Validate axis input in tf.raw_ops.QuantizeAndDequantizeV4Grad
PiperOrigin-RevId: 388291385
Change-Id: I3bab68dc61d935afa96c0da021a7b722c6dc8dc8
|
other
|
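The Compute method above indexes input.dim_size(axis_) without first checking that axis_ is a valid dimension, which is what the commit message addresses. A minimal sketch of the range check; check_axis is invented, and the real op would raise InvalidArgument rather than return -1:

#include <stdio.h>

/* Accept -1 (the "no axis" sentinel) or a valid dimension index. */
static int check_axis(int axis, int rank)
{
    return (axis < -1 || axis >= rank) ? -1 : 0;
}

int main(void)
{
    printf("%d %d %d\n", check_axis(-1, 3), check_axis(2, 3),
           check_axis(7, 3)); /* prints: 0 0 -1 */
    return 0;
}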
u-boot
|
2ac0baab4aff1a0b45067d0b62f00c15f4e86856
| 1
|
int sqfs_readdir(struct fs_dir_stream *fs_dirs, struct fs_dirent **dentp)
{
struct squashfs_super_block *sblk = ctxt.sblk;
struct squashfs_dir_stream *dirs;
struct squashfs_lreg_inode *lreg;
struct squashfs_base_inode *base;
struct squashfs_reg_inode *reg;
int i_number, offset = 0, ret;
struct fs_dirent *dent;
unsigned char *ipos;
dirs = (struct squashfs_dir_stream *)fs_dirs;
if (!dirs->size) {
*dentp = NULL;
return -SQFS_STOP_READDIR;
}
dent = &dirs->dentp;
if (!dirs->entry_count) {
if (dirs->size > SQFS_DIR_HEADER_SIZE) {
dirs->size -= SQFS_DIR_HEADER_SIZE;
} else {
*dentp = NULL;
dirs->size = 0;
return -SQFS_STOP_READDIR;
}
if (dirs->size > SQFS_EMPTY_FILE_SIZE) {
/* Read follow-up (emitted) dir. header */
memcpy(dirs->dir_header, dirs->table,
SQFS_DIR_HEADER_SIZE);
dirs->entry_count = dirs->dir_header->count + 1;
ret = sqfs_read_entry(&dirs->entry, dirs->table +
SQFS_DIR_HEADER_SIZE);
if (ret)
return -SQFS_STOP_READDIR;
dirs->table += SQFS_DIR_HEADER_SIZE;
}
} else {
ret = sqfs_read_entry(&dirs->entry, dirs->table);
if (ret)
return -SQFS_STOP_READDIR;
}
i_number = dirs->dir_header->inode_number + dirs->entry->inode_offset;
ipos = sqfs_find_inode(dirs->inode_table, i_number, sblk->inodes,
sblk->block_size);
base = (struct squashfs_base_inode *)ipos;
/* Set entry type and size */
switch (dirs->entry->type) {
case SQFS_DIR_TYPE:
case SQFS_LDIR_TYPE:
dent->type = FS_DT_DIR;
break;
case SQFS_REG_TYPE:
case SQFS_LREG_TYPE:
/*
* Entries do not differentiate extended from regular types, so
* it needs to be verified manually.
*/
if (get_unaligned_le16(&base->inode_type) == SQFS_LREG_TYPE) {
lreg = (struct squashfs_lreg_inode *)ipos;
dent->size = get_unaligned_le64(&lreg->file_size);
} else {
reg = (struct squashfs_reg_inode *)ipos;
dent->size = get_unaligned_le32(&reg->file_size);
}
dent->type = FS_DT_REG;
break;
case SQFS_BLKDEV_TYPE:
case SQFS_CHRDEV_TYPE:
case SQFS_LBLKDEV_TYPE:
case SQFS_LCHRDEV_TYPE:
case SQFS_FIFO_TYPE:
case SQFS_SOCKET_TYPE:
case SQFS_LFIFO_TYPE:
case SQFS_LSOCKET_TYPE:
dent->type = SQFS_MISC_ENTRY_TYPE;
break;
case SQFS_SYMLINK_TYPE:
case SQFS_LSYMLINK_TYPE:
dent->type = FS_DT_LNK;
break;
default:
return -SQFS_STOP_READDIR;
}
/* Set entry name */
strncpy(dent->name, dirs->entry->name, dirs->entry->name_size + 1);
dent->name[dirs->entry->name_size + 1] = '\0';
offset = dirs->entry->name_size + 1 + SQFS_ENTRY_BASE_LENGTH;
dirs->entry_count--;
/* Decrement size to be read */
if (dirs->size > offset)
dirs->size -= offset;
else
dirs->size = 0;
/* Keep a reference to the current entry before incrementing it */
dirs->table += offset;
*dentp = dent;
return 0;
}
| null | null | 197,899
|
185658142793571267133875338779495413570
| 112
|
fs/squashfs: sqfs_read: Prevent arbitrary code execution
Following Jincheng's report, an out-of-bounds write leading to arbitrary
code execution is possible because on one side the squashfs logic
accepts directory names up to 65535 bytes (u16), while U-Boot fs logic
accepts directory names up to 255 bytes long.
Prevent such an exploit from happening by capping directory name sizes
to 255. Use a define for this purpose so that developers can link the
limitation to its source and eventually kill it some day by dynamically
allocating this array (if ever desired).
Link: https://lore.kernel.org/all/CALO=DHFB+yBoXxVr5KcsK0iFdg+e7ywko4-e+72kjbcS8JBfPw@mail.gmail.com
Reported-by: Jincheng Wang <[email protected]>
Signed-off-by: Miquel Raynal <[email protected]>
Tested-by: Jincheng Wang <[email protected]>
|
other
|
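The overflow is at the strncpy into dent->name: squashfs allows name_size up to 65535, while the fs_dirent buffer holds 255 bytes. A sketch of the capped copy the message describes; DENT_NAME_CAP here stands in for the define the patch introduces:

#include <stdio.h>
#include <string.h>

#define DENT_NAME_CAP 255   /* destination capacity from the fs layer */

static void copy_dirent_name(char dst[DENT_NAME_CAP + 1],
                             const char *src, size_t on_disk_len)
{
    size_t n = on_disk_len < DENT_NAME_CAP ? on_disk_len : DENT_NAME_CAP;
    memcpy(dst, src, n);
    dst[n] = '\0';          /* always terminated, never past the buffer */
}

int main(void)
{
    char name[DENT_NAME_CAP + 1];
    copy_dirent_name(name, "file.txt", 8);
    printf("%s\n", name);
    return 0;
}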
cortx-s3server
|
dd6bfbc4b84f14f898598922ca9efd5aaa645c5d
| 1
|
int mempool_destroy(MemoryPoolHandle *handle) {
struct mempool *pool = NULL;
struct memory_pool_element *pool_item;
char *log_msg_fmt = "mempool(%p): free(%p) called for buffer size(%zu)";
char log_msg[200];
if (handle == NULL) {
return S3_MEMPOOL_INVALID_ARG;
}
pool = (struct mempool *)*handle;
if (pool == NULL) {
return S3_MEMPOOL_INVALID_ARG;
}
if ((pool->flags & ENABLE_LOCKING) != 0) {
pthread_mutex_lock(&pool->lock);
}
if (*handle == NULL) {
return S3_MEMPOOL_INVALID_ARG;
}
/* reset the handle */
*handle = NULL;
/* Free the items in free list */
pool_item = pool->free_list;
while (pool_item != NULL) {
pool->free_list = pool_item->next;
/* Log message about free()'ed item */
if (pool->log_callback_func) {
snprintf(log_msg, sizeof(log_msg), log_msg_fmt, (void *)pool,
(void *)pool_item, pool->mempool_item_size);
pool->log_callback_func(MEMPOOL_LOG_DEBUG, log_msg);
}
free(pool_item);
#if 0
/* Need this if below asserts are there */
pool->total_bufs_allocated_by_pool--;
pool->free_bufs_in_pool--;
#endif
pool_item = pool->free_list;
}
pool->free_list = NULL;
/* TODO: libevhtp/libevent seems to hold some references and not release them
* back to the pool. A bug will be logged to investigate this.
*/
/* Assert if there are leaks */
/*
assert(pool->total_bufs_allocated_by_pool == 0);
assert(pool->number_of_bufs_shared == 0);
assert(pool->free_bufs_in_pool == 0);
*/
if ((pool->flags & ENABLE_LOCKING) != 0) {
pthread_mutex_unlock(&pool->lock);
pthread_mutex_destroy(&pool->lock);
}
free(pool);
pool = NULL;
return 0;
}
| null | null | 197,912
|
135921271694027953193959070993632037428
| 64
|
avoid the unreleased lock after the method returns (#1041)
Update s3_memory_pool.c. Avoid the unreleased lock pool->lock after the method returns.
Co-authored-by: nileshgovande <[email protected]>
|
other
|
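The unreleased lock is the early `return S3_MEMPOOL_INVALID_ARG;` taken after pthread_mutex_lock(&pool->lock). A minimal sketch of routing every post-lock exit through a single unlock path (compile with -lpthread; names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Every exit taken after the lock is acquired goes through `out`. */
static int destroy(void **handle)
{
    int rc = 0;
    pthread_mutex_lock(&lock);
    if (*handle == NULL) {
        rc = -1;
        goto out;           /* error path still releases the lock */
    }
    *handle = NULL;
out:
    pthread_mutex_unlock(&lock);
    return rc;
}

int main(void)
{
    void *h = NULL;
    printf("%d\n", destroy(&h)); /* -1, and the mutex is unlocked */
    return 0;
}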
crun
|
1aeeed2e4fdeffb4875c0d0b439915894594c8c6
| 1
|
crun_command_exec (struct crun_global_arguments *global_args, int argc, char **argv, libcrun_error_t *err)
{
int first_arg = 0, ret = 0;
libcrun_context_t crun_context = {
0,
};
cleanup_process_schema runtime_spec_schema_config_schema_process *process = NULL;
struct libcrun_container_exec_options_s exec_opts;
memset (&exec_opts, 0, sizeof (exec_opts));
exec_opts.struct_size = sizeof (exec_opts);
crun_context.preserve_fds = 0;
crun_context.listen_fds = 0;
argp_parse (&run_argp, argc, argv, ARGP_IN_ORDER, &first_arg, &exec_options);
crun_assert_n_args (argc - first_arg, exec_options.process ? 1 : 2, -1);
ret = init_libcrun_context (&crun_context, argv[first_arg], global_args, err);
if (UNLIKELY (ret < 0))
return ret;
crun_context.detach = exec_options.detach;
crun_context.console_socket = exec_options.console_socket;
crun_context.pid_file = exec_options.pid_file;
crun_context.preserve_fds = exec_options.preserve_fds;
if (getenv ("LISTEN_FDS"))
{
crun_context.listen_fds = strtoll (getenv ("LISTEN_FDS"), NULL, 10);
crun_context.preserve_fds += crun_context.listen_fds;
}
if (exec_options.process)
exec_opts.path = exec_options.process;
else
{
process = xmalloc0 (sizeof (*process));
int i;
process->args_len = argc;
process->args = xmalloc0 ((argc + 1) * sizeof (*process->args));
for (i = 0; i < argc - first_arg; i++)
process->args[i] = xstrdup (argv[first_arg + i + 1]);
process->args[i] = NULL;
if (exec_options.cwd)
process->cwd = exec_options.cwd;
process->terminal = exec_options.tty;
process->env = exec_options.env;
process->env_len = exec_options.env_size;
process->user = make_oci_process_user (exec_options.user);
if (exec_options.process_label != NULL)
process->selinux_label = exec_options.process_label;
if (exec_options.apparmor != NULL)
process->apparmor_profile = exec_options.apparmor;
if (exec_options.cap_size > 0)
{
runtime_spec_schema_config_schema_process_capabilities *capabilities
= xmalloc (sizeof (runtime_spec_schema_config_schema_process_capabilities));
capabilities->effective = exec_options.cap;
capabilities->effective_len = exec_options.cap_size;
capabilities->inheritable = dup_array (exec_options.cap, exec_options.cap_size);
capabilities->inheritable_len = exec_options.cap_size;
capabilities->bounding = dup_array (exec_options.cap, exec_options.cap_size);
capabilities->bounding_len = exec_options.cap_size;
capabilities->ambient = dup_array (exec_options.cap, exec_options.cap_size);
capabilities->ambient_len = exec_options.cap_size;
capabilities->permitted = dup_array (exec_options.cap, exec_options.cap_size);
capabilities->permitted_len = exec_options.cap_size;
process->capabilities = capabilities;
}
// noNewPrivileges will remain `false` if basespec has `false` unless specified
// Default is always `true` in generated basespec config
if (exec_options.no_new_privs)
process->no_new_privileges = 1;
exec_opts.process = process;
}
exec_opts.cgroup = exec_options.cgroup;
return libcrun_container_exec_with_options (&crun_context, argv[first_arg], &exec_opts, err);
}
| null | null | 197,973
|
67555112620788823184421673030526843273
| 93
|
exec: --cap do not set inheritable capabilities
Closes: CVE-2022-27650
Signed-off-by: Giuseppe Scrivano <[email protected]>
|
other
|
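The CVE-2022-27650 fix keeps `--cap` values out of the inheritable set while still populating the effective, bounding, permitted, and ambient sets. A small sketch with an invented struct (not crun's runtime_spec schema):

#include <stdio.h>
#include <stddef.h>

struct caps {
    const char **effective;   size_t effective_len;
    const char **bounding;    size_t bounding_len;
    const char **inheritable; size_t inheritable_len;
};

int main(void)
{
    const char *requested[] = { "CAP_NET_ADMIN" };
    struct caps c = { 0 };
    c.effective = requested; c.effective_len = 1;
    c.bounding  = requested; c.bounding_len  = 1;
    /* the key change: inheritable stays empty instead of mirroring */
    c.inheritable = NULL;    c.inheritable_len = 0;
    printf("inheritable_len=%zu\n", c.inheritable_len);
    return 0;
}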
jerryscript
|
3ad76f932c8d2e3b9ba2d95e64848698ec7d7290
| 1
|
vm_loop (vm_frame_ctx_t *frame_ctx_p) /**< frame context */
{
const ecma_compiled_code_t *bytecode_header_p = frame_ctx_p->shared_p->bytecode_header_p;
const uint8_t *byte_code_p = frame_ctx_p->byte_code_p;
ecma_value_t *literal_start_p = frame_ctx_p->literal_start_p;
ecma_value_t *stack_top_p;
uint16_t encoding_limit;
uint16_t encoding_delta;
uint16_t register_end;
uint16_t ident_end;
uint16_t const_literal_end;
int32_t branch_offset = 0;
uint8_t branch_offset_length = 0;
ecma_value_t left_value;
ecma_value_t right_value;
ecma_value_t result = ECMA_VALUE_EMPTY;
bool is_strict = ((bytecode_header_p->status_flags & CBC_CODE_FLAGS_STRICT_MODE) != 0);
/* Prepare for byte code execution. */
if (!(bytecode_header_p->status_flags & CBC_CODE_FLAGS_FULL_LITERAL_ENCODING))
{
encoding_limit = CBC_SMALL_LITERAL_ENCODING_LIMIT;
encoding_delta = CBC_SMALL_LITERAL_ENCODING_DELTA;
}
else
{
encoding_limit = CBC_FULL_LITERAL_ENCODING_LIMIT;
encoding_delta = CBC_FULL_LITERAL_ENCODING_DELTA;
}
if (bytecode_header_p->status_flags & CBC_CODE_FLAGS_UINT16_ARGUMENTS)
{
cbc_uint16_arguments_t *args_p = (cbc_uint16_arguments_t *) (bytecode_header_p);
register_end = args_p->register_end;
ident_end = args_p->ident_end;
const_literal_end = args_p->const_literal_end;
}
else
{
cbc_uint8_arguments_t *args_p = (cbc_uint8_arguments_t *) (bytecode_header_p);
register_end = args_p->register_end;
ident_end = args_p->ident_end;
const_literal_end = args_p->const_literal_end;
}
stack_top_p = frame_ctx_p->stack_top_p;
/* Outer loop for exception handling. */
while (true)
{
/* Internal loop for byte code execution. */
while (true)
{
const uint8_t *byte_code_start_p = byte_code_p;
uint8_t opcode = *byte_code_p++;
uint32_t opcode_data = opcode;
if (opcode == CBC_EXT_OPCODE)
{
opcode = *byte_code_p++;
opcode_data = (uint32_t) ((CBC_END + 1) + opcode);
}
opcode_data = vm_decode_table[opcode_data];
left_value = ECMA_VALUE_UNDEFINED;
right_value = ECMA_VALUE_UNDEFINED;
uint32_t operands = VM_OC_GET_ARGS_INDEX (opcode_data);
if (operands >= VM_OC_GET_LITERAL)
{
uint16_t literal_index;
READ_LITERAL_INDEX (literal_index);
READ_LITERAL (literal_index, left_value);
if (operands != VM_OC_GET_LITERAL)
{
switch (operands)
{
case VM_OC_GET_LITERAL_LITERAL:
{
uint16_t second_literal_index;
READ_LITERAL_INDEX (second_literal_index);
READ_LITERAL (second_literal_index, right_value);
break;
}
case VM_OC_GET_STACK_LITERAL:
{
JERRY_ASSERT (stack_top_p > VM_GET_REGISTERS (frame_ctx_p) + register_end);
right_value = left_value;
left_value = *(--stack_top_p);
break;
}
default:
{
JERRY_ASSERT (operands == VM_OC_GET_THIS_LITERAL);
right_value = left_value;
left_value = ecma_copy_value (frame_ctx_p->this_binding);
break;
}
}
}
}
else if (operands >= VM_OC_GET_STACK)
{
JERRY_ASSERT (operands == VM_OC_GET_STACK
|| operands == VM_OC_GET_STACK_STACK);
JERRY_ASSERT (stack_top_p > VM_GET_REGISTERS (frame_ctx_p) + register_end);
left_value = *(--stack_top_p);
if (operands == VM_OC_GET_STACK_STACK)
{
JERRY_ASSERT (stack_top_p > VM_GET_REGISTERS (frame_ctx_p) + register_end);
right_value = left_value;
left_value = *(--stack_top_p);
}
}
else if (operands == VM_OC_GET_BRANCH)
{
branch_offset_length = CBC_BRANCH_OFFSET_LENGTH (opcode);
JERRY_ASSERT (branch_offset_length >= 1 && branch_offset_length <= 3);
branch_offset = *(byte_code_p++);
if (JERRY_UNLIKELY (branch_offset_length != 1))
{
branch_offset <<= 8;
branch_offset |= *(byte_code_p++);
if (JERRY_UNLIKELY (branch_offset_length == 3))
{
branch_offset <<= 8;
branch_offset |= *(byte_code_p++);
}
}
if (opcode_data & VM_OC_BACKWARD_BRANCH)
{
#if JERRY_VM_EXEC_STOP
if (JERRY_CONTEXT (vm_exec_stop_cb) != NULL
&& --JERRY_CONTEXT (vm_exec_stop_counter) == 0)
{
result = JERRY_CONTEXT (vm_exec_stop_cb) (JERRY_CONTEXT (vm_exec_stop_user_p));
if (ecma_is_value_undefined (result))
{
JERRY_CONTEXT (vm_exec_stop_counter) = JERRY_CONTEXT (vm_exec_stop_frequency);
}
else
{
JERRY_CONTEXT (vm_exec_stop_counter) = 1;
if (ecma_is_value_error_reference (result))
{
ecma_raise_error_from_error_reference (result);
}
else
{
jcontext_raise_exception (result);
}
JERRY_ASSERT (jcontext_has_pending_exception ());
jcontext_set_abort_flag (true);
result = ECMA_VALUE_ERROR;
goto error;
}
}
#endif /* JERRY_VM_EXEC_STOP */
branch_offset = -branch_offset;
}
}
switch (VM_OC_GROUP_GET_INDEX (opcode_data))
{
case VM_OC_POP:
{
JERRY_ASSERT (stack_top_p > VM_GET_REGISTERS (frame_ctx_p) + register_end);
ecma_free_value (*(--stack_top_p));
continue;
}
case VM_OC_POP_BLOCK:
{
ecma_fast_free_value (VM_GET_REGISTER (frame_ctx_p, 0));
VM_GET_REGISTERS (frame_ctx_p)[0] = *(--stack_top_p);
continue;
}
case VM_OC_PUSH:
{
*stack_top_p++ = left_value;
continue;
}
case VM_OC_PUSH_TWO:
{
*stack_top_p++ = left_value;
*stack_top_p++ = right_value;
continue;
}
case VM_OC_PUSH_THREE:
{
uint16_t literal_index;
*stack_top_p++ = left_value;
left_value = ECMA_VALUE_UNDEFINED;
READ_LITERAL_INDEX (literal_index);
READ_LITERAL (literal_index, left_value);
*stack_top_p++ = right_value;
*stack_top_p++ = left_value;
continue;
}
case VM_OC_PUSH_UNDEFINED:
{
*stack_top_p++ = ECMA_VALUE_UNDEFINED;
continue;
}
case VM_OC_PUSH_TRUE:
{
*stack_top_p++ = ECMA_VALUE_TRUE;
continue;
}
case VM_OC_PUSH_FALSE:
{
*stack_top_p++ = ECMA_VALUE_FALSE;
continue;
}
case VM_OC_PUSH_NULL:
{
*stack_top_p++ = ECMA_VALUE_NULL;
continue;
}
case VM_OC_PUSH_THIS:
{
*stack_top_p++ = ecma_copy_value (frame_ctx_p->this_binding);
continue;
}
case VM_OC_PUSH_0:
{
*stack_top_p++ = ecma_make_integer_value (0);
continue;
}
case VM_OC_PUSH_POS_BYTE:
{
ecma_integer_value_t number = *byte_code_p++;
*stack_top_p++ = ecma_make_integer_value (number + 1);
continue;
}
case VM_OC_PUSH_NEG_BYTE:
{
ecma_integer_value_t number = *byte_code_p++;
*stack_top_p++ = ecma_make_integer_value (-(number + 1));
continue;
}
case VM_OC_PUSH_LIT_0:
{
stack_top_p[0] = left_value;
stack_top_p[1] = ecma_make_integer_value (0);
stack_top_p += 2;
continue;
}
case VM_OC_PUSH_LIT_POS_BYTE:
{
ecma_integer_value_t number = *byte_code_p++;
stack_top_p[0] = left_value;
stack_top_p[1] = ecma_make_integer_value (number + 1);
stack_top_p += 2;
continue;
}
case VM_OC_PUSH_LIT_NEG_BYTE:
{
ecma_integer_value_t number = *byte_code_p++;
stack_top_p[0] = left_value;
stack_top_p[1] = ecma_make_integer_value (-(number + 1));
stack_top_p += 2;
continue;
}
case VM_OC_PUSH_OBJECT:
{
ecma_object_t *obj_p = ecma_create_object (ecma_builtin_get (ECMA_BUILTIN_ID_OBJECT_PROTOTYPE),
0,
ECMA_OBJECT_TYPE_GENERAL);
*stack_top_p++ = ecma_make_object_value (obj_p);
continue;
}
case VM_OC_PUSH_NAMED_FUNC_EXPR:
{
ecma_object_t *func_p = ecma_get_object_from_value (left_value);
JERRY_ASSERT (ecma_get_object_type (func_p) == ECMA_OBJECT_TYPE_FUNCTION);
ecma_extended_object_t *ext_func_p = (ecma_extended_object_t *) func_p;
JERRY_ASSERT (frame_ctx_p->lex_env_p ==
ECMA_GET_NON_NULL_POINTER_FROM_POINTER_TAG (ecma_object_t, ext_func_p->u.function.scope_cp));
ecma_object_t *name_lex_env = ecma_create_decl_lex_env (frame_ctx_p->lex_env_p);
ecma_op_create_immutable_binding (name_lex_env, ecma_get_string_from_value (right_value), left_value);
ECMA_SET_NON_NULL_POINTER_TAG (ext_func_p->u.function.scope_cp, name_lex_env, 0);
ecma_free_value (right_value);
ecma_deref_object (name_lex_env);
*stack_top_p++ = left_value;
continue;
}
case VM_OC_CREATE_BINDING:
{
#if !JERRY_ESNEXT
JERRY_ASSERT (opcode == CBC_CREATE_VAR);
#endif /* !JERRY_ESNEXT */
uint32_t literal_index;
READ_LITERAL_INDEX (literal_index);
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
JERRY_ASSERT (ecma_get_lex_env_type (frame_ctx_p->lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE);
JERRY_ASSERT (ecma_find_named_property (frame_ctx_p->lex_env_p, name_p) == NULL);
uint8_t prop_attributes = ECMA_PROPERTY_FLAG_WRITABLE;
#if JERRY_ESNEXT
if (opcode == CBC_CREATE_LET)
{
prop_attributes = ECMA_PROPERTY_ENUMERABLE_WRITABLE;
}
else if (opcode == CBC_CREATE_CONST)
{
prop_attributes = ECMA_PROPERTY_FLAG_ENUMERABLE;
}
ecma_property_value_t *property_value_p;
property_value_p = ecma_create_named_data_property (frame_ctx_p->lex_env_p, name_p, prop_attributes, NULL);
if (opcode != CBC_CREATE_VAR)
{
property_value_p->value = ECMA_VALUE_UNINITIALIZED;
}
#else /* !JERRY_ESNEXT */
ecma_create_named_data_property (frame_ctx_p->lex_env_p, name_p, prop_attributes, NULL);
#endif /* JERRY_ESNEXT */
continue;
}
case VM_OC_VAR_EVAL:
{
uint32_t literal_index;
ecma_value_t lit_value = ECMA_VALUE_UNDEFINED;
if (opcode == CBC_CREATE_VAR_FUNC_EVAL)
{
uint32_t value_index;
READ_LITERAL_INDEX (value_index);
JERRY_ASSERT (value_index >= const_literal_end);
lit_value = vm_construct_literal_object (frame_ctx_p,
literal_start_p[value_index]);
}
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (literal_index >= register_end);
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_object_t *lex_env_p = frame_ctx_p->lex_env_p;
while (lex_env_p->type_flags_refs & ECMA_OBJECT_FLAG_BLOCK)
{
#if JERRY_ESNEXT && !(defined JERRY_NDEBUG)
if (ecma_get_lex_env_type (lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE)
{
ecma_property_t *property_p = ecma_find_named_property (lex_env_p, name_p);
JERRY_ASSERT (property_p == NULL || !(*property_p & ECMA_PROPERTY_FLAG_ENUMERABLE));
}
#endif /* JERRY_ESNEXT && !JERRY_NDEBUG */
JERRY_ASSERT (lex_env_p->u2.outer_reference_cp != JMEM_CP_NULL);
lex_env_p = ECMA_GET_NON_NULL_POINTER (ecma_object_t, lex_env_p->u2.outer_reference_cp);
}
#if JERRY_ESNEXT && !(defined JERRY_NDEBUG)
if (ecma_get_lex_env_type (lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE)
{
ecma_property_t *property_p = ecma_find_named_property (lex_env_p, name_p);
JERRY_ASSERT (property_p == NULL || !(*property_p & ECMA_PROPERTY_FLAG_ENUMERABLE));
}
#endif /* JERRY_ESNEXT && !JERRY_NDEBUG */
/* 'Variable declaration' */
result = ecma_op_has_binding (lex_env_p, name_p);
#if JERRY_BUILTIN_PROXY
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
#endif /* JERRY_BUILTIN_PROXY */
ecma_property_t *prop_p = NULL;
if (ecma_is_value_false (result))
{
bool is_configurable = (frame_ctx_p->status_flags & VM_FRAME_CTX_DIRECT_EVAL) != 0;
prop_p = ecma_op_create_mutable_binding (lex_env_p, name_p, is_configurable);
if (JERRY_UNLIKELY (prop_p == ECMA_PROPERTY_POINTER_ERROR))
{
result = ECMA_VALUE_ERROR;
goto error;
}
}
if (lit_value != ECMA_VALUE_UNDEFINED)
{
JERRY_ASSERT (ecma_is_value_object (lit_value));
if (prop_p != NULL)
{
JERRY_ASSERT (ecma_is_value_undefined (ECMA_PROPERTY_VALUE_PTR (prop_p)->value));
JERRY_ASSERT (ecma_is_property_writable (*prop_p));
ECMA_PROPERTY_VALUE_PTR (prop_p)->value = lit_value;
ecma_free_object (lit_value);
}
else
{
result = ecma_op_put_value_lex_env_base (lex_env_p, name_p, is_strict, lit_value);
ecma_free_object (lit_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
}
}
continue;
}
#if JERRY_ESNEXT
case VM_OC_EXT_VAR_EVAL:
{
uint32_t literal_index;
ecma_value_t lit_value = ECMA_VALUE_UNDEFINED;
JERRY_ASSERT (byte_code_start_p[0] == CBC_EXT_OPCODE);
if (opcode == CBC_EXT_CREATE_VAR_FUNC_EVAL)
{
uint32_t value_index;
READ_LITERAL_INDEX (value_index);
JERRY_ASSERT (value_index >= const_literal_end);
lit_value = vm_construct_literal_object (frame_ctx_p,
literal_start_p[value_index]);
}
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (literal_index >= register_end);
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_object_t *lex_env_p = frame_ctx_p->lex_env_p;
ecma_object_t *prev_lex_env_p = NULL;
while (lex_env_p->type_flags_refs & ECMA_OBJECT_FLAG_BLOCK)
{
#if !(defined JERRY_NDEBUG)
if (ecma_get_lex_env_type (lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE)
{
ecma_property_t *property_p = ecma_find_named_property (lex_env_p, name_p);
JERRY_ASSERT (property_p == NULL || !(*property_p & ECMA_PROPERTY_FLAG_ENUMERABLE));
}
#endif /* !JERRY_NDEBUG */
JERRY_ASSERT (lex_env_p->u2.outer_reference_cp != JMEM_CP_NULL);
prev_lex_env_p = lex_env_p;
lex_env_p = ECMA_GET_NON_NULL_POINTER (ecma_object_t, lex_env_p->u2.outer_reference_cp);
}
JERRY_ASSERT (ecma_get_lex_env_type (lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE);
JERRY_ASSERT (prev_lex_env_p != NULL
&& ecma_get_lex_env_type (prev_lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE);
ecma_property_t *property_p = ecma_find_named_property (prev_lex_env_p, name_p);
ecma_property_value_t *property_value_p;
if (property_p == NULL)
{
property_value_p = ecma_create_named_data_property (prev_lex_env_p,
name_p,
ECMA_PROPERTY_CONFIGURABLE_WRITABLE,
NULL);
if (lit_value == ECMA_VALUE_UNDEFINED)
{
continue;
}
}
else
{
if (lit_value == ECMA_VALUE_UNDEFINED)
{
continue;
}
property_value_p = ECMA_PROPERTY_VALUE_PTR (property_p);
ecma_free_value_if_not_object (property_value_p->value);
}
property_value_p->value = lit_value;
ecma_deref_object (ecma_get_object_from_value (lit_value));
continue;
}
#endif /* JERRY_ESNEXT */
case VM_OC_CREATE_ARGUMENTS:
{
uint32_t literal_index;
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (frame_ctx_p->shared_p->status_flags & VM_FRAME_CTX_SHARED_HAS_ARG_LIST);
result = ecma_op_create_arguments_object ((vm_frame_ctx_shared_args_t *) (frame_ctx_p->shared_p),
frame_ctx_p->lex_env_p);
if (literal_index < register_end)
{
JERRY_ASSERT (VM_GET_REGISTER (frame_ctx_p, literal_index) == ECMA_VALUE_UNDEFINED);
VM_GET_REGISTER (frame_ctx_p, literal_index) = result;
continue;
}
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
JERRY_ASSERT (ecma_find_named_property (frame_ctx_p->lex_env_p, name_p) == NULL);
uint8_t prop_attributes = ECMA_PROPERTY_FLAG_WRITABLE;
ecma_property_value_t *property_value_p;
property_value_p = ecma_create_named_data_property (frame_ctx_p->lex_env_p, name_p, prop_attributes, NULL);
property_value_p->value = result;
ecma_deref_object (ecma_get_object_from_value (result));
continue;
}
#if JERRY_SNAPSHOT_EXEC
case VM_OC_SET_BYTECODE_PTR:
{
memcpy (&byte_code_p, byte_code_p++, sizeof (uintptr_t));
frame_ctx_p->byte_code_start_p = byte_code_p;
continue;
}
#endif /* JERRY_SNAPSHOT_EXEC */
case VM_OC_INIT_ARG_OR_FUNC:
{
uint32_t literal_index, value_index;
ecma_value_t lit_value;
bool release = false;
READ_LITERAL_INDEX (value_index);
if (value_index < register_end)
{
/* Take (not copy) the reference. */
lit_value = ecma_copy_value_if_not_object (VM_GET_REGISTER (frame_ctx_p, value_index));
}
else
{
lit_value = vm_construct_literal_object (frame_ctx_p,
literal_start_p[value_index]);
release = true;
}
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (value_index != literal_index);
JERRY_ASSERT (value_index >= register_end || literal_index >= register_end);
if (literal_index < register_end)
{
ecma_fast_free_value (VM_GET_REGISTER (frame_ctx_p, literal_index));
JERRY_ASSERT (release);
VM_GET_REGISTER (frame_ctx_p, literal_index) = lit_value;
continue;
}
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
JERRY_ASSERT (ecma_get_lex_env_type (frame_ctx_p->lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE);
JERRY_ASSERT (ecma_find_named_property (frame_ctx_p->lex_env_p, name_p) == NULL);
ecma_property_value_t *property_value_p;
property_value_p = ecma_create_named_data_property (frame_ctx_p->lex_env_p,
name_p,
ECMA_PROPERTY_FLAG_WRITABLE,
NULL);
JERRY_ASSERT (property_value_p->value == ECMA_VALUE_UNDEFINED);
property_value_p->value = lit_value;
if (release)
{
ecma_deref_object (ecma_get_object_from_value (lit_value));
}
continue;
}
#if JERRY_ESNEXT
case VM_OC_CHECK_VAR:
{
JERRY_ASSERT (CBC_FUNCTION_GET_TYPE (frame_ctx_p->shared_p->bytecode_header_p->status_flags)
== CBC_FUNCTION_SCRIPT);
uint32_t literal_index;
READ_LITERAL_INDEX (literal_index);
if ((frame_ctx_p->lex_env_p->type_flags_refs & ECMA_OBJECT_FLAG_BLOCK) == 0)
{
continue;
}
ecma_string_t *const literal_name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_property_t *const binding_p = ecma_find_named_property (frame_ctx_p->lex_env_p, literal_name_p);
if (binding_p != NULL)
{
result = ecma_raise_syntax_error (ECMA_ERR_MSG (ecma_error_local_variable_is_redeclared));
goto error;
}
continue;
}
case VM_OC_CHECK_LET:
{
JERRY_ASSERT (CBC_FUNCTION_GET_TYPE (frame_ctx_p->shared_p->bytecode_header_p->status_flags)
== CBC_FUNCTION_SCRIPT);
uint32_t literal_index;
READ_LITERAL_INDEX (literal_index);
ecma_string_t *literal_name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_object_t *lex_env_p = frame_ctx_p->lex_env_p;
if (lex_env_p->type_flags_refs & ECMA_OBJECT_FLAG_BLOCK)
{
result = opfunc_lexical_scope_has_restricted_binding (frame_ctx_p, literal_name_p);
if (!ecma_is_value_false (result))
{
if (ecma_is_value_true (result))
{
result = ecma_raise_syntax_error (ECMA_ERR_MSG (ecma_error_local_variable_is_redeclared));
}
JERRY_ASSERT (ECMA_IS_VALUE_ERROR (result));
goto error;
}
continue;
}
result = ecma_op_has_binding (lex_env_p, literal_name_p);
#if JERRY_BUILTIN_PROXY
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
#endif /* JERRY_BUILTIN_PROXY */
if (ecma_is_value_true (result))
{
result = ecma_raise_syntax_error (ECMA_ERR_MSG (ecma_error_local_variable_is_redeclared));
goto error;
}
continue;
}
case VM_OC_ASSIGN_LET_CONST:
{
uint32_t literal_index;
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (literal_index >= register_end);
JERRY_ASSERT (ecma_get_lex_env_type (frame_ctx_p->lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE
|| (ecma_get_lex_env_type (frame_ctx_p->lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_CLASS
&& (frame_ctx_p->lex_env_p->type_flags_refs & ECMA_OBJECT_FLAG_LEXICAL_ENV_HAS_DATA)));
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_property_t *property_p = ecma_find_named_property (frame_ctx_p->lex_env_p, name_p);
JERRY_ASSERT (property_p != NULL
&& ECMA_PROPERTY_IS_RAW_DATA (*property_p)
&& (*property_p & ECMA_PROPERTY_FLAG_DATA));
JERRY_ASSERT (ECMA_PROPERTY_VALUE_PTR (property_p)->value == ECMA_VALUE_UNINITIALIZED);
ECMA_PROPERTY_VALUE_PTR (property_p)->value = left_value;
if (ecma_is_value_object (left_value))
{
ecma_deref_object (ecma_get_object_from_value (left_value));
}
continue;
}
case VM_OC_INIT_BINDING:
{
uint32_t literal_index;
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (literal_index >= register_end);
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
JERRY_ASSERT (ecma_get_lex_env_type (frame_ctx_p->lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE);
JERRY_ASSERT (ecma_find_named_property (frame_ctx_p->lex_env_p, name_p) == NULL);
uint8_t prop_attributes = ECMA_PROPERTY_FLAG_WRITABLE;
if (opcode == CBC_INIT_LET)
{
prop_attributes = ECMA_PROPERTY_ENUMERABLE_WRITABLE;
}
else if (opcode == CBC_INIT_CONST)
{
prop_attributes = ECMA_PROPERTY_FLAG_ENUMERABLE;
}
ecma_property_value_t *property_value_p;
property_value_p = ecma_create_named_data_property (frame_ctx_p->lex_env_p,
name_p,
prop_attributes,
NULL);
JERRY_ASSERT (property_value_p->value == ECMA_VALUE_UNDEFINED);
ecma_value_t value = *(--stack_top_p);
property_value_p->value = value;
ecma_deref_if_object (value);
continue;
}
case VM_OC_THROW_CONST_ERROR:
{
result = ecma_raise_type_error (ECMA_ERR_MSG ("Constant bindings cannot be reassigned"));
goto error;
}
case VM_OC_COPY_TO_GLOBAL:
{
uint32_t literal_index;
READ_LITERAL_INDEX (literal_index);
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_object_t *lex_env_p = frame_ctx_p->lex_env_p;
while (lex_env_p->type_flags_refs & ECMA_OBJECT_FLAG_BLOCK)
{
#ifndef JERRY_NDEBUG
if (ecma_get_lex_env_type (lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE)
{
ecma_property_t *property_p = ecma_find_named_property (lex_env_p, name_p);
JERRY_ASSERT (property_p == NULL || !(*property_p & ECMA_PROPERTY_FLAG_ENUMERABLE));
}
#endif /* !JERRY_NDEBUG */
JERRY_ASSERT (lex_env_p->u2.outer_reference_cp != JMEM_CP_NULL);
lex_env_p = ECMA_GET_NON_NULL_POINTER (ecma_object_t, lex_env_p->u2.outer_reference_cp);
}
if (ecma_get_lex_env_type (lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE)
{
ecma_property_t *property_p = ecma_find_named_property (lex_env_p, name_p);
ecma_property_value_t *prop_value_p;
if (property_p == NULL)
{
prop_value_p = ecma_create_named_data_property (lex_env_p,
name_p,
ECMA_PROPERTY_FLAG_WRITABLE,
NULL);
}
else
{
#ifndef JERRY_NDEBUG
JERRY_ASSERT (!(*property_p & ECMA_PROPERTY_FLAG_ENUMERABLE));
#endif /* !JERRY_NDEBUG */
prop_value_p = ECMA_PROPERTY_VALUE_PTR (property_p);
}
ecma_named_data_property_assign_value (lex_env_p, prop_value_p, left_value);
}
else
{
result = ecma_op_set_mutable_binding (lex_env_p, name_p, left_value, is_strict);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
}
goto free_left_value;
}
case VM_OC_COPY_FROM_ARG:
{
uint32_t literal_index;
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (literal_index >= register_end);
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_object_t *lex_env_p = frame_ctx_p->lex_env_p;
ecma_object_t *arg_lex_env_p = ECMA_GET_NON_NULL_POINTER (ecma_object_t, lex_env_p->u2.outer_reference_cp);
JERRY_ASSERT ((lex_env_p->type_flags_refs & ECMA_OBJECT_FLAG_BLOCK)
&& ecma_get_lex_env_type (lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE);
JERRY_ASSERT (arg_lex_env_p != NULL
&& !(arg_lex_env_p->type_flags_refs & ECMA_OBJECT_FLAG_BLOCK)
&& ecma_get_lex_env_type (arg_lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE);
ecma_property_value_t *property_value_p;
property_value_p = ecma_create_named_data_property (lex_env_p,
name_p,
ECMA_PROPERTY_FLAG_WRITABLE,
NULL);
ecma_property_t *property_p = ecma_find_named_property (arg_lex_env_p, name_p);
JERRY_ASSERT (property_p != NULL);
ecma_property_value_t *arg_prop_value_p = ECMA_PROPERTY_VALUE_PTR (property_p);
property_value_p->value = ecma_copy_value_if_not_object (arg_prop_value_p->value);
continue;
}
case VM_OC_CLONE_CONTEXT:
{
JERRY_ASSERT (byte_code_start_p[0] == CBC_EXT_OPCODE);
bool copy_values = (byte_code_start_p[1] == CBC_EXT_CLONE_FULL_CONTEXT);
frame_ctx_p->lex_env_p = ecma_clone_decl_lexical_environment (frame_ctx_p->lex_env_p, copy_values);
continue;
}
case VM_OC_SET__PROTO__:
{
result = ecma_builtin_object_object_set_proto (stack_top_p[-1], left_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
goto free_left_value;
}
case VM_OC_PUSH_STATIC_FIELD_FUNC:
{
JERRY_ASSERT (byte_code_start_p[0] == CBC_EXT_OPCODE
&& (byte_code_start_p[1] == CBC_EXT_PUSH_STATIC_FIELD_FUNC
|| byte_code_start_p[1] == CBC_EXT_PUSH_STATIC_COMPUTED_FIELD_FUNC));
bool push_computed = (byte_code_start_p[1] == CBC_EXT_PUSH_STATIC_COMPUTED_FIELD_FUNC);
ecma_value_t value = stack_top_p[-1];
if (!push_computed)
{
stack_top_p++;
}
memmove (stack_top_p - 3, stack_top_p - 4, 3 * sizeof (ecma_value_t));
stack_top_p[-4] = left_value;
if (!push_computed)
{
continue;
}
left_value = value;
/* FALLTHRU */
}
case VM_OC_ADD_COMPUTED_FIELD:
{
JERRY_ASSERT (byte_code_start_p[0] == CBC_EXT_OPCODE
&& (byte_code_start_p[1] == CBC_EXT_PUSH_STATIC_COMPUTED_FIELD_FUNC
|| byte_code_start_p[1] == CBC_EXT_ADD_COMPUTED_FIELD
|| byte_code_start_p[1] == CBC_EXT_ADD_STATIC_COMPUTED_FIELD));
int index = (byte_code_start_p[1] == CBC_EXT_ADD_COMPUTED_FIELD) ? -2 : -4;
result = opfunc_add_computed_field (stack_top_p[index], left_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
goto free_left_value;
}
case VM_OC_COPY_DATA_PROPERTIES:
{
left_value = *(--stack_top_p);
if (ecma_is_value_undefined (left_value) || ecma_is_value_null (left_value))
{
continue;
}
result = opfunc_copy_data_properties (stack_top_p[-1], left_value, ECMA_VALUE_UNDEFINED);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
goto free_left_value;
}
case VM_OC_SET_COMPUTED_PROPERTY:
{
/* Swap values. */
left_value ^= right_value;
right_value ^= left_value;
left_value ^= right_value;
/* FALLTHRU */
}
#endif /* JERRY_ESNEXT */
case VM_OC_SET_PROPERTY:
{
JERRY_STATIC_ASSERT (VM_OC_NON_STATIC_FLAG == VM_OC_BACKWARD_BRANCH,
vm_oc_non_static_flag_must_be_equal_to_vm_oc_backward_branch);
JERRY_ASSERT ((opcode_data >> VM_OC_NON_STATIC_SHIFT) <= 0x1);
ecma_string_t *prop_name_p = ecma_op_to_property_key (right_value);
if (JERRY_UNLIKELY (prop_name_p == NULL))
{
result = ECMA_VALUE_ERROR;
goto error;
}
#if JERRY_ESNEXT
if (JERRY_UNLIKELY (ecma_compare_ecma_string_to_magic_id (prop_name_p, LIT_MAGIC_STRING_PROTOTYPE))
&& !(opcode_data & VM_OC_NON_STATIC_FLAG))
{
result = ecma_raise_type_error (ECMA_ERR_MSG (ecma_error_class_is_non_configurable));
goto error;
}
const int index = (int) (opcode_data >> VM_OC_NON_STATIC_SHIFT) - 2;
#else /* !JERRY_ESNEXT */
const int index = -1;
#endif /* JERRY_ESNEXT */
ecma_object_t *object_p = ecma_get_object_from_value (stack_top_p[index]);
opfunc_set_data_property (object_p, prop_name_p, left_value);
ecma_deref_ecma_string (prop_name_p);
goto free_both_values;
}
case VM_OC_SET_GETTER:
case VM_OC_SET_SETTER:
{
JERRY_ASSERT ((opcode_data >> VM_OC_NON_STATIC_SHIFT) <= 0x1);
ecma_string_t *prop_name_p = ecma_op_to_property_key (left_value);
if (JERRY_UNLIKELY (prop_name_p == NULL))
{
result = ECMA_VALUE_ERROR;
goto error;
}
#if JERRY_ESNEXT
if (JERRY_UNLIKELY (ecma_compare_ecma_string_to_magic_id (prop_name_p, LIT_MAGIC_STRING_PROTOTYPE))
&& !(opcode_data & VM_OC_NON_STATIC_FLAG))
{
result = ecma_raise_type_error (ECMA_ERR_MSG (ecma_error_class_is_non_configurable));
goto error;
}
const int index = (int) (opcode_data >> VM_OC_NON_STATIC_SHIFT) - 2;
#else /* !JERRY_ESNEXT */
const int index = -1;
#endif /* JERRY_ESNEXT */
opfunc_set_accessor (VM_OC_GROUP_GET_INDEX (opcode_data) == VM_OC_SET_GETTER,
stack_top_p[index],
prop_name_p,
right_value);
ecma_deref_ecma_string (prop_name_p);
goto free_both_values;
}
case VM_OC_PUSH_ARRAY:
{
/* Note: this operation cannot throw an exception */
*stack_top_p++ = ecma_make_object_value (ecma_op_new_array_object (0));
continue;
}
#if JERRY_ESNEXT
case VM_OC_LOCAL_EVAL:
{
ECMA_CLEAR_LOCAL_PARSE_OPTS ();
uint8_t parse_opts = *byte_code_p++;
ECMA_SET_LOCAL_PARSE_OPTS (parse_opts);
continue;
}
case VM_OC_SUPER_CALL:
{
uint8_t arguments_list_len = *byte_code_p++;
if (opcode >= CBC_EXT_SPREAD_SUPER_CALL)
{
stack_top_p -= arguments_list_len;
ecma_collection_t *arguments_p = opfunc_spread_arguments (stack_top_p, arguments_list_len);
if (JERRY_UNLIKELY (arguments_p == NULL))
{
result = ECMA_VALUE_ERROR;
goto error;
}
stack_top_p++;
ECMA_SET_INTERNAL_VALUE_POINTER (stack_top_p[-1], arguments_p);
}
else
{
stack_top_p -= arguments_list_len;
}
frame_ctx_p->call_operation = VM_EXEC_SUPER_CALL;
frame_ctx_p->byte_code_p = byte_code_start_p;
frame_ctx_p->stack_top_p = stack_top_p;
return ECMA_VALUE_UNDEFINED;
}
case VM_OC_PUSH_CLASS_ENVIRONMENT:
{
uint16_t literal_index;
READ_LITERAL_INDEX (literal_index);
opfunc_push_class_environment (frame_ctx_p, &stack_top_p, literal_start_p[literal_index]);
continue;
}
case VM_OC_PUSH_IMPLICIT_CTOR:
{
*stack_top_p++ = opfunc_create_implicit_class_constructor (opcode,
frame_ctx_p->shared_p->bytecode_header_p);
continue;
}
case VM_OC_INIT_CLASS:
{
result = opfunc_init_class (frame_ctx_p, stack_top_p);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
continue;
}
case VM_OC_FINALIZE_CLASS:
{
JERRY_ASSERT (opcode == CBC_EXT_FINALIZE_NAMED_CLASS || opcode == CBC_EXT_FINALIZE_ANONYMOUS_CLASS);
if (opcode == CBC_EXT_FINALIZE_NAMED_CLASS)
{
uint16_t literal_index;
READ_LITERAL_INDEX (literal_index);
left_value = literal_start_p[literal_index];
}
opfunc_finalize_class (frame_ctx_p, &stack_top_p, left_value);
continue;
}
case VM_OC_SET_FIELD_INIT:
{
ecma_string_t *property_name_p = ecma_get_magic_string (LIT_INTERNAL_MAGIC_STRING_CLASS_FIELD_INIT);
ecma_object_t *object_p = ecma_get_object_from_value (stack_top_p[-2]);
ecma_property_value_t *property_value_p = ecma_create_named_data_property (object_p,
property_name_p,
ECMA_PROPERTY_FIXED,
NULL);
property_value_p->value = left_value;
property_name_p = ecma_get_internal_string (LIT_INTERNAL_MAGIC_STRING_CLASS_FIELD_COMPUTED);
ecma_property_t *property_p = ecma_find_named_property (object_p, property_name_p);
if (property_p != NULL)
{
property_value_p = ECMA_PROPERTY_VALUE_PTR (property_p);
ecma_value_t *compact_collection_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_value_t,
property_value_p->value);
compact_collection_p = ecma_compact_collection_shrink (compact_collection_p);
ECMA_SET_INTERNAL_VALUE_POINTER (property_value_p->value, compact_collection_p);
}
goto free_left_value;
}
case VM_OC_RUN_FIELD_INIT:
{
JERRY_ASSERT (frame_ctx_p->shared_p->status_flags & VM_FRAME_CTX_SHARED_NON_ARROW_FUNC);
result = opfunc_init_class_fields (frame_ctx_p->shared_p->function_object_p, frame_ctx_p->this_binding);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
continue;
}
case VM_OC_RUN_STATIC_FIELD_INIT:
{
left_value = stack_top_p[-2];
stack_top_p[-2] = stack_top_p[-1];
stack_top_p--;
result = opfunc_init_static_class_fields (left_value, stack_top_p[-1]);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
goto free_left_value;
}
case VM_OC_SET_NEXT_COMPUTED_FIELD:
{
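/* Advance the stored computed field index, then place the 'this'
 * binding and the next computed field name on the stack; the previous
 * stack top becomes the result of this opcode. */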
ecma_integer_value_t next_index = ecma_get_integer_from_value (stack_top_p[-2]) + 1;
stack_top_p[-2] = ecma_make_integer_value (next_index);
stack_top_p++;
JERRY_ASSERT (frame_ctx_p->shared_p->status_flags & VM_FRAME_CTX_SHARED_HAS_CLASS_FIELDS);
ecma_value_t *computed_class_fields_p = VM_GET_COMPUTED_CLASS_FIELDS (frame_ctx_p);
JERRY_ASSERT ((ecma_value_t) next_index < ECMA_COMPACT_COLLECTION_GET_SIZE (computed_class_fields_p));
result = stack_top_p[-2];
stack_top_p[-1] = ecma_copy_value (computed_class_fields_p[next_index]);
stack_top_p[-2] = ecma_copy_value (frame_ctx_p->this_binding);
break;
}
case VM_OC_PUSH_SUPER_CONSTRUCTOR:
{
result = ecma_op_function_get_super_constructor (vm_get_class_function (frame_ctx_p));
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
continue;
}
case VM_OC_RESOLVE_LEXICAL_THIS:
{
result = ecma_op_get_this_binding (frame_ctx_p->lex_env_p);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
continue;
}
case VM_OC_OBJECT_LITERAL_HOME_ENV:
{
if (opcode == CBC_EXT_PUSH_OBJECT_SUPER_ENVIRONMENT)
{
ecma_value_t obj_value = stack_top_p[-1];
ecma_object_t *obj_env_p = ecma_create_lex_env_class (frame_ctx_p->lex_env_p, 0);
ECMA_SET_NON_NULL_POINTER (obj_env_p->u1.bound_object_cp, ecma_get_object_from_value (obj_value));
stack_top_p[-1] = ecma_make_object_value (obj_env_p);
*stack_top_p++ = obj_value;
}
else
{
JERRY_ASSERT (opcode == CBC_EXT_POP_OBJECT_SUPER_ENVIRONMENT);
ecma_deref_object (ecma_get_object_from_value (stack_top_p[-2]));
stack_top_p[-2] = stack_top_p[-1];
stack_top_p--;
}
continue;
}
case VM_OC_SET_HOME_OBJECT:
{
int offset = opcode == CBC_EXT_OBJECT_LITERAL_SET_HOME_OBJECT_COMPUTED ? -1 : 0;
opfunc_set_home_object (ecma_get_object_from_value (stack_top_p[-1]),
ecma_get_object_from_value (stack_top_p[-3 + offset]));
continue;
}
case VM_OC_SUPER_REFERENCE:
{
result = opfunc_form_super_reference (&stack_top_p, frame_ctx_p, left_value, opcode);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
goto free_left_value;
}
case VM_OC_SET_FUNCTION_NAME:
{
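/* Set the inferred 'name' property of a function or class: computed
 * getter/setter names receive a "get "/"set " prefix, and nothing is
 * done when an own 'name' property already exists. */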
char *prefix_p = NULL;
lit_utf8_size_t prefix_size = 0;
if (opcode != CBC_EXT_SET_FUNCTION_NAME)
{
ecma_value_t prop_name_value;
if (opcode == CBC_EXT_SET_CLASS_NAME)
{
uint16_t literal_index;
READ_LITERAL_INDEX (literal_index);
prop_name_value = literal_start_p[literal_index];
}
else
{
prop_name_value = stack_top_p[-2];
}
ecma_string_t *prop_name_p = ecma_op_to_property_key (prop_name_value);
if (JERRY_UNLIKELY (prop_name_p == NULL))
{
result = ECMA_VALUE_ERROR;
goto error;
}
left_value = ecma_make_prop_name_value (prop_name_p);
if (opcode != CBC_EXT_SET_CLASS_NAME)
{
ecma_ref_ecma_string (prop_name_p);
ecma_free_value (stack_top_p[-2]);
stack_top_p[-2] = left_value;
}
if (opcode == CBC_EXT_SET_COMPUTED_GETTER_NAME || opcode == CBC_EXT_SET_COMPUTED_SETTER_NAME)
{
prefix_p = (opcode == CBC_EXT_SET_COMPUTED_GETTER_NAME) ? "get " : "set ";
prefix_size = 4;
}
}
ecma_object_t *func_obj_p = ecma_get_object_from_value (stack_top_p[-1]);
if (ecma_find_named_property (func_obj_p, ecma_get_magic_string (LIT_MAGIC_STRING_NAME)) != NULL)
{
ecma_free_value (left_value);
continue;
}
ecma_property_value_t *value_p;
value_p = ecma_create_named_data_property (func_obj_p,
ecma_get_magic_string (LIT_MAGIC_STRING_NAME),
ECMA_PROPERTY_FLAG_CONFIGURABLE,
NULL);
if (ecma_get_object_type (func_obj_p) == ECMA_OBJECT_TYPE_FUNCTION)
{
ECMA_SET_SECOND_BIT_TO_POINTER_TAG (((ecma_extended_object_t *) func_obj_p)->u.function.scope_cp);
}
value_p->value = ecma_op_function_form_name (ecma_get_prop_name_from_value (left_value),
prefix_p,
prefix_size);
ecma_free_value (left_value);
continue;
}
case VM_OC_PUSH_SPREAD_ELEMENT:
{
*stack_top_p++ = ECMA_VALUE_SPREAD_ELEMENT;
continue;
}
case VM_OC_PUSH_REST_OBJECT:
{
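/* Build the rest parameter: the call arguments past the declared
 * formal parameters are collected into a new array (empty when
 * fewer arguments were passed than declared). */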
vm_frame_ctx_shared_t *shared_p = frame_ctx_p->shared_p;
JERRY_ASSERT (shared_p->status_flags & VM_FRAME_CTX_SHARED_HAS_ARG_LIST);
const ecma_value_t *arg_list_p = ((vm_frame_ctx_shared_args_t *) shared_p)->arg_list_p;
uint32_t arg_list_len = ((vm_frame_ctx_shared_args_t *) shared_p)->arg_list_len;
uint16_t argument_end;
if (bytecode_header_p->status_flags & CBC_CODE_FLAGS_UINT16_ARGUMENTS)
{
argument_end = ((cbc_uint16_arguments_t *) bytecode_header_p)->argument_end;
}
else
{
argument_end = ((cbc_uint8_arguments_t *) bytecode_header_p)->argument_end;
}
if (arg_list_len < argument_end)
{
arg_list_len = argument_end;
}
result = ecma_op_new_array_object_from_buffer (arg_list_p + argument_end,
arg_list_len - argument_end);
JERRY_ASSERT (!ECMA_IS_VALUE_ERROR (result));
*stack_top_p++ = result;
continue;
}
case VM_OC_ITERATOR_CONTEXT_CREATE:
{
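/* Open an iterator context: acquire a sync iterator for the stack
 * top, reserve the context slots, and record the iterator and its
 * next method together with the close-on-abort flag. */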
result = ecma_op_get_iterator (stack_top_p[-1], ECMA_VALUE_SYNC_ITERATOR, &left_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
uint32_t context_size = (uint32_t) (stack_top_p
+ PARSER_ITERATOR_CONTEXT_STACK_ALLOCATION
- VM_LAST_CONTEXT_END ());
stack_top_p += PARSER_ITERATOR_CONTEXT_STACK_ALLOCATION;
VM_PLUS_EQUAL_U16 (frame_ctx_p->context_depth, context_size);
stack_top_p[-1] = VM_CREATE_CONTEXT (VM_CONTEXT_ITERATOR, context_size) | VM_CONTEXT_CLOSE_ITERATOR;
stack_top_p[-2] = result;
stack_top_p[-3] = left_value;
continue;
}
case VM_OC_ITERATOR_STEP:
{
ecma_value_t *last_context_end_p = VM_LAST_CONTEXT_END ();
ecma_value_t iterator = last_context_end_p[-2];
ecma_value_t next_method = last_context_end_p[-3];
result = ecma_op_iterator_step (iterator, next_method);
if (ECMA_IS_VALUE_ERROR (result))
{
last_context_end_p[-1] &= (uint32_t) ~VM_CONTEXT_CLOSE_ITERATOR;
goto error;
}
ecma_value_t value = ECMA_VALUE_UNDEFINED;
if (!ecma_is_value_false (result))
{
value = ecma_op_iterator_value (result);
ecma_free_value (result);
if (ECMA_IS_VALUE_ERROR (value))
{
last_context_end_p[-1] &= (uint32_t) ~VM_CONTEXT_CLOSE_ITERATOR;
result = value;
goto error;
}
}
else
{
last_context_end_p[-1] &= (uint32_t) ~VM_CONTEXT_CLOSE_ITERATOR;
}
*stack_top_p++ = value;
continue;
}
case VM_OC_ITERATOR_CONTEXT_END:
{
JERRY_ASSERT (VM_LAST_CONTEXT_END () == stack_top_p);
if (stack_top_p[-1] & VM_CONTEXT_CLOSE_ITERATOR)
{
stack_top_p[-1] &= (uint32_t) ~VM_CONTEXT_CLOSE_ITERATOR;
result = ecma_op_iterator_close (stack_top_p[-2]);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
}
stack_top_p = vm_stack_context_abort_variable_length (frame_ctx_p,
stack_top_p,
PARSER_ITERATOR_CONTEXT_STACK_ALLOCATION);
continue;
}
case VM_OC_DEFAULT_INITIALIZER:
{
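/* If the destructured value is not undefined, branch over the
 * default value expression; otherwise drop the undefined value and
 * evaluate the initializer. */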
JERRY_ASSERT (stack_top_p > VM_GET_REGISTERS (frame_ctx_p) + register_end);
if (stack_top_p[-1] != ECMA_VALUE_UNDEFINED)
{
byte_code_p = byte_code_start_p + branch_offset;
continue;
}
stack_top_p--;
continue;
}
case VM_OC_REST_INITIALIZER:
{
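/* Array destructuring rest element: drain the remaining items of
 * the current iterator into a new fast access mode array. */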
ecma_object_t *array_p = ecma_op_new_array_object (0);
JERRY_ASSERT (ecma_op_object_is_fast_array (array_p));
ecma_value_t *last_context_end_p = VM_LAST_CONTEXT_END ();
ecma_value_t iterator = last_context_end_p[-2];
ecma_value_t next_method = last_context_end_p[-3];
uint32_t index = 0;
while (true)
{
result = ecma_op_iterator_step (iterator, next_method);
if (ECMA_IS_VALUE_ERROR (result))
{
last_context_end_p[-1] &= (uint32_t) ~VM_CONTEXT_CLOSE_ITERATOR;
ecma_deref_object (array_p);
goto error;
}
if (ecma_is_value_false (result))
{
last_context_end_p[-1] &= (uint32_t) ~VM_CONTEXT_CLOSE_ITERATOR;
break;
}
ecma_value_t value = ecma_op_iterator_value (result);
ecma_free_value (result);
if (ECMA_IS_VALUE_ERROR (value))
{
ecma_deref_object (array_p);
result = value;
goto error;
}
bool set_result = ecma_fast_array_set_property (array_p, index++, value);
JERRY_ASSERT (set_result);
ecma_free_value (value);
}
*stack_top_p++ = ecma_make_object_value (array_p);
continue;
}
case VM_OC_OBJ_INIT_CONTEXT_CREATE:
{
left_value = stack_top_p[-1];
vm_stack_context_type_t context_type = VM_CONTEXT_OBJ_INIT;
uint32_t context_stack_allocation = PARSER_OBJ_INIT_CONTEXT_STACK_ALLOCATION;
if (opcode == CBC_EXT_OBJ_INIT_REST_CONTEXT_CREATE)
{
context_type = VM_CONTEXT_OBJ_INIT_REST;
context_stack_allocation = PARSER_OBJ_INIT_REST_CONTEXT_STACK_ALLOCATION;
}
uint32_t context_size = (uint32_t) (stack_top_p + context_stack_allocation - VM_LAST_CONTEXT_END ());
stack_top_p += context_stack_allocation;
VM_PLUS_EQUAL_U16 (frame_ctx_p->context_depth, context_size);
stack_top_p[-1] = VM_CREATE_CONTEXT (context_type, context_size);
stack_top_p[-2] = left_value;
if (context_type == VM_CONTEXT_OBJ_INIT_REST)
{
stack_top_p[-3] = ecma_make_object_value (ecma_op_new_array_object (0));
}
continue;
}
case VM_OC_OBJ_INIT_CONTEXT_END:
{
JERRY_ASSERT (stack_top_p == VM_LAST_CONTEXT_END ());
uint32_t context_stack_allocation = PARSER_OBJ_INIT_CONTEXT_STACK_ALLOCATION;
if (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_OBJ_INIT_REST)
{
context_stack_allocation = PARSER_OBJ_INIT_REST_CONTEXT_STACK_ALLOCATION;
}
stack_top_p = vm_stack_context_abort_variable_length (frame_ctx_p,
stack_top_p,
context_stack_allocation);
continue;
}
case VM_OC_OBJ_INIT_PUSH_REST:
{
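/* Object rest element: copy the remaining own properties of the
 * source object (skipping the names collected in the array below the
 * context) into a fresh ordinary object, then release the filter. */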
ecma_value_t *last_context_end_p = VM_LAST_CONTEXT_END ();
if (!ecma_op_require_object_coercible (last_context_end_p[-2]))
{
result = ECMA_VALUE_ERROR;
goto error;
}
ecma_object_t *prototype_p = ecma_builtin_get (ECMA_BUILTIN_ID_OBJECT_PROTOTYPE);
ecma_object_t *result_object_p = ecma_create_object (prototype_p, 0, ECMA_OBJECT_TYPE_GENERAL);
left_value = ecma_make_object_value (result_object_p);
result = opfunc_copy_data_properties (left_value, last_context_end_p[-2], last_context_end_p[-3]);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
ecma_free_value (last_context_end_p[-3]);
last_context_end_p[-3] = last_context_end_p[-2];
last_context_end_p[-2] = ECMA_VALUE_UNDEFINED;
*stack_top_p++ = left_value;
continue;
}
case VM_OC_INITIALIZER_PUSH_NAME:
{
if (JERRY_UNLIKELY (!ecma_is_value_prop_name (left_value)))
{
ecma_string_t *property_key = ecma_op_to_property_key (left_value);
if (property_key == NULL)
{
result = ECMA_VALUE_ERROR;
goto error;
}
ecma_free_value (left_value);
left_value = ecma_make_string_value (property_key);
}
ecma_value_t *last_context_end_p = VM_LAST_CONTEXT_END ();
ecma_object_t *array_obj_p = ecma_get_object_from_value (last_context_end_p[-3]);
JERRY_ASSERT (ecma_get_object_type (array_obj_p) == ECMA_OBJECT_TYPE_ARRAY);
ecma_extended_object_t *ext_array_obj_p = (ecma_extended_object_t *) array_obj_p;
ecma_fast_array_set_property (array_obj_p, ext_array_obj_p->u.array.length, left_value);
/* FALLTHRU */
}
case VM_OC_INITIALIZER_PUSH_PROP:
{
ecma_value_t *last_context_end_p = VM_LAST_CONTEXT_END ();
ecma_value_t base = last_context_end_p[-2];
if (opcode == CBC_EXT_INITIALIZER_PUSH_PROP)
{
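/* The property name was pushed just above the context area, below
 * the values pushed afterwards: take it out and close the gap by
 * shifting those values down one slot. */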
left_value = *last_context_end_p++;
while (last_context_end_p < stack_top_p)
{
last_context_end_p[-1] = *last_context_end_p;
last_context_end_p++;
}
stack_top_p--;
}
result = vm_op_get_value (base, left_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_left_value;
}
case VM_OC_SPREAD_ARGUMENTS:
{
uint8_t arguments_list_len = *byte_code_p++;
stack_top_p -= arguments_list_len;
ecma_collection_t *arguments_p = opfunc_spread_arguments (stack_top_p, arguments_list_len);
if (JERRY_UNLIKELY (arguments_p == NULL))
{
result = ECMA_VALUE_ERROR;
goto error;
}
stack_top_p++;
ECMA_SET_INTERNAL_VALUE_POINTER (stack_top_p[-1], arguments_p);
frame_ctx_p->call_operation = VM_EXEC_SPREAD_OP;
frame_ctx_p->byte_code_p = byte_code_start_p;
frame_ctx_p->stack_top_p = stack_top_p;
return ECMA_VALUE_UNDEFINED;
}
case VM_OC_CREATE_GENERATOR:
{
frame_ctx_p->call_operation = VM_EXEC_RETURN;
frame_ctx_p->byte_code_p = byte_code_p;
frame_ctx_p->stack_top_p = stack_top_p;
vm_executable_object_t *executable_object_p;
executable_object_p = opfunc_create_executable_object (frame_ctx_p, VM_CREATE_EXECUTABLE_OBJECT_GENERATOR);
return ecma_make_object_value ((ecma_object_t *) executable_object_p);
}
case VM_OC_YIELD:
{
frame_ctx_p->call_operation = VM_EXEC_RETURN;
frame_ctx_p->byte_code_p = byte_code_p;
frame_ctx_p->stack_top_p = --stack_top_p;
return *stack_top_p;
}
case VM_OC_ASYNC_YIELD:
{
ecma_extended_object_t *async_generator_object_p = VM_GET_EXECUTABLE_OBJECT (frame_ctx_p);
opfunc_async_generator_yield (async_generator_object_p, stack_top_p[-1]);
frame_ctx_p->call_operation = VM_EXEC_RETURN;
frame_ctx_p->byte_code_p = byte_code_p;
frame_ctx_p->stack_top_p = --stack_top_p;
return ECMA_VALUE_UNDEFINED;
}
case VM_OC_ASYNC_YIELD_ITERATOR:
{
ecma_extended_object_t *async_generator_object_p = VM_GET_EXECUTABLE_OBJECT (frame_ctx_p);
JERRY_ASSERT (!(async_generator_object_p->u.cls.u2.executable_obj_flags
& ECMA_EXECUTABLE_OBJECT_DO_AWAIT_OR_YIELD));
/* The byte code is executed for the first time. */
left_value = stack_top_p[-1];
result = ecma_op_get_iterator (left_value, ECMA_VALUE_ASYNC_ITERATOR, stack_top_p - 1);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
ecma_free_value (left_value);
left_value = result;
result = ecma_op_iterator_next (left_value, stack_top_p[-1], ECMA_VALUE_UNDEFINED);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
result = ecma_promise_async_await (async_generator_object_p, result);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
async_generator_object_p->u.cls.u2.executable_obj_flags |= ECMA_EXECUTABLE_OBJECT_DO_AWAIT_OR_YIELD;
*VM_GET_EXECUTABLE_ITERATOR (frame_ctx_p) = left_value;
frame_ctx_p->call_operation = VM_EXEC_RETURN;
frame_ctx_p->byte_code_p = byte_code_p;
frame_ctx_p->stack_top_p = stack_top_p;
return ECMA_VALUE_UNDEFINED;
}
case VM_OC_AWAIT:
{
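/* The first await of a plain async function suspends it by wrapping
 * the frame into an executable object and returning its promise to
 * the caller; afterwards the generator await path below is used. */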
if (JERRY_UNLIKELY (!(frame_ctx_p->shared_p->status_flags & VM_FRAME_CTX_SHARED_EXECUTABLE)))
{
frame_ctx_p->call_operation = VM_EXEC_RETURN;
frame_ctx_p->byte_code_p = byte_code_p;
frame_ctx_p->stack_top_p = --stack_top_p;
result = opfunc_async_create_and_await (frame_ctx_p, *stack_top_p, 0);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
return result;
}
/* FALLTHRU */
}
case VM_OC_GENERATOR_AWAIT:
{
ecma_extended_object_t *async_generator_object_p = VM_GET_EXECUTABLE_OBJECT (frame_ctx_p);
result = ecma_promise_async_await (async_generator_object_p, *(--stack_top_p));
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
frame_ctx_p->call_operation = VM_EXEC_RETURN;
frame_ctx_p->byte_code_p = byte_code_p;
frame_ctx_p->stack_top_p = stack_top_p;
return ECMA_VALUE_UNDEFINED;
}
case VM_OC_EXT_RETURN:
{
result = left_value;
left_value = ECMA_VALUE_UNDEFINED;
ecma_value_t *stack_bottom_p = VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth;
while (stack_top_p > stack_bottom_p)
{
ecma_fast_free_value (*(--stack_top_p));
}
goto error;
}
case VM_OC_ASYNC_EXIT:
{
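/* Complete an async function: fetch (or create) its result promise,
 * then reject it with the pending exception (finally-throw context)
 * or fulfill it with the recorded return value. */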
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
if (!(frame_ctx_p->shared_p->status_flags & VM_FRAME_CTX_SHARED_EXECUTABLE))
{
result = ecma_op_create_promise_object (ECMA_VALUE_EMPTY, ECMA_VALUE_UNDEFINED, NULL);
}
else
{
result = *VM_GET_EXECUTABLE_ITERATOR (frame_ctx_p);
*VM_GET_EXECUTABLE_ITERATOR (frame_ctx_p) = ECMA_VALUE_UNDEFINED;
}
vm_stack_context_type_t context_type = VM_GET_CONTEXT_TYPE (stack_top_p[-1]);
if (context_type == VM_CONTEXT_TRY)
{
JERRY_ASSERT (frame_ctx_p->context_depth == PARSER_TRY_CONTEXT_STACK_ALLOCATION);
left_value = ECMA_VALUE_UNDEFINED;
}
else
{
JERRY_ASSERT (frame_ctx_p->context_depth == PARSER_FINALLY_CONTEXT_STACK_ALLOCATION);
left_value = stack_top_p[-2];
}
if (context_type == VM_CONTEXT_FINALLY_THROW)
{
ecma_reject_promise (result, left_value);
}
else
{
JERRY_ASSERT (context_type == VM_CONTEXT_TRY || context_type == VM_CONTEXT_FINALLY_RETURN);
ecma_fulfill_promise (result, left_value);
}
ecma_free_value (left_value);
frame_ctx_p->context_depth = 0;
frame_ctx_p->call_operation = VM_NO_EXEC_OP;
return result;
}
case VM_OC_STRING_CONCAT:
{
ecma_string_t *left_str_p = ecma_op_to_string (left_value);
if (JERRY_UNLIKELY (left_str_p == NULL))
{
result = ECMA_VALUE_ERROR;
goto error;
}
ecma_string_t *right_str_p = ecma_op_to_string (right_value);
if (JERRY_UNLIKELY (right_str_p == NULL))
{
ecma_deref_ecma_string (left_str_p);
result = ECMA_VALUE_ERROR;
goto error;
}
ecma_string_t *result_str_p = ecma_concat_ecma_strings (left_str_p, right_str_p);
ecma_deref_ecma_string (right_str_p);
*stack_top_p++ = ecma_make_string_value (result_str_p);
goto free_both_values;
}
case VM_OC_GET_TEMPLATE_OBJECT:
{
uint8_t tagged_idx = *byte_code_p++;
ecma_collection_t *collection_p = ecma_compiled_code_get_tagged_template_collection (bytecode_header_p);
JERRY_ASSERT (tagged_idx < collection_p->item_count);
*stack_top_p++ = ecma_copy_value (collection_p->buffer_p[tagged_idx]);
continue;
}
case VM_OC_PUSH_NEW_TARGET:
{
ecma_object_t *new_target_object_p = JERRY_CONTEXT (current_new_target_p);
if (new_target_object_p == NULL)
{
*stack_top_p++ = ECMA_VALUE_UNDEFINED;
}
else
{
ecma_ref_object (new_target_object_p);
*stack_top_p++ = ecma_make_object_value (new_target_object_p);
}
continue;
}
case VM_OC_REQUIRE_OBJECT_COERCIBLE:
{
if (!ecma_op_require_object_coercible (stack_top_p[-1]))
{
result = ECMA_VALUE_ERROR;
goto error;
}
continue;
}
case VM_OC_ASSIGN_SUPER:
{
result = opfunc_assign_super_reference (&stack_top_p, frame_ctx_p, opcode_data);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
continue;
}
#endif /* JERRY_ESNEXT */
case VM_OC_PUSH_ELISON:
{
*stack_top_p++ = ECMA_VALUE_ARRAY_HOLE;
continue;
}
case VM_OC_APPEND_ARRAY:
{
uint16_t values_length = *byte_code_p++;
stack_top_p -= values_length;
#if JERRY_ESNEXT
if (*byte_code_start_p == CBC_EXT_OPCODE)
{
values_length = (uint16_t) (values_length | OPFUNC_HAS_SPREAD_ELEMENT);
}
#endif /* JERRY_ESNEXT */
result = opfunc_append_array (stack_top_p, values_length);
#if JERRY_ESNEXT
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
#else /* !JERRY_ESNEXT */
JERRY_ASSERT (ecma_is_value_empty (result));
#endif /* JERRY_ESNEXT */
continue;
}
case VM_OC_IDENT_REFERENCE:
{
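/* Push a three slot reference: a register marker and the register
 * index for register allocated variables, otherwise the resolved
 * base lexical environment and the name; the current value of the
 * identifier is pushed on top in both cases. */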
uint16_t literal_index;
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (literal_index < ident_end);
if (literal_index < register_end)
{
*stack_top_p++ = ECMA_VALUE_REGISTER_REF;
*stack_top_p++ = ecma_make_integer_value (literal_index);
*stack_top_p++ = ecma_fast_copy_value (VM_GET_REGISTER (frame_ctx_p, literal_index));
}
else
{
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_object_t *ref_base_lex_env_p;
result = ecma_op_get_value_lex_env_base (frame_ctx_p->lex_env_p,
&ref_base_lex_env_p,
name_p);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
ecma_ref_object (ref_base_lex_env_p);
ecma_ref_ecma_string (name_p);
*stack_top_p++ = ecma_make_object_value (ref_base_lex_env_p);
*stack_top_p++ = ecma_make_string_value (name_p);
*stack_top_p++ = result;
}
continue;
}
case VM_OC_PROP_GET:
{
result = vm_op_get_value (left_value, right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_PROP_REFERENCE:
{
/* Reference forms require preserving the base and the property on the stack. */
if (opcode == CBC_PUSH_PROP_REFERENCE)
{
left_value = stack_top_p[-2];
right_value = stack_top_p[-1];
}
else if (opcode == CBC_PUSH_PROP_LITERAL_REFERENCE)
{
*stack_top_p++ = left_value;
right_value = left_value;
left_value = stack_top_p[-2];
}
else
{
JERRY_ASSERT (opcode == CBC_PUSH_PROP_LITERAL_LITERAL_REFERENCE
|| opcode == CBC_PUSH_PROP_THIS_LITERAL_REFERENCE);
*stack_top_p++ = left_value;
*stack_top_p++ = right_value;
}
/* FALLTHRU */
}
case VM_OC_PROP_PRE_INCR:
case VM_OC_PROP_PRE_DECR:
case VM_OC_PROP_POST_INCR:
case VM_OC_PROP_POST_DECR:
{
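/* Both the reference push opcodes (falling through from above) and
 * the property increment/decrement forms load the current value
 * here. The former simply push the value; the latter step the stack
 * pointer back over the popped base and property slots so the
 * updated value can be written through the same reference, then
 * continue with the shared code below. */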
result = vm_op_get_value (left_value,
right_value);
if (opcode < CBC_PRE_INCR)
{
left_value = ECMA_VALUE_UNDEFINED;
right_value = ECMA_VALUE_UNDEFINED;
}
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
if (opcode < CBC_PRE_INCR)
{
break;
}
stack_top_p += 2;
left_value = result;
right_value = ECMA_VALUE_UNDEFINED;
/* FALLTHRU */
}
case VM_OC_PRE_INCR:
case VM_OC_PRE_DECR:
case VM_OC_POST_INCR:
case VM_OC_POST_DECR:
{
uint32_t opcode_flags = VM_OC_GROUP_GET_INDEX (opcode_data) - VM_OC_PROP_PRE_INCR;
ecma_number_t result_number;
byte_code_p = byte_code_start_p + 1;
if (ecma_is_value_integer_number (left_value))
{
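/* Fast path: a direct integer keeps its tag bits when the payload is
 * adjusted by a constant shifted past the tag, so the raw encoded
 * value is incremented or decremented directly; at the representable
 * limits int_increase stays 0 and the number path below is taken. */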
result = left_value;
left_value = ECMA_VALUE_UNDEFINED;
ecma_integer_value_t int_value = (ecma_integer_value_t) result;
ecma_integer_value_t int_increase = 0;
if (opcode_flags & VM_OC_DECREMENT_OPERATOR_FLAG)
{
if (int_value > ECMA_INTEGER_NUMBER_MIN_SHIFTED)
{
int_increase = -(1 << ECMA_DIRECT_SHIFT);
}
}
else if (int_value < ECMA_INTEGER_NUMBER_MAX_SHIFTED)
{
int_increase = 1 << ECMA_DIRECT_SHIFT;
}
if (JERRY_LIKELY (int_increase != 0))
{
/* Postfix operators require the unmodified number value. */
if (opcode_flags & VM_OC_POST_INCR_DECR_OPERATOR_FLAG)
{
POST_INCREASE_DECREASE_PUT_RESULT (result);
}
result = (ecma_value_t) (int_value + int_increase);
break;
}
result_number = (ecma_number_t) ecma_get_integer_from_value (result);
}
else if (ecma_is_value_float_number (left_value))
{
result = left_value;
left_value = ECMA_VALUE_UNDEFINED;
result_number = ecma_get_number_from_value (result);
}
else
{
result = ecma_op_to_numeric (left_value, &result_number, ECMA_TO_NUMERIC_ALLOW_BIGINT);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
ecma_free_value (left_value);
left_value = ECMA_VALUE_UNDEFINED;
#if JERRY_BUILTIN_BIGINT
if (JERRY_UNLIKELY (ecma_is_value_bigint (result)))
{
ecma_bigint_unary_operation_type operation_type = ECMA_BIGINT_UNARY_INCREASE;
if (opcode_flags & VM_OC_DECREMENT_OPERATOR_FLAG)
{
operation_type = ECMA_BIGINT_UNARY_DECREASE;
}
/* Postfix operators require the unmodified number value. */
if (opcode_flags & VM_OC_POST_INCR_DECR_OPERATOR_FLAG)
{
POST_INCREASE_DECREASE_PUT_RESULT (result);
result = ecma_bigint_unary (result, operation_type);
}
else
{
ecma_value_t original_value = result;
result = ecma_bigint_unary (original_value, operation_type);
ecma_free_value (original_value);
}
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
break;
}
#endif /* JERRY_BUILTIN_BIGINT */
result = ecma_make_number_value (result_number);
}
ecma_number_t increase = ECMA_NUMBER_ONE;
if (opcode_flags & VM_OC_DECREMENT_OPERATOR_FLAG)
{
/* For decrement operators. */
increase = ECMA_NUMBER_MINUS_ONE;
}
/* Postfix operators require the unmodified number value. */
if (opcode_flags & VM_OC_POST_INCR_DECR_OPERATOR_FLAG)
{
POST_INCREASE_DECREASE_PUT_RESULT (result);
result = ecma_make_number_value (result_number + increase);
break;
}
if (ecma_is_value_integer_number (result))
{
result = ecma_make_number_value (result_number + increase);
}
else
{
result = ecma_update_float_number (result, result_number + increase);
}
break;
}
case VM_OC_ASSIGN:
{
result = left_value;
left_value = ECMA_VALUE_UNDEFINED;
break;
}
case VM_OC_MOV_IDENT:
{
uint32_t literal_index;
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (literal_index < register_end);
JERRY_ASSERT (!(opcode_data & (VM_OC_PUT_STACK | VM_OC_PUT_BLOCK)));
ecma_fast_free_value (VM_GET_REGISTER (frame_ctx_p, literal_index));
VM_GET_REGISTER (frame_ctx_p, literal_index) = left_value;
continue;
}
case VM_OC_ASSIGN_PROP:
{
result = stack_top_p[-1];
stack_top_p[-1] = left_value;
left_value = ECMA_VALUE_UNDEFINED;
break;
}
case VM_OC_ASSIGN_PROP_THIS:
{
result = stack_top_p[-1];
stack_top_p[-1] = ecma_copy_value (frame_ctx_p->this_binding);
*stack_top_p++ = left_value;
left_value = ECMA_VALUE_UNDEFINED;
break;
}
case VM_OC_RETURN_FUNCTION_END:
{
if (CBC_FUNCTION_GET_TYPE (bytecode_header_p->status_flags) == CBC_FUNCTION_SCRIPT)
{
result = VM_GET_REGISTER (frame_ctx_p, 0);
VM_GET_REGISTERS (frame_ctx_p)[0] = ECMA_VALUE_UNDEFINED;
}
else
{
result = ECMA_VALUE_UNDEFINED;
}
goto error;
}
case VM_OC_RETURN:
{
JERRY_ASSERT (opcode == CBC_RETURN
|| opcode == CBC_RETURN_WITH_LITERAL);
result = left_value;
left_value = ECMA_VALUE_UNDEFINED;
goto error;
}
case VM_OC_THROW:
{
jcontext_raise_exception (left_value);
result = ECMA_VALUE_ERROR;
left_value = ECMA_VALUE_UNDEFINED;
goto error;
}
case VM_OC_THROW_REFERENCE_ERROR:
{
result = ecma_raise_reference_error (ECMA_ERR_MSG ("Undefined reference"));
goto error;
}
case VM_OC_EVAL:
{
JERRY_CONTEXT (status_flags) |= ECMA_STATUS_DIRECT_EVAL;
JERRY_ASSERT ((*byte_code_p >= CBC_CALL && *byte_code_p <= CBC_CALL2_PROP_BLOCK)
|| (*byte_code_p == CBC_EXT_OPCODE
&& byte_code_p[1] >= CBC_EXT_SPREAD_CALL
&& byte_code_p[1] <= CBC_EXT_SPREAD_CALL_PROP_BLOCK));
continue;
}
case VM_OC_CALL:
{
frame_ctx_p->call_operation = VM_EXEC_CALL;
frame_ctx_p->byte_code_p = byte_code_start_p;
frame_ctx_p->stack_top_p = stack_top_p;
return ECMA_VALUE_UNDEFINED;
}
case VM_OC_NEW:
{
frame_ctx_p->call_operation = VM_EXEC_CONSTRUCT;
frame_ctx_p->byte_code_p = byte_code_start_p;
frame_ctx_p->stack_top_p = stack_top_p;
return ECMA_VALUE_UNDEFINED;
}
case VM_OC_ERROR:
{
JERRY_ASSERT (frame_ctx_p->byte_code_p[1] == CBC_EXT_ERROR);
#if JERRY_DEBUGGER
frame_ctx_p->byte_code_p = JERRY_CONTEXT (debugger_exception_byte_code_p);
#endif /* JERRY_DEBUGGER */
result = ECMA_VALUE_ERROR;
goto error;
}
case VM_OC_RESOLVE_BASE_FOR_CALL:
{
ecma_value_t this_value = stack_top_p[-3];
if (this_value == ECMA_VALUE_REGISTER_REF)
{
/* A lexical environment cannot be the 'this' value. */
stack_top_p[-2] = ECMA_VALUE_UNDEFINED;
stack_top_p[-3] = ECMA_VALUE_UNDEFINED;
}
else if (vm_get_implicit_this_value (&this_value))
{
ecma_free_value (stack_top_p[-3]);
stack_top_p[-3] = this_value;
}
continue;
}
case VM_OC_PROP_DELETE:
{
result = vm_op_delete_prop (left_value, right_value, is_strict);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
JERRY_ASSERT (ecma_is_value_boolean (result));
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_DELETE:
{
uint16_t literal_index;
READ_LITERAL_INDEX (literal_index);
if (literal_index < register_end)
{
*stack_top_p++ = ECMA_VALUE_FALSE;
continue;
}
result = vm_op_delete_var (literal_start_p[literal_index],
frame_ctx_p->lex_env_p);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
JERRY_ASSERT (ecma_is_value_boolean (result));
*stack_top_p++ = result;
continue;
}
case VM_OC_JUMP:
{
byte_code_p = byte_code_start_p + branch_offset;
continue;
}
case VM_OC_BRANCH_IF_STRICT_EQUAL:
{
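/* Used by switch statements: the popped case value is compared with
 * the switch expression kept below it; on a match the expression is
 * popped as well and control jumps to the case body. */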
ecma_value_t value = *(--stack_top_p);
JERRY_ASSERT (stack_top_p > VM_GET_REGISTERS (frame_ctx_p) + register_end);
if (ecma_op_strict_equality_compare (value, stack_top_p[-1]))
{
byte_code_p = byte_code_start_p + branch_offset;
ecma_free_value (*--stack_top_p);
}
ecma_free_value (value);
continue;
}
case VM_OC_BRANCH_IF_TRUE:
case VM_OC_BRANCH_IF_FALSE:
case VM_OC_BRANCH_IF_LOGICAL_TRUE:
case VM_OC_BRANCH_IF_LOGICAL_FALSE:
{
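/* Shared handler of the four conditional branches: one flag bit
 * inverts the test, the other marks the logical (&& and ||) forms
 * that keep the tested value on the stack when the branch is taken. */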
uint32_t opcode_flags = VM_OC_GROUP_GET_INDEX (opcode_data) - VM_OC_BRANCH_IF_TRUE;
ecma_value_t value = *(--stack_top_p);
bool boolean_value = ecma_op_to_boolean (value);
if (opcode_flags & VM_OC_BRANCH_IF_FALSE_FLAG)
{
boolean_value = !boolean_value;
}
if (boolean_value)
{
byte_code_p = byte_code_start_p + branch_offset;
if (opcode_flags & VM_OC_LOGICAL_BRANCH_FLAG)
{
/* "Push" the value back onto the stack. */
++stack_top_p;
continue;
}
}
ecma_fast_free_value (value);
continue;
}
#if JERRY_ESNEXT
case VM_OC_BRANCH_IF_NULLISH:
{
left_value = stack_top_p[-1];
if (!ecma_is_value_null (left_value) && !ecma_is_value_undefined (left_value))
{
byte_code_p = byte_code_start_p + branch_offset;
continue;
}
--stack_top_p;
continue;
}
#endif /* JERRY_ESNEXT */
case VM_OC_PLUS:
case VM_OC_MINUS:
{
result = opfunc_unary_operation (left_value, VM_OC_GROUP_GET_INDEX (opcode_data) == VM_OC_PLUS);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_left_value;
}
case VM_OC_NOT:
{
*stack_top_p++ = ecma_make_boolean_value (!ecma_op_to_boolean (left_value));
JERRY_ASSERT (ecma_is_value_boolean (stack_top_p[-1]));
goto free_left_value;
}
case VM_OC_BIT_NOT:
{
JERRY_STATIC_ASSERT (ECMA_DIRECT_TYPE_MASK == ((1 << ECMA_DIRECT_SHIFT) - 1),
direct_type_mask_must_fill_all_bits_before_the_value_starts);
if (ecma_is_value_integer_number (left_value))
{
*stack_top_p++ = (~ECMA_DIRECT_TYPE_MASK) ^ left_value;
goto free_left_value;
}
result = do_number_bitwise_not (left_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_left_value;
}
case VM_OC_VOID:
{
*stack_top_p++ = ECMA_VALUE_UNDEFINED;
goto free_left_value;
}
case VM_OC_TYPEOF_IDENT:
{
uint16_t literal_index;
READ_LITERAL_INDEX (literal_index);
JERRY_ASSERT (literal_index < ident_end);
if (literal_index < register_end)
{
left_value = ecma_copy_value (VM_GET_REGISTER (frame_ctx_p, literal_index));
}
else
{
ecma_string_t *name_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_object_t *ref_base_lex_env_p;
result = ecma_op_get_value_lex_env_base (frame_ctx_p->lex_env_p,
&ref_base_lex_env_p,
name_p);
if (ref_base_lex_env_p == NULL)
{
jcontext_release_exception ();
result = ECMA_VALUE_UNDEFINED;
}
else if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
left_value = result;
}
/* FALLTHRU */
}
case VM_OC_TYPEOF:
{
result = opfunc_typeof (left_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_left_value;
}
case VM_OC_ADD:
{
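/* Fast paths: the sum of two direct integers always fits into an
 * int32 (see the static assertion in the subtraction handler), and
 * float operands are updated in place to avoid a new allocation. */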
if (ecma_are_values_integer_numbers (left_value, right_value))
{
ecma_integer_value_t left_integer = ecma_get_integer_from_value (left_value);
ecma_integer_value_t right_integer = ecma_get_integer_from_value (right_value);
*stack_top_p++ = ecma_make_int32_value ((int32_t) (left_integer + right_integer));
continue;
}
if (ecma_is_value_float_number (left_value)
&& ecma_is_value_number (right_value))
{
ecma_number_t new_value = (ecma_get_float_from_value (left_value) +
ecma_get_number_from_value (right_value));
*stack_top_p++ = ecma_update_float_number (left_value, new_value);
ecma_free_number (right_value);
continue;
}
if (ecma_is_value_float_number (right_value)
&& ecma_is_value_integer_number (left_value))
{
ecma_number_t new_value = ((ecma_number_t) ecma_get_integer_from_value (left_value) +
ecma_get_float_from_value (right_value));
*stack_top_p++ = ecma_update_float_number (right_value, new_value);
continue;
}
result = opfunc_addition (left_value, right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_SUB:
{
JERRY_STATIC_ASSERT (ECMA_INTEGER_NUMBER_MAX * 2 <= INT32_MAX
&& ECMA_INTEGER_NUMBER_MIN * 2 >= INT32_MIN,
doubled_ecma_numbers_must_fit_into_int32_range);
JERRY_ASSERT (!ECMA_IS_VALUE_ERROR (left_value)
&& !ECMA_IS_VALUE_ERROR (right_value));
if (ecma_are_values_integer_numbers (left_value, right_value))
{
ecma_integer_value_t left_integer = ecma_get_integer_from_value (left_value);
ecma_integer_value_t right_integer = ecma_get_integer_from_value (right_value);
*stack_top_p++ = ecma_make_int32_value ((int32_t) (left_integer - right_integer));
continue;
}
if (ecma_is_value_float_number (left_value)
&& ecma_is_value_number (right_value))
{
ecma_number_t new_value = (ecma_get_float_from_value (left_value) -
ecma_get_number_from_value (right_value));
*stack_top_p++ = ecma_update_float_number (left_value, new_value);
ecma_free_number (right_value);
continue;
}
if (ecma_is_value_float_number (right_value)
&& ecma_is_value_integer_number (left_value))
{
ecma_number_t new_value = ((ecma_number_t) ecma_get_integer_from_value (left_value) -
ecma_get_float_from_value (right_value));
*stack_top_p++ = ecma_update_float_number (right_value, new_value);
continue;
}
result = do_number_arithmetic (NUMBER_ARITHMETIC_SUBTRACTION,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_MUL:
{
JERRY_ASSERT (!ECMA_IS_VALUE_ERROR (left_value)
&& !ECMA_IS_VALUE_ERROR (right_value));
JERRY_STATIC_ASSERT (ECMA_INTEGER_MULTIPLY_MAX * ECMA_INTEGER_MULTIPLY_MAX <= ECMA_INTEGER_NUMBER_MAX
&& -(ECMA_INTEGER_MULTIPLY_MAX * ECMA_INTEGER_MULTIPLY_MAX) >= ECMA_INTEGER_NUMBER_MIN,
square_of_integer_multiply_max_must_fit_into_integer_value_range);
if (ecma_are_values_integer_numbers (left_value, right_value))
{
ecma_integer_value_t left_integer = ecma_get_integer_from_value (left_value);
ecma_integer_value_t right_integer = ecma_get_integer_from_value (right_value);
if (-ECMA_INTEGER_MULTIPLY_MAX <= left_integer
&& left_integer <= ECMA_INTEGER_MULTIPLY_MAX
&& -ECMA_INTEGER_MULTIPLY_MAX <= right_integer
&& right_integer <= ECMA_INTEGER_MULTIPLY_MAX
&& left_integer != 0
&& right_integer != 0)
{
*stack_top_p++ = ecma_integer_multiply (left_integer, right_integer);
continue;
}
ecma_number_t multiply = (ecma_number_t) left_integer * (ecma_number_t) right_integer;
*stack_top_p++ = ecma_make_number_value (multiply);
continue;
}
if (ecma_is_value_float_number (left_value)
&& ecma_is_value_number (right_value))
{
ecma_number_t new_value = (ecma_get_float_from_value (left_value) *
ecma_get_number_from_value (right_value));
*stack_top_p++ = ecma_update_float_number (left_value, new_value);
ecma_free_number (right_value);
continue;
}
if (ecma_is_value_float_number (right_value)
&& ecma_is_value_integer_number (left_value))
{
ecma_number_t new_value = ((ecma_number_t) ecma_get_integer_from_value (left_value) *
ecma_get_float_from_value (right_value));
*stack_top_p++ = ecma_update_float_number (right_value, new_value);
continue;
}
result = do_number_arithmetic (NUMBER_ARITHMETIC_MULTIPLICATION,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_DIV:
{
JERRY_ASSERT (!ECMA_IS_VALUE_ERROR (left_value)
&& !ECMA_IS_VALUE_ERROR (right_value));
result = do_number_arithmetic (NUMBER_ARITHMETIC_DIVISION,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_MOD:
{
JERRY_ASSERT (!ECMA_IS_VALUE_ERROR (left_value)
&& !ECMA_IS_VALUE_ERROR (right_value));
if (ecma_are_values_integer_numbers (left_value, right_value))
{
ecma_integer_value_t left_integer = ecma_get_integer_from_value (left_value);
ecma_integer_value_t right_integer = ecma_get_integer_from_value (right_value);
if (right_integer != 0)
{
ecma_integer_value_t mod_result = left_integer % right_integer;
if (mod_result != 0 || left_integer >= 0)
{
*stack_top_p++ = ecma_make_integer_value (mod_result);
continue;
}
}
}
result = do_number_arithmetic (NUMBER_ARITHMETIC_REMAINDER,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
#if JERRY_ESNEXT
case VM_OC_EXP:
{
result = do_number_arithmetic (NUMBER_ARITHMETIC_EXPONENTIATION,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
#endif /* JERRY_ESNEXT */
case VM_OC_EQUAL:
{
result = opfunc_equality (left_value, right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_NOT_EQUAL:
{
result = opfunc_equality (left_value, right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = ecma_invert_boolean_value (result);
goto free_both_values;
}
case VM_OC_STRICT_EQUAL:
{
bool is_equal = ecma_op_strict_equality_compare (left_value, right_value);
result = ecma_make_boolean_value (is_equal);
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_STRICT_NOT_EQUAL:
{
bool is_equal = ecma_op_strict_equality_compare (left_value, right_value);
result = ecma_make_boolean_value (!is_equal);
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_BIT_OR:
{
JERRY_STATIC_ASSERT (ECMA_DIRECT_TYPE_MASK == ((1 << ECMA_DIRECT_SHIFT) - 1),
direct_type_mask_must_fill_all_bits_before_the_value_starts);
if (ecma_are_values_integer_numbers (left_value, right_value))
{
*stack_top_p++ = left_value | right_value;
continue;
}
result = do_number_bitwise_logic (NUMBER_BITWISE_LOGIC_OR,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_BIT_XOR:
{
JERRY_STATIC_ASSERT (ECMA_DIRECT_TYPE_MASK == ((1 << ECMA_DIRECT_SHIFT) - 1),
direct_type_mask_must_fill_all_bits_before_the_value_starts);
if (ecma_are_values_integer_numbers (left_value, right_value))
{
*stack_top_p++ = left_value ^ right_value;
continue;
}
result = do_number_bitwise_logic (NUMBER_BITWISE_LOGIC_XOR,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_BIT_AND:
{
JERRY_STATIC_ASSERT (ECMA_DIRECT_TYPE_MASK == ((1 << ECMA_DIRECT_SHIFT) - 1),
direct_type_mask_must_fill_all_bits_before_the_value_starts);
if (ecma_are_values_integer_numbers (left_value, right_value))
{
*stack_top_p++ = left_value & right_value;
continue;
}
result = do_number_bitwise_logic (NUMBER_BITWISE_LOGIC_AND,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_LEFT_SHIFT:
{
JERRY_STATIC_ASSERT (ECMA_DIRECT_TYPE_MASK == ((1 << ECMA_DIRECT_SHIFT) - 1),
direct_type_mask_must_fill_all_bits_before_the_value_starts);
if (ecma_are_values_integer_numbers (left_value, right_value))
{
ecma_integer_value_t left_integer = ecma_get_integer_from_value (left_value);
ecma_integer_value_t right_integer = ecma_get_integer_from_value (right_value);
*stack_top_p++ = ecma_make_int32_value ((int32_t) (left_integer << (right_integer & 0x1f)));
continue;
}
result = do_number_bitwise_logic (NUMBER_BITWISE_SHIFT_LEFT,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_RIGHT_SHIFT:
{
JERRY_STATIC_ASSERT (ECMA_DIRECT_TYPE_MASK == ((1 << ECMA_DIRECT_SHIFT) - 1),
direct_type_mask_must_fill_all_bits_before_the_value_starts);
if (ecma_are_values_integer_numbers (left_value, right_value))
{
ecma_integer_value_t left_integer = ecma_get_integer_from_value (left_value);
ecma_integer_value_t right_integer = ecma_get_integer_from_value (right_value);
*stack_top_p++ = ecma_make_integer_value (left_integer >> (right_integer & 0x1f));
continue;
}
result = do_number_bitwise_logic (NUMBER_BITWISE_SHIFT_RIGHT,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_UNS_RIGHT_SHIFT:
{
JERRY_STATIC_ASSERT (ECMA_DIRECT_TYPE_MASK == ((1 << ECMA_DIRECT_SHIFT) - 1),
direct_type_mask_must_fill_all_bits_before_the_value_starts);
if (ecma_are_values_integer_numbers (left_value, right_value))
{
uint32_t left_uint32 = (uint32_t) ecma_get_integer_from_value (left_value);
ecma_integer_value_t right_integer = ecma_get_integer_from_value (right_value);
*stack_top_p++ = ecma_make_uint32_value (left_uint32 >> (right_integer & 0x1f));
continue;
}
result = do_number_bitwise_logic (NUMBER_BITWISE_SHIFT_URIGHT,
left_value,
right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_LESS:
{
if (ecma_are_values_integer_numbers (left_value, right_value))
{
bool is_less = (ecma_integer_value_t) left_value < (ecma_integer_value_t) right_value;
#if !JERRY_VM_EXEC_STOP
/* This is a lookahead to the next opcode to improve performance.
* If it is CBC_BRANCH_IF_TRUE_BACKWARD, execute it. */
if (*byte_code_p <= CBC_BRANCH_IF_TRUE_BACKWARD_3 && *byte_code_p >= CBC_BRANCH_IF_TRUE_BACKWARD)
{
byte_code_start_p = byte_code_p++;
branch_offset_length = CBC_BRANCH_OFFSET_LENGTH (*byte_code_start_p);
JERRY_ASSERT (branch_offset_length >= 1 && branch_offset_length <= 3);
if (is_less)
{
branch_offset = *(byte_code_p++);
if (JERRY_UNLIKELY (branch_offset_length != 1))
{
branch_offset <<= 8;
branch_offset |= *(byte_code_p++);
if (JERRY_UNLIKELY (branch_offset_length == 3))
{
branch_offset <<= 8;
branch_offset |= *(byte_code_p++);
}
}
/* Note: The opcode is a backward branch. */
byte_code_p = byte_code_start_p - branch_offset;
}
else
{
byte_code_p += branch_offset_length;
}
continue;
}
#endif /* !JERRY_VM_EXEC_STOP */
*stack_top_p++ = ecma_make_boolean_value (is_less);
continue;
}
if (ecma_is_value_number (left_value) && ecma_is_value_number (right_value))
{
ecma_number_t left_number = ecma_get_number_from_value (left_value);
ecma_number_t right_number = ecma_get_number_from_value (right_value);
*stack_top_p++ = ecma_make_boolean_value (left_number < right_number);
goto free_both_values;
}
result = opfunc_relation (left_value, right_value, true, false);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_GREATER:
{
if (ecma_are_values_integer_numbers (left_value, right_value))
{
ecma_integer_value_t left_integer = (ecma_integer_value_t) left_value;
ecma_integer_value_t right_integer = (ecma_integer_value_t) right_value;
*stack_top_p++ = ecma_make_boolean_value (left_integer > right_integer);
continue;
}
if (ecma_is_value_number (left_value) && ecma_is_value_number (right_value))
{
ecma_number_t left_number = ecma_get_number_from_value (left_value);
ecma_number_t right_number = ecma_get_number_from_value (right_value);
*stack_top_p++ = ecma_make_boolean_value (left_number > right_number);
goto free_both_values;
}
result = opfunc_relation (left_value, right_value, false, false);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_LESS_EQUAL:
{
if (ecma_are_values_integer_numbers (left_value, right_value))
{
ecma_integer_value_t left_integer = (ecma_integer_value_t) left_value;
ecma_integer_value_t right_integer = (ecma_integer_value_t) right_value;
*stack_top_p++ = ecma_make_boolean_value (left_integer <= right_integer);
continue;
}
if (ecma_is_value_number (left_value) && ecma_is_value_number (right_value))
{
ecma_number_t left_number = ecma_get_number_from_value (left_value);
ecma_number_t right_number = ecma_get_number_from_value (right_value);
*stack_top_p++ = ecma_make_boolean_value (left_number <= right_number);
goto free_both_values;
}
result = opfunc_relation (left_value, right_value, false, true);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_GREATER_EQUAL:
{
if (ecma_are_values_integer_numbers (left_value, right_value))
{
ecma_integer_value_t left_integer = (ecma_integer_value_t) left_value;
ecma_integer_value_t right_integer = (ecma_integer_value_t) right_value;
*stack_top_p++ = ecma_make_boolean_value (left_integer >= right_integer);
continue;
}
if (ecma_is_value_number (left_value) && ecma_is_value_number (right_value))
{
ecma_number_t left_number = ecma_get_number_from_value (left_value);
ecma_number_t right_number = ecma_get_number_from_value (right_value);
*stack_top_p++ = ecma_make_boolean_value (left_number >= right_number);
goto free_both_values;
}
result = opfunc_relation (left_value, right_value, true, true);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_IN:
{
result = opfunc_in (left_value, right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_INSTANCEOF:
{
result = opfunc_instanceof (left_value, right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
goto free_both_values;
}
case VM_OC_BLOCK_CREATE_CONTEXT:
{
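/* Either open a new block context that records where the block ends,
 * or mark the enclosing context as owning a lexical environment; a
 * fresh declarative environment is created in both cases. */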
#if JERRY_ESNEXT
ecma_value_t *stack_context_top_p;
stack_context_top_p = VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth;
JERRY_ASSERT (stack_context_top_p == stack_top_p || stack_context_top_p == stack_top_p - 1);
if (byte_code_start_p[0] != CBC_EXT_OPCODE)
{
branch_offset += (int32_t) (byte_code_start_p - frame_ctx_p->byte_code_start_p);
if (stack_context_top_p != stack_top_p)
{
/* Preserve the value of the switch statement. */
stack_context_top_p[1] = stack_context_top_p[0];
}
stack_context_top_p[0] = VM_CREATE_CONTEXT_WITH_ENV (VM_CONTEXT_BLOCK, branch_offset);
VM_PLUS_EQUAL_U16 (frame_ctx_p->context_depth, PARSER_BLOCK_CONTEXT_STACK_ALLOCATION);
stack_top_p += PARSER_BLOCK_CONTEXT_STACK_ALLOCATION;
}
else
{
JERRY_ASSERT (byte_code_start_p[1] == CBC_EXT_TRY_CREATE_ENV);
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_context_top_p[-1]) == VM_CONTEXT_TRY
|| VM_GET_CONTEXT_TYPE (stack_context_top_p[-1]) == VM_CONTEXT_CATCH
|| VM_GET_CONTEXT_TYPE (stack_context_top_p[-1]) == VM_CONTEXT_FINALLY_JUMP
|| VM_GET_CONTEXT_TYPE (stack_context_top_p[-1]) == VM_CONTEXT_FINALLY_THROW
|| VM_GET_CONTEXT_TYPE (stack_context_top_p[-1]) == VM_CONTEXT_FINALLY_RETURN);
JERRY_ASSERT (!(stack_context_top_p[-1] & VM_CONTEXT_HAS_LEX_ENV));
stack_context_top_p[-1] |= VM_CONTEXT_HAS_LEX_ENV;
}
#else /* !JERRY_ESNEXT */
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-2]) == VM_CONTEXT_CATCH
&& !(stack_top_p[-2] & VM_CONTEXT_HAS_LEX_ENV));
stack_top_p[-2] |= VM_CONTEXT_HAS_LEX_ENV;
#endif /* JERRY_ESNEXT */
frame_ctx_p->lex_env_p = ecma_create_decl_lex_env (frame_ctx_p->lex_env_p);
frame_ctx_p->lex_env_p->type_flags_refs |= ECMA_OBJECT_FLAG_BLOCK;
continue;
}
case VM_OC_WITH:
{
ecma_value_t value = *(--stack_top_p);
ecma_object_t *object_p;
ecma_object_t *with_env_p;
branch_offset += (int32_t) (byte_code_start_p - frame_ctx_p->byte_code_start_p);
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
result = ecma_op_to_object (value);
ecma_free_value (value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
object_p = ecma_get_object_from_value (result);
with_env_p = ecma_create_object_lex_env (frame_ctx_p->lex_env_p, object_p);
ecma_deref_object (object_p);
VM_PLUS_EQUAL_U16 (frame_ctx_p->context_depth, PARSER_WITH_CONTEXT_STACK_ALLOCATION);
stack_top_p += PARSER_WITH_CONTEXT_STACK_ALLOCATION;
stack_top_p[-1] = VM_CREATE_CONTEXT_WITH_ENV (VM_CONTEXT_WITH, branch_offset);
with_env_p->type_flags_refs |= ECMA_OBJECT_FLAG_BLOCK;
frame_ctx_p->lex_env_p = with_env_p;
continue;
}
case VM_OC_FOR_IN_INIT:
{
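/* Collect the enumerable property names of the expression up front:
 * when there is nothing to enumerate, jump over the loop body,
 * otherwise open a for-in context holding the object, the name
 * collection, and the current index. */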
ecma_value_t value = *(--stack_top_p);
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
ecma_value_t expr_obj_value = ECMA_VALUE_UNDEFINED;
ecma_collection_t *prop_names_p = opfunc_for_in (value, &expr_obj_value);
ecma_free_value (value);
if (prop_names_p == NULL)
{
#if JERRY_ESNEXT
if (JERRY_UNLIKELY (ECMA_IS_VALUE_ERROR (expr_obj_value)))
{
result = expr_obj_value;
goto error;
}
#endif /* JERRY_ESNEXT */
/* The collection is already released. */
byte_code_p = byte_code_start_p + branch_offset;
continue;
}
branch_offset += (int32_t) (byte_code_start_p - frame_ctx_p->byte_code_start_p);
VM_PLUS_EQUAL_U16 (frame_ctx_p->context_depth, PARSER_FOR_IN_CONTEXT_STACK_ALLOCATION);
stack_top_p += PARSER_FOR_IN_CONTEXT_STACK_ALLOCATION;
stack_top_p[-1] = VM_CREATE_CONTEXT (VM_CONTEXT_FOR_IN, branch_offset);
ECMA_SET_INTERNAL_VALUE_ANY_POINTER (stack_top_p[-2], prop_names_p);
stack_top_p[-3] = 0;
stack_top_p[-4] = expr_obj_value;
#if JERRY_ESNEXT
if (byte_code_p[0] == CBC_EXT_OPCODE && byte_code_p[1] == CBC_EXT_CLONE_CONTEXT)
{
/* No need to duplicate the first context. */
byte_code_p += 2;
}
#endif /* JERRY_ESNEXT */
continue;
}
case VM_OC_FOR_IN_GET_NEXT:
{
ecma_value_t *context_top_p = VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth;
ecma_collection_t *collection_p;
collection_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, context_top_p[-2]);
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (context_top_p[-1]) == VM_CONTEXT_FOR_IN);
uint32_t index = context_top_p[-3];
ecma_value_t *buffer_p = collection_p->buffer_p;
*stack_top_p++ = buffer_p[index];
context_top_p[-3]++;
continue;
}
case VM_OC_FOR_IN_HAS_NEXT:
{
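/* Properties deleted since the enumeration started must be skipped:
 * re-check each remaining name with [[HasProperty]] before entering
 * the next iteration, and tear down the context once the collection
 * is exhausted. */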
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
ecma_collection_t *collection_p;
collection_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, stack_top_p[-2]);
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FOR_IN);
ecma_value_t *buffer_p = collection_p->buffer_p;
ecma_object_t *object_p = ecma_get_object_from_value (stack_top_p[-4]);
uint32_t index = stack_top_p[-3];
while (index < collection_p->item_count)
{
ecma_string_t *prop_name_p = ecma_get_prop_name_from_value (buffer_p[index]);
result = ecma_op_object_has_property (object_p, prop_name_p);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
if (JERRY_LIKELY (ecma_is_value_true (result)))
{
byte_code_p = byte_code_start_p + branch_offset;
break;
}
ecma_deref_ecma_string (prop_name_p);
index++;
}
if (index == collection_p->item_count)
{
ecma_deref_object (object_p);
ecma_collection_destroy (collection_p);
VM_MINUS_EQUAL_U16 (frame_ctx_p->context_depth, PARSER_FOR_IN_CONTEXT_STACK_ALLOCATION);
stack_top_p -= PARSER_FOR_IN_CONTEXT_STACK_ALLOCATION;
}
else
{
stack_top_p[-3] = index;
}
continue;
}
#if JERRY_ESNEXT
case VM_OC_FOR_OF_INIT:
{
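/* Acquire a sync iterator and perform the first step eagerly: when
 * the iterator is already done, jump over the loop body, otherwise
 * open a for-of context storing the next value, the iterator, and
 * its next method. */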
ecma_value_t value = *(--stack_top_p);
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
ecma_value_t next_method;
ecma_value_t iterator = ecma_op_get_iterator (value, ECMA_VALUE_SYNC_ITERATOR, &next_method);
ecma_free_value (value);
if (ECMA_IS_VALUE_ERROR (iterator))
{
result = iterator;
goto error;
}
result = ecma_op_iterator_step (iterator, next_method);
if (ECMA_IS_VALUE_ERROR (result))
{
ecma_free_value (iterator);
ecma_free_value (next_method);
goto error;
}
if (ecma_is_value_false (result))
{
ecma_free_value (iterator);
ecma_free_value (next_method);
byte_code_p = byte_code_start_p + branch_offset;
continue;
}
ecma_value_t next_value = ecma_op_iterator_value (result);
ecma_free_value (result);
if (ECMA_IS_VALUE_ERROR (next_value))
{
result = next_value;
ecma_free_value (iterator);
ecma_free_value (next_method);
goto error;
}
branch_offset += (int32_t) (byte_code_start_p - frame_ctx_p->byte_code_start_p);
VM_PLUS_EQUAL_U16 (frame_ctx_p->context_depth, PARSER_FOR_OF_CONTEXT_STACK_ALLOCATION);
stack_top_p += PARSER_FOR_OF_CONTEXT_STACK_ALLOCATION;
stack_top_p[-1] = VM_CREATE_CONTEXT (VM_CONTEXT_FOR_OF, branch_offset) | VM_CONTEXT_CLOSE_ITERATOR;
stack_top_p[-2] = next_value;
stack_top_p[-3] = iterator;
stack_top_p[-4] = next_method;
if (byte_code_p[0] == CBC_EXT_OPCODE && byte_code_p[1] == CBC_EXT_CLONE_CONTEXT)
{
/* No need to duplicate the first context. */
byte_code_p += 2;
}
continue;
}
case VM_OC_FOR_OF_GET_NEXT:
{
ecma_value_t *context_top_p = VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth;
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (context_top_p[-1]) == VM_CONTEXT_FOR_OF
|| VM_GET_CONTEXT_TYPE (context_top_p[-1]) == VM_CONTEXT_FOR_AWAIT_OF);
JERRY_ASSERT (context_top_p[-1] & VM_CONTEXT_CLOSE_ITERATOR);
*stack_top_p++ = context_top_p[-2];
context_top_p[-2] = ECMA_VALUE_UNDEFINED;
continue;
}
case VM_OC_FOR_OF_HAS_NEXT:
{
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FOR_OF);
JERRY_ASSERT (stack_top_p[-1] & VM_CONTEXT_CLOSE_ITERATOR);
stack_top_p[-1] &= (uint32_t) ~VM_CONTEXT_CLOSE_ITERATOR;
result = ecma_op_iterator_step (stack_top_p[-3], stack_top_p[-4]);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
if (ecma_is_value_false (result))
{
ecma_free_value (stack_top_p[-2]);
ecma_free_value (stack_top_p[-3]);
ecma_free_value (stack_top_p[-4]);
VM_MINUS_EQUAL_U16 (frame_ctx_p->context_depth, PARSER_FOR_OF_CONTEXT_STACK_ALLOCATION);
stack_top_p -= PARSER_FOR_OF_CONTEXT_STACK_ALLOCATION;
continue;
}
ecma_value_t next_value = ecma_op_iterator_value (result);
ecma_free_value (result);
if (ECMA_IS_VALUE_ERROR (next_value))
{
result = next_value;
goto error;
}
JERRY_ASSERT (stack_top_p[-2] == ECMA_VALUE_UNDEFINED);
stack_top_p[-1] |= VM_CONTEXT_CLOSE_ITERATOR;
stack_top_p[-2] = next_value;
byte_code_p = byte_code_start_p + branch_offset;
continue;
}
case VM_OC_FOR_AWAIT_OF_INIT:
{
ecma_value_t value = *(--stack_top_p);
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
ecma_value_t next_method;
result = ecma_op_get_iterator (value, ECMA_VALUE_ASYNC_ITERATOR, &next_method);
ecma_free_value (value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
ecma_value_t iterator = result;
result = ecma_op_iterator_next (result, next_method, ECMA_VALUE_EMPTY);
if (ECMA_IS_VALUE_ERROR (result))
{
ecma_free_value (iterator);
ecma_free_value (next_method);
goto error;
}
branch_offset += (int32_t) (byte_code_start_p - frame_ctx_p->byte_code_start_p);
VM_PLUS_EQUAL_U16 (frame_ctx_p->context_depth, PARSER_FOR_AWAIT_OF_CONTEXT_STACK_ALLOCATION);
stack_top_p += PARSER_FOR_AWAIT_OF_CONTEXT_STACK_ALLOCATION;
stack_top_p[-1] = VM_CREATE_CONTEXT (VM_CONTEXT_FOR_AWAIT_OF, branch_offset);
stack_top_p[-2] = ECMA_VALUE_UNDEFINED;
stack_top_p[-3] = iterator;
stack_top_p[-4] = next_method;
if (byte_code_p[0] == CBC_EXT_OPCODE && byte_code_p[1] == CBC_EXT_CLONE_CONTEXT)
{
/* No need to duplicate the first context. */
byte_code_p += 2;
}
frame_ctx_p->call_operation = VM_EXEC_RETURN;
frame_ctx_p->byte_code_p = byte_code_p;
frame_ctx_p->stack_top_p = stack_top_p;
uint16_t extra_flags = (ECMA_EXECUTABLE_OBJECT_DO_AWAIT_OR_YIELD
| (ECMA_AWAIT_FOR_NEXT << ECMA_AWAIT_STATE_SHIFT));
if (CBC_FUNCTION_GET_TYPE (bytecode_header_p->status_flags) == CBC_FUNCTION_ASYNC_GENERATOR
|| (frame_ctx_p->shared_p->status_flags & VM_FRAME_CTX_SHARED_EXECUTABLE))
{
ecma_extended_object_t *executable_object_p = VM_GET_EXECUTABLE_OBJECT (frame_ctx_p);
result = ecma_promise_async_await (executable_object_p, result);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
executable_object_p->u.cls.u2.executable_obj_flags |= extra_flags;
return ECMA_VALUE_UNDEFINED;
}
result = opfunc_async_create_and_await (frame_ctx_p, result, extra_flags);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
return result;
}
case VM_OC_FOR_AWAIT_OF_HAS_NEXT:
{
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FOR_AWAIT_OF);
JERRY_ASSERT (stack_top_p[-1] & VM_CONTEXT_CLOSE_ITERATOR);
stack_top_p[-1] &= (uint32_t) ~VM_CONTEXT_CLOSE_ITERATOR;
result = ecma_op_iterator_next (stack_top_p[-3], stack_top_p[-4], ECMA_VALUE_EMPTY);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
ecma_extended_object_t *executable_object_p = VM_GET_EXECUTABLE_OBJECT (frame_ctx_p);
result = ecma_promise_async_await (executable_object_p, result);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
uint16_t extra_flags = (ECMA_EXECUTABLE_OBJECT_DO_AWAIT_OR_YIELD
| (ECMA_AWAIT_FOR_NEXT << ECMA_AWAIT_STATE_SHIFT));
executable_object_p->u.cls.u2.executable_obj_flags |= extra_flags;
frame_ctx_p->call_operation = VM_EXEC_RETURN;
frame_ctx_p->byte_code_p = byte_code_start_p + branch_offset;
frame_ctx_p->stack_top_p = stack_top_p;
return ECMA_VALUE_UNDEFINED;
}
#endif /* JERRY_ESNEXT */
case VM_OC_TRY:
{
/* The try opcode simply creates the try context. */
branch_offset += (int32_t) (byte_code_start_p - frame_ctx_p->byte_code_start_p);
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
VM_PLUS_EQUAL_U16 (frame_ctx_p->context_depth, PARSER_TRY_CONTEXT_STACK_ALLOCATION);
stack_top_p += PARSER_TRY_CONTEXT_STACK_ALLOCATION;
stack_top_p[-1] = VM_CREATE_CONTEXT (VM_CONTEXT_TRY, branch_offset);
continue;
}
case VM_OC_CATCH:
{
/* Catch blocks are ignored here and turned into jumps. */
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_TRY);
byte_code_p = byte_code_start_p + branch_offset;
continue;
}
case VM_OC_FINALLY:
{
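/* A finally block entered through normal control flow: drop the
 * block's lexical environment if present, grow the context into a
 * finally-jump context, and record the fall-through jump target. */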
branch_offset += (int32_t) (byte_code_start_p - frame_ctx_p->byte_code_start_p);
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_TRY
|| VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_CATCH);
if (stack_top_p[-1] & VM_CONTEXT_HAS_LEX_ENV)
{
ecma_object_t *lex_env_p = frame_ctx_p->lex_env_p;
JERRY_ASSERT (lex_env_p->u2.outer_reference_cp != JMEM_CP_NULL);
frame_ctx_p->lex_env_p = ECMA_GET_NON_NULL_POINTER (ecma_object_t, lex_env_p->u2.outer_reference_cp);
ecma_deref_object (lex_env_p);
}
VM_PLUS_EQUAL_U16 (frame_ctx_p->context_depth, PARSER_FINALLY_CONTEXT_EXTRA_STACK_ALLOCATION);
stack_top_p += PARSER_FINALLY_CONTEXT_EXTRA_STACK_ALLOCATION;
stack_top_p[-1] = VM_CREATE_CONTEXT (VM_CONTEXT_FINALLY_JUMP, branch_offset);
stack_top_p[-2] = (ecma_value_t) branch_offset;
continue;
}
case VM_OC_CONTEXT_END:
{
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
JERRY_ASSERT (!(stack_top_p[-1] & VM_CONTEXT_CLOSE_ITERATOR));
ecma_value_t context_type = VM_GET_CONTEXT_TYPE (stack_top_p[-1]);
if (!VM_CONTEXT_IS_FINALLY (context_type))
{
stack_top_p = vm_stack_context_abort (frame_ctx_p, stack_top_p);
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
continue;
}
#if JERRY_ESNEXT
if (stack_top_p[-1] & VM_CONTEXT_HAS_LEX_ENV)
{
ecma_object_t *lex_env_p = frame_ctx_p->lex_env_p;
JERRY_ASSERT (lex_env_p->u2.outer_reference_cp != JMEM_CP_NULL);
frame_ctx_p->lex_env_p = ECMA_GET_NON_NULL_POINTER (ecma_object_t, lex_env_p->u2.outer_reference_cp);
ecma_deref_object (lex_env_p);
}
#endif /* JERRY_ESNEXT */
VM_MINUS_EQUAL_U16 (frame_ctx_p->context_depth,
PARSER_FINALLY_CONTEXT_STACK_ALLOCATION);
stack_top_p -= PARSER_FINALLY_CONTEXT_STACK_ALLOCATION;
if (context_type == VM_CONTEXT_FINALLY_RETURN)
{
result = *stack_top_p;
goto error;
}
if (context_type == VM_CONTEXT_FINALLY_THROW)
{
jcontext_raise_exception (*stack_top_p);
#if JERRY_VM_THROW
JERRY_CONTEXT (status_flags) |= ECMA_STATUS_ERROR_THROWN;
#endif /* JERRY_VM_THROW */
result = ECMA_VALUE_ERROR;
#if JERRY_DEBUGGER
JERRY_DEBUGGER_SET_FLAGS (JERRY_DEBUGGER_VM_EXCEPTION_THROWN);
#endif /* JERRY_DEBUGGER */
goto error;
}
JERRY_ASSERT (context_type == VM_CONTEXT_FINALLY_JUMP);
uint32_t jump_target = *stack_top_p;
vm_stack_found_type type = vm_stack_find_finally (frame_ctx_p,
stack_top_p,
VM_CONTEXT_FINALLY_JUMP,
jump_target);
stack_top_p = frame_ctx_p->stack_top_p;
switch (type)
{
case VM_CONTEXT_FOUND_FINALLY:
{
byte_code_p = frame_ctx_p->byte_code_p;
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FINALLY_JUMP);
stack_top_p[-2] = jump_target;
break;
}
#if JERRY_ESNEXT
case VM_CONTEXT_FOUND_ERROR:
{
JERRY_ASSERT (jcontext_has_pending_exception ());
result = ECMA_VALUE_ERROR;
goto error;
}
case VM_CONTEXT_FOUND_AWAIT:
{
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FINALLY_JUMP);
stack_top_p[-2] = jump_target;
return ECMA_VALUE_UNDEFINED;
}
#endif /* JERRY_ESNEXT */
default:
{
byte_code_p = frame_ctx_p->byte_code_start_p + jump_target;
break;
}
}
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
continue;
}
case VM_OC_JUMP_AND_EXIT_CONTEXT:
{
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
JERRY_ASSERT (!jcontext_has_pending_exception ());
branch_offset += (int32_t) (byte_code_start_p - frame_ctx_p->byte_code_start_p);
vm_stack_found_type type = vm_stack_find_finally (frame_ctx_p,
stack_top_p,
VM_CONTEXT_FINALLY_JUMP,
(uint32_t) branch_offset);
stack_top_p = frame_ctx_p->stack_top_p;
switch (type)
{
case VM_CONTEXT_FOUND_FINALLY:
{
byte_code_p = frame_ctx_p->byte_code_p;
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FINALLY_JUMP);
stack_top_p[-2] = (uint32_t) branch_offset;
break;
}
#if JERRY_ESNEXT
case VM_CONTEXT_FOUND_ERROR:
{
JERRY_ASSERT (jcontext_has_pending_exception ());
result = ECMA_VALUE_ERROR;
goto error;
}
case VM_CONTEXT_FOUND_AWAIT:
{
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FINALLY_JUMP);
stack_top_p[-2] = (uint32_t) branch_offset;
return ECMA_VALUE_UNDEFINED;
}
#endif /* JERRY_ESNEXT */
default:
{
byte_code_p = frame_ctx_p->byte_code_start_p + branch_offset;
break;
}
}
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
continue;
}
#if JERRY_MODULE_SYSTEM
case VM_OC_MODULE_IMPORT:
{
left_value = *(--stack_top_p);
ecma_value_t user_value = ECMA_VALUE_UNDEFINED;
ecma_value_t script_value = ((cbc_uint8_arguments_t *) bytecode_header_p)->script_value;
#if JERRY_SNAPSHOT_EXEC
if (JERRY_UNLIKELY (!(bytecode_header_p->status_flags & CBC_CODE_FLAGS_STATIC_FUNCTION)))
{
#endif /* JERRY_SNAPSHOT_EXEC */
cbc_script_t *script_p = ECMA_GET_INTERNAL_VALUE_POINTER (cbc_script_t, script_value);
if (script_p->refs_and_type & CBC_SCRIPT_HAS_USER_VALUE)
{
user_value = CBC_SCRIPT_GET_USER_VALUE (script_p);
}
#if JERRY_SNAPSHOT_EXEC
}
#endif /* JERRY_SNAPSHOT_EXEC */
result = ecma_module_import (left_value, user_value);
ecma_free_value (left_value);
if (ECMA_IS_VALUE_ERROR (result))
{
goto error;
}
*stack_top_p++ = result;
continue;
}
case VM_OC_MODULE_IMPORT_META:
{
ecma_value_t script_value = ((cbc_uint8_arguments_t *) bytecode_header_p)->script_value;
cbc_script_t *script_p = ECMA_GET_INTERNAL_VALUE_POINTER (cbc_script_t, script_value);
JERRY_ASSERT (script_p->refs_and_type & CBC_SCRIPT_HAS_IMPORT_META);
ecma_value_t import_meta = CBC_SCRIPT_GET_IMPORT_META (script_p, script_p->refs_and_type);
ecma_object_t *import_meta_object_p = ecma_get_object_from_value (import_meta);
if (ecma_get_object_type (import_meta_object_p) != ECMA_OBJECT_TYPE_GENERAL)
{
JERRY_ASSERT (ecma_object_class_is (import_meta_object_p, ECMA_OBJECT_CLASS_MODULE));
ecma_value_t module = import_meta;
import_meta_object_p = ecma_create_object (NULL, 0, ECMA_OBJECT_TYPE_GENERAL);
import_meta = ecma_make_object_value (import_meta_object_p);
if (JERRY_CONTEXT (module_import_meta_callback_p) != NULL)
{
void *user_p = JERRY_CONTEXT (module_import_meta_callback_user_p);
JERRY_CONTEXT (module_import_meta_callback_p) (module, import_meta, user_p);
}
CBC_SCRIPT_GET_IMPORT_META (script_p, script_p->refs_and_type) = import_meta;
}
else
{
ecma_ref_object (import_meta_object_p);
}
*stack_top_p++ = import_meta;
continue;
}
#endif /* JERRY_MODULE_SYSTEM */
#if JERRY_DEBUGGER
case VM_OC_BREAKPOINT_ENABLED:
{
if (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_VM_IGNORE)
{
continue;
}
JERRY_ASSERT (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_CONNECTED);
JERRY_ASSERT (!(frame_ctx_p->shared_p->bytecode_header_p->status_flags & CBC_CODE_FLAGS_DEBUGGER_IGNORE));
frame_ctx_p->byte_code_p = byte_code_start_p;
jerry_debugger_breakpoint_hit (JERRY_DEBUGGER_BREAKPOINT_HIT);
if (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_VM_EXCEPTION_THROWN)
{
result = ECMA_VALUE_ERROR;
goto error;
}
continue;
}
case VM_OC_BREAKPOINT_DISABLED:
{
if (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_VM_IGNORE)
{
continue;
}
JERRY_ASSERT (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_CONNECTED);
JERRY_ASSERT (!(frame_ctx_p->shared_p->bytecode_header_p->status_flags & CBC_CODE_FLAGS_DEBUGGER_IGNORE));
frame_ctx_p->byte_code_p = byte_code_start_p;
if ((JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_VM_STOP)
&& (JERRY_CONTEXT (debugger_stop_context) == NULL
|| JERRY_CONTEXT (debugger_stop_context) == JERRY_CONTEXT (vm_top_context_p)))
{
jerry_debugger_breakpoint_hit (JERRY_DEBUGGER_BREAKPOINT_HIT);
if (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_VM_EXCEPTION_THROWN)
{
result = ECMA_VALUE_ERROR;
goto error;
}
continue;
}
if (JERRY_CONTEXT (debugger_message_delay) > 0)
{
JERRY_CONTEXT (debugger_message_delay)--;
continue;
}
JERRY_CONTEXT (debugger_message_delay) = JERRY_DEBUGGER_MESSAGE_FREQUENCY;
if (jerry_debugger_receive (NULL))
{
continue;
}
if ((JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_VM_STOP)
&& (JERRY_CONTEXT (debugger_stop_context) == NULL
|| JERRY_CONTEXT (debugger_stop_context) == JERRY_CONTEXT (vm_top_context_p)))
{
jerry_debugger_breakpoint_hit (JERRY_DEBUGGER_BREAKPOINT_HIT);
if (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_VM_EXCEPTION_THROWN)
{
result = ECMA_VALUE_ERROR;
goto error;
}
}
continue;
}
#endif /* JERRY_DEBUGGER */
case VM_OC_NONE:
default:
{
JERRY_ASSERT (VM_OC_GROUP_GET_INDEX (opcode_data) == VM_OC_NONE);
jerry_fatal (ERR_DISABLED_BYTE_CODE);
}
}
JERRY_ASSERT (VM_OC_HAS_PUT_RESULT (opcode_data));
if (opcode_data & VM_OC_PUT_IDENT)
{
uint16_t literal_index;
READ_LITERAL_INDEX (literal_index);
if (literal_index < register_end)
{
ecma_fast_free_value (VM_GET_REGISTER (frame_ctx_p, literal_index));
VM_GET_REGISTER (frame_ctx_p, literal_index) = result;
if (opcode_data & (VM_OC_PUT_STACK | VM_OC_PUT_BLOCK))
{
result = ecma_fast_copy_value (result);
}
}
else
{
ecma_string_t *var_name_str_p = ecma_get_string_from_value (literal_start_p[literal_index]);
ecma_value_t put_value_result = ecma_op_put_value_lex_env_base (frame_ctx_p->lex_env_p,
var_name_str_p,
is_strict,
result);
if (ECMA_IS_VALUE_ERROR (put_value_result))
{
ecma_free_value (result);
result = put_value_result;
goto error;
}
if (!(opcode_data & (VM_OC_PUT_STACK | VM_OC_PUT_BLOCK)))
{
ecma_fast_free_value (result);
}
}
}
else if (opcode_data & VM_OC_PUT_REFERENCE)
{
ecma_value_t property = *(--stack_top_p);
ecma_value_t base = *(--stack_top_p);
if (base == ECMA_VALUE_REGISTER_REF)
{
property = (ecma_value_t) ecma_get_integer_from_value (property);
ecma_fast_free_value (VM_GET_REGISTER (frame_ctx_p, property));
VM_GET_REGISTER (frame_ctx_p, property) = result;
if (!(opcode_data & (VM_OC_PUT_STACK | VM_OC_PUT_BLOCK)))
{
goto free_both_values;
}
result = ecma_fast_copy_value (result);
}
else
{
ecma_value_t set_value_result = vm_op_set_value (base,
property,
result,
is_strict);
if (ECMA_IS_VALUE_ERROR (set_value_result))
{
ecma_free_value (result);
result = set_value_result;
goto error;
}
if (!(opcode_data & (VM_OC_PUT_STACK | VM_OC_PUT_BLOCK)))
{
ecma_fast_free_value (result);
goto free_both_values;
}
}
}
if (opcode_data & VM_OC_PUT_STACK)
{
*stack_top_p++ = result;
}
else if (opcode_data & VM_OC_PUT_BLOCK)
{
ecma_fast_free_value (VM_GET_REGISTER (frame_ctx_p, 0));
VM_GET_REGISTERS (frame_ctx_p)[0] = result;
}
free_both_values:
ecma_fast_free_value (right_value);
free_left_value:
ecma_fast_free_value (left_value);
}
error:
ecma_fast_free_value (left_value);
ecma_fast_free_value (right_value);
if (ECMA_IS_VALUE_ERROR (result))
{
JERRY_ASSERT (jcontext_has_pending_exception ());
ecma_value_t *stack_bottom_p = VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth;
while (stack_top_p > stack_bottom_p)
{
ecma_value_t stack_item = *(--stack_top_p);
#if JERRY_ESNEXT
if (stack_item == ECMA_VALUE_RELEASE_LEX_ENV)
{
opfunc_pop_lexical_environment (frame_ctx_p);
continue;
}
#endif /* JERRY_ESNEXT */
ecma_fast_free_value (stack_item);
}
#if JERRY_VM_THROW
if (!(JERRY_CONTEXT (status_flags) & ECMA_STATUS_ERROR_THROWN))
{
JERRY_CONTEXT (status_flags) |= ECMA_STATUS_ERROR_THROWN;
jerry_vm_throw_callback_t vm_throw_callback_p = JERRY_CONTEXT (vm_throw_callback_p);
if (vm_throw_callback_p != NULL)
{
vm_throw_callback_p (JERRY_CONTEXT (error_value), JERRY_CONTEXT (vm_throw_callback_user_p));
}
}
#endif /* JERRY_VM_THROW */
#if JERRY_DEBUGGER
const uint32_t dont_stop = (JERRY_DEBUGGER_VM_IGNORE_EXCEPTION
| JERRY_DEBUGGER_VM_IGNORE
| JERRY_DEBUGGER_VM_EXCEPTION_THROWN);
if ((JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_CONNECTED)
&& !(frame_ctx_p->shared_p->bytecode_header_p->status_flags
& (CBC_CODE_FLAGS_DEBUGGER_IGNORE | CBC_CODE_FLAGS_STATIC_FUNCTION))
&& !(JERRY_CONTEXT (debugger_flags) & dont_stop))
{
          /* Save the error to a local value, because the engine enters breakpoint mode afterwards,
           where an evaluation error or a user-created error throw would overwrite it. */
ecma_value_t current_error_value = JERRY_CONTEXT (error_value);
if (jerry_debugger_send_exception_string (current_error_value))
{
jerry_debugger_breakpoint_hit (JERRY_DEBUGGER_EXCEPTION_HIT);
if (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_VM_EXCEPTION_THROWN)
{
ecma_free_value (current_error_value);
}
else
{
JERRY_CONTEXT (error_value) = current_error_value;
}
JERRY_DEBUGGER_SET_FLAGS (JERRY_DEBUGGER_VM_EXCEPTION_THROWN);
}
}
#endif /* JERRY_DEBUGGER */
}
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
if (frame_ctx_p->context_depth == 0)
{
/* In most cases there is no context. */
frame_ctx_p->call_operation = VM_NO_EXEC_OP;
return result;
}
if (!ECMA_IS_VALUE_ERROR (result))
{
switch (vm_stack_find_finally (frame_ctx_p,
stack_top_p,
VM_CONTEXT_FINALLY_RETURN,
0))
{
case VM_CONTEXT_FOUND_FINALLY:
{
stack_top_p = frame_ctx_p->stack_top_p;
byte_code_p = frame_ctx_p->byte_code_p;
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FINALLY_RETURN);
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
stack_top_p[-2] = result;
continue;
}
#if JERRY_ESNEXT
case VM_CONTEXT_FOUND_ERROR:
{
JERRY_ASSERT (jcontext_has_pending_exception ());
ecma_free_value (result);
stack_top_p = frame_ctx_p->stack_top_p;
result = ECMA_VALUE_ERROR;
break;
}
case VM_CONTEXT_FOUND_AWAIT:
{
stack_top_p = frame_ctx_p->stack_top_p;
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FINALLY_RETURN);
stack_top_p[-2] = result;
return ECMA_VALUE_UNDEFINED;
}
#endif /* JERRY_ESNEXT */
default:
{
goto finish;
}
}
}
JERRY_ASSERT (jcontext_has_pending_exception ());
if (!jcontext_has_pending_abort ())
{
switch (vm_stack_find_finally (frame_ctx_p,
stack_top_p,
VM_CONTEXT_FINALLY_THROW,
0))
{
case VM_CONTEXT_FOUND_FINALLY:
{
stack_top_p = frame_ctx_p->stack_top_p;
byte_code_p = frame_ctx_p->byte_code_p;
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
JERRY_ASSERT (!(stack_top_p[-1] & VM_CONTEXT_HAS_LEX_ENV));
#if JERRY_DEBUGGER
JERRY_DEBUGGER_CLEAR_FLAGS (JERRY_DEBUGGER_VM_EXCEPTION_THROWN);
#endif /* JERRY_DEBUGGER */
result = jcontext_take_exception ();
if (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_FINALLY_THROW)
{
stack_top_p[-2] = result;
continue;
}
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (stack_top_p[-1]) == VM_CONTEXT_CATCH);
*stack_top_p++ = result;
continue;
}
#if JERRY_ESNEXT
case VM_CONTEXT_FOUND_AWAIT:
{
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (frame_ctx_p->stack_top_p[-1]) == VM_CONTEXT_FINALLY_THROW);
return ECMA_VALUE_UNDEFINED;
}
#endif /* JERRY_ESNEXT */
default:
{
break;
}
}
}
else
{
do
{
JERRY_ASSERT (VM_GET_REGISTERS (frame_ctx_p) + register_end + frame_ctx_p->context_depth == stack_top_p);
stack_top_p = vm_stack_context_abort (frame_ctx_p, stack_top_p);
}
while (frame_ctx_p->context_depth > 0);
}
finish:
frame_ctx_p->call_operation = VM_NO_EXEC_OP;
return result;
}
} /* vm_loop */
| null | null | 197,989 | 117248586394637887953246786581777711038 | 3,977 |
Fix for-in collection cleanup on abrupt 'has' result (#4807)
This patch fixes #4747
JerryScript-DCO-1.0-Signed-off-by: Robert Fancsik [email protected]
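A minimal standalone sketch of the cleanup pattern this fix message implies: when the for-in 'has' check completes abruptly, the property-name collection must still be released. All types and names below are illustrative stand-ins, not JerryScript internals.

#include <cstdio>
#include <stdexcept>
#include <vector>

// Hypothetical stand-in for the engine's property-name collection.
struct Collection { std::vector<int> names; };

// Models a 'has' check that completes abruptly (throws) on one name.
static bool has_property(int name) {
  if (name == 2) throw std::runtime_error("abrupt completion");
  return true;
}

int main() {
  Collection *coll = new Collection{{1, 2, 3}};
  try {
    for (int n : coll->names) (void) has_property(n);
  } catch (const std::exception &) {
    delete coll;     // the essence of the fix: free the collection on the
    coll = nullptr;  // abrupt path too, not only after a normal completion
  }
  delete coll;       // deleting nullptr is a no-op
  std::puts("collection released on both paths");
  return 0;
}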
| other |
| libjxl | 1c05e110d69b457696366fb4e762057b6855349b | 1 |
Status ModularFrameDecoder::DecodeGroup(const Rect& rect, BitReader* reader,
int minShift, int maxShift,
const ModularStreamId& stream,
bool zerofill,
PassesDecoderState* dec_state,
ImageBundle* output) {
JXL_DASSERT(stream.kind == ModularStreamId::kModularDC ||
stream.kind == ModularStreamId::kModularAC);
const size_t xsize = rect.xsize();
const size_t ysize = rect.ysize();
Image gi(xsize, ysize, full_image.bitdepth, 0);
// start at the first bigger-than-groupsize non-metachannel
size_t c = full_image.nb_meta_channels;
for (; c < full_image.channel.size(); c++) {
Channel& fc = full_image.channel[c];
if (fc.w > frame_dim.group_dim || fc.h > frame_dim.group_dim) break;
}
size_t beginc = c;
for (; c < full_image.channel.size(); c++) {
Channel& fc = full_image.channel[c];
int shift = std::min(fc.hshift, fc.vshift);
if (shift > maxShift) continue;
if (shift < minShift) continue;
Rect r(rect.x0() >> fc.hshift, rect.y0() >> fc.vshift,
rect.xsize() >> fc.hshift, rect.ysize() >> fc.vshift, fc.w, fc.h);
if (r.xsize() == 0 || r.ysize() == 0) continue;
Channel gc(r.xsize(), r.ysize());
gc.hshift = fc.hshift;
gc.vshift = fc.vshift;
gi.channel.emplace_back(std::move(gc));
}
if (zerofill) {
int gic = 0;
for (c = beginc; c < full_image.channel.size(); c++) {
Channel& fc = full_image.channel[c];
int shift = std::min(fc.hshift, fc.vshift);
if (shift > maxShift) continue;
if (shift < minShift) continue;
Rect r(rect.x0() >> fc.hshift, rect.y0() >> fc.vshift,
rect.xsize() >> fc.hshift, rect.ysize() >> fc.vshift, fc.w, fc.h);
if (r.xsize() == 0 || r.ysize() == 0) continue;
for (size_t y = 0; y < r.ysize(); ++y) {
pixel_type* const JXL_RESTRICT row_out = r.Row(&fc.plane, y);
memset(row_out, 0, r.xsize() * sizeof(*row_out));
}
gic++;
}
return true;
}
ModularOptions options;
if (!ModularGenericDecompress(
reader, gi, /*header=*/nullptr, stream.ID(frame_dim), &options,
/*undo_transforms=*/-1, &tree, &code, &context_map))
return JXL_FAILURE("Failed to decode modular group");
if (minShift == 0) {
// Undo global transforms that have been pushed to the group level
for (auto t : global_transform) {
JXL_RETURN_IF_ERROR(t.Inverse(gi, global_header.wp_header));
}
if (!use_full_image) {
JXL_RETURN_IF_ERROR(ModularImageToDecodedRect(
gi, dec_state, nullptr, output, rect.Crop(dec_state->decoded)));
return true;
}
}
int gic = 0;
for (c = beginc; c < full_image.channel.size(); c++) {
Channel& fc = full_image.channel[c];
int shift = std::min(fc.hshift, fc.vshift);
if (shift > maxShift) continue;
if (shift < minShift) continue;
Rect r(rect.x0() >> fc.hshift, rect.y0() >> fc.vshift,
rect.xsize() >> fc.hshift, rect.ysize() >> fc.vshift, fc.w, fc.h);
if (r.xsize() == 0 || r.ysize() == 0) continue;
CopyImageTo(/*rect_from=*/Rect(0, 0, r.xsize(), r.ysize()),
/*from=*/gi.channel[gic].plane,
/*rect_to=*/r, /*to=*/&fc.plane);
gic++;
}
return true;
}
| null | null | 197,992 | 21178916405896710995060758052833004149 | 81 |
fix use_full_image==false case (#365)
Some fixes to the case where the full modular image is skipped:
- don't assume that everything happens at the modular AC group level (minShift==0); upsampling can also leave channels with nonzero shift even when there's no Squeeze
- for partial decodes (when zerofill is true), don't try to fill the full image when it's not used. Instead initialize the decoded image with zeroes and skip the decoding.
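A minimal standalone sketch of the zerofill fast path described in the second bullet: when the full image is skipped, zero the decoded rectangle directly instead of filling the unused full image. The helper below is an illustration, not libjxl code.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Zero-fill a sub-rectangle of a row-major plane, row by row.
static void ZeroFillRect(std::vector<int32_t> &plane, size_t stride,
                         size_t x0, size_t y0, size_t w, size_t h) {
  for (size_t y = 0; y < h; ++y)
    std::memset(&plane[(y0 + y) * stride + x0], 0, w * sizeof(int32_t));
}

int main() {
  std::vector<int32_t> plane(8 * 8, -1);
  ZeroFillRect(plane, /*stride=*/8, /*x0=*/2, /*y0=*/2, /*w=*/4, /*h=*/4);
  // Inside the rect is zeroed, outside keeps its old value.
  std::printf("inside=%d outside=%d\n", (int) plane[2 * 8 + 2], (int) plane[0]);
  return 0;
}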
| other |
| tensorflow | 704866eabe03a9aeda044ec91a8d0c83fc1ebdbe | 1 |
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const TensorShape& input_shape = input.shape();
const int32 input_dims = input_shape.dims();
const Tensor& segment_id = context->input(1);
const TensorShape& segment_id_shape = segment_id.shape();
const int32 segment_dims = segment_id_shape.dims();
const Tensor& num_segments_tensor = context->input(2);
auto num_segments = num_segments_tensor.scalar<NUM_SEGMENTS_TYPE>()();
OP_REQUIRES(context, segment_dims != 0,
errors::InvalidArgument("Segment_id cannot have rank 0"));
OP_REQUIRES(
context, segment_dims <= input_dims,
errors::OutOfRange("Invalid segment_id rank ", segment_dims,
" for input with ", input_dims, " dimension(s)"));
for (auto i = 0; i < segment_dims; i++) {
OP_REQUIRES(
context, segment_id_shape.dim_size(i) == input_shape.dim_size(i),
errors::InvalidArgument(
"Segment dimension is ", segment_id_shape.dim_size(i),
" while input dimension is ", input_dims, " in rank ", i));
}
// Making output tensor.
Tensor* output_tensor = nullptr;
TensorShape output_shape =
GetOutputShape(input_shape, segment_id_shape, num_segments);
OP_REQUIRES_OK(context, context->allocate_output("output", output_shape,
&output_tensor));
    // Prepare the flat tensors.
auto output_flat = output_tensor->flat<tstring>();
auto flat_segment_id = segment_id.flat<INDICES_TYPE>();
auto flat_input = input.flat<tstring>();
for (int i = 0; i < flat_segment_id.size(); i++) {
OP_REQUIRES(
context,
((flat_segment_id(i) < num_segments) && (flat_segment_id(i) >= 0)),
errors::InvalidArgument(
"segment_ids are not allowed to exceed num_segments or"
" to have negative values."));
}
int64 big_stride;
int64 small_stride;
std::tie(big_stride, small_stride) =
GetStrides<INDICES_TYPE>(input_shape, segment_id_shape);
auto relative_offset_set =
GetFlattenedRelativeOffsets<INDICES_TYPE>(small_stride, big_stride);
for (auto start_offset = 0; start_offset < big_stride; start_offset++) {
for (auto i = 0; i < relative_offset_set.size(); i++) {
auto output_index = start_offset + flat_segment_id(i) * big_stride;
auto offset = start_offset + relative_offset_set[i];
if (output_flat(output_index).length() != 0)
output_flat(output_index).append(separator_.c_str());
output_flat(output_index).append(flat_input(offset));
}
}
}
| null | null | 197,998 | 63466493117126952833887281584130653418 | 64 |
Fix overflow CHECK issue with `tf.raw_ops.UnsortedSegmentJoin`.
PiperOrigin-RevId: 370766155
Change-Id: I33e7c6626224e1060a8a4ab51ad5d861c6d4c63e
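A standalone sketch of the overflow-checked validation this fix implies: compute the element count with an explicit overflow test and reject bad shapes up front, instead of letting a CHECK fail later. The helper is illustrative, not the TensorFlow patch.

#include <cstdint>
#include <cstdio>
#include <limits>

// Overflow-checked product of dimension sizes; false means reject.
static bool SafeNumElements(const int64_t *dims, int n, int64_t *out) {
  int64_t total = 1;
  for (int i = 0; i < n; ++i) {
    if (dims[i] < 0) return false;  // negative dims are invalid
    if (dims[i] != 0 &&
        total > std::numeric_limits<int64_t>::max() / dims[i])
      return false;                 // product would overflow int64
    total *= dims[i];
  }
  *out = total;
  return true;
}

int main() {
  int64_t dims[] = {1LL << 30, 1LL << 30, 1LL << 30};
  int64_t total = 0;
  std::printf("accepted=%d\n", SafeNumElements(dims, 3, &total));  // 0
  return 0;
}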
| other |
| open5gs | c0f695525088486c509f37a02ff1fda211b141bb | 1 |
char *ogs_nas_5gs_suci_from_mobile_identity(
ogs_nas_5gs_mobile_identity_t *mobile_identity)
{
ogs_nas_5gs_mobile_identity_suci_t *mobile_identity_suci = NULL;
ogs_plmn_id_t plmn_id;
char tmp[OGS_MAX_IMSI_BCD_LEN+1];
char routing_indicator[5];
char *suci = NULL;
ogs_assert(mobile_identity);
mobile_identity_suci =
(ogs_nas_5gs_mobile_identity_suci_t *)mobile_identity->buffer;
ogs_assert(mobile_identity_suci);
ogs_expect_or_return_val(mobile_identity_suci->h.supi_format ==
OGS_NAS_5GS_SUPI_FORMAT_IMSI, NULL);
ogs_expect_or_return_val(mobile_identity_suci->protection_scheme_id ==
OGS_NAS_5GS_NULL_SCHEME, NULL);
suci = ogs_msprintf("suci-%d-", mobile_identity_suci->h.supi_format);
ogs_expect_or_return_val(suci, NULL);
ogs_nas_to_plmn_id(&plmn_id, &mobile_identity_suci->nas_plmn_id);
if (ogs_plmn_id_mnc_len(&plmn_id) == 2) {
suci = ogs_mstrcatf(suci, "%03d-%02d-",
ogs_plmn_id_mcc(&plmn_id), ogs_plmn_id_mnc(&plmn_id));
ogs_expect_or_return_val(suci, NULL);
} else {
suci = ogs_mstrcatf(suci, "%03d-%03d-",
ogs_plmn_id_mcc(&plmn_id), ogs_plmn_id_mnc(&plmn_id));
ogs_expect_or_return_val(suci, NULL);
}
memset(routing_indicator, 0, sizeof(routing_indicator));
if (mobile_identity_suci->routing_indicator1 != 0xf) {
routing_indicator[0] =
mobile_identity_suci->routing_indicator1 + '0';
if (mobile_identity_suci->routing_indicator2 != 0xf) {
routing_indicator[1] =
mobile_identity_suci->routing_indicator2 + '0';
if (mobile_identity_suci->routing_indicator3 != 0xf) {
routing_indicator[2] =
mobile_identity_suci->routing_indicator3 + '0';
if (mobile_identity_suci->routing_indicator4 != 0xf)
routing_indicator[3] =
mobile_identity_suci->routing_indicator4 + '0';
}
}
}
ogs_expect_or_return_val(mobile_identity->length > 8, NULL);
ogs_buffer_to_bcd(mobile_identity_suci->scheme_output,
mobile_identity->length - 8, tmp);
suci = ogs_mstrcatf(suci, "%s-%d-%d-%s",
routing_indicator,
mobile_identity_suci->protection_scheme_id,
mobile_identity_suci->home_network_pki_value,
tmp);
ogs_expect(suci);
return suci;
}
| null | null | 197,999 | 22172238106439330702976732210944258616 | 64 |
[AMF] fix the crash when the MSIN is too long (#1206)
During the UE's initial registration,
if the length of the MSIN (part of the SUPI) exceeds the normal length,
stack smashing occurs in the AMF
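A standalone sketch of the missing length guard: the BCD expansion writes two digits per input byte into a fixed-size buffer, so the input length must be bounded before the conversion. The constant and helper below are assumptions, not the open5gs patch.

#include <cstdint>
#include <cstdio>

#define MAX_IMSI_BCD_LEN 15  // assumed stand-in for OGS_MAX_IMSI_BCD_LEN

// Bounded BCD expansion: reject inputs that would overflow the buffer.
static bool buffer_to_bcd_checked(const uint8_t *in, size_t len,
                                  char out[MAX_IMSI_BCD_LEN + 1]) {
  if (len * 2 > MAX_IMSI_BCD_LEN) return false;  // the missing guard
  size_t o = 0;
  for (size_t i = 0; i < len; ++i) {
    out[o++] = (char) ('0' + (in[i] & 0x0f));
    out[o++] = (char) ('0' + (in[i] >> 4));
  }
  out[o] = '\0';
  return true;
}

int main() {
  uint8_t msin[20] = {0};  // longer than any valid MSIN
  char tmp[MAX_IMSI_BCD_LEN + 1];
  std::printf("accepted=%d\n", buffer_to_bcd_checked(msin, sizeof msin, tmp));  // 0
  return 0;
}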
| other |
| tensorflow | e86605c0a336c088b638da02135ea6f9f6753618 | 1 |
void Compute(OpKernelContext* ctx) override {
auto x = ctx->input(0);
auto i = ctx->input(1);
auto v = ctx->input(2);
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(i.shape()),
errors::InvalidArgument("i must be a vector. ",
i.shape().DebugString()));
OP_REQUIRES(ctx, x.dims() == v.dims(),
errors::InvalidArgument(
"x and v shape doesn't match (ranks differ): ",
x.shape().DebugString(), " vs. ", v.shape().DebugString()));
for (int i = 1; i < x.dims(); ++i) {
OP_REQUIRES(
ctx, x.dim_size(i) == v.dim_size(i),
errors::InvalidArgument("x and v shape doesn't match at index ", i,
" : ", x.shape().DebugString(), " vs. ",
v.shape().DebugString()));
}
OP_REQUIRES(ctx, i.dim_size(0) == v.dim_size(0),
errors::InvalidArgument(
"i and x shape doesn't match at index 0: ",
i.shape().DebugString(), " vs. ", v.shape().DebugString()));
Tensor y = x; // This creates an alias intentionally.
// Skip processing if tensors are empty.
if (x.NumElements() > 0 || v.NumElements() > 0) {
OP_REQUIRES_OK(ctx, DoCompute(ctx, i, v, &y));
}
ctx->set_output(0, y);
}
| null | null | 198,003 | 93663684494140260682969182544172664356 | 31 |
Fix FPE in in-place update ops.
PiperOrigin-RevId: 388303197
Change-Id: Ib48309b6213ffe53eba81004b00e889d653e4b83
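A minimal sketch of the floating-point-exception class being fixed: an integer division or modulo by a zero-sized dimension traps on x86, so the emptiness guard must run before any such arithmetic. Illustrative only, not the patch itself.

#include <cstdint>
#include <cstdio>

// Guarded modulo: never divide by a dimension that may be zero.
static int64_t safe_mod(int64_t a, int64_t b, bool *ok) {
  if (b == 0) {  // an empty tensor dimension would raise SIGFPE here
    *ok = false;
    return 0;
  }
  *ok = true;
  return a % b;
}

int main() {
  bool ok = true;
  (void) safe_mod(7, 0, &ok);
  std::printf("accepted=%d\n", ok);  // 0: rejected instead of trapping
  return 0;
}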
| other |
| tensorflow | b5cdbf12ffcaaffecf98f22a6be5a64bb96e4f58 | 1 |
void Compute(OpKernelContext* context) override {
// boxes: [batch_size, num_anchors, q, 4]
const Tensor& boxes = context->input(0);
// scores: [batch_size, num_anchors, num_classes]
const Tensor& scores = context->input(1);
OP_REQUIRES(
context, (boxes.dim_size(0) == scores.dim_size(0)),
errors::InvalidArgument("boxes and scores must have same batch size"));
// max_output_size: scalar
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_size_per_class must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const int max_size_per_class = max_output_size.scalar<int>()();
// max_total_size: scalar
const Tensor& max_total_size = context->input(3);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_total_size.shape()),
errors::InvalidArgument("max_total_size must be 0-D, got shape ",
max_total_size.shape().DebugString()));
const int max_total_size_per_batch = max_total_size.scalar<int>()();
OP_REQUIRES(context, max_total_size_per_batch > 0,
errors::InvalidArgument("max_total_size must be > 0"));
// Throw warning when `max_total_size` is too large as it may cause OOM.
if (max_total_size_per_batch > pow(10, 6)) {
LOG(WARNING) << "Detected a large value for `max_total_size`. This may "
<< "cause OOM error. (max_total_size: "
<< max_total_size.scalar<int>()() << ")";
}
// iou_threshold: scalar
const Tensor& iou_threshold = context->input(4);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const float iou_threshold_val = iou_threshold.scalar<float>()();
// score_threshold: scalar
const Tensor& score_threshold = context->input(5);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const float score_threshold_val = score_threshold.scalar<float>()();
OP_REQUIRES(context, iou_threshold_val >= 0 && iou_threshold_val <= 1,
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
const int num_classes = scores.dim_size(2);
ParseAndCheckCombinedNMSBoxSizes(context, boxes, &num_boxes, num_classes);
CheckCombinedNMSScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
BatchedNonMaxSuppressionOp(context, boxes, scores, num_boxes,
max_size_per_class, max_total_size_per_batch,
score_threshold_val, iou_threshold_val,
pad_per_class_, clip_boxes_);
}
| null | null | 198,004 | 154917976059736074963847009813784787458 | 61 |
Prevent overflow due to integer conversion to unsigned.
PiperOrigin-RevId: 387738045
Change-Id: Id7e95bc07e02df1c66b72bd09f389608c87bdebe
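A standalone sketch of the conversion hazard named in the message: a negative int converted to an unsigned size wraps silently to a huge value, so the signed value has to be validated before any conversion. Illustrative, not the patch.

#include <cstddef>
#include <cstdio>

// Validate-then-convert: reject negatives instead of letting them wrap.
static bool checked_to_size(int v, size_t *out) {
  if (v < 0) return false;
  *out = (size_t) v;
  return true;
}

int main() {
  size_t s = 0;
  std::printf("negative accepted=%d\n", checked_to_size(-1, &s));  // 0
  std::printf("wrapped value would be %zu\n", (size_t) -1);        // huge
  return 0;
}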
| other |
| radare2 | 193f4fe01d7f626e2ea937450f2e0c4604420e9d | 1 |
static int string_scan_range(RList *list, RBinFile *bf, int min,
const ut64 from, const ut64 to, int type, int raw, RBinSection *section) {
RBin *bin = bf->rbin;
ut8 tmp[R_STRING_SCAN_BUFFER_SIZE];
ut64 str_start, needle = from;
int count = 0, i, rc, runes;
int str_type = R_STRING_TYPE_DETECT;
	// if list is NULL it means it is going to dump
r_return_val_if_fail (bf, -1);
if (type == -1) {
type = R_STRING_TYPE_DETECT;
}
if (from == to) {
return 0;
}
if (from > to) {
eprintf ("Invalid range to find strings 0x%"PFMT64x" .. 0x%"PFMT64x"\n", from, to);
return -1;
}
st64 len = (st64)(to - from);
if (len < 1 || len > ST32_MAX) {
eprintf ("String scan range is invalid (%"PFMT64d" bytes)\n", len);
return -1;
}
ut8 *buf = calloc (len, 1);
if (!buf || !min) {
free (buf);
return -1;
}
st64 vdelta = 0, pdelta = 0;
RBinSection *s = NULL;
bool ascii_only = false;
PJ *pj = NULL;
if (bf->strmode == R_MODE_JSON && !list) {
pj = pj_new ();
if (pj) {
pj_a (pj);
}
}
r_buf_read_at (bf->buf, from, buf, len);
char *charset = r_sys_getenv ("RABIN2_CHARSET");
if (!R_STR_ISEMPTY (charset)) {
RCharset *ch = r_charset_new ();
if (r_charset_use (ch, charset)) {
int outlen = len * 4;
ut8 *out = calloc (len, 4);
if (out) {
int res = r_charset_encode_str (ch, out, outlen, buf, len);
int i;
// TODO unknown chars should be translated to null bytes
for (i = 0; i < res; i++) {
if (out[i] == '?') {
out[i] = 0;
}
}
len = res;
free (buf);
buf = out;
} else {
eprintf ("Cannot allocate\n");
}
} else {
eprintf ("Invalid value for RABIN2_CHARSET.\n");
}
r_charset_free (ch);
}
free (charset);
RConsIsBreaked is_breaked = (bin && bin->consb.is_breaked)? bin->consb.is_breaked: NULL;
// may oobread
while (needle < to) {
if (is_breaked && is_breaked ()) {
break;
}
// smol optimization
if (needle + 4 < to) {
ut32 n1 = r_read_le32 (buf + needle - from);
if (!n1) {
needle += 4;
continue;
}
}
rc = r_utf8_decode (buf + needle - from, to - needle, NULL);
if (!rc) {
needle++;
continue;
}
bool addr_aligned = !(needle % 4);
if (type == R_STRING_TYPE_DETECT) {
char *w = (char *)buf + needle + rc - from;
if (((to - needle) > 8 + rc)) {
// TODO: support le and be
bool is_wide32le = (needle + rc + 2 < to) && (!w[0] && !w[1] && !w[2] && w[3] && !w[4]);
// reduce false positives
if (is_wide32le) {
if (!w[5] && !w[6] && w[7] && w[8]) {
is_wide32le = false;
}
}
if (!addr_aligned) {
is_wide32le = false;
}
///is_wide32be &= (n1 < 0xff && n11 < 0xff); // false; // n11 < 0xff;
if (is_wide32le && addr_aligned) {
				str_type = R_STRING_TYPE_WIDE32; // assume big endian; is there a little endian w32?
} else {
// bool is_wide = (n1 && n2 && n1 < 0xff && (!n2 || n2 < 0xff));
bool is_wide = needle + rc + 4 < to && !w[0] && w[1] && !w[2] && w[3] && !w[4];
str_type = is_wide? R_STRING_TYPE_WIDE: R_STRING_TYPE_ASCII;
}
} else {
if (rc > 1) {
str_type = R_STRING_TYPE_UTF8; // could be charset if set :?
} else {
str_type = R_STRING_TYPE_ASCII;
}
}
} else if (type == R_STRING_TYPE_UTF8) {
str_type = R_STRING_TYPE_ASCII; // initial assumption
} else {
str_type = type;
}
runes = 0;
str_start = needle;
/* Eat a whole C string */
for (i = 0; i < sizeof (tmp) - 4 && needle < to; i += rc) {
RRune r = {0};
if (str_type == R_STRING_TYPE_WIDE32) {
rc = r_utf32le_decode (buf + needle - from, to - needle, &r);
if (rc) {
rc = 4;
}
} else if (str_type == R_STRING_TYPE_WIDE) {
rc = r_utf16le_decode (buf + needle - from, to - needle, &r);
if (rc == 1) {
rc = 2;
}
} else {
rc = r_utf8_decode (buf + needle - from, to - needle, &r);
if (rc > 1) {
str_type = R_STRING_TYPE_UTF8;
}
}
/* Invalid sequence detected */
if (!rc || (ascii_only && r > 0x7f)) {
needle++;
break;
}
needle += rc;
if (r_isprint (r) && r != '\\') {
if (str_type == R_STRING_TYPE_WIDE32) {
if (r == 0xff) {
r = 0;
}
}
rc = r_utf8_encode (tmp + i, r);
runes++;
/* Print the escape code */
} else if (r && r < 0x100 && strchr ("\b\v\f\n\r\t\a\033\\", (char)r)) {
if ((i + 32) < sizeof (tmp) && r < 93) {
tmp[i + 0] = '\\';
tmp[i + 1] = " abtnvfr e "
" "
" "
" \\"[r];
} else {
// string too long
break;
}
rc = 2;
runes++;
} else {
/* \0 marks the end of C-strings */
break;
}
}
tmp[i++] = '\0';
if (runes < min && runes >= 2 && str_type == R_STRING_TYPE_ASCII && needle < to) {
// back up past the \0 to the last char just in case it starts a wide string
needle -= 2;
}
if (runes >= min) {
// reduce false positives
int j, num_blocks, *block_list;
int *freq_list = NULL, expected_ascii, actual_ascii, num_chars;
if (str_type == R_STRING_TYPE_ASCII) {
for (j = 0; j < i; j++) {
char ch = tmp[j];
if (ch != '\n' && ch != '\r' && ch != '\t') {
if (!IS_PRINTABLE (tmp[j])) {
continue;
}
}
}
}
switch (str_type) {
case R_STRING_TYPE_UTF8:
case R_STRING_TYPE_WIDE:
case R_STRING_TYPE_WIDE32:
num_blocks = 0;
block_list = r_utf_block_list ((const ut8*)tmp, i - 1,
str_type == R_STRING_TYPE_WIDE? &freq_list: NULL);
if (block_list) {
for (j = 0; block_list[j] != -1; j++) {
num_blocks++;
}
}
if (freq_list) {
num_chars = 0;
actual_ascii = 0;
for (j = 0; freq_list[j] != -1; j++) {
num_chars += freq_list[j];
if (!block_list[j]) { // ASCII
actual_ascii = freq_list[j];
}
}
free (freq_list);
expected_ascii = num_blocks ? num_chars / num_blocks : 0;
if (actual_ascii > expected_ascii) {
ascii_only = true;
needle = str_start;
free (block_list);
continue;
}
}
free (block_list);
if (num_blocks > R_STRING_MAX_UNI_BLOCKS) {
needle++;
continue;
}
}
RBinString *bs = R_NEW0 (RBinString);
if (!bs) {
break;
}
bs->type = str_type;
bs->length = runes;
bs->size = needle - str_start;
bs->ordinal = count++;
// TODO: move into adjust_offset
switch (str_type) {
case R_STRING_TYPE_WIDE:
if (str_start - from > 1) {
const ut8 *p = buf + str_start - 2 - from;
if (p[0] == 0xff && p[1] == 0xfe) {
str_start -= 2; // \xff\xfe
}
}
break;
case R_STRING_TYPE_WIDE32:
if (str_start - from > 3) {
const ut8 *p = buf + str_start - 4 - from;
if (p[0] == 0xff && p[1] == 0xfe) {
str_start -= 4; // \xff\xfe\x00\x00
}
}
break;
}
if (!s) {
if (section) {
s = section;
} else if (bf->o) {
s = r_bin_get_section_at (bf->o, str_start, false);
}
if (s) {
vdelta = s->vaddr;
pdelta = s->paddr;
}
}
ut64 baddr = bf->loadaddr && bf->o? bf->o->baddr: bf->loadaddr;
bs->paddr = str_start + baddr;
bs->vaddr = str_start - pdelta + vdelta + baddr;
bs->string = r_str_ndup ((const char *)tmp, i);
if (list) {
r_list_append (list, bs);
if (bf->o) {
ht_up_insert (bf->o->strings_db, bs->vaddr, bs);
}
} else {
print_string (bf, bs, raw, pj);
r_bin_string_free (bs);
}
if (from == 0 && to == bf->size) {
/* force lookup section at the next one */
s = NULL;
}
}
ascii_only = false;
}
free (buf);
if (pj) {
pj_end (pj);
if (bin) {
RIO *io = bin->iob.io;
if (io) {
io->cb_printf ("%s", pj_string (pj));
}
}
pj_free (pj);
}
return count;
}
| null | null | 198,010 | 111222441560032345362598385552469977676 | 310 |
Fix integer overflow in string search causing oobread ##crash
* Reported by @greatergoodest via huntrdev
* BountyID: 8a3dc5cb-08b3-4807-82b2-77f08c137a04
* Reproducer bfileovf
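A minimal sketch of an overflow-safe cursor check: comparing the remaining length (a subtraction) instead of adding an offset to the cursor avoids the wraparound that made the scan read out of bounds. An assumed pattern, not the radare2 patch.

#include <cstdint>
#include <cstdio>

// True iff [needle, needle + need) lies inside [from, to), without overflow.
static bool in_range(uint64_t from, uint64_t to, uint64_t needle, uint64_t need) {
  if (needle < from || needle >= to) return false;
  return to - needle >= need;  // subtraction cannot wrap here
}

int main() {
  std::printf("%d\n", in_range(0, 100, 96, 8));                     // 0
  std::printf("%d\n", in_range(0, UINT64_MAX, UINT64_MAX - 2, 8));  // 0, no wrap
  return 0;
}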
| other |
| tensorflow | 3150642acbbe254e3c3c5d2232143fa591855ac9 | 1 |
void Compute(OpKernelContext* context) override {
// Checks what we're remapping and inverts the relevant remapping Tensors to
// be maps with key = old ID, value = new ID.
std::unordered_map<int64_t, int64_t> old_row_to_new_row_map;
std::vector<bool> row_id_present;
const Tensor* row_remapping_t;
OP_REQUIRES_OK(context, context->input("row_remapping", &row_remapping_t));
const auto row_remapping = row_remapping_t->vec<int64_t>();
OP_REQUIRES(context, row_remapping.size() == num_rows_,
errors::InvalidArgument(strings::StrCat(
"Size of row_remapping is ", row_remapping.size(),
" instead of being equal to num_rows=", num_rows_)));
OP_REQUIRES_OK(context, RemapVectorToMap(row_remapping, &row_id_present,
&old_row_to_new_row_map));
// Calculates the min/max old row ID that we need to read, to save us from
// reading some unnecessary slices of the old tensor.
int64_t min_old_row = -1;
int64_t max_old_row = -1;
for (int i = 0; i < row_remapping.size(); ++i) {
if (min_old_row < 0 ||
(row_remapping(i) >= 0 && row_remapping(i) < min_old_row)) {
min_old_row = row_remapping(i);
}
if (max_old_row < 0 ||
(row_remapping(i) >= 0 && row_remapping(i) > max_old_row)) {
max_old_row = row_remapping(i);
}
}
// Processes the remapping for columns.
std::unordered_map<int64_t, int64_t> old_col_to_new_col_map;
std::vector<bool> col_id_present;
const Tensor* col_remapping_t;
OP_REQUIRES_OK(context, context->input("col_remapping", &col_remapping_t));
const auto col_remapping = col_remapping_t->vec<int64_t>();
// Note that we always "remap rows", even when the row vocabulary does
// not change, because partitioning requires a mapping from partitioned
// Variables to the full checkpoints we load.
const bool remap_cols = col_remapping.size() > 0;
if (remap_cols) {
OP_REQUIRES(
context, col_remapping.size() == num_cols_,
errors::InvalidArgument(strings::StrCat(
"Provided col_remapping, but its size is ", col_remapping.size(),
" instead of being equal to num_cols=", num_cols_)));
OP_REQUIRES_OK(context, RemapVectorToMap(col_remapping, &col_id_present,
&old_col_to_new_col_map));
} else {
col_id_present.clear();
col_id_present.resize(num_cols_, true);
}
// Processes the checkpoint source and the provided Tensor name.
const Tensor* ckpt_path_t;
OP_REQUIRES_OK(context, context->input("ckpt_path", &ckpt_path_t));
OP_REQUIRES(
context, ckpt_path_t->NumElements() == 1,
errors::InvalidArgument("The `ckpt_path` tensor must have exactly one "
"element, got tensor of shape ",
ckpt_path_t->shape().DebugString()));
const string& ckpt_path = ckpt_path_t->scalar<tstring>()();
const Tensor* old_tensor_name_t;
OP_REQUIRES_OK(context,
context->input("old_tensor_name", &old_tensor_name_t));
const string& old_tensor_name = old_tensor_name_t->scalar<tstring>()();
LOG(INFO) << "Processing checkpoint : " << ckpt_path;
BundleReader reader(context->env(), ckpt_path);
OP_REQUIRES_OK(context, reader.status());
DataType tensor_type;
TensorShape tensor_shape;
OP_REQUIRES_OK(context, reader.LookupDtypeAndShape(
old_tensor_name, &tensor_type, &tensor_shape));
OP_REQUIRES(context, tensor_type == DT_FLOAT,
errors::InvalidArgument(strings::StrCat(
"Tensor ", old_tensor_name, " has invalid type ",
DataTypeString(tensor_type), " instead of expected type ",
DataTypeString(DT_FLOAT))));
// This op is limited to loading Tensors of rank 2 (matrices).
OP_REQUIRES(
context, tensor_shape.dims() == 2,
errors::InvalidArgument(strings::StrCat(
"Tensor ", old_tensor_name, " has shape ",
tensor_shape.DebugString(), " of invalid rank ",
tensor_shape.dims(), " instead of expected shape of rank 2.")));
if (!remap_cols) {
// TODO(weiho): Consider relaxing this restriction to allow partial column
// loading (even when no column remapping is specified) if there turns out
// to be a use case for it.
OP_REQUIRES(context, num_cols_ == tensor_shape.dim_size(1),
errors::InvalidArgument(strings::StrCat(
"Tensor ", old_tensor_name, " has shape ",
tensor_shape.DebugString(),
", where the size of its 2nd dimension is ",
tensor_shape.dim_size(1),
" instead of being equal to num_cols=", num_cols_)));
}
// Uses TensorSlice to potentially load the old tensor in chunks in case
// memory usage is a concern.
std::vector<TensorSlice> tensor_slices;
TensorSlice slice(tensor_shape.dims());
if (min_old_row >= 0 && max_old_row >= 0) {
int64_t row_start = min_old_row;
// TODO(weiho): Given the list of old row IDs of interest (the keys of
// old_row_to_new_row_map), we could also try something smarter to
// find some minimal set of covering ranges for the list of old row IDs
// such that the size of each range is less than max_rows_in_memory_.
while (row_start <= max_old_row) {
const int64_t slice_length =
max_rows_in_memory_ <= 0
// If max_rows_in_memory_ <= 0, we just load the entire chunk.
? max_old_row - row_start + 1
: std::min(max_rows_in_memory_, max_old_row - row_start + 1);
slice.set_start(0, row_start);
slice.set_length(0, slice_length);
tensor_slices.push_back(slice);
row_start += slice_length;
}
}
// Allocates the output matrix.
Tensor* output_matrix_t = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output("output_matrix",
TensorShape({num_rows_, num_cols_}),
&output_matrix_t));
auto output_matrix = output_matrix_t->matrix<float>();
// Iterates through tensor slices and copies over values from the old tensor
// to the output matrix.
int64_t row_index = min_old_row;
int64_t rows_copied = 0;
Tensor loaded_tensor_t;
for (const TensorSlice& tensor_slice : tensor_slices) {
LOG(INFO) << "Loading slice " << tensor_slice.DebugString();
TensorShape slice_shape;
OP_REQUIRES_OK(context,
tensor_slice.SliceTensorShape(tensor_shape, &slice_shape));
// Potentially re-allocates the tensor buffer since the last slice may
// have fewer rows than the other slices.
if (loaded_tensor_t.shape() != slice_shape) {
loaded_tensor_t = Tensor(DT_FLOAT, slice_shape);
}
OP_REQUIRES_OK(context, reader.LookupSlice(old_tensor_name, tensor_slice,
&loaded_tensor_t));
// Iterates through the old loaded tensor slice row-by-row.
for (int row = 0; row < loaded_tensor_t.dim_size(0); ++row, ++row_index) {
if (row_index % 500000 == min_old_row) {
LOG(INFO) << "Processing old row " << row_index;
}
// If the old row ID is not found in old_row_to_new_row_map, continue
// to the next row; otherwise, copy it to the output matrix.
const int64_t* new_row_ptr =
gtl::FindOrNull(old_row_to_new_row_map, row_index);
if (new_row_ptr == nullptr) {
continue;
}
++rows_copied;
const int64_t new_row = *new_row_ptr;
// Copies over the row element-by-element, in case remapping is needed
// along the column axis.
const auto& loaded_tensor = loaded_tensor_t.matrix<float>();
for (int old_col = 0; old_col < loaded_tensor_t.dim_size(1);
++old_col) {
int64_t new_col = old_col;
if (remap_cols) {
const int64_t* new_col_ptr =
gtl::FindOrNull(old_col_to_new_col_map, old_col);
if (new_col_ptr == nullptr) {
// Column remapping is specified, but this column is not found in
// old_col_to_new_col_map, so we leave it uninitialized, to be
// filled in with initializing_values later.
continue;
}
new_col = *new_col_ptr;
}
OP_REQUIRES(context,
new_row < num_rows_ && new_col < num_cols_ &&
new_row >= 0 && new_col >= 0,
errors::Internal(strings::StrCat(
"new_row=", new_row, " and new_col=", new_col,
" should have been less than num_rows_=", num_rows_,
" and num_cols_=", num_cols_,
" and non-negative. This should never have happened "
"if the code were correct. Please file a bug.")));
output_matrix(new_row, new_col) = loaded_tensor(row, old_col);
}
}
}
LOG(INFO) << "Copied " << rows_copied << " rows from old matrix (with "
<< tensor_shape.dim_size(0) << " rows) to new matrix (with "
<< num_rows_ << " rows).";
// At this point, there are potentially whole rows/columns uninitialized
// (corresponding to the indices where row_id_present/col_id_present are
// false). We fill this in cell-by-cell using row_id_present and
// col_id_present while dequeuing from the initializing_values vector.
const Tensor* initializing_values_t;
OP_REQUIRES_OK(
context, context->input("initializing_values", &initializing_values_t));
const auto initializing_values = initializing_values_t->flat<float>();
int64_t initializing_values_index = 0;
for (int i = 0; i < num_rows_; ++i) {
for (int j = 0; j < num_cols_; ++j) {
if (row_id_present[i] && col_id_present[j]) continue;
OP_REQUIRES(
context, initializing_values_index < initializing_values.size(),
errors::InvalidArgument(
"initializing_values contained ", initializing_values.size(),
" elements, but more missing values remain."));
output_matrix(i, j) = initializing_values(initializing_values_index);
++initializing_values_index;
}
}
// Checks that we used all the given initializing values.
OP_REQUIRES(
context, initializing_values_index == initializing_values.size(),
errors::InvalidArgument(
"initializing_values contained ", initializing_values.size(),
" elements, but only ", initializing_values_index,
" elements were used to fill in missing values."));
}
| null | null | 198,013 | 149718048967192541664708298570113806879 | 231 |
Fix tf.raw_ops.LoadAndRemapMatrix vulnerability with invalid `row_remapping`.
Check that `row_remapping` has the correct dims().
PiperOrigin-RevId: 445522800
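A standalone sketch of the added shape validation: a tensor consumed through a rank-1 accessor such as vec<T>() must first be checked to actually have rank 1. The stand-in type below is hypothetical.

#include <cstdio>
#include <vector>

// Minimal stand-in for a tensor: just its shape (one entry per dimension).
struct FakeTensor { std::vector<long long> shape; };

static bool is_rank1(const FakeTensor &t) { return t.shape.size() == 1; }

int main() {
  FakeTensor scalar{{}}, matrix{{2, 3}}, vec{{5}};
  // Only the genuine vector passes the check the fix adds.
  std::printf("%d %d %d\n", is_rank1(scalar), is_rank1(matrix), is_rank1(vec));
  return 0;
}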
| other |
| FFmpeg | 61d59703c91869f4e5cdacd8d6be52f8b89d4ba4 | 1 |
static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer *sb, IDWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index){
const int b_width = s->b_width << s->block_max_depth;
const int b_height= s->b_height << s->block_max_depth;
const int b_stride= b_width;
BlockNode *lt= &s->block[b_x + b_y*b_stride];
BlockNode *rt= lt+1;
BlockNode *lb= lt+b_stride;
BlockNode *rb= lb+1;
uint8_t *block[4];
int tmp_step= src_stride >= 7*MB_SIZE ? MB_SIZE : MB_SIZE*src_stride;
uint8_t *tmp = s->scratchbuf;
uint8_t *ptmp;
int x,y;
if(b_x<0){
lt= rt;
lb= rb;
}else if(b_x + 1 >= b_width){
rt= lt;
rb= lb;
}
if(b_y<0){
lt= lb;
rt= rb;
}else if(b_y + 1 >= b_height){
lb= lt;
rb= rt;
}
if(src_x<0){ //FIXME merge with prev & always round internal width up to *16
obmc -= src_x;
b_w += src_x;
if(!sliced && !offset_dst)
dst -= src_x;
src_x=0;
}else if(src_x + b_w > w){
b_w = w - src_x;
}
if(src_y<0){
obmc -= src_y*obmc_stride;
b_h += src_y;
if(!sliced && !offset_dst)
dst -= src_y*dst_stride;
src_y=0;
}else if(src_y + b_h> h){
b_h = h - src_y;
}
if(b_w<=0 || b_h<=0) return;
av_assert2(src_stride > 2*MB_SIZE + 5);
if(!sliced && offset_dst)
dst += src_x + src_y*dst_stride;
dst8+= src_x + src_y*src_stride;
// src += src_x + src_y*src_stride;
ptmp= tmp + 3*tmp_step;
block[0]= ptmp;
ptmp+=tmp_step;
ff_snow_pred_block(s, block[0], tmp, src_stride, src_x, src_y, b_w, b_h, lt, plane_index, w, h);
if(same_block(lt, rt)){
block[1]= block[0];
}else{
block[1]= ptmp;
ptmp+=tmp_step;
ff_snow_pred_block(s, block[1], tmp, src_stride, src_x, src_y, b_w, b_h, rt, plane_index, w, h);
}
if(same_block(lt, lb)){
block[2]= block[0];
}else if(same_block(rt, lb)){
block[2]= block[1];
}else{
block[2]= ptmp;
ptmp+=tmp_step;
ff_snow_pred_block(s, block[2], tmp, src_stride, src_x, src_y, b_w, b_h, lb, plane_index, w, h);
}
if(same_block(lt, rb) ){
block[3]= block[0];
}else if(same_block(rt, rb)){
block[3]= block[1];
}else if(same_block(lb, rb)){
block[3]= block[2];
}else{
block[3]= ptmp;
ff_snow_pred_block(s, block[3], tmp, src_stride, src_x, src_y, b_w, b_h, rb, plane_index, w, h);
}
if(sliced){
s->dwt.inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}else{
for(y=0; y<b_h; y++){
//FIXME ugly misuse of obmc_stride
const uint8_t *obmc1= obmc + y*obmc_stride;
const uint8_t *obmc2= obmc1+ (obmc_stride>>1);
const uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
const uint8_t *obmc4= obmc3+ (obmc_stride>>1);
for(x=0; x<b_w; x++){
int v= obmc1[x] * block[3][x + y*src_stride]
+obmc2[x] * block[2][x + y*src_stride]
+obmc3[x] * block[1][x + y*src_stride]
+obmc4[x] * block[0][x + y*src_stride];
v <<= 8 - LOG2_OBMC_MAX;
if(FRAC_BITS != 8){
v >>= 8 - FRAC_BITS;
}
if(add){
v += dst[x + y*dst_stride];
v = (v + (1<<(FRAC_BITS-1))) >> FRAC_BITS;
if(v&(~255)) v= ~(v>>31);
dst8[x + y*src_stride] = v;
}else{
dst[x + y*dst_stride] -= v;
}
}
}
}
}
| null | null | 198,115 | 130981242716370455651346005101230835197 | 121 |
avcodec/snow: split block clipping checks
Fixes out of array read
Fixes: d4476f68ca1c1c57afbc45806f581963-asan_heap-oob_2266b27_8607_cov_4044577381_snow_chroma_bug.avi
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <[email protected]>
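A minimal sketch of the split-clipping idea: check the width immediately after horizontal clipping, before the vertical pass (and any pointer arithmetic depending on it) can run on a collapsed block. This is an assumed shape of the fix, not the FFmpeg patch.

#include <cstdio>

// Clip a block against [0,w) x [0,h); false means the block vanished.
static bool clip_block(int *src_x, int *b_w, int w,
                       int *src_y, int *b_h, int h) {
  if (*src_x < 0) { *b_w += *src_x; *src_x = 0; }
  else if (*src_x + *b_w > w) *b_w = w - *src_x;
  if (*b_w <= 0) return false;  // bail right after x clipping
  if (*src_y < 0) { *b_h += *src_y; *src_y = 0; }
  else if (*src_y + *b_h > h) *b_h = h - *src_y;
  return *b_h > 0;              // and again after y clipping
}

int main() {
  int sx = -20, bw = 16, sy = 0, bh = 16;
  std::printf("keep=%d\n", clip_block(&sx, &bw, 64, &sy, &bh, 64));  // 0
  return 0;
}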
| other |
| tensorflow | 87158f43f05f2720a374f3e6d22a7aaa3a33f750 | 1 |
void Compute(OpKernelContext *ctx) override {
const Tensor *indices_t, *values_t, *shape_t, *reduction_axes_t;
OP_REQUIRES_OK(ctx, ctx->input("input_indices", &indices_t));
OP_REQUIRES_OK(ctx, ctx->input("input_values", &values_t));
OP_REQUIRES_OK(ctx, ctx->input("input_shape", &shape_t));
OP_REQUIRES_OK(ctx, ctx->input("reduction_axes", &reduction_axes_t));
OP_REQUIRES_OK(ctx, ValidateInputs(shape_t, reduction_axes_t));
// TODO(zongheng): we will call Reorder() below, which will modify
// in-place the underlying indices and values buffers. To avoid
// surprises of this kernel being stateful, we work around the above by
// making deep copies here. Remove this if/when we change Reorder()'s
// semantics.
const auto shape_vec = shape_t->vec<int64>();
SparseTensor sp;
OP_REQUIRES_OK(ctx, SparseTensor::Create(
tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
TensorShape(shape_vec), &sp));
ReduceDetails reduction = SparseTensorReduceHelper(
sp, reduction_axes_t->flat<int32>(), keep_dims_);
Tensor *out_values;
OP_REQUIRES_OK(
ctx, ctx->allocate_output(0, reduction.reduced_shape, &out_values));
auto out_flat = out_values->flat<T>();
out_flat.setZero();
Tensor tmp_reduced_val;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
TensorShape({}), &tmp_reduced_val));
auto reduced_val = tmp_reduced_val.scalar<T>();
// Compute strides, and use it to convert coords to flat index. The
// coordinates returned by .group() have the same ndims as group_by_dims.
gtl::InlinedVector<int64, 8> output_strides(reduction.group_by_dims.size());
if (!output_strides.empty()) { // Do this iff we don't reduce all.
output_strides.back() = 1;
for (int d = output_strides.size() - 2; d >= 0; --d) {
output_strides[d] =
output_strides[d + 1] * shape_vec(reduction.group_by_dims[d + 1]);
}
}
auto CoordinatesToFlatIndex = [](ArraySlice<int64> coords,
ArraySlice<int64> strides) -> int64 {
if (strides.empty()) { // Reduce all.
return 0;
}
CHECK_EQ(coords.size(), strides.size());
int64_t idx = 0;
for (int i = 0; i < coords.size(); ++i) {
idx += coords[i] * strides[i];
}
return idx;
};
// Each group maps one-on-one onto a value in the reduced tensor.
// g.group() provides the coordinates of a particular reduced value.
sp.Reorder<T>(reduction.reorder_dims);
for (const auto &g : sp.group(reduction.group_by_dims)) {
Op::template Run<T>(ctx, reduced_val, g.template values<T>());
const int64_t idx = CoordinatesToFlatIndex(g.group(), output_strides);
out_flat(idx) = reduced_val();
VLOG(2) << "coords: " << absl::StrJoin(g.group(), ",")
<< "; idx: " << idx << "; group " << Op::Name() << ": "
<< reduced_val();
}
}
| null | null | 198,116 | 51050455024866713197969944234997171688 | 69 |
Prevent heap OOB in sparse reduction ops.
PiperOrigin-RevId: 387934524
Change-Id: I894aa30f1e454f09b471d565b4a325da49322c1a
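A standalone sketch of the bounds check this fix implies: the flat output index is derived from untrusted sparse coordinates, so it must be range-checked before the store. Illustrative only.

#include <cstdint>
#include <cstdio>

// Range-checked store; false means the index was rejected.
static bool store_checked(int64_t *out, int64_t out_size,
                          int64_t idx, int64_t v) {
  if (idx < 0 || idx >= out_size) return false;  // would be a heap OOB write
  out[idx] = v;
  return true;
}

int main() {
  int64_t out[4] = {0};
  std::printf("%d %d\n", store_checked(out, 4, 3, 7),
              store_checked(out, 4, 9, 7));  // 1 0
  return 0;
}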
| other |
| tensorflow | 0f931751fb20f565c4e94aa6df58d54a003cdb30 | 1 |
void Compute(OpKernelContext* context) override {
// Here's the basic idea:
// Batch and depth dimension are independent from row and col dimension. And
// because FractionalAvgPool currently only support pooling along row and
// col, we can basically think of this 4D tensor backpropagation as
// operation of a series of 2D planes.
//
// For each element of a 'slice' (2D plane) of output_backprop, we need to
// figure out its contributors when doing FractionalAvgPool operation. This
// can be done based on row_pooling_sequence, col_pooling_seq and
// overlapping.
// Once we figure out the original contributors, we just need to evenly
// divide the value of this element among these contributors.
//
// Internally, we divide the out_backprop tensor and store it in a temporary
// tensor of double type. And cast it to the corresponding type.
typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
ConstEigenMatrixMap;
typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>>
EigenDoubleMatrixMap;
// Grab the inputs.
const Tensor& orig_input_tensor_shape = context->input(0);
OP_REQUIRES(context,
orig_input_tensor_shape.dims() == 1 &&
orig_input_tensor_shape.NumElements() == 4,
errors::InvalidArgument("original input tensor shape must be"
"1-dimensional and 4 elements"));
const Tensor& out_backprop = context->input(1);
const Tensor& row_seq_tensor = context->input(2);
const Tensor& col_seq_tensor = context->input(3);
const int64_t out_batch = out_backprop.dim_size(0);
const int64_t out_rows = out_backprop.dim_size(1);
const int64_t out_cols = out_backprop.dim_size(2);
const int64_t out_depth = out_backprop.dim_size(3);
OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", row_seq_tensor must have at least ",
out_rows + 1, " elements, but got ",
row_seq_tensor.NumElements()));
OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", col_seq_tensor must have at least ",
out_cols + 1, " elements, but got ",
col_seq_tensor.NumElements()));
auto row_seq_tensor_flat = row_seq_tensor.flat<int64>();
auto col_seq_tensor_flat = col_seq_tensor.flat<int64>();
auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64>();
const int64_t in_batch = orig_input_tensor_shape_flat(0);
const int64_t in_rows = orig_input_tensor_shape_flat(1);
const int64_t in_cols = orig_input_tensor_shape_flat(2);
const int64_t in_depth = orig_input_tensor_shape_flat(3);
constexpr int tensor_in_and_out_dims = 4;
// Transform orig_input_tensor_shape into TensorShape
TensorShape in_shape;
for (auto i = 0; i < tensor_in_and_out_dims; ++i) {
in_shape.AddDim(orig_input_tensor_shape_flat(i));
}
// Create intermediate in_backprop.
Tensor in_backprop_tensor_temp;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp(
{0}, DataTypeToEnum<double>::v(), in_shape,
&in_backprop_tensor_temp));
in_backprop_tensor_temp.flat<double>().setZero();
// Transform 4D tensor to 2D matrix.
EigenDoubleMatrixMap in_backprop_tensor_temp_mat(
in_backprop_tensor_temp.flat<double>().data(), in_depth,
in_cols * in_rows * in_batch);
ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(),
out_depth,
out_cols * out_rows * out_batch);
// Loop through each element of out_backprop and evenly distribute the
// element to the corresponding pooling cell.
const int64_t in_max_row_index = in_rows - 1;
const int64_t in_max_col_index = in_cols - 1;
for (int64_t b = 0; b < out_batch; ++b) {
for (int64_t r = 0; r < out_rows; ++r) {
const int64_t in_row_start = row_seq_tensor_flat(r);
int64_t in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1)
: row_seq_tensor_flat(r + 1) - 1;
in_row_end = std::min(in_row_end, in_max_row_index);
for (int64_t c = 0; c < out_cols; ++c) {
const int64_t in_col_start = col_seq_tensor_flat(c);
int64_t in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1)
: col_seq_tensor_flat(c + 1) - 1;
in_col_end = std::min(in_col_end, in_max_col_index);
const int64_t num_elements_in_pooling_cell =
(in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1);
const int64_t out_index = (b * out_rows + r) * out_cols + c;
// Now we can evenly distribute out_backprop(b, h, w, *) to
// in_backprop(b, hs:he, ws:we, *).
for (int64_t in_r = in_row_start; in_r <= in_row_end; ++in_r) {
for (int64_t in_c = in_col_start; in_c <= in_col_end; ++in_c) {
const int64_t in_index = (b * in_rows + in_r) * in_cols + in_c;
// Walk through each channel (depth).
for (int64_t d = 0; d < out_depth; ++d) {
const double out_backprop_element = static_cast<double>(
out_backprop_mat.coeffRef(d, out_index));
double& in_backprop_ref =
in_backprop_tensor_temp_mat.coeffRef(d, in_index);
in_backprop_ref +=
out_backprop_element / num_elements_in_pooling_cell;
}
}
}
}
}
}
// Depending on the type, cast double to type T.
Tensor* in_backprop_tensor = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 0, in_shape, &in_backprop_tensor));
auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>();
auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>();
for (int64_t i = 0; i < in_backprop_tensor_flat.size(); ++i) {
in_backprop_tensor_flat(i) =
static_cast<T>(in_backprop_tensor_temp_flat(i));
}
}
| null | null | 198,117 | 321331517740946158662148739652819934224 | 129 |
Validate dimensions of input tensor in `FractionalAvgPoolGrad`
PiperOrigin-RevId: 388286227
Change-Id: Ieb7566155e92acc8993a2212c76deacadc0edc8a
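A standalone sketch of the dimension validation named in the message: every entry of the user-supplied 4-element shape tensor must be positive before it is used to size buffers or index planes. An assumed pattern, not the TensorFlow patch.

#include <cstdint>
#include <cstdio>

// The original-input shape has exactly 4 entries; all must be positive.
static bool valid_shape4(const int64_t s[4]) {
  for (int i = 0; i < 4; ++i)
    if (s[i] <= 0) return false;
  return true;
}

int main() {
  int64_t bad[4] = {1, -3, 5, 2};
  int64_t good[4] = {1, 3, 5, 2};
  std::printf("%d %d\n", valid_shape4(bad), valid_shape4(good));  // 0 1
  return 0;
}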
| other |
| ImageMagick | c8718305f120293d8bf13724f12eed885d830b09 | 1 |
static Image *ReadTIFFImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define ThrowTIFFException(severity,message) \
{ \
if (pixel_info != (MemoryInfo *) NULL) \
pixel_info=RelinquishVirtualMemory(pixel_info); \
if (quantum_info != (QuantumInfo *) NULL) \
quantum_info=DestroyQuantumInfo(quantum_info); \
TIFFClose(tiff); \
ThrowReaderException(severity,message); \
}
float
*chromaticity = (float *) NULL,
x_position,
y_position,
x_resolution,
y_resolution;
Image
*image;
int
tiff_status = 0;
MagickBooleanType
more_frames;
MagickSizeType
number_pixels;
MagickStatusType
status;
MemoryInfo
*pixel_info = (MemoryInfo *) NULL;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
ssize_t
i,
scanline_size,
y;
TIFF
*tiff;
TIFFMethodType
method;
uint16
compress_tag = 0,
bits_per_sample = 0,
endian = 0,
extra_samples = 0,
interlace = 0,
max_sample_value = 0,
min_sample_value = 0,
orientation = 0,
pages = 0,
photometric = 0,
*sample_info = NULL,
sample_format = 0,
samples_per_pixel = 0,
units = 0,
value = 0;
uint32
height,
rows_per_strip,
width;
unsigned char
*pixels;
void
*sans[8] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };
/*
Open image.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) SetMagickThreadValue(tiff_exception,exception);
tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob,
TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob,
TIFFUnmapBlob);
if (tiff == (TIFF *) NULL)
{
if (exception->severity == UndefinedException)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
image=DestroyImageList(image);
return((Image *) NULL);
}
if (exception->severity > ErrorException)
{
TIFFClose(tiff);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (image_info->number_scenes != 0)
{
/*
        Generate blank images for subimage specification (e.g. image.tif[4]).
        We need to check the number of directories because it is possible that
the subimage(s) are stored in the photoshop profile.
*/
if (image_info->scene < (size_t) TIFFNumberOfDirectories(tiff))
{
for (i=0; i < (ssize_t) image_info->scene; i++)
{
status=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse;
if (status == MagickFalse)
{
TIFFClose(tiff);
image=DestroyImageList(image);
return((Image *) NULL);
}
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
TIFFClose(tiff);
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
}
}
}
more_frames=MagickTrue;
do
{
/* TIFFPrintDirectory(tiff,stdout,MagickFalse); */
photometric=PHOTOMETRIC_RGB;
if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) ||
(TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value,sans) != 1))
{
TIFFClose(tiff);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (((sample_format != SAMPLEFORMAT_IEEEFP) || (bits_per_sample != 64)) &&
((bits_per_sample <= 0) || (bits_per_sample > 32)))
{
TIFFClose(tiff);
ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel");
}
if (samples_per_pixel > MaxPixelChannels)
{
TIFFClose(tiff);
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
}
if (sample_format == SAMPLEFORMAT_IEEEFP)
(void) SetImageProperty(image,"quantum:format","floating-point",
exception);
switch (photometric)
{
case PHOTOMETRIC_MINISBLACK:
{
(void) SetImageProperty(image,"tiff:photometric","min-is-black",
exception);
break;
}
case PHOTOMETRIC_MINISWHITE:
{
(void) SetImageProperty(image,"tiff:photometric","min-is-white",
exception);
break;
}
case PHOTOMETRIC_PALETTE:
{
(void) SetImageProperty(image,"tiff:photometric","palette",exception);
break;
}
case PHOTOMETRIC_RGB:
{
(void) SetImageProperty(image,"tiff:photometric","RGB",exception);
break;
}
case PHOTOMETRIC_CIELAB:
{
(void) SetImageProperty(image,"tiff:photometric","CIELAB",exception);
break;
}
case PHOTOMETRIC_LOGL:
{
(void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)",
exception);
break;
}
case PHOTOMETRIC_LOGLUV:
{
(void) SetImageProperty(image,"tiff:photometric","LOGLUV",exception);
break;
}
#if defined(PHOTOMETRIC_MASK)
case PHOTOMETRIC_MASK:
{
(void) SetImageProperty(image,"tiff:photometric","MASK",exception);
break;
}
#endif
case PHOTOMETRIC_SEPARATED:
{
(void) SetImageProperty(image,"tiff:photometric","separated",exception);
break;
}
case PHOTOMETRIC_YCBCR:
{
(void) SetImageProperty(image,"tiff:photometric","YCBCR",exception);
break;
}
default:
{
(void) SetImageProperty(image,"tiff:photometric","unknown",exception);
break;
}
}
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u",
(unsigned int) width,(unsigned int) height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u",
interlace);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Bits per sample: %u",bits_per_sample);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Min sample value: %u",min_sample_value);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Max sample value: %u",max_sample_value);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric "
"interpretation: %s",GetImageProperty(image,"tiff:photometric",
exception));
}
image->columns=(size_t) width;
image->rows=(size_t) height;
image->depth=(size_t) bits_per_sample;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g",
(double) image->depth);
image->endian=MSBEndian;
if (endian == FILLORDER_LSB2MSB)
image->endian=LSBEndian;
#if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN)
if (TIFFIsBigEndian(tiff) == 0)
{
(void) SetImageProperty(image,"tiff:endian","lsb",exception);
image->endian=LSBEndian;
}
else
{
(void) SetImageProperty(image,"tiff:endian","msb",exception);
image->endian=MSBEndian;
}
#endif
if ((photometric == PHOTOMETRIC_MINISBLACK) ||
(photometric == PHOTOMETRIC_MINISWHITE))
(void) SetImageColorspace(image,GRAYColorspace,exception);
if (photometric == PHOTOMETRIC_SEPARATED)
(void) SetImageColorspace(image,CMYKColorspace,exception);
if (photometric == PHOTOMETRIC_CIELAB)
(void) SetImageColorspace(image,LabColorspace,exception);
if ((photometric == PHOTOMETRIC_YCBCR) &&
(compress_tag != COMPRESSION_OJPEG) &&
(compress_tag != COMPRESSION_JPEG))
(void) SetImageColorspace(image,YCbCrColorspace,exception);
status=TIFFGetProfiles(tiff,image,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
status=TIFFGetProperties(tiff,image,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
TIFFGetEXIFProperties(tiff,image,image_info,exception);
TIFFGetGPSProperties(tiff,image,image_info,exception);
if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution,sans) == 1) &&
(TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution,sans) == 1))
{
image->resolution.x=x_resolution;
image->resolution.y=y_resolution;
}
if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units,sans,sans) == 1)
{
if (units == RESUNIT_INCH)
image->units=PixelsPerInchResolution;
if (units == RESUNIT_CENTIMETER)
image->units=PixelsPerCentimeterResolution;
}
if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position,sans) == 1) &&
(TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position,sans) == 1))
{
image->page.x=CastDoubleToLong(ceil(x_position*
image->resolution.x-0.5));
image->page.y=CastDoubleToLong(ceil(y_position*
image->resolution.y-0.5));
}
if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation,sans) == 1)
image->orientation=(OrientationType) orientation;
if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1)
{
if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0))
{
image->chromaticity.white_point.x=chromaticity[0];
image->chromaticity.white_point.y=chromaticity[1];
}
}
if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1)
{
if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0))
{
image->chromaticity.red_primary.x=chromaticity[0];
image->chromaticity.red_primary.y=chromaticity[1];
image->chromaticity.green_primary.x=chromaticity[2];
image->chromaticity.green_primary.y=chromaticity[3];
image->chromaticity.blue_primary.x=chromaticity[4];
image->chromaticity.blue_primary.y=chromaticity[5];
}
}
#if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919)
if ((compress_tag != COMPRESSION_NONE) &&
(TIFFIsCODECConfigured(compress_tag) == 0))
{
TIFFClose(tiff);
ThrowReaderException(CoderError,"CompressNotSupported");
}
#endif
switch (compress_tag)
{
case COMPRESSION_NONE: image->compression=NoCompression; break;
case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break;
case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break;
case COMPRESSION_JPEG:
{
image->compression=JPEGCompression;
#if defined(JPEG_SUPPORT)
{
char
sampling_factor[MagickPathExtent];
uint16
horizontal,
vertical;
tiff_status=TIFFGetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,&horizontal,
&vertical);
if (tiff_status == 1)
{
(void) FormatLocaleString(sampling_factor,MagickPathExtent,
"%dx%d",horizontal,vertical);
(void) SetImageProperty(image,"jpeg:sampling-factor",
sampling_factor,exception);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling Factors: %s",sampling_factor);
}
}
#endif
break;
}
case COMPRESSION_OJPEG: image->compression=JPEGCompression; break;
#if defined(COMPRESSION_LZMA)
case COMPRESSION_LZMA: image->compression=LZMACompression; break;
#endif
case COMPRESSION_LZW: image->compression=LZWCompression; break;
case COMPRESSION_DEFLATE: image->compression=ZipCompression; break;
case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break;
#if defined(COMPRESSION_WEBP)
case COMPRESSION_WEBP: image->compression=WebPCompression; break;
#endif
#if defined(COMPRESSION_ZSTD)
case COMPRESSION_ZSTD: image->compression=ZstdCompression; break;
#endif
default: image->compression=RLECompression; break;
}
quantum_info=(QuantumInfo *) NULL;
if ((photometric == PHOTOMETRIC_PALETTE) &&
(pow(2.0,1.0*bits_per_sample) <= MaxColormapSize))
{
size_t
colors;
colors=(size_t) GetQuantumRange(bits_per_sample)+1;
if (AcquireImageColormap(image,colors,exception) == MagickFalse)
{
TIFFClose(tiff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
}
value=(unsigned short) image->scene;
if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages,sans) == 1)
image->scene=value;
if (image->storage_class == PseudoClass)
{
size_t
range;
uint16
*blue_colormap = (uint16 *) NULL,
*green_colormap = (uint16 *) NULL,
*red_colormap = (uint16 *) NULL;
/*
Initialize colormap.
*/
tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap,
&green_colormap,&blue_colormap);
if (tiff_status == 1)
{
if ((red_colormap != (uint16 *) NULL) &&
(green_colormap != (uint16 *) NULL) &&
(blue_colormap != (uint16 *) NULL))
{
range=255; /* might be old style 8-bit colormap */
for (i=0; i < (ssize_t) image->colors; i++)
if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) ||
(blue_colormap[i] >= 256))
{
range=65535;
break;
}
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=ClampToQuantum(((double)
QuantumRange*red_colormap[i])/range);
image->colormap[i].green=ClampToQuantum(((double)
QuantumRange*green_colormap[i])/range);
image->colormap[i].blue=ClampToQuantum(((double)
QuantumRange*blue_colormap[i])/range);
}
}
}
}
if (image_info->ping != MagickFalse)
{
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
goto next_tiff_frame;
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
status=SetImageColorspace(image,image->colorspace,exception);
status&=ResetImagePixels(image,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
/*
Allocate memory for the image and pixel buffer.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
if (sample_format == SAMPLEFORMAT_UINT)
status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat);
if (sample_format == SAMPLEFORMAT_INT)
status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat);
if (sample_format == SAMPLEFORMAT_IEEEFP)
status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
if (status == MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
status=MagickTrue;
switch (photometric)
{
case PHOTOMETRIC_MINISBLACK:
{
quantum_info->min_is_white=MagickFalse;
break;
}
case PHOTOMETRIC_MINISWHITE:
{
quantum_info->min_is_white=MagickTrue;
break;
}
default:
break;
}
extra_samples=0;
tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples,
&sample_info,sans);
if (tiff_status == 1)
{
(void) SetImageProperty(image,"tiff:alpha","unspecified",exception);
if (extra_samples == 0)
{
if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB))
image->alpha_trait=BlendPixelTrait;
}
else
for (i=0; i < extra_samples; i++)
{
image->alpha_trait=BlendPixelTrait;
if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA)
{
SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha);
(void) SetImageProperty(image,"tiff:alpha","associated",
exception);
}
else
if (sample_info[i] == EXTRASAMPLE_UNASSALPHA)
{
SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha);
(void) SetImageProperty(image,"tiff:alpha","unassociated",
exception);
}
}
}
if (image->alpha_trait != UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
method=ReadGenericMethod;
rows_per_strip=(uint32) image->rows;
if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1)
{
char
buffer[MagickPathExtent];
(void) FormatLocaleString(buffer,MagickPathExtent,"%u",
(unsigned int) rows_per_strip);
(void) SetImageProperty(image,"tiff:rows-per-strip",buffer,exception);
method=ReadStripMethod;
if (rows_per_strip > (uint32) image->rows)
rows_per_strip=(uint32) image->rows;
}
if (TIFFIsTiled(tiff) != MagickFalse)
{
uint32
columns,
rows;
if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) ||
(TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1))
ThrowTIFFException(CoderError,"ImageIsNotTiled");
if ((AcquireMagickResource(WidthResource,columns) == MagickFalse) ||
(AcquireMagickResource(HeightResource,rows) == MagickFalse))
ThrowTIFFException(ImageError,"WidthOrHeightExceedsLimit");
method=ReadTileMethod;
}
if ((photometric == PHOTOMETRIC_LOGLUV) ||
(compress_tag == COMPRESSION_CCITTFAX3))
method=ReadGenericMethod;
if (image->compression == JPEGCompression)
method=GetJPEGMethod(image,tiff,photometric,bits_per_sample,
samples_per_pixel);
quantum_info->endian=LSBEndian;
scanline_size=TIFFScanlineSize(tiff);
if (scanline_size <= 0)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
number_pixels=MagickMax((MagickSizeType) image->columns*samples_per_pixel*
pow(2.0,ceil(log(bits_per_sample)/log(2.0))),image->columns*
rows_per_strip);
if ((double) scanline_size > 1.5*number_pixels)
ThrowTIFFException(CorruptImageError,"CorruptImage");
number_pixels=MagickMax((MagickSizeType) scanline_size,number_pixels);
pixel_info=AcquireVirtualMemory(number_pixels,sizeof(uint32));
if (pixel_info == (MemoryInfo *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
(void) memset(pixels,0,number_pixels*sizeof(uint32));
quantum_type=GrayQuantum;
if (image->storage_class == PseudoClass)
quantum_type=IndexQuantum;
if (interlace != PLANARCONFIG_SEPARATE)
{
size_t
pad;
pad=(size_t) MagickMax((ssize_t) samples_per_pixel-1,0);
if (image->alpha_trait != UndefinedPixelTrait)
{
if (image->storage_class == PseudoClass)
quantum_type=IndexAlphaQuantum;
else
quantum_type=samples_per_pixel == 1 ? AlphaQuantum :
GrayAlphaQuantum;
}
if ((samples_per_pixel > 2) && (interlace != PLANARCONFIG_SEPARATE))
{
quantum_type=RGBQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0);
if (image->alpha_trait != UndefinedPixelTrait)
{
quantum_type=RGBAQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0);
}
if (image->colorspace == CMYKColorspace)
{
quantum_type=CMYKQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0);
if (image->alpha_trait != UndefinedPixelTrait)
{
quantum_type=CMYKAQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0);
}
}
status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >>
3));
if (status == MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
}
}
switch (method)
{
case ReadYCCKMethod:
{
/*
Convert YCC TIFF image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
unsigned char
*p;
tiff_status=TIFFReadPixels(tiff,0,y,(char *) pixels);
if (tiff_status == -1)
break;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
p=pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelCyan(image,ScaleCharToQuantum(ClampYCC((double) *p+
(1.402*(double) *(p+2))-179.456)),q);
SetPixelMagenta(image,ScaleCharToQuantum(ClampYCC((double) *p-
(0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+
135.45984)),q);
SetPixelYellow(image,ScaleCharToQuantum(ClampYCC((double) *p+
(1.772*(double) *(p+1))-226.816)),q);
SetPixelBlack(image,ScaleCharToQuantum((unsigned char) *(p+3)),q);
q+=GetPixelChannels(image);
p+=4;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case ReadStripMethod:
{
unsigned char
*p;
size_t
extent;
ssize_t
stride,
strip_id;
tsize_t
strip_size;
unsigned char
*strip_pixels;
/*
Convert stripped TIFF image.
*/
extent=(samples_per_pixel+1)*TIFFStripSize(tiff);
#if defined(TIFF_VERSION_BIG)
extent+=image->columns*sizeof(uint64);
#else
extent+=image->columns*sizeof(uint32);
#endif
strip_pixels=(unsigned char *) AcquireQuantumMemory(extent,
sizeof(*strip_pixels));
if (strip_pixels == (unsigned char *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(strip_pixels,0,extent*sizeof(*strip_pixels));
stride=TIFFVStripSize(tiff,1);
strip_id=0;
p=strip_pixels;
for (i=0; i < (ssize_t) samples_per_pixel; i++)
{
size_t
rows_remaining;
switch (i)
{
case 0: break;
case 1: quantum_type=GreenQuantum; break;
case 2: quantum_type=BlueQuantum; break;
case 3:
{
quantum_type=AlphaQuantum;
if (image->colorspace == CMYKColorspace)
quantum_type=BlackQuantum;
break;
}
case 4: quantum_type=AlphaQuantum; break;
default: break;
}
rows_remaining=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
if (rows_remaining == 0)
{
strip_size=TIFFReadEncodedStrip(tiff,strip_id,strip_pixels,
TIFFStripSize(tiff));
if (strip_size == -1)
break;
rows_remaining=rows_per_strip;
if ((y+rows_per_strip) > (ssize_t) image->rows)
rows_remaining=(rows_per_strip-(y+rows_per_strip-
image->rows));
p=strip_pixels;
strip_id++;
}
(void) ImportQuantumPixels(image,(CacheView *) NULL,
quantum_info,quantum_type,p,exception);
p+=stride;
rows_remaining--;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE))
break;
}
strip_pixels=(unsigned char *) RelinquishMagickMemory(strip_pixels);
break;
}
case ReadTileMethod:
{
unsigned char
*p;
size_t
extent;
uint32
columns,
rows;
unsigned char
*tile_pixels;
/*
Convert tiled TIFF image.
*/
if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) ||
(TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1))
ThrowTIFFException(CoderError,"ImageIsNotTiled");
number_pixels=(MagickSizeType) columns*rows;
if (HeapOverflowSanityCheck(rows,sizeof(*tile_pixels)) != MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
extent=MagickMax(rows*TIFFTileRowSize(tiff),TIFFTileSize(tiff));
#if defined(TIFF_VERSION_BIG)
extent+=image->columns*sizeof(uint64);
#else
extent+=image->columns*sizeof(uint32);
#endif
tile_pixels=(unsigned char *) AcquireQuantumMemory(extent,
sizeof(*tile_pixels));
if (tile_pixels == (unsigned char *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(tile_pixels,0,extent*sizeof(*tile_pixels));
for (i=0; i < (ssize_t) samples_per_pixel; i++)
{
switch (i)
{
case 0: break;
case 1: quantum_type=GreenQuantum; break;
case 2: quantum_type=BlueQuantum; break;
case 3:
{
quantum_type=AlphaQuantum;
if (image->colorspace == CMYKColorspace)
quantum_type=BlackQuantum;
break;
}
case 4: quantum_type=AlphaQuantum; break;
default: break;
}
for (y=0; y < (ssize_t) image->rows; y+=rows)
{
ssize_t
x;
size_t
rows_remaining;
rows_remaining=image->rows-y;
if ((ssize_t) (y+rows) < (ssize_t) image->rows)
rows_remaining=rows;
for (x=0; x < (ssize_t) image->columns; x+=columns)
{
size_t
columns_remaining,
row;
columns_remaining=image->columns-x;
if ((ssize_t) (x+columns) < (ssize_t) image->columns)
columns_remaining=columns;
tiff_status=TIFFReadTile(tiff,tile_pixels,(uint32) x,(uint32) y,
0,i);
if (tiff_status == -1)
break;
p=tile_pixels;
for (row=0; row < rows_remaining; row++)
{
Quantum
*magick_restrict q;
q=GetAuthenticPixels(image,x,y+row,columns_remaining,1,
exception);
if (q == (Quantum *) NULL)
break;
(void) ImportQuantumPixels(image,(CacheView *) NULL,
quantum_info,quantum_type,p,exception);
p+=TIFFTileRowSize(tiff);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
}
if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE))
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) i,
samples_per_pixel);
if (status == MagickFalse)
break;
}
}
tile_pixels=(unsigned char *) RelinquishMagickMemory(tile_pixels);
break;
}
case ReadGenericMethod:
default:
{
MemoryInfo
*generic_info = (MemoryInfo * ) NULL;
uint32
*p;
/*
Convert generic TIFF image.
*/
if (HeapOverflowSanityCheck(image->rows,sizeof(*pixels)) != MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
number_pixels=(MagickSizeType) image->columns*image->rows;
#if defined(TIFF_VERSION_BIG)
number_pixels+=image->columns*sizeof(uint64);
#else
number_pixels+=image->columns*sizeof(uint32);
#endif
generic_info=AcquireVirtualMemory(number_pixels,sizeof(uint32));
if (generic_info == (MemoryInfo *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
p=(uint32 *) GetVirtualMemoryBlob(generic_info);
tiff_status=TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32)
image->rows,(uint32 *) p,0);
if (tiff_status == -1)
{
generic_info=RelinquishVirtualMemory(generic_info);
break;
}
p+=(image->columns*image->rows)-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
q+=GetPixelChannels(image)*(image->columns-1);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
TIFFGetR(*p)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
TIFFGetG(*p)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
TIFFGetB(*p)),q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
TIFFGetA(*p)),q);
p--;
q-=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
generic_info=RelinquishVirtualMemory(generic_info);
break;
}
}
pixel_info=RelinquishVirtualMemory(pixel_info);
SetQuantumImageType(image,quantum_type);
next_tiff_frame:
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
if (tiff_status == -1)
{
status=MagickFalse;
break;
}
if (photometric == PHOTOMETRIC_CIELAB)
DecodeLabImage(image,exception);
if ((photometric == PHOTOMETRIC_LOGL) ||
(photometric == PHOTOMETRIC_MINISBLACK) ||
(photometric == PHOTOMETRIC_MINISWHITE))
{
image->type=GrayscaleType;
if (bits_per_sample == 1)
image->type=BilevelType;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
more_frames=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse;
if (more_frames != MagickFalse)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,image->scene-1,
image->scene);
if (status == MagickFalse)
break;
}
} while ((status != MagickFalse) && (more_frames != MagickFalse));
TIFFClose(tiff);
if (status != MagickFalse)
TIFFReadPhotoshopLayers(image_info,image,exception);
if ((image_info->number_scenes != 0) &&
(image_info->scene >= GetImageListLength(image)))
status=MagickFalse;
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
| null | null | 198,121
|
175955554095429192429912268769734051186
| 1,015
|
https://github.com/ImageMagick/ImageMagick/issues/4974
|
other
|
tensorflow
|
429f009d2b2c09028647dd4bb7b3f6f414bbaad7
| 1
|
void Compute(OpKernelContext* const context) override {
// node_id_range
const Tensor* node_id_range_t;
OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
const auto node_id_range = node_id_range_t->vec<int32>();
const int32_t node_id_first = node_id_range(0); // inclusive
const int32_t node_id_last = node_id_range(1); // exclusive
const Tensor* stats_summary_t;
OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t));
TTypes<float, 4>::ConstTensor stats_summary =
stats_summary_t->tensor<float, 4>();
const int32_t feature_dims = stats_summary_t->dim_size(1);
// The last bucket is for default/missing value.
const int32_t num_buckets = stats_summary_t->dim_size(2) - 1;
const int32_t logits_dim = logits_dim_;
const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim;
DCHECK_GT(hessian_dim, 0);
DCHECK_LE(hessian_dim, logits_dim * logits_dim);
const Tensor* l1_t;
OP_REQUIRES_OK(context, context->input("l1", &l1_t));
const auto l1 = l1_t->scalar<float>()();
DCHECK_GE(l1, 0);
if (logits_dim_ > 1) {
// Multi-class L1 regularization not supported yet.
DCHECK_EQ(l1, 0);
}
const Tensor* l2_t;
OP_REQUIRES_OK(context, context->input("l2", &l2_t));
const auto l2 = l2_t->scalar<float>()();
DCHECK_GE(l2, 0);
const Tensor* tree_complexity_t;
OP_REQUIRES_OK(context,
context->input("tree_complexity", &tree_complexity_t));
const auto tree_complexity = tree_complexity_t->scalar<float>()();
const Tensor* min_node_weight_t;
OP_REQUIRES_OK(context,
context->input("min_node_weight", &min_node_weight_t));
const auto min_node_weight = min_node_weight_t->scalar<float>()();
std::vector<int32> output_node_ids;
std::vector<float> output_gains;
std::vector<int32> output_feature_dimensions;
std::vector<int32> output_thresholds;
std::vector<Eigen::VectorXf> output_left_node_contribs;
std::vector<Eigen::VectorXf> output_right_node_contribs;
std::vector<string> output_split_types;
// TODO(tanzheny) parallelize the computation.
// Iterate each node and find the best gain per node.
for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) {
float best_gain = std::numeric_limits<float>::lowest();
int32_t best_bucket = 0;
int32_t best_f_dim = 0;
string best_split_type;
Eigen::VectorXf best_contrib_for_left(logits_dim);
Eigen::VectorXf best_contrib_for_right(logits_dim);
float parent_gain;
// Including default bucket.
ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0),
num_buckets + 1, logits_dim + hessian_dim);
const Eigen::VectorXf total_grad =
stats_mat.leftCols(logits_dim).colwise().sum();
const Eigen::VectorXf total_hess =
stats_mat.rightCols(hessian_dim).colwise().sum();
if (total_hess.norm() < min_node_weight) {
continue;
}
Eigen::VectorXf parent_weight(logits_dim);
CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight,
&parent_gain);
if (split_type_ == "inequality") {
CalculateBestInequalitySplit(
stats_summary, node_id, feature_dims, logits_dim, hessian_dim,
num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket,
&best_f_dim, &best_split_type, &best_contrib_for_left,
&best_contrib_for_right);
} else {
CalculateBestEqualitySplit(
stats_summary, total_grad, total_hess, node_id, feature_dims,
logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain,
&best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left,
&best_contrib_for_right);
}
if (best_gain == std::numeric_limits<float>::lowest()) {
        // Do not add the node if no split was found.
continue;
}
output_node_ids.push_back(node_id);
// Remove the parent gain for the parent node.
output_gains.push_back(best_gain - parent_gain);
output_feature_dimensions.push_back(best_f_dim);
// default direction is fixed for dense splits.
// TODO(tanzheny) account for default values.
output_split_types.push_back(best_split_type);
output_thresholds.push_back(best_bucket);
output_left_node_contribs.push_back(best_contrib_for_left);
output_right_node_contribs.push_back(best_contrib_for_right);
} // for node id
const int num_nodes = output_node_ids.size();
// output_node_ids
Tensor* output_node_ids_t = nullptr;
OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes},
&output_node_ids_t));
auto output_node_ids_vec = output_node_ids_t->vec<int32>();
// output_gains
Tensor* output_gains_t;
OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes},
&output_gains_t));
auto output_gains_vec = output_gains_t->vec<float>();
// output_feature_dimensions
Tensor* output_feature_dimension_t;
OP_REQUIRES_OK(context,
context->allocate_output("feature_dimensions", {num_nodes},
&output_feature_dimension_t));
auto output_feature_dimensions_vec =
output_feature_dimension_t->vec<int32>();
// output_thresholds
Tensor* output_thresholds_t;
OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes},
&output_thresholds_t));
auto output_thresholds_vec = output_thresholds_t->vec<int32>();
// output_left_node_contribs
Tensor* output_left_node_contribs_t;
OP_REQUIRES_OK(context, context->allocate_output(
"left_node_contribs", {num_nodes, logits_dim},
&output_left_node_contribs_t));
auto output_left_node_contribs_matrix =
output_left_node_contribs_t->matrix<float>();
// output_right_node_contribs
Tensor* output_right_node_contribs_t;
OP_REQUIRES_OK(context, context->allocate_output(
"right_node_contribs", {num_nodes, logits_dim},
&output_right_node_contribs_t));
auto output_right_node_contribs_matrix =
output_right_node_contribs_t->matrix<float>();
// split type
Tensor* output_split_types_t;
OP_REQUIRES_OK(
context, context->allocate_output("split_with_default_directions",
{num_nodes}, &output_split_types_t));
auto output_split_types_vec = output_split_types_t->vec<tstring>();
// Sets output tensors from vectors.
for (int i = 0; i < num_nodes; ++i) {
output_node_ids_vec(i) = output_node_ids[i];
// Adjust the gains to penalize by tree complexity.
output_gains_vec(i) = output_gains[i] - tree_complexity;
output_feature_dimensions_vec(i) = output_feature_dimensions[i];
output_thresholds_vec(i) = output_thresholds[i];
for (int j = 0; j < logits_dim; ++j) {
output_left_node_contribs_matrix(i, j) =
output_left_node_contribs[i][j];
output_right_node_contribs_matrix(i, j) =
output_right_node_contribs[i][j];
}
output_split_types_vec(i) = output_split_types[i];
}
}
| null | null | 198,146
|
27348192869449002529615461959499590790
| 172
|
Add remaining missing validation to `BoostedTreesCalculateBestFeatureSplit`
PiperOrigin-RevId: 387423006
Change-Id: I8eaf30efb223011519e60707bfa751b275d3a443
|
other
|
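Note on the fix above: per the commit message, the missing hardening in `BoostedTreesCalculateBestFeatureSplit` is input validation. A hedged sketch of the kind of checks needed before the per-node loop dereferences `stats_summary(node_id, 0, 0, 0)`; the exact conditions in the upstream patch may differ, and the `DCHECK`s in the body above (which disappear in release builds) are replaced here with fail-fast `OP_REQUIRES`.

// Hedged sketch: validate ranks and ranges up front so node_id indexing
// and the hessian_dim arithmetic cannot run out of bounds.
OP_REQUIRES(context, node_id_range_t->NumElements() == 2,
            errors::InvalidArgument("node_id_range must have 2 elements"));
OP_REQUIRES(context, stats_summary_t->dims() == 4,
            errors::InvalidArgument("stats_summary must be rank 4"));
OP_REQUIRES(context, node_id_last <= stats_summary_t->dim_size(0),
            errors::InvalidArgument("node_id_range exceeds the first "
                                    "dimension of stats_summary"));
OP_REQUIRES(context,
            hessian_dim > 0 && hessian_dim <= logits_dim * logits_dim,
            errors::InvalidArgument("invalid hessian dimension"));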
ImageMagick
|
a6240a163cb787909703d9fc649cf861f60ddd7c
| 1
|
static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image *image, *image2=NULL,
*rotated_image;
register Quantum *q;
unsigned int status;
MATHeader MATLAB_HDR;
size_t size;
size_t CellType;
QuantumInfo *quantum_info;
ImageInfo *clone_info;
int i;
ssize_t ldblk;
unsigned char *BImgBuff = NULL;
double MinVal, MaxVal;
unsigned z, z2;
unsigned Frames;
int logging;
int sample_size;
MagickOffsetType filepos=0x80;
BlobInfo *blob;
size_t one;
unsigned int (*ReadBlobXXXLong)(Image *image);
unsigned short (*ReadBlobXXXShort)(Image *image);
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter");
/*
Open image file.
*/
image = AcquireImage(image_info,exception);
status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read MATLAB image.
*/
clone_info=CloneImageInfo(image_info);
if(ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
MATLAB_HDR.Version = ReadBlobLSBShort(image);
if(ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c",
MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]);
if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2))
{
ReadBlobXXXLong = ReadBlobLSBLong;
ReadBlobXXXShort = ReadBlobLSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
image->endian = LSBEndian;
}
else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2))
{
ReadBlobXXXLong = ReadBlobMSBLong;
ReadBlobXXXShort = ReadBlobMSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
image->endian = MSBEndian;
}
else
goto MATLAB_KO; /* unsupported endian */
if (strncmp(MATLAB_HDR.identific, "MATLAB", 6))
MATLAB_KO: ThrowReaderException(CorruptImageError,"ImproperImageHeader");
filepos = TellBlob(image);
while(!EOFBlob(image)) /* object parser loop */
{
Frames = 1;
(void) SeekBlob(image,filepos,SEEK_SET);
/* printf("pos=%X\n",TellBlob(image)); */
MATLAB_HDR.DataType = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
filepos += MATLAB_HDR.ObjectSize + 4 + 4;
image2 = image;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if(MATLAB_HDR.DataType == miCOMPRESSED)
{
image2 = DecompressBlock(image,MATLAB_HDR.ObjectSize,clone_info,exception);
if(image2==NULL) continue;
MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */
}
#endif
    if(MATLAB_HDR.DataType!=miMATRIX) continue;  /* skip other objects. */
MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2);
MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF;
MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF;
MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2);
if(image!=image2)
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* ??? don't understand why ?? */
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2);
MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeX = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeY = ReadBlobXXXLong(image2);
switch(MATLAB_HDR.DimFlag)
{
case 8: z2=z=1; break; /* 2D matrix*/
case 12: z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB*/
(void) ReadBlobXXXLong(image2);
if(z!=3) ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
break;
case 16: z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */
if(z!=3 && z!=1)
ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
Frames = ReadBlobXXXLong(image2);
break;
default: ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
}
MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2);
MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2);
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass);
if (MATLAB_HDR.StructureClass != mxCHAR_CLASS &&
MATLAB_HDR.StructureClass != mxSINGLE_CLASS && /* float + complex float */
MATLAB_HDR.StructureClass != mxDOUBLE_CLASS && /* double + complex double */
MATLAB_HDR.StructureClass != mxINT8_CLASS &&
MATLAB_HDR.StructureClass != mxUINT8_CLASS && /* uint8 + uint8 3D */
MATLAB_HDR.StructureClass != mxINT16_CLASS &&
MATLAB_HDR.StructureClass != mxUINT16_CLASS && /* uint16 + uint16 3D */
MATLAB_HDR.StructureClass != mxINT32_CLASS &&
MATLAB_HDR.StructureClass != mxUINT32_CLASS && /* uint32 + uint32 3D */
MATLAB_HDR.StructureClass != mxINT64_CLASS &&
MATLAB_HDR.StructureClass != mxUINT64_CLASS) /* uint64 + uint64 3D */
ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix");
switch (MATLAB_HDR.NameFlag)
{
case 0:
size = ReadBlobXXXLong(image2); /* Object name string size */
size = 4 * (ssize_t) ((size + 3 + 1) / 4);
(void) SeekBlob(image2, size, SEEK_CUR);
break;
case 1:
case 2:
case 3:
case 4:
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */
break;
default:
goto MATLAB_KO;
}
CellType = ReadBlobXXXLong(image2); /* Additional object type */
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.CellType: %.20g",(double) CellType);
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* data size */
NEXT_FRAME:
switch (CellType)
{
case miINT8:
case miUINT8:
sample_size = 8;
if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL)
image->depth = 1;
else
image->depth = 8; /* Byte type cell */
ldblk = (ssize_t) MATLAB_HDR.SizeX;
break;
case miINT16:
case miUINT16:
sample_size = 16;
image->depth = 16; /* Word type cell */
ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX);
break;
case miINT32:
case miUINT32:
sample_size = 32;
image->depth = 32; /* Dword type cell */
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miINT64:
case miUINT64:
sample_size = 64;
image->depth = 64; /* Qword type cell */
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
case miSINGLE:
sample_size = 32;
image->depth = 32; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex float type cell */
}
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miDOUBLE:
sample_size = 64;
image->depth = 64; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
DisableMSCWarning(4127)
if (sizeof(double) != 8)
RestoreMSCWarning
ThrowReaderException(CoderError, "IncompatibleSizeOfDouble");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex double type cell */
}
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
default:
ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix");
}
(void) sample_size;
image->columns = MATLAB_HDR.SizeX;
image->rows = MATLAB_HDR.SizeY;
quantum_info=AcquireQuantumInfo(clone_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
one=1;
image->colors = one << image->depth;
if (image->columns == 0 || image->rows == 0)
goto MATLAB_KO;
/* Image is gray when no complex flag is set and 2D Matrix */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
{
image->type=GrayscaleType;
SetImageColorspace(image,GRAYColorspace,exception);
}
/*
If ping is true, then only set image size and colors without
reading any image data.
*/
if (image_info->ping)
{
size_t temp = image->columns;
image->columns = image->rows;
image->rows = temp;
goto done_reading; /* !!!!!! BAD !!!! */
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/* ----- Load raster data ----- */
BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */
if (BImgBuff == NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
MinVal = 0;
MaxVal = 0;
if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum);
}
/* Main loop for reading all scanlines */
if(z==1) z=0; /* read grey scanlines */
/* else read color scanlines */
do
{
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto done_reading; /* Skip image rotation, when cannot set image pixels */
}
if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL))
{
FixLogical((unsigned char *)BImgBuff,ldblk);
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
{
ImportQuantumPixelsFailed:
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
break;
}
}
else
{
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
goto ImportQuantumPixelsFailed;
if (z<=1 && /* fix only during a last pass z==0 || z==1 */
(CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64))
FixSignedValues(image,q,MATLAB_HDR.SizeX);
}
if (!SyncAuthenticPixels(image,exception))
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
}
} while(z-- >= 2);
ExitLoop:
/* Read complex part of numbers here */
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* Find Min and Max Values for complex parts of floats */
CellType = ReadBlobXXXLong(image2); /* Additional object type */
i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/
if (CellType==miDOUBLE || CellType==miSINGLE)
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal);
}
if (CellType==miDOUBLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff);
InsertComplexDoubleRow(image, (double *)BImgBuff, i, MinVal, MaxVal,
exception);
}
if (CellType==miSINGLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff);
InsertComplexFloatRow(image,(float *)BImgBuff,i,MinVal,MaxVal,
exception);
}
}
/* Image is gray when no complex flag is set and 2D Matrix AGAIN!!! */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
image->type=GrayscaleType;
if (image->depth == 1)
image->type=BilevelType;
if(image2==image)
       image2 = NULL;  /* Remove the shadow copy of the image before rotation. */
/* Rotate image. */
rotated_image = RotateImage(image, 90.0, exception);
if (rotated_image != (Image *) NULL)
{
/* Remove page offsets added by RotateImage */
rotated_image->page.x=0;
rotated_image->page.y=0;
blob = rotated_image->blob;
rotated_image->blob = image->blob;
rotated_image->colors = image->colors;
image->blob = blob;
AppendImageToList(&image,rotated_image);
DeleteImageFromList(&image);
}
done_reading:
if(image2!=NULL)
if(image2!=image)
{
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
if (image->next == (Image *) NULL) break;
image=SyncNextImageInList(image);
image->columns=image->rows=0;
image->colors=0;
/* row scan buffer is no longer needed */
RelinquishMagickMemory(BImgBuff);
BImgBuff = NULL;
if(--Frames>0)
{
z = z2;
if(image2==NULL) image2 = image;
goto NEXT_FRAME;
}
    if ((image2!=NULL) && (image2!=image))   /* Does a temporary decompressed shadow image exist? */
{
/* CloseBlob(image2); */
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
}
clone_info=DestroyImageInfo(clone_info);
RelinquishMagickMemory(BImgBuff);
CloseBlob(image);
{
Image *p;
ssize_t scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=scene++;
}
if(clone_info != NULL) /* cleanup garbage file from compression */
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
DestroyImageInfo(clone_info);
clone_info = NULL;
}
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return");
if(image==NULL)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
return (image);
}
| null | null | 198,161
|
201330309137613498322904815795359643333
| 486
|
https://github.com/ImageMagick/ImageMagick/issues/131
|
other
|
tensorflow
|
5b048e87e4e55990dae6b547add4dae59f4e1c76
| 1
|
TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
int index) {
TfLiteTensor* tensor = GetMutableInput(context, node, index);
return tensor->is_variable ? tensor : nullptr;
}
| null | null | 198,169
|
213393638605228148425492190927951362914
| 5
|
Fix a null pointer exception in SVDF
This is due to not checking that `GetVariableInput` returns a non-null tensor.
Also fix a potential null pointer exception in `GetVariableInput`.
PiperOrigin-RevId: 385160147
Change-Id: Iadf3f0705b036a9014d27caa5a8bbd91f4c4c401
|
other
|
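Note on the fix above: the commit message says the null pointer exception in `GetVariableInput` comes from dereferencing the result of `GetMutableInput` without a check. A minimal sketch of the guarded version, assuming the upstream patch takes this shape:

TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  TfLiteTensor* tensor = GetMutableInput(context, node, index);
  if (tensor == nullptr) return nullptr;  // guard before reading is_variable
  return tensor->is_variable ? tensor : nullptr;
}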
tensorflow
|
5b048e87e4e55990dae6b547add4dae59f4e1c76
| 1
|
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* weights_feature;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kWeightsFeatureTensor,
&weights_feature));
const TfLiteTensor* weights_time;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTimeTensor, &weights_time));
const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
TfLiteTensor* scratch;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/0, &scratch));
TfLiteTensor* state = GetVariableInput(context, node, kStateTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (weights_feature->type) {
case kTfLiteFloat32: {
reference_ops::EvalFloatSVDF(
params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(weights_feature),
GetTensorData<float>(weights_feature), GetTensorShape(weights_time),
GetTensorData<float>(weights_time), GetTensorShape(bias),
GetTensorData<float>(bias), GetTensorData<float>(scratch),
GetTensorData<float>(state), GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
}
case kTfLiteUInt8:
case kTfLiteInt8: {
if (input->type == kTfLiteFloat32) {
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/1,
&input_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/2,
&scaling_factors));
TfLiteTensor* float_weights_time;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/3,
&float_weights_time));
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/4,
&zero_points));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/5, &row_sums));
// Dequantize weights time.
// TODO(alanchiao): this dequantization initialization only needs to
// happen once per model and should theoretically be placed in either
// Init or Prepare. However, TFLite doesn't allocate float_weights_time
// until the Eval function.
// TODO(alanchiao): refactor logic out into dequantize function.
if (!op_data->float_weights_time_initialized) {
const float dequantization_scale = weights_time->params.scale;
const int8_t* weights_time_ptr = GetTensorData<int8_t>(weights_time);
float* float_weights_time_ptr =
GetTensorData<float>(float_weights_time);
for (int i = 0; i < NumElements(float_weights_time); ++i) {
float_weights_time_ptr[i] =
weights_time_ptr[i] * dequantization_scale;
}
op_data->float_weights_time_initialized = true;
}
int32_t* zero_points_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs && row_sums != nullptr) {
zero_points_ptr = GetTensorData<int32_t>(zero_points);
row_sums_ptr = GetTensorData<int32_t>(row_sums);
}
reference_ops::EvalHybridSVDF(
params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(weights_feature),
GetTensorData<int8_t>(weights_feature),
weights_feature->params.scale, GetTensorShape(float_weights_time),
GetTensorData<float>(float_weights_time), GetTensorShape(bias),
GetTensorData<float>(bias), GetTensorData<float>(scratch),
GetTensorData<float>(scaling_factors),
GetTensorData<int8_t>(input_quantized), GetTensorData<float>(state),
GetTensorShape(output), GetTensorData<float>(output),
zero_points_ptr, row_sums_ptr, &op_data->compute_row_sums);
return kTfLiteOk;
}
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
input->quantization.params);
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
output->quantization.params);
TfLiteTensor* output_temp;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/1, &output_temp));
// Currently supports only ReLU.
// TODO(jianlijianli): support other activations.
TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActRelu);
reference_ops::EvalIntegerSVDF(
params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(weights_feature),
GetTensorData<int8_t>(weights_feature), GetTensorShape(weights_time),
GetTensorData<int16_t>(weights_time), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorData<int16_t>(state),
GetTensorShape(output), GetTensorData<int8_t>(output),
GetTensorData<int32_t>(scratch), GetTensorData<int32_t>(output_temp),
op_data->effective_scale_1_a, op_data->effective_scale_1_b,
op_data->effective_scale_2_a, op_data->effective_scale_2_b,
input_params->zero_point->data[0],
output_params->zero_point->data[0]);
return kTfLiteOk;
}
default:
context->ReportError(context, "Type %s not currently supported.",
TfLiteTypeGetName(weights_feature->type));
return kTfLiteError;
}
}
| null | null | 198,170
|
48006172578674889644863416691565225644
| 123
|
Fix a null pointer exception in SVDF
This is due to not checking that `GetVariableInput` returns a non-null tensor.
Also fix a potential null pointer exception in `GetVariableInput`.
PiperOrigin-RevId: 385160147
Change-Id: Iadf3f0705b036a9014d27caa5a8bbd91f4c4c401
|
other
|
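Note on the fix above: the same commit message also covers the caller, `Eval`, which passes the result of `GetVariableInput` straight into the reference kernels. A hedged sketch of the caller-side guard; using `TF_LITE_ENSURE` here is an assumption consistent with the surrounding `TF_LITE_ENSURE_OK` style:

TfLiteTensor* state = GetVariableInput(context, node, kStateTensor);
// Reject the node cleanly instead of dereferencing a null state tensor
// inside EvalFloatSVDF / EvalHybridSVDF / EvalIntegerSVDF.
TF_LITE_ENSURE(context, state != nullptr);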
gpac
|
d7daa8aeb6df4b6c3ec102622e1599279310a19e
| 1
|
GF_Err gf_isom_get_sample_for_movie_time(GF_ISOFile *the_file, u32 trackNumber, u64 movieTime, u32 *StreamDescriptionIndex, GF_ISOSearchMode SearchMode, GF_ISOSample **sample, u32 *sampleNumber, u64 *data_offset)
{
Double tsscale;
GF_Err e;
GF_TrackBox *trak;
u64 mediaTime, nextMediaTime;
s64 segStartTime, mediaOffset;
u32 sampNum;
u8 useEdit;
trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak) return GF_BAD_PARAM;
	//only check the duration if it was initially set - do not use the duration as updated after a fragment merge, since that
	//duration does not take tfdt into account
if (trak->Header->initial_duration
&& gf_timestamp_greater(movieTime, trak->Media->mediaHeader->timeScale, trak->Header->initial_duration, trak->moov->mvhd->timeScale)
) {
if (sampleNumber) *sampleNumber = 0;
*StreamDescriptionIndex = 0;
return GF_EOS;
}
//get the media time for this movie time...
mediaTime = segStartTime = 0;
*StreamDescriptionIndex = 0;
nextMediaTime = 0;
e = GetMediaTime(trak, (SearchMode==GF_ISOM_SEARCH_SYNC_FORWARD) ? GF_TRUE : GF_FALSE, movieTime, &mediaTime, &segStartTime, &mediaOffset, &useEdit, &nextMediaTime);
if (e) return e;
/*here we check if we were playing or not and return no sample in normal search modes*/
if (useEdit && mediaOffset == -1) {
if ((SearchMode==GF_ISOM_SEARCH_FORWARD) || (SearchMode==GF_ISOM_SEARCH_BACKWARD)) {
/*get next sample time in MOVIE timescale*/
if (SearchMode==GF_ISOM_SEARCH_FORWARD)
e = GetNextMediaTime(trak, movieTime, &mediaTime);
else
e = GetPrevMediaTime(trak, movieTime, &mediaTime);
if (e) return e;
return gf_isom_get_sample_for_movie_time(the_file, trackNumber, (u32) mediaTime, StreamDescriptionIndex, GF_ISOM_SEARCH_SYNC_FORWARD, sample, sampleNumber, data_offset);
}
if (sampleNumber) *sampleNumber = 0;
if (sample) {
if (! (*sample)) {
*sample = gf_isom_sample_new();
if (! *sample) return GF_OUT_OF_MEM;
}
(*sample)->DTS = movieTime;
(*sample)->dataLength = 0;
(*sample)->CTS_Offset = 0;
}
return GF_OK;
}
/*dwell edit in non-sync mode, fetch next/prev sample depending on mode.
Otherwise return the dwell entry*/
if (useEdit==2) {
if ((SearchMode==GF_ISOM_SEARCH_FORWARD) || (SearchMode==GF_ISOM_SEARCH_BACKWARD)) {
/*get next sample time in MOVIE timescale*/
if (SearchMode==GF_ISOM_SEARCH_FORWARD)
e = GetNextMediaTime(trak, movieTime, &mediaTime);
else
e = GetPrevMediaTime(trak, movieTime, &mediaTime);
if (e) return e;
return gf_isom_get_sample_for_movie_time(the_file, trackNumber, (u32) mediaTime, StreamDescriptionIndex, GF_ISOM_SEARCH_SYNC_FORWARD, sample, sampleNumber, data_offset);
}
}
tsscale = trak->Media->mediaHeader->timeScale;
tsscale /= trak->moov->mvhd->timeScale;
//OK, we have a sample so fetch it
e = gf_isom_get_sample_for_media_time(the_file, trackNumber, mediaTime, StreamDescriptionIndex, SearchMode, sample, &sampNum, data_offset);
if (e) {
if (e==GF_EOS) {
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
//movie is fragmented and samples not yet received, return EOS
if (the_file->moov->mvex && !trak->Media->information->sampleTable->SampleSize->sampleCount)
return e;
#endif
if (nextMediaTime && (nextMediaTime-1 != movieTime))
return gf_isom_get_sample_for_movie_time(the_file, trackNumber, nextMediaTime-1, StreamDescriptionIndex, SearchMode, sample, sampleNumber, data_offset);
}
return e;
}
//OK, now the trick: we have to rebuild the time stamps, according
//to the media time scale (used by SLConfig) - add the edit start time but stay in
//the track TS
if (sample && useEdit) {
u64 _ts = (u64)(segStartTime * tsscale);
(*sample)->DTS += _ts;
/*watchout, the sample fetched may be before the first sample in the edit list (when seeking)*/
if ( (*sample)->DTS > (u64) mediaOffset) {
(*sample)->DTS -= (u64) mediaOffset;
} else {
(*sample)->DTS = 0;
}
}
if (sampleNumber) *sampleNumber = sampNum;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (sample && (*sample) ) (*sample)->DTS += trak->dts_at_seg_start;
#endif
return GF_OK;
}
| null | null | 198,178
|
148501486251921845480245286379253106973
| 108
|
fixed #2108
|
other
|
tensorflow
|
01cff3f986259d661103412a20745928c727326f
| 1
|
void DoCompute(OpKernelContext* c) {
core::RefCountPtr<Var> v;
OP_REQUIRES_OK(c, LookupResource(c, HandleFromInput(c, 0), &v));
Tensor* params = v->tensor();
const Tensor& indices = c->input(1);
const Tensor& updates = c->input(2);
// Check that rank(updates.shape) = rank(indices.shape + params.shape[1:])
OP_REQUIRES(c,
updates.dims() == 0 ||
updates.dims() == indices.dims() + params->dims() - 1,
errors::InvalidArgument(
"Must have updates.shape = indices.shape + "
"params.shape[1:] or updates.shape = [], got ",
"updates.shape ", updates.shape().DebugString(),
", indices.shape ", indices.shape().DebugString(),
", params.shape ", params->shape().DebugString()));
// Check that we have enough index space
const int64_t N_big = indices.NumElements();
OP_REQUIRES(
c, N_big <= std::numeric_limits<Index>::max(),
errors::InvalidArgument("indices has too many elements for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", N_big, " > ",
std::numeric_limits<Index>::max()));
const Index N = static_cast<Index>(N_big);
OP_REQUIRES(
c, params->dim_size(0) <= std::numeric_limits<Index>::max(),
errors::InvalidArgument("params.shape[0] too large for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", params->dim_size(0), " > ",
std::numeric_limits<Index>::max()));
if (N > 0) {
auto indices_flat = indices.flat<Index>();
auto params_flat = params->flat_outer_dims<T>();
if (TensorShapeUtils::IsScalar(updates.shape())) {
const auto update = updates.scalar<T>();
functor::ScatterScalarFunctor<Device, T, Index, op> functor;
const Index bad_i = functor(c, c->template eigen_device<Device>(),
params_flat, update, indices_flat);
OP_REQUIRES(c, bad_i < 0,
errors::InvalidArgument(
"indices", SliceDebugString(indices.shape(), bad_i),
" = ", indices_flat(bad_i), " is not in [0, ",
params->dim_size(0), ")"));
} else {
int64_t num_updates = updates.NumElements();
OP_REQUIRES(c, num_updates % N == 0,
errors::InvalidArgument(
"shape of indices (", indices.shape().DebugString(),
") is not compatible with the shape of updates (",
updates.shape().DebugString(), ")"));
auto updates_flat = updates.shaped<T, 2>({N, num_updates / N});
functor::ScatterFunctor<Device, T, Index, op> functor;
const Index bad_i = functor(c, c->template eigen_device<Device>(),
params_flat, updates_flat, indices_flat);
OP_REQUIRES(c, bad_i < 0,
errors::InvalidArgument(
"indices", SliceDebugString(indices.shape(), bad_i),
" = ", indices_flat(bad_i), " is not in [0, ",
params->dim_size(0), ")"));
}
}
}
| null | null | 198,198 | 58263525248995712789827465195477770320 | 68 |
Fix heap OOB due to dimension mismatch in `ResourceScatterUpdate`
PiperOrigin-RevId: 388292801
Change-Id: Id9bd7244d98d41b1517d4771850b32782c0cc949
| other |

jerryscript | efe63a5bbc5106164a08ee2eb415a7a701f5311f | 1 |
parser_parse_for_statement_start (parser_context_t *context_p) /**< context */
{
parser_loop_statement_t loop;
JERRY_ASSERT (context_p->token.type == LEXER_KEYW_FOR);
lexer_next_token (context_p);
#if JERRY_ESNEXT
bool is_for_await = false;
if (context_p->token.type == LEXER_KEYW_AWAIT)
{
if (JERRY_UNLIKELY (context_p->token.lit_location.status_flags & LEXER_LIT_LOCATION_HAS_ESCAPE))
{
parser_raise_error (context_p, PARSER_ERR_INVALID_KEYWORD);
}
lexer_next_token (context_p);
is_for_await = true;
}
#endif /* JERRY_ESNEXT */
if (context_p->token.type != LEXER_LEFT_PAREN)
{
#if JERRY_ESNEXT
if (context_p->token.type == LEXER_LITERAL
&& context_p->token.keyword_type == LEXER_KEYW_AWAIT
&& !(context_p->token.lit_location.status_flags & LEXER_LIT_LOCATION_HAS_ESCAPE))
{
parser_raise_error (context_p, PARSER_ERR_FOR_AWAIT_NO_ASYNC);
}
#endif /* JERRY_ESNEXT */
parser_raise_error (context_p, PARSER_ERR_LEFT_PAREN_EXPECTED);
}
if (context_p->next_scanner_info_p->source_p == context_p->source_p)
{
parser_for_in_of_statement_t for_in_of_statement;
scanner_location_t start_location, end_location;
#if JERRY_ESNEXT
JERRY_ASSERT (context_p->next_scanner_info_p->type == SCANNER_TYPE_FOR_IN
|| context_p->next_scanner_info_p->type == SCANNER_TYPE_FOR_OF);
bool is_for_in = (context_p->next_scanner_info_p->type == SCANNER_TYPE_FOR_IN);
end_location = ((scanner_location_info_t *) context_p->next_scanner_info_p)->location;
scanner_release_next (context_p, sizeof (scanner_location_info_t));
scanner_get_location (&start_location, context_p);
lexer_next_token (context_p);
uint8_t token_type = LEXER_EOS;
bool has_context = false;
if (context_p->token.type == LEXER_KEYW_VAR
|| context_p->token.type == LEXER_KEYW_LET
|| context_p->token.type == LEXER_KEYW_CONST)
{
token_type = context_p->token.type;
has_context = context_p->next_scanner_info_p->source_p == context_p->source_p;
JERRY_ASSERT (!has_context || context_p->next_scanner_info_p->type == SCANNER_TYPE_BLOCK);
scanner_get_location (&start_location, context_p);
/* TODO: remove this after the pre-scanner supports strict mode detection. */
if (context_p->next_scanner_info_p->source_p == context_p->source_p
&& context_p->next_scanner_info_p->type == SCANNER_TYPE_LET_EXPRESSION)
{
scanner_release_next (context_p, sizeof (scanner_info_t));
}
}
else if (context_p->token.type == LEXER_LITERAL && lexer_token_is_let (context_p))
{
if (context_p->next_scanner_info_p->source_p == context_p->source_p
&& context_p->next_scanner_info_p->type == SCANNER_TYPE_LET_EXPRESSION)
{
scanner_release_next (context_p, sizeof (scanner_info_t));
}
else
{
token_type = LEXER_KEYW_LET;
has_context = (context_p->next_scanner_info_p->source_p == context_p->source_p);
scanner_get_location (&start_location, context_p);
}
}
if (has_context)
{
has_context = parser_push_block_context (context_p, true);
}
scanner_set_location (context_p, &end_location);
#else /* !JERRY_ESNEXT */
JERRY_ASSERT (context_p->next_scanner_info_p->type == SCANNER_TYPE_FOR_IN);
bool is_for_in = true;
scanner_get_location (&start_location, context_p);
scanner_set_location (context_p, &((scanner_location_info_t *) context_p->next_scanner_info_p)->location);
scanner_release_next (context_p, sizeof (scanner_location_info_t));
#endif /* JERRY_ESNEXT */
/* The length of both 'in' and 'of' is two. */
const uint8_t *source_end_p = context_p->source_p - 2;
scanner_seek (context_p);
#if JERRY_ESNEXT
if (is_for_in && is_for_await)
{
context_p->token.line = context_p->line;
context_p->token.column = context_p->column - 2;
parser_raise_error (context_p, PARSER_ERR_FOR_AWAIT_NO_OF);
}
#endif /* JERRY_ESNEXT */
lexer_next_token (context_p);
int options = is_for_in ? PARSE_EXPR : PARSE_EXPR_LEFT_HAND_SIDE;
parser_parse_expression (context_p, options);
if (context_p->token.type != LEXER_RIGHT_PAREN)
{
parser_raise_error (context_p, PARSER_ERR_RIGHT_PAREN_EXPECTED);
}
#ifndef JERRY_NDEBUG
PARSER_PLUS_EQUAL_U16 (context_p->context_stack_depth,
is_for_in ? PARSER_FOR_IN_CONTEXT_STACK_ALLOCATION
: PARSER_FOR_OF_CONTEXT_STACK_ALLOCATION);
#endif /* !JERRY_NDEBUG */
cbc_ext_opcode_t init_opcode = CBC_EXT_FOR_IN_INIT;
#if JERRY_ESNEXT
if (!is_for_in)
{
init_opcode = is_for_await ? CBC_EXT_FOR_AWAIT_OF_INIT : CBC_EXT_FOR_OF_INIT;
}
#endif /* JERRY_ESNEXT */
parser_emit_cbc_ext_forward_branch (context_p, init_opcode, &for_in_of_statement.branch);
JERRY_ASSERT (context_p->last_cbc_opcode == PARSER_CBC_UNAVAILABLE);
for_in_of_statement.start_offset = context_p->byte_code_size;
#if JERRY_ESNEXT
if (has_context)
{
parser_emit_cbc_ext (context_p, CBC_EXT_CLONE_CONTEXT);
}
#endif /* JERRY_ESNEXT */
/* The expression parser must not read the 'in' or 'of' tokens. */
scanner_get_location (&end_location, context_p);
scanner_set_location (context_p, &start_location);
const uint8_t *original_source_end_p = context_p->source_end_p;
context_p->source_end_p = source_end_p;
scanner_seek (context_p);
#if JERRY_ESNEXT
if (token_type == LEXER_EOS)
{
lexer_next_token (context_p);
if (context_p->token.type == LEXER_LEFT_SQUARE || context_p->token.type == LEXER_LEFT_BRACE)
{
token_type = context_p->token.type;
}
}
#else /* !JERRY_ESNEXT */
lexer_next_token (context_p);
uint8_t token_type = context_p->token.type;
#endif /* JERRY_ESNEXT */
switch (token_type)
{
#if JERRY_ESNEXT
case LEXER_KEYW_LET:
case LEXER_KEYW_CONST:
#endif /* JERRY_ESNEXT */
case LEXER_KEYW_VAR:
{
#if JERRY_ESNEXT
if (lexer_check_next_characters (context_p, LIT_CHAR_LEFT_SQUARE, LIT_CHAR_LEFT_BRACE))
{
parser_emit_cbc_ext (context_p, is_for_in ? CBC_EXT_FOR_IN_GET_NEXT
: CBC_EXT_FOR_OF_GET_NEXT);
parser_pattern_flags_t flags = (PARSER_PATTERN_BINDING | PARSER_PATTERN_TARGET_ON_STACK);
if (context_p->next_scanner_info_p->source_p == (context_p->source_p + 1))
{
if (context_p->next_scanner_info_p->type == SCANNER_TYPE_INITIALIZER)
{
scanner_release_next (context_p, sizeof (scanner_location_info_t));
}
else
{
JERRY_ASSERT (context_p->next_scanner_info_p->type == SCANNER_TYPE_LITERAL_FLAGS);
if (context_p->next_scanner_info_p->u8_arg & SCANNER_LITERAL_OBJECT_HAS_REST)
{
flags |= PARSER_PATTERN_HAS_REST_ELEMENT;
}
scanner_release_next (context_p, sizeof (scanner_info_t));
}
}
if (token_type == LEXER_KEYW_LET)
{
flags |= PARSER_PATTERN_LET;
}
else if (token_type == LEXER_KEYW_CONST)
{
flags |= PARSER_PATTERN_CONST;
}
parser_parse_initializer_by_next_char (context_p, flags);
break;
}
#endif /* JERRY_ESNEXT */
lexer_expect_identifier (context_p, LEXER_IDENT_LITERAL);
#if JERRY_ESNEXT
if (context_p->token.keyword_type == LEXER_KEYW_LET
&& token_type != LEXER_KEYW_VAR)
{
parser_raise_error (context_p, PARSER_ERR_LEXICAL_LET_BINDING);
}
#endif /* JERRY_ESNEXT */
JERRY_ASSERT (context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL);
uint16_t literal_index = context_p->lit_object.index;
lexer_next_token (context_p);
if (context_p->token.type == LEXER_ASSIGN)
{
#if JERRY_ESNEXT
if ((context_p->status_flags & PARSER_IS_STRICT) || !is_for_in)
{
parser_raise_error (context_p, PARSER_ERR_FOR_IN_OF_DECLARATION);
}
#endif /* JERRY_ESNEXT */
parser_branch_t branch;
/* Initialiser is never executed. */
parser_emit_cbc_forward_branch (context_p, CBC_JUMP_FORWARD, &branch);
lexer_next_token (context_p);
parser_parse_expression_statement (context_p, PARSE_EXPR_NO_COMMA);
parser_set_branch_to_current_position (context_p, &branch);
}
parser_emit_cbc_ext (context_p, is_for_in ? CBC_EXT_FOR_IN_GET_NEXT
: CBC_EXT_FOR_OF_GET_NEXT);
#if JERRY_ESNEXT
#ifndef JERRY_NDEBUG
if (literal_index < PARSER_REGISTER_START
&& has_context
&& !scanner_literal_is_created (context_p, literal_index))
{
context_p->global_status_flags |= ECMA_PARSE_INTERNAL_FOR_IN_OFF_CONTEXT_ERROR;
}
#endif /* !JERRY_NDEBUG */
uint16_t opcode = (has_context ? CBC_ASSIGN_LET_CONST : CBC_ASSIGN_SET_IDENT);
parser_emit_cbc_literal (context_p, opcode, literal_index);
#else /* !JERRY_ESNEXT */
parser_emit_cbc_literal (context_p, CBC_ASSIGN_SET_IDENT, literal_index);
#endif /* JERRY_ESNEXT */
break;
}
#if JERRY_ESNEXT
case LEXER_LEFT_BRACE:
case LEXER_LEFT_SQUARE:
{
if (context_p->next_scanner_info_p->source_p == context_p->source_p
&& context_p->next_scanner_info_p->type == SCANNER_TYPE_LITERAL_FLAGS
&& (context_p->next_scanner_info_p->u8_arg & SCANNER_LITERAL_DESTRUCTURING_FOR))
{
parser_emit_cbc_ext (context_p, is_for_in ? CBC_EXT_FOR_IN_GET_NEXT
: CBC_EXT_FOR_OF_GET_NEXT);
uint32_t flags = PARSER_PATTERN_TARGET_ON_STACK;
if (context_p->next_scanner_info_p->u8_arg & SCANNER_LITERAL_OBJECT_HAS_REST)
{
flags |= PARSER_PATTERN_HAS_REST_ELEMENT;
}
scanner_release_next (context_p, sizeof (scanner_info_t));
parser_parse_initializer (context_p, flags);
/* Pop the value returned by GET_NEXT. */
parser_emit_cbc (context_p, CBC_POP);
break;
}
/* FALLTHRU */
}
#endif /* JERRY_ESNEXT */
default:
{
uint16_t opcode;
parser_parse_expression (context_p, PARSE_EXPR_LEFT_HAND_SIDE);
opcode = context_p->last_cbc_opcode;
/* The CBC_EXT_FOR_IN_CREATE_CONTEXT flushed the opcode combiner. */
JERRY_ASSERT (opcode != CBC_PUSH_TWO_LITERALS
&& opcode != CBC_PUSH_THREE_LITERALS);
opcode = parser_check_left_hand_side_expression (context_p, opcode);
parser_emit_cbc_ext (context_p, is_for_in ? CBC_EXT_FOR_IN_GET_NEXT
: CBC_EXT_FOR_OF_GET_NEXT);
parser_flush_cbc (context_p);
context_p->last_cbc_opcode = opcode;
break;
}
}
if (context_p->token.type != LEXER_EOS)
{
#if JERRY_ESNEXT
parser_raise_error (context_p, is_for_in ? PARSER_ERR_IN_EXPECTED : PARSER_ERR_OF_EXPECTED);
#else /* !JERRY_ESNEXT */
parser_raise_error (context_p, PARSER_ERR_IN_EXPECTED);
#endif /* JERRY_ESNEXT */
}
parser_flush_cbc (context_p);
scanner_set_location (context_p, &end_location);
context_p->source_end_p = original_source_end_p;
lexer_next_token (context_p);
loop.branch_list_p = NULL;
parser_stack_push (context_p, &for_in_of_statement, sizeof (parser_for_in_of_statement_t));
parser_stack_push (context_p, &loop, sizeof (parser_loop_statement_t));
uint8_t for_type = PARSER_STATEMENT_FOR_IN;
#if JERRY_ESNEXT
if (!is_for_in)
{
for_type = is_for_await ? PARSER_STATEMENT_FOR_AWAIT_OF : PARSER_STATEMENT_FOR_OF;
}
#endif /* JERRY_ESNEXT */
parser_stack_push_uint8 (context_p, for_type);
parser_stack_iterator_init (context_p, &context_p->last_statement);
return;
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_SEMICOLON)
{
#if JERRY_ESNEXT
const uint8_t *source_p = context_p->source_p;
#endif /* JERRY_ESNEXT */
switch (context_p->token.type)
{
#if JERRY_ESNEXT
case LEXER_LITERAL:
{
if (!lexer_token_is_let (context_p))
{
parser_parse_expression_statement (context_p, PARSE_EXPR);
break;
}
if (context_p->next_scanner_info_p->source_p == context_p->source_p
&& context_p->next_scanner_info_p->type != SCANNER_TYPE_BLOCK)
{
if (context_p->next_scanner_info_p->type == SCANNER_TYPE_LET_EXPRESSION)
{
scanner_release_next (context_p, sizeof (scanner_info_t));
}
parser_parse_expression_statement (context_p, PARSE_EXPR);
break;
}
context_p->token.type = LEXER_KEYW_LET;
/* FALLTHRU */
}
case LEXER_KEYW_LET:
case LEXER_KEYW_CONST:
{
if (context_p->next_scanner_info_p->source_p == source_p)
{
parser_push_block_context (context_p, true);
}
/* FALLTHRU */
}
#endif /* JERRY_ESNEXT */
case LEXER_KEYW_VAR:
{
parser_parse_var_statement (context_p);
break;
}
default:
{
parser_parse_expression_statement (context_p, PARSE_EXPR);
break;
}
}
if (context_p->token.type != LEXER_SEMICOLON)
{
parser_raise_error (context_p, PARSER_ERR_SEMICOLON_EXPECTED);
}
}
#if JERRY_ESNEXT
if (is_for_await)
{
parser_raise_error (context_p, PARSER_ERR_FOR_AWAIT_NO_OF);
}
#endif /* JERRY_ESNEXT */
JERRY_ASSERT (context_p->next_scanner_info_p->source_p != context_p->source_p
|| context_p->next_scanner_info_p->type == SCANNER_TYPE_FOR);
if (context_p->next_scanner_info_p->source_p != context_p->source_p
|| ((scanner_for_info_t *) context_p->next_scanner_info_p)->end_location.source_p == NULL)
{
if (context_p->next_scanner_info_p->source_p == context_p->source_p)
{
/* Even though the scanning has failed, there might be valid statements
* inside the for statement which depend on scanner info blocks. */
scanner_release_next (context_p, sizeof (scanner_for_info_t));
}
/* The prescanner couldn't find the second semicolon or the closing parenthesis. */
lexer_next_token (context_p);
parser_parse_expression (context_p, PARSE_EXPR);
if (context_p->token.type != LEXER_SEMICOLON)
{
parser_raise_error (context_p, PARSER_ERR_SEMICOLON_EXPECTED);
}
lexer_next_token (context_p);
parser_parse_expression_statement (context_p, PARSE_EXPR);
JERRY_ASSERT (context_p->token.type != LEXER_RIGHT_PAREN);
parser_raise_error (context_p, PARSER_ERR_RIGHT_PAREN_EXPECTED);
}
parser_for_statement_t for_statement;
scanner_for_info_t *for_info_p = (scanner_for_info_t *) context_p->next_scanner_info_p;
parser_emit_cbc_forward_branch (context_p, CBC_JUMP_FORWARD, &for_statement.branch);
JERRY_ASSERT (context_p->last_cbc_opcode == PARSER_CBC_UNAVAILABLE);
for_statement.start_offset = context_p->byte_code_size;
scanner_get_location (&for_statement.condition_location, context_p);
for_statement.expression_location = for_info_p->expression_location;
scanner_set_location (context_p, &for_info_p->end_location);
scanner_release_next (context_p, sizeof (scanner_for_info_t));
scanner_seek (context_p);
lexer_next_token (context_p);
loop.branch_list_p = NULL;
parser_stack_push (context_p, &for_statement, sizeof (parser_for_statement_t));
parser_stack_push (context_p, &loop, sizeof (parser_loop_statement_t));
parser_stack_push_uint8 (context_p, PARSER_STATEMENT_FOR);
parser_stack_iterator_init (context_p, &context_p->last_statement);
} /* parser_parse_for_statement_start */
| null | null | 198,202 | 146258652977097384493241787237206491033 | 479 |
Fix parsing unterminated let statement (#4808)
This patch fixes #4754
JerryScript-DCO-1.0-Signed-off-by: Robert Fancsik [email protected]
| other |

ok-file-formats | e49cdfb84fb5eca2a6261f3c51a3c793fab9f62e | 1 |
static void ok_png_decode2(ok_png_decoder *decoder) {
ok_png *png = decoder->png;
uint8_t png_header[8];
if (!ok_read(decoder, png_header, sizeof(png_header))) {
return;
}
uint8_t png_signature[8] = {137, 80, 78, 71, 13, 10, 26, 10};
if (memcmp(png_header, png_signature, 8) != 0) {
ok_png_error(decoder->png, OK_PNG_ERROR_INVALID, "Invalid signature (not a PNG file)");
return;
}
// When info_only is true, we only care about the IHDR chunk and whether or not
// the tRNS chunk exists.
bool info_only = (decoder->decode_flags & OK_PNG_INFO_ONLY) != 0;
bool hdr_found = false;
bool end_found = false;
while (!end_found) {
uint8_t chunk_header[8];
uint8_t chunk_footer[4];
if (!ok_read(decoder, chunk_header, sizeof(chunk_header))) {
return;
}
const uint32_t chunk_length = readBE32(chunk_header);
const uint32_t chunk_type = readBE32(chunk_header + 4);
bool success = false;
if (!hdr_found && chunk_type != OK_PNG_CHUNK_CGBI && chunk_type != OK_PNG_CHUNK_IHDR) {
ok_png_error(png, OK_PNG_ERROR_INVALID, "IHDR chunk must appear first");
return;
}
if (chunk_type == OK_PNG_CHUNK_IHDR) {
hdr_found = true;
success = ok_png_read_header(decoder, chunk_length);
if (success && info_only) {
// If the png has alpha, then we have all the info we need.
// Otherwise, continue scanning to see if the tRNS chunk exists.
if (png->has_alpha) {
return;
}
}
} else if (chunk_type == OK_PNG_CHUNK_CGBI) {
success = ok_seek(decoder, (long)chunk_length);
decoder->is_ios_format = true;
} else if (chunk_type == OK_PNG_CHUNK_PLTE && !info_only) {
success = ok_png_read_palette(decoder, chunk_length);
} else if (chunk_type == OK_PNG_CHUNK_TRNS) {
if (info_only) {
// No need to parse this chunk, we have all the info we need.
png->has_alpha = true;
return;
} else {
success = ok_png_read_transparency(decoder, chunk_length);
}
} else if (chunk_type == OK_PNG_CHUNK_IDAT) {
if (info_only) {
// Both IHDR and tRNS must come before IDAT, so we have all the info we need.
return;
}
success = ok_png_read_data(decoder, chunk_length);
} else if (chunk_type == OK_PNG_CHUNK_IEND) {
success = ok_seek(decoder, (long)chunk_length);
end_found = true;
} else {
// Ignore this chunk
success = ok_seek(decoder, (long)chunk_length);
}
if (!success) {
return;
}
// Read the footer (CRC) and ignore it
if (!ok_read(decoder, chunk_footer, sizeof(chunk_footer))) {
return;
}
}
// Sanity check
if (!decoder->decoding_completed) {
ok_png_error(png, OK_PNG_ERROR_INVALID, "Missing imaga data");
}
}
| null | null | 198,205 | 88368051304508135188622151578934062635 | 84 | ok_png: Disallow multiple IHDR chunks (#15) | other |
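The chunk loop above sets `hdr_found` on the first IHDR but never rejects a later one, which is exactly what the message disallows. A minimal sketch of the extra guard, with a stand-in `report_error()` for `ok_png_error()` and the Apple CgBI special case ignored as an assumption:

#include <stdio.h>

static int report_error(const char *msg)   /* stand-in for ok_png_error() */
{
    fprintf(stderr, "png: %s\n", msg);
    return -1;
}

/* Sketch: IHDR must come first and must not appear twice. */
static int check_ihdr_order(unsigned chunk_type, int *hdr_found)
{
    const unsigned IHDR = 0x49484452u;   /* "IHDR" as a big-endian tag */

    if (chunk_type == IHDR) {
        if (*hdr_found)
            return report_error("Multiple IHDR chunks");
        *hdr_found = 1;
        return 0;
    }
    if (!*hdr_found)
        return report_error("IHDR chunk must appear first");
    return 0;
}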

bareos | e8e79984410f5d4f65e199e9e677443c88808a66 | 1 |
bool PamAuthenticateUser(BareosSocket* UA_sock,
const std::string& username_in,
const std::string& password_in,
std::string& authenticated_username)
{
std::unique_ptr<PamData> pam_callback_data(new PamData(UA_sock, password_in));
std::unique_ptr<struct pam_conv> pam_conversation_container(
new struct pam_conv);
struct pam_handle* pamh = nullptr; /* pam session handle */
bool interactive = true;
if (!username_in.empty() && !password_in.empty()) { interactive = false; }
pam_conversation_container->conv
= interactive ? PamConversationCallback : PamLocalCallback;
pam_conversation_container->appdata_ptr = pam_callback_data.get();
const char* username = username_in.empty() ? nullptr : username_in.c_str();
int err = pam_start(service_name.c_str(), username,
pam_conversation_container.get(), &pamh);
if (err != PAM_SUCCESS) {
Dmsg1(debuglevel, "PAM start failed: %s\n", pam_strerror(pamh, err));
return false;
}
err = pam_set_item(pamh, PAM_RUSER, username);
if (err != PAM_SUCCESS) {
Dmsg1(debuglevel, "PAM set_item failed: %s\n", pam_strerror(pamh, err));
return false;
}
err = pam_authenticate(pamh, 0);
if (err != PAM_SUCCESS) {
Dmsg1(debuglevel, "PAM authentication failed: %s\n",
pam_strerror(pamh, err));
return false;
}
#if defined(__sun)
void* data;
#else
const void* data;
#endif
err = pam_get_item(pamh, PAM_USER, &data);
if (err != PAM_SUCCESS) {
Dmsg1(debuglevel, "PAM get_item failed: %s\n", pam_strerror(pamh, err));
return false;
} else {
if (data) {
const char* temp_str = static_cast<const char*>(data);
authenticated_username = temp_str;
}
}
if (pam_end(pamh, err) != PAM_SUCCESS) {
Dmsg1(debuglevel, "PAM end failed: %s\n", pam_strerror(pamh, err));
return false;
}
if (err == PAM_SUCCESS) {
bool ok = true;
if (interactive) { ok = PamConvSendMessage(UA_sock, "", PAM_SUCCESS); }
return ok;
}
return false;
}
| null | null | 198,237 | 68645282009021159660365589311628648946 | 65 |
dir: fix memory-leak on failed PAM authentication
Fixes CVE-2022-24756
| other |
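In `PamAuthenticateUser()` above, the early `return false` paths after `pam_start()` has succeeded never call `pam_end()`, leaking the PAM handle. A minimal sketch of the leak-free shape against the standard Linux-PAM API; the conversation setup is elided and the names here are illustrative, not the committed patch:

#include <stdbool.h>
#include <security/pam_appl.h>

/* Sketch: once pam_start() has succeeded, every exit path funnels
 * through pam_end() so the handle is always released. */
static bool pam_auth_sketch(const char *service, const char *user,
                            const struct pam_conv *conv)
{
    pam_handle_t *pamh = NULL;
    int err = pam_start(service, user, conv, &pamh);

    if (err != PAM_SUCCESS)
        return false;                      /* nothing allocated yet */

    err = pam_set_item(pamh, PAM_RUSER, user);
    if (err == PAM_SUCCESS)
        err = pam_authenticate(pamh, 0);

    if (pam_end(pamh, err) != PAM_SUCCESS)  /* reached on failure too */
        return false;

    return err == PAM_SUCCESS;
}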

barebox | a3337563c705bc8e0cf32f910b3e9e3c43d962ff | 1 |
static int check_passwd(unsigned char *passwd, size_t length)
{
struct digest *d = NULL;
unsigned char *passwd1_sum;
unsigned char *passwd2_sum;
int ret = 0;
int hash_len;
if (IS_ENABLED(CONFIG_PASSWD_CRYPTO_PBKDF2)) {
hash_len = PBKDF2_LENGTH;
} else {
d = digest_alloc(PASSWD_SUM);
if (!d) {
pr_err("No such digest: %s\n",
PASSWD_SUM ? PASSWD_SUM : "NULL");
return -ENOENT;
}
hash_len = digest_length(d);
}
passwd1_sum = calloc(hash_len * 2, sizeof(unsigned char));
if (!passwd1_sum)
return -ENOMEM;
passwd2_sum = passwd1_sum + hash_len;
if (is_passwd_env_enable())
ret = read_env_passwd(passwd2_sum, hash_len);
else if (is_passwd_default_enable())
ret = read_default_passwd(passwd2_sum, hash_len);
else
ret = -EINVAL;
if (ret < 0)
goto err;
if (IS_ENABLED(CONFIG_PASSWD_CRYPTO_PBKDF2)) {
char *key = passwd2_sum + PBKDF2_SALT_LEN;
char *salt = passwd2_sum;
int keylen = PBKDF2_LENGTH - PBKDF2_SALT_LEN;
ret = pkcs5_pbkdf2_hmac_sha1(passwd, length, salt,
PBKDF2_SALT_LEN, PBKDF2_COUNT, keylen, passwd1_sum);
if (ret)
goto err;
if (strncmp(passwd1_sum, key, keylen) == 0)
ret = 1;
} else {
ret = digest_digest(d, passwd, length, passwd1_sum);
if (ret)
goto err;
if (strncmp(passwd1_sum, passwd2_sum, hash_len) == 0)
ret = 1;
}
err:
free(passwd1_sum);
digest_free(d);
return ret;
}
| null | null | 198,239 | 294849430085948649941831986415582450811 | 65 |
password: Use crypto_memneq() to compare hashes
Cryptographic verifications should be time-constant so that an attacker
cannot get information about the secrets used by observing the system,
so use crypto_memneq() rather than memcmp() to compare password hashes.
Signed-off-by: Sascha Hauer <[email protected]>
| other |
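`check_passwd()` above compares hashes with `strncmp()`, which returns at the first mismatching byte; the message's point is that a comparison over secrets should take the same time wherever the difference sits. A minimal standalone sketch of the `crypto_memneq()` idea, not the barebox implementation itself:

#include <stddef.h>

/* Sketch: OR the per-byte differences over the full length, so the
 * running time does not reveal the position of the first mismatch.
 * Returns nonzero when the buffers differ. */
static int memneq_ct(const void *a, const void *b, size_t n)
{
    const unsigned char *pa = a, *pb = b;
    unsigned char diff = 0;
    size_t i;

    for (i = 0; i < n; i++)
        diff |= pa[i] ^ pb[i];

    return diff != 0;
}

In `check_passwd()` the two `strncmp(...) == 0` tests would then become `!memneq_ct(...)` over the hash length.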

tensorflow | a2b743f6017d7b97af1fe49087ae15f0ac634373 | 1 |
void Compute(OpKernelContext* context) override {
// Get the input Tensors.
OpInputList params_nested_splits_in;
OP_REQUIRES_OK(context, context->input_list("params_nested_splits",
                                            &params_nested_splits_in));
const Tensor& params_dense_values_in =
context->input(params_nested_splits_in.size());
const Tensor& indices_in =
context->input(params_nested_splits_in.size() + 1);
DCHECK_GT(params_nested_splits_in.size(), 0); // Enforced by REGISTER_OP.
SPLITS_TYPE num_params = params_nested_splits_in[0].dim_size(0) - 1;
OP_REQUIRES_OK(context, ValidateIndices(indices_in, num_params));
OP_REQUIRES(context, params_dense_values_in.dims() > 0,
errors::InvalidArgument("params.rank must be nonzero"));
SPLITS_TYPE num_params_dense_values = params_dense_values_in.dim_size(0);
// Calculate the `splits`, and store the value slices that we need to
// copy in `value_slices`.
std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>> value_slices;
SPLITS_TYPE num_values = 0;
std::vector<std::vector<SPLITS_TYPE>> out_splits;
OP_REQUIRES_OK(context, MakeSplits(indices_in, params_nested_splits_in,
num_params_dense_values, &out_splits,
&value_slices, &num_values));
// Write the output tensors.
OP_REQUIRES_OK(context, WriteSplits(out_splits, context));
OP_REQUIRES_OK(context,
WriteValues(params_dense_values_in, value_slices,
out_splits.size(), num_values, context));
}
| null | null | 198,259 | 320142100828149702516338704383329826014 | 33 |
Fix heap OOB in `tf.raw_ops.RaggedGather`
PiperOrigin-RevId: 388355464
Change-Id: If14d96231d1cd7aad7c4d1c22c1bab1576b75717
| other |

tensorflow | 4923de56ec94fff7770df259ab7f2288a74feb41 | 1 |
void ReshapeSparseTensor(OpKernelContext *context,
const Tensor &input_indices_in,
const Tensor &input_shape_in,
const Tensor &target_shape_in, int output_indices_idx,
int output_shape_idx) {
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices_in.shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
input_indices_in.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape_in.shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
input_shape_in.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(target_shape_in.shape()),
errors::InvalidArgument(
"Target shape should be a vector but received shape ",
target_shape_in.shape().DebugString()));
const int64_t output_rank = target_shape_in.NumElements();
const TensorShape input_shape(input_shape_in.vec<int64>());
const int64_t dense_size = input_shape.num_elements();
const int64_t nnz = input_indices_in.shape().dim_size(0);
// Compute the output shape. Determine product of specified dimensions, and
// find the index of the unspecified one.
TensorShape output_shape;
int64_t product = 1;
int unknown_index = -1;
auto target_shape = target_shape_in.vec<int64>();
for (int d = 0; d < output_rank; ++d) {
const int64_t size = target_shape(d);
if (size == -1) {
OP_REQUIRES(
context, unknown_index == -1,
errors::InvalidArgument("only one output dimension may be -1, "
"not both ",
unknown_index, " and ", d));
unknown_index = d;
output_shape.AddDim(1);
} else {
OP_REQUIRES(context, size >= 0,
errors::InvalidArgument("size ", d,
" must be non-negative, not ", size));
product *= size;
output_shape.AddDim(size);
}
}
if (unknown_index != -1) {
OP_REQUIRES(
context, product > 0,
errors::InvalidArgument("reshape cannot infer the missing "
"input size for an empty tensor unless all "
"specified input sizes are non-zero"));
const int64_t missing = dense_size / product;
OP_REQUIRES(
context, product * missing == dense_size,
errors::InvalidArgument(
"Input to reshape is a SparseTensor with ", dense_size,
" dense values, but the requested shape requires a multiple of ",
product, ". input_shape=", input_shape.DebugString(),
" output_shape=", output_shape.DebugString()));
output_shape.set_dim(unknown_index, missing);
}
OP_REQUIRES(
context, output_shape.num_elements() == dense_size,
errors::InvalidArgument("Input to reshape is a tensor with ", dense_size,
" dense values, but the requested shape has ",
output_shape.num_elements(),
". input_shape=", input_shape.DebugString(),
" output_shape=", output_shape.DebugString()));
// Optimize for reshaping to the same shape.
if (input_shape == output_shape) {
context->set_output(output_indices_idx, input_indices_in);
context->set_output(output_shape_idx, input_shape_in);
return;
}
Tensor *result_shape = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(output_shape_idx,
TensorShape({output_rank}),
&result_shape));
auto output_shape_vec = result_shape->vec<int64>();
for (int j = 0; j < output_shape.dims(); ++j) {
output_shape_vec(j) = output_shape.dim_size(j);
}
Tensor *result_indices = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(output_indices_idx,
TensorShape({nnz, output_rank}),
&result_indices));
if (nnz > 0) {
OP_REQUIRES_OK(context, functor::ReshapeSparseTensorFunctor<Device>()(
context, input_shape, output_shape,
input_indices_in.matrix<int64>(),
result_indices->matrix<int64>()));
}
}
| null | null | 198,282 | 190782557585607280379165978157912191873 | 100 |
Don't do any work when reshaping 0 elements sparse tensor.
If reshaping to 0 elements tensor, check that input has no elements.
If reshaping no elements input, check that output has no elements.
PiperOrigin-RevId: 388296986
Change-Id: Iadc9fe7252e14313ca987e69bf0d7042fd10232a
| other |
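The message states the invariant directly: an empty input may only reshape to an empty output and vice versa, and in that case the index remapping should not run at all. A minimal language-neutral sketch of the guard in C, with plain integers standing in for the tensor element counts:

/* Sketch: mismatched emptiness is an error; matched emptiness means
 * there is no index remapping work to do at all. */
static int reshape_empty_guard(long long in_elems, long long out_elems,
                               int *skip_remap)
{
    *skip_remap = 0;
    if (in_elems == 0 || out_elems == 0) {
        if (in_elems != out_elems)
            return -1;        /* e.g. reshaping 0 elements to shape [4] */
        *skip_remap = 1;      /* 0 -> 0: nothing to compute */
    }
    return 0;
}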

pesign | b879dda52f8122de697d145977c285fb0a022d76 | 1 |
void cms_set_pw_data(cms_context *cms, secuPWData *pwdata)
{
ingress();
switch (cms->pwdata.source) {
case PW_SOURCE_INVALID:
case PW_PROMPT:
case PW_DEVICE:
case PW_SOURCE_MAX:
break;
case PW_FROMFD:
if (cms->pwdata.intdata >= 0 &&
!(pwdata->source == PW_FROMFD &&
cms->pwdata.intdata == pwdata->intdata))
close(cms->pwdata.intdata);
break;
case PW_FROMFILEDB:
case PW_FROMENV:
case PW_FROMFILE:
case PW_PLAINTEXT:
memset(cms->pwdata.data, 0, strlen(cms->pwdata.data));
xfree(cms->pwdata.data);
break;
case PW_DATABASE:
xfree(cms->pwdata.data);
break;
}
memmove(&cms->pwdata, pwdata, sizeof(*pwdata));
dprintf("pwdata:%p", pwdata);
dprintf("pwdata->source:%d", pwdata->source);
dprintf("pwdata->data:%p (\"%s\")", pwdata->data,
pwdata->data ? pwdata->data : "(null)");
egress();
}
| null | null | 198,317 | 114679687783740689335214625108343607963 | 38 |
Handle NULL pwdata in cms_set_pw_data()
When 12f16710ee44ef64ddb044a3523c3c4c4d90039a rewrote this function, it
didn't handle the NULL pwdata invocation from daemon.c. This leads to an
explicit NULL dereference and crash on all attempts to daemonize pesign.
Signed-off-by: Robbie Harwood <[email protected]>
| other |
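The crash path is visible above: `memmove(&cms->pwdata, pwdata, ...)` and the `pwdata->source` debug prints run unconditionally, so the NULL call from daemon.c dereferences a null pointer. A hedged sketch of the guard, using a simplified stand-in struct rather than the real secuPWData, and not necessarily the committed fix:

#include <string.h>

struct pw_data { int source; char *data; };   /* stand-in for secuPWData */

/* Sketch: treat a NULL pwdata as "clear the cached password data"
 * instead of dereferencing it. */
static void set_pw_data_sketch(struct pw_data *cached,
                               const struct pw_data *pwdata)
{
    if (!pwdata) {
        memset(cached, 0, sizeof(*cached));
        return;
    }
    memmove(cached, pwdata, sizeof(*pwdata));
}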

owntone-server | 246d8ae0cef27377e5dfe9ee3ad87e864d6b6266 | 1 |
net_bind(short unsigned *port, int type, const char *log_service_name)
{
struct addrinfo hints = { 0 };
struct addrinfo *servinfo;
struct addrinfo *ptr;
const char *cfgaddr;
char addr[INET6_ADDRSTRLEN];
char strport[8];
int yes = 1;
int no = 0;
int fd;
int ret;
cfgaddr = cfg_getstr(cfg_getsec(cfg, "general"), "bind_address");
hints.ai_socktype = (type & (SOCK_STREAM | SOCK_DGRAM)); // filter since type can be SOCK_STREAM | SOCK_NONBLOCK
hints.ai_family = (cfg_getbool(cfg_getsec(cfg, "general"), "ipv6")) ? AF_INET6 : AF_INET;
hints.ai_flags = cfgaddr ? 0 : AI_PASSIVE;
snprintf(strport, sizeof(strport), "%hu", *port);
ret = getaddrinfo(cfgaddr, strport, &hints, &servinfo);
if (ret < 0)
{
DPRINTF(E_LOG, L_MISC, "Failure creating '%s' service, could not resolve '%s' (port %s): %s\n", log_service_name, cfgaddr ? cfgaddr : "(ANY)", strport, gai_strerror(ret));
return -1;
}
for (ptr = servinfo, fd = -1; ptr != NULL; ptr = ptr->ai_next)
{
if (fd >= 0)
close(fd);
fd = socket(ptr->ai_family, type | SOCK_CLOEXEC, ptr->ai_protocol);
if (fd < 0)
continue;
      // TODO libevent sets this; do we need to do the same?
ret = setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes));
if (ret < 0)
continue;
ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes));
if (ret < 0)
continue;
if (ptr->ai_family == AF_INET6)
{
// We want to be sure the service is dual stack
ret = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &no, sizeof(no));
if (ret < 0)
continue;
}
ret = bind(fd, ptr->ai_addr, ptr->ai_addrlen);
if (ret < 0)
continue;
break;
}
freeaddrinfo(servinfo);
if (!ptr)
{
DPRINTF(E_LOG, L_MISC, "Could not create service '%s' with address %s, port %hu: %s\n", log_service_name, cfgaddr ? cfgaddr : "(ANY)", *port, strerror(errno));
goto error;
}
// Get the port that was assigned
ret = getsockname(fd, ptr->ai_addr, &ptr->ai_addrlen);
if (ret < 0)
{
DPRINTF(E_LOG, L_MISC, "Could not find address of service '%s': %s\n", log_service_name, strerror(errno));
goto error;
}
net_port_get(port, (union net_sockaddr *)ptr->ai_addr);
net_address_get(addr, sizeof(addr), (union net_sockaddr *)ptr->ai_addr);
DPRINTF(E_DBG, L_MISC, "Service '%s' bound to %s, port %hu, socket %d\n", log_service_name, addr, *port, fd);
return fd;
error:
close(fd);
return -1;
}
| null | null | 198,350 | 242703657813636020025363398917671657558 | 87 |
[misc] Fix use-after-free in net_bind()
Thanks to Ba Jinsheng for reporting this bug
| other |
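The bug is visible in `net_bind()` above: `freeaddrinfo(servinfo)` runs right after the loop, yet `ptr->ai_addr` is still handed to `getsockname()` afterwards. The usual repair is to snapshot the bound address into caller-owned storage before releasing the list; a minimal sketch with illustrative names:

#include <netdb.h>
#include <sys/socket.h>
#include <unistd.h>

/* Sketch: bind to the first usable address, snapshot the bound address
 * via getsockname() into local storage, and only then release the
 * addrinfo list, so no ai_addr pointer outlives freeaddrinfo(). */
static int bind_first_sketch(struct addrinfo *servinfo,
                             struct sockaddr_storage *out, socklen_t *outlen)
{
    struct addrinfo *ptr;
    int fd = -1;

    for (ptr = servinfo; ptr != NULL; ptr = ptr->ai_next) {
        fd = socket(ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol);
        if (fd < 0)
            continue;
        if (bind(fd, ptr->ai_addr, ptr->ai_addrlen) == 0)
            break;
        close(fd);
        fd = -1;
    }

    if (fd >= 0) {
        *outlen = sizeof(*out);
        if (getsockname(fd, (struct sockaddr *)out, outlen) < 0) {
            close(fd);
            fd = -1;
        }
    }

    freeaddrinfo(servinfo);   /* the list is not referenced after this */
    return fd;
}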

tensorflow | 803404044ae7a1efac48ba82d74111fce1ddb09a | 1 |
void Compute(OpKernelContext* ctx) override {
const Tensor* x_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("x", &x_tensor));
const Tensor* cs_prev_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("cs_prev", &cs_prev_tensor));
const Tensor* h_prev_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("h_prev", &h_prev_tensor));
const Tensor* w_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("w", &w_tensor));
const Tensor* wci_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wci", &wci_tensor));
const Tensor* wcf_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wcf", &wcf_tensor));
const Tensor* wco_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wco", &wco_tensor));
const Tensor* b_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("b", &b_tensor));
const int64_t batch_size = x_tensor->dim_size(0);
const int64_t input_size = x_tensor->dim_size(1);
const int64_t cell_size = cs_prev_tensor->dim_size(1);
// Sanity checks for our input shapes.
OP_REQUIRES(ctx, cs_prev_tensor->dim_size(0) == batch_size,
errors::InvalidArgument("cs_prev.dims(0) != batch_size: ",
cs_prev_tensor->dim_size(0), " vs. ",
batch_size));
OP_REQUIRES(ctx, cs_prev_tensor->dim_size(1) == cell_size,
errors::InvalidArgument("cs_prev.dims(1) != cell_size: ",
cs_prev_tensor->dim_size(1), " vs. ",
cell_size));
OP_REQUIRES(ctx, h_prev_tensor->dim_size(0) == batch_size,
errors::InvalidArgument("h_prev.dims(0) != batch_size: ",
h_prev_tensor->dim_size(0), " vs. ",
batch_size));
OP_REQUIRES(ctx, h_prev_tensor->dim_size(1) == cell_size,
errors::InvalidArgument(
"h_prev.dims(1) != cell_size: ", h_prev_tensor->dim_size(1),
" vs. ", cell_size));
OP_REQUIRES(ctx, w_tensor->dim_size(0) == input_size + cell_size,
errors::InvalidArgument(
"w.dim_size(0) != input_size + cell_size: ",
w_tensor->dim_size(0), " vs. ", input_size + cell_size));
OP_REQUIRES(ctx, w_tensor->dim_size(1) == cell_size * 4,
errors::InvalidArgument(
"w.dim_size(1) != cell_size * 4: ", w_tensor->dim_size(1),
" vs. ", cell_size * 4));
OP_REQUIRES(ctx, b_tensor->dim_size(0) == cell_size * 4,
errors::InvalidArgument(
"b.dim_size(0) != cell_size * 4: ", b_tensor->dim_size(0),
" vs. ", cell_size * 4));
// Allocate our output tensors.
Tensor* i_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->forward_input_or_allocate_output(
{"h_prev"}, "i",
TensorShape({batch_size, cell_size}), &i_tensor));
Tensor* cs_tensor = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("cs", TensorShape({batch_size, cell_size}),
&cs_tensor));
Tensor* f_tensor = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("f", TensorShape({batch_size, cell_size}),
&f_tensor));
Tensor* o_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->forward_input_or_allocate_output(
{"cs_prev"}, "o",
TensorShape({batch_size, cell_size}), &o_tensor));
Tensor* ci_tensor = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("ci", TensorShape({batch_size, cell_size}),
&ci_tensor));
Tensor* co_tensor = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("co", TensorShape({batch_size, cell_size}),
&co_tensor));
Tensor* h_tensor = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("h", TensorShape({batch_size, cell_size}),
&h_tensor));
// Allocate our temp tensors.
Tensor xh_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(
DataTypeToEnum<T>::v(),
TensorShape({batch_size, input_size + cell_size}),
&xh_tensor));
Tensor gates_tensor;
OP_REQUIRES_OK(ctx,
ctx->allocate_temp(DataTypeToEnum<T>::v(),
TensorShape({batch_size, cell_size * 4}),
&gates_tensor));
const Device& device = ctx->eigen_device<Device>();
functor::LSTMBlockCellFprop<Device, T, USE_CUBLAS, gate_layout>(
batch_size, input_size, cell_size)(
ctx, device, forget_bias_, cell_clip_, use_peephole_,
x_tensor->matrix<T>(), cs_prev_tensor->matrix<T>(),
h_prev_tensor->matrix<T>(), w_tensor->matrix<T>(), wci_tensor->vec<T>(),
wcf_tensor->vec<T>(), wco_tensor->vec<T>(), b_tensor->vec<T>(),
xh_tensor.matrix<T>(), i_tensor->matrix<T>(), cs_tensor->matrix<T>(),
f_tensor->matrix<T>(), o_tensor->matrix<T>(), ci_tensor->matrix<T>(),
co_tensor->matrix<T>(), gates_tensor.matrix<T>(),
h_tensor->matrix<T>());
}
| null | null | 198,374 | 260646771289760508359660640080487592990 | 124 |
Fix security vulnerability with LSTMBlockCellOp
PiperOrigin-RevId: 446028341
| other |

uftpd | 0fb2c031ce0ace07cc19cd2cb2143c4b5a63c9dd | 1 |
static void handle_PORT(ctrl_t *ctrl, char *str)
{
int a, b, c, d, e, f;
char addr[INET_ADDRSTRLEN];
struct sockaddr_in sin;
if (ctrl->data_sd > 0) {
uev_io_stop(&ctrl->data_watcher);
close(ctrl->data_sd);
ctrl->data_sd = -1;
}
/* Convert PORT command's argument to IP address + port */
sscanf(str, "%d,%d,%d,%d,%d,%d", &a, &b, &c, &d, &e, &f);
sprintf(addr, "%d.%d.%d.%d", a, b, c, d);
/* Check IPv4 address using inet_aton(), throw away converted result */
if (!inet_aton(addr, &(sin.sin_addr))) {
ERR(0, "Invalid address '%s' given to PORT command", addr);
send_msg(ctrl->sd, "500 Illegal PORT command.\r\n");
return;
}
strlcpy(ctrl->data_address, addr, sizeof(ctrl->data_address));
ctrl->data_port = e * 256 + f;
DBG("Client PORT command accepted for %s:%d", ctrl->data_address, ctrl->data_port);
send_msg(ctrl->sd, "200 PORT command successful.\r\n");
}
| null | null | 198,399 | 22646162618618523500441009878618953221 | 29 |
FTP: Fix buffer overflow in PORT parser, reported by Aaron Esau
Signed-off-by: Joachim Nilsson <[email protected]>
| other |
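In `handle_PORT()` above, `sscanf()` leaves `a..f` unchecked, so octet values outside 0-255 can expand "%d.%d.%d.%d" beyond the 16-byte `INET_ADDRSTRLEN` buffer. A minimal sketch of the hardened parse: require all six fields, range-check them, and format with a bounded `snprintf()`:

#include <stdio.h>

/* Sketch: accept the PORT argument only when all six fields parse and
 * each fits in a byte; the bounded snprintf() then cannot overflow. */
static int parse_port_arg(const char *str, char addr[16], unsigned short *port)
{
    int v[6], i;

    if (sscanf(str, "%d,%d,%d,%d,%d,%d",
               &v[0], &v[1], &v[2], &v[3], &v[4], &v[5]) != 6)
        return -1;
    for (i = 0; i < 6; i++)
        if (v[i] < 0 || v[i] > 255)
            return -1;

    snprintf(addr, 16, "%d.%d.%d.%d", v[0], v[1], v[2], v[3]);
    *port = (unsigned short)(v[4] * 256 + v[5]);
    return 0;
}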

cups | 0bc9dc4658c26920a3f66da7dd234be463ca572e | 1 |
ctcompare(const char *a, /* I - First string */
const char *b) /* I - Second string */
{
int result = 0; /* Result */
while (*a && *b)
{
result |= *a ^ *b;
a ++;
b ++;
}
return (result);
}
| null | null | 198,433 | 39715001571456723900086525845394192958 | 15 | CVE-2022-26691: An incorrect comparison in local admin authentication | other |
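The loop in `ctcompare()` above exits at the first NUL in either string, so an empty or prefix string accumulates no difference and compares "equal" to a longer secret. A sketch of a comparison that also folds the terminators into the result; this illustrates the class of fix, not necessarily the exact CUPS change:

/* Sketch: keep walking until both strings are exhausted, folding the
 * terminating bytes into the accumulator, so "" or a strict prefix can
 * no longer match a longer secret. Returns 0 only on identical input. */
static int ctcompare_sketch(const char *a, const char *b)
{
    int result = 0;

    while (*a || *b) {
        result |= *a ^ *b;
        if (*a) a++;
        if (*b) b++;
    }
    return result;
}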

gerbv | 319a8af890e4d0a5c38e6d08f510da8eefc42537 | 1 |
aperture_report(gerbv_aperture_t *apertures[], int aperture_num,
double x, double y, gerbv_image_t *img, gerbv_project_t *prj)
{
gerbv_aperture_type_t type = apertures[aperture_num]->type;
double *params = apertures[aperture_num]->parameter;
gerbv_simplified_amacro_t *sim = apertures[aperture_num]->simplified;
double *simpars = sim->parameter;
g_message (_(" Aperture used: D%d"), aperture_num);
g_message (_(" Aperture type: %s"),
(type == GERBV_APTYPE_MACRO)?
_(gerbv_aperture_type_name(sim->type)):
_(gerbv_aperture_type_name(type)));
switch (type) {
case GERBV_APTYPE_CIRCLE:
g_message (_(" Diameter: %g %s"),
screen_units(params[0]),
screen_units_str());
break;
case GERBV_APTYPE_RECTANGLE:
case GERBV_APTYPE_OVAL:
g_message (_(" Dimensions: %gx%g %s"),
screen_units(params[0]),
screen_units(params[1]),
screen_units_str());
break;
case GERBV_APTYPE_MACRO: {
switch (sim->type) {
case GERBV_APTYPE_MACRO_CIRCLE:
g_message (_(" Diameter: %g %s"),
screen_units(simpars[CIRCLE_DIAMETER]),
screen_units_str());
x += simpars[CIRCLE_CENTER_X];
y += simpars[CIRCLE_CENTER_Y];
gerbv_transform_coord_for_image(&x, &y, img, prj);
g_message (_(" Center: (%g, %g) %s"),
screen_units(x), screen_units(y),
screen_units_str());
break;
case GERBV_APTYPE_MACRO_OUTLINE:
g_message (_(" Number of points: %g"),
simpars[OUTLINE_NUMBER_OF_POINTS]);
x += simpars[OUTLINE_FIRST_X];
y += simpars[OUTLINE_FIRST_Y];
gerbv_transform_coord_for_image(&x, &y, img, prj);
g_message (_(" Start: (%g, %g) %s"),
screen_units(x), screen_units(y),
screen_units_str());
g_message (_(" Rotation: %g deg"),
simpars[OUTLINE_ROTATION_IDX(simpars)]);
break;
case GERBV_APTYPE_MACRO_POLYGON:
g_message (_(" Number of points: %g"),
simpars[POLYGON_NUMBER_OF_POINTS]);
g_message (_(" Diameter: %g %s"),
screen_units(simpars[POLYGON_DIAMETER]),
screen_units_str());
x += simpars[POLYGON_CENTER_X];
y += simpars[POLYGON_CENTER_Y];
gerbv_transform_coord_for_image(&x, &y, img, prj);
g_message (_(" Center: (%g, %g) %s"),
screen_units(x), screen_units(y),
screen_units_str());
g_message (_(" Rotation: %g deg"),
simpars[POLYGON_ROTATION]);
break;
case GERBV_APTYPE_MACRO_MOIRE:
g_message (_(" Outside diameter: %g %s"),
screen_units(simpars[MOIRE_OUTSIDE_DIAMETER]),
screen_units_str());
g_message (_(" Ring thickness: %g %s"),
screen_units(simpars[MOIRE_CIRCLE_THICKNESS]),
screen_units_str());
g_message (_(" Gap width: %g %s"),
screen_units(simpars[MOIRE_GAP_WIDTH]),
screen_units_str());
g_message (_(" Number of rings: %g"),
simpars[MOIRE_NUMBER_OF_CIRCLES]);
g_message (_(" Crosshair thickness: %g %s"),
screen_units(
simpars[MOIRE_CROSSHAIR_THICKNESS]),
screen_units_str());
g_message (_(" Crosshair length: %g %s"),
screen_units(simpars[MOIRE_CROSSHAIR_LENGTH]),
screen_units_str());
x += simpars[MOIRE_CENTER_X];
y += simpars[MOIRE_CENTER_Y];
gerbv_transform_coord_for_image(&x, &y, img, prj);
g_message (_(" Center: (%g, %g) %s"),
screen_units(x), screen_units(y),
screen_units_str());
g_message (_(" Rotation: %g deg"),
simpars[MOIRE_ROTATION]);
break;
case GERBV_APTYPE_MACRO_THERMAL:
g_message (_(" Outside diameter: %g %s"),
screen_units(simpars[THERMAL_OUTSIDE_DIAMETER]),
screen_units_str());
g_message (_(" Inside diameter: %g %s"),
screen_units(simpars[THERMAL_INSIDE_DIAMETER]),
screen_units_str());
g_message (_(" Crosshair thickness: %g %s"),
screen_units(
simpars[THERMAL_CROSSHAIR_THICKNESS]),
screen_units_str());
x += simpars[THERMAL_CENTER_X];
y += simpars[THERMAL_CENTER_Y];
gerbv_transform_coord_for_image(&x, &y, img, prj);
g_message (_(" Center: (%g, %g) %s"),
screen_units(x), screen_units(y),
screen_units_str());
g_message (_(" Rotation: %g deg"),
simpars[THERMAL_ROTATION]);
break;
case GERBV_APTYPE_MACRO_LINE20:
g_message (_(" Width: %g %s"),
screen_units(simpars[LINE20_WIDTH]),
screen_units_str());
x += simpars[LINE20_START_X];
y += simpars[LINE20_START_Y];
gerbv_transform_coord_for_image(&x, &y, img, prj);
g_message (_(" Start: (%g, %g) %s"),
screen_units(x), screen_units(y),
screen_units_str());
x += simpars[LINE20_END_X];
y += simpars[LINE20_END_Y];
gerbv_transform_coord_for_image(&x, &y, img, prj);
g_message (_(" Stop: (%g, %g) %s"),
screen_units(x), screen_units(y),
screen_units_str());
g_message (_(" Rotation: %g deg"),
simpars[LINE20_ROTATION]);
break;
case GERBV_APTYPE_MACRO_LINE21:
g_message (_(" Width: %g %s"),
screen_units(simpars[LINE21_WIDTH]),
screen_units_str());
g_message (_(" Height: %g %s"),
screen_units(simpars[LINE21_HEIGHT]),
screen_units_str());
x += simpars[LINE21_CENTER_X];
y += simpars[LINE21_CENTER_Y];
gerbv_transform_coord_for_image(&x, &y, img, prj);
g_message (_(" Center: (%g, %g) %s"),
screen_units(x), screen_units(y),
screen_units_str());
g_message (_(" Rotation: %g deg"),
simpars[LINE21_ROTATION]);
break;
case GERBV_APTYPE_MACRO_LINE22:
g_message (_(" Width: %g %s"),
screen_units(simpars[LINE22_WIDTH]),
screen_units_str());
g_message (_(" Height: %g %s"),
screen_units(simpars[LINE22_HEIGHT]),
screen_units_str());
x += simpars[LINE22_LOWER_LEFT_X];
y += simpars[LINE22_LOWER_LEFT_Y];
gerbv_transform_coord_for_image(&x, &y, img, prj);
g_message (_(" Lower left: (%g, %g) %s"),
screen_units(x), screen_units(y),
screen_units_str());
g_message (_(" Rotation: %g deg"),
simpars[LINE22_ROTATION]);
break;
default:
break;
}
break;
}
default:
break;
}
}
| null | null | 198,435 | 86272392915191272115118972342960989458 | 183 |
Remove local alias to parameter array
Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402
| other |

ntfs-3g | 60717a846deaaea47e50ce58872869f7bd1103b5 | 1 |
s64 ntfs_get_attribute_value(const ntfs_volume *vol,
const ATTR_RECORD *a, u8 *b)
{
runlist *rl;
s64 total, r;
int i;
/* Sanity checks. */
if (!vol || !a || !b) {
errno = EINVAL;
return 0;
}
/* Complex attribute? */
/*
* Ignore the flags in case they are not zero for an attribute list
* attribute. Windows does not complain about invalid flags and chkdsk
* does not detect or fix them so we need to cope with it, too.
*/
if (a->type != AT_ATTRIBUTE_LIST && a->flags) {
ntfs_log_error("Non-zero (%04x) attribute flags. Cannot handle "
"this yet.\n", le16_to_cpu(a->flags));
errno = EOPNOTSUPP;
return 0;
}
if (!a->non_resident) {
/* Attribute is resident. */
/* Sanity check. */
if (le32_to_cpu(a->value_length) + le16_to_cpu(a->value_offset)
> le32_to_cpu(a->length)) {
return 0;
}
memcpy(b, (const char*)a + le16_to_cpu(a->value_offset),
le32_to_cpu(a->value_length));
errno = 0;
return (s64)le32_to_cpu(a->value_length);
}
/* Attribute is not resident. */
/* If no data, return 0. */
if (!(a->data_size)) {
errno = 0;
return 0;
}
/*
* FIXME: What about attribute lists?!? (AIA)
*/
/* Decompress the mapping pairs array into a runlist. */
rl = ntfs_mapping_pairs_decompress(vol, a, NULL);
if (!rl) {
errno = EINVAL;
return 0;
}
/*
* FIXED: We were overflowing here in a nasty fashion when we
* reach the last cluster in the runlist as the buffer will
* only be big enough to hold data_size bytes while we are
* reading in allocated_size bytes which is usually larger
* than data_size, since the actual data is unlikely to have a
* size equal to a multiple of the cluster size!
* FIXED2: We were also overflowing here in the same fashion
* when the data_size was more than one run smaller than the
* allocated size which happens with Windows XP sometimes.
*/
/* Now load all clusters in the runlist into b. */
for (i = 0, total = 0; rl[i].length; i++) {
if (total + (rl[i].length << vol->cluster_size_bits) >=
sle64_to_cpu(a->data_size)) {
unsigned char *intbuf = NULL;
/*
* We have reached the last run so we were going to
* overflow when executing the ntfs_pread() which is
* BAAAAAAAD!
* Temporary fix:
* Allocate a new buffer with size:
* rl[i].length << vol->cluster_size_bits, do the
* read into our buffer, then memcpy the correct
* amount of data into the caller supplied buffer,
* free our buffer, and continue.
* We have reached the end of data size so we were
* going to overflow in the same fashion.
* Temporary fix: same as above.
*/
intbuf = ntfs_malloc(rl[i].length << vol->cluster_size_bits);
if (!intbuf) {
free(rl);
return 0;
}
/*
* FIXME: If compressed file: Only read if lcn != -1.
* Otherwise, we are dealing with a sparse run and we
* just memset the user buffer to 0 for the length of
* the run, which should be 16 (= compression unit
* size).
* FIXME: Really only when file is compressed, or can
* we have sparse runs in uncompressed files as well?
* - Yes we can, in sparse files! But not necessarily
* size of 16, just run length.
*/
r = ntfs_pread(vol->dev, rl[i].lcn <<
vol->cluster_size_bits, rl[i].length <<
vol->cluster_size_bits, intbuf);
if (r != rl[i].length << vol->cluster_size_bits) {
#define ESTR "Error reading attribute value"
if (r == -1)
ntfs_log_perror(ESTR);
else if (r < rl[i].length <<
vol->cluster_size_bits) {
ntfs_log_debug(ESTR ": Ran out of input data.\n");
errno = EIO;
} else {
ntfs_log_debug(ESTR ": unknown error\n");
errno = EIO;
}
#undef ESTR
free(rl);
free(intbuf);
return 0;
}
memcpy(b + total, intbuf, sle64_to_cpu(a->data_size) -
total);
free(intbuf);
total = sle64_to_cpu(a->data_size);
break;
}
/*
* FIXME: If compressed file: Only read if lcn != -1.
* Otherwise, we are dealing with a sparse run and we just
* memset the user buffer to 0 for the length of the run, which
* should be 16 (= compression unit size).
* FIXME: Really only when file is compressed, or can
* we have sparse runs in uncompressed files as well?
* - Yes we can, in sparse files! But not necessarily size of
* 16, just run length.
*/
r = ntfs_pread(vol->dev, rl[i].lcn << vol->cluster_size_bits,
rl[i].length << vol->cluster_size_bits,
b + total);
if (r != rl[i].length << vol->cluster_size_bits) {
#define ESTR "Error reading attribute value"
if (r == -1)
ntfs_log_perror(ESTR);
else if (r < rl[i].length << vol->cluster_size_bits) {
ntfs_log_debug(ESTR ": Ran out of input data.\n");
errno = EIO;
} else {
ntfs_log_debug(ESTR ": unknown error\n");
errno = EIO;
}
#undef ESTR
free(rl);
return 0;
}
total += r;
}
free(rl);
return total;
}
| null | null | 198,437 | 207327288367432466091072935525550388492 | 160 |
Avoided allocating and reading an attribute beyond its full size
Before reading a full attribute value for internal use, its expected
length has been checked to be < 0x40000. However the allocated size
in the runlist may be much bigger as a consequence of a bug or malice.
To prevent malloc'ing excessive size, restrict the size of the last
run to read to the needed length.
| other |
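The message's remedy maps onto the `intbuf = ntfs_malloc(rl[i].length << vol->cluster_size_bits)` call above: the last run's allocated length can be far larger than the attribute's `data_size`, so the buffer and the read should be clamped to the bytes still needed. A minimal sketch of that clamp with illustrative types, not the libntfs-3g code:

#include <stdint.h>

/* Sketch: bound the final run's read size by the remaining data_size
 * instead of the run's full allocated length, so a corrupted runlist
 * cannot force an oversized allocation or read. */
static int64_t clamp_last_run_bytes(int64_t run_clusters,
                                    unsigned cluster_size_bits,
                                    int64_t data_size, int64_t total_so_far)
{
    int64_t run_bytes = run_clusters << cluster_size_bits;
    int64_t remaining = data_size - total_so_far;

    return run_bytes < remaining ? run_bytes : remaining;
}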

mruby | 3cf291f72224715942beaf8553e42ba8891ab3c6 | 1 |
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
{
/* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
const mrb_irep *irep = proc->body.irep;
const mrb_pool_value *pool = irep->pool;
const mrb_sym *syms = irep->syms;
mrb_code insn;
int ai = mrb_gc_arena_save(mrb);
struct mrb_jmpbuf *prev_jmp = mrb->jmp;
struct mrb_jmpbuf c_jmp;
uint32_t a;
uint16_t b;
uint16_t c;
mrb_sym mid;
const struct mrb_irep_catch_handler *ch;
#ifdef DIRECT_THREADED
static const void * const optable[] = {
#define OPCODE(x,_) &&L_OP_ ## x,
#include "mruby/ops.h"
#undef OPCODE
};
#endif
mrb_bool exc_catched = FALSE;
RETRY_TRY_BLOCK:
MRB_TRY(&c_jmp) {
if (exc_catched) {
exc_catched = FALSE;
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK)
goto L_BREAK;
goto L_RAISE;
}
mrb->jmp = &c_jmp;
mrb_vm_ci_proc_set(mrb->c->ci, proc);
#define regs (mrb->c->ci->stack)
INIT_DISPATCH {
CASE(OP_NOP, Z) {
/* do nothing */
NEXT;
}
CASE(OP_MOVE, BB) {
regs[a] = regs[b];
NEXT;
}
CASE(OP_LOADL, BB) {
switch (pool[b].tt) { /* number */
case IREP_TT_INT32:
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
break;
case IREP_TT_INT64:
#if defined(MRB_INT64)
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
#else
#if defined(MRB_64BIT)
if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
}
#endif
goto L_INT_OVERFLOW;
#endif
case IREP_TT_BIGINT:
goto L_INT_OVERFLOW;
#ifndef MRB_NO_FLOAT
case IREP_TT_FLOAT:
regs[a] = mrb_float_value(mrb, pool[b].u.f);
break;
#endif
default:
/* should not happen (tt:string) */
regs[a] = mrb_nil_value();
break;
}
NEXT;
}
CASE(OP_LOADI, BB) {
SET_FIXNUM_VALUE(regs[a], b);
NEXT;
}
CASE(OP_LOADINEG, BB) {
SET_FIXNUM_VALUE(regs[a], -b);
NEXT;
}
CASE(OP_LOADI__1,B) goto L_LOADI;
CASE(OP_LOADI_0,B) goto L_LOADI;
CASE(OP_LOADI_1,B) goto L_LOADI;
CASE(OP_LOADI_2,B) goto L_LOADI;
CASE(OP_LOADI_3,B) goto L_LOADI;
CASE(OP_LOADI_4,B) goto L_LOADI;
CASE(OP_LOADI_5,B) goto L_LOADI;
CASE(OP_LOADI_6,B) goto L_LOADI;
CASE(OP_LOADI_7, B) {
L_LOADI:
SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);
NEXT;
}
CASE(OP_LOADI16, BS) {
SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);
NEXT;
}
CASE(OP_LOADI32, BSS) {
SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));
NEXT;
}
CASE(OP_LOADSYM, BB) {
SET_SYM_VALUE(regs[a], syms[b]);
NEXT;
}
CASE(OP_LOADNIL, B) {
SET_NIL_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADSELF, B) {
regs[a] = regs[0];
NEXT;
}
CASE(OP_LOADT, B) {
SET_TRUE_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADF, B) {
SET_FALSE_VALUE(regs[a]);
NEXT;
}
CASE(OP_GETGV, BB) {
mrb_value val = mrb_gv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETGV, BB) {
mrb_gv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETSV, BB) {
mrb_value val = mrb_vm_special_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETSV, BB) {
mrb_vm_special_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIV, BB) {
regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);
NEXT;
}
CASE(OP_SETIV, BB) {
mrb_iv_set(mrb, regs[0], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETCV, BB) {
mrb_value val;
val = mrb_vm_cv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETCV, BB) {
mrb_vm_cv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIDX, B) {
mrb_value va = regs[a], vb = regs[a+1];
switch (mrb_type(va)) {
case MRB_TT_ARRAY:
if (!mrb_integer_p(vb)) goto getidx_fallback;
regs[a] = mrb_ary_entry(va, mrb_integer(vb));
break;
case MRB_TT_HASH:
va = mrb_hash_get(mrb, va, vb);
regs[a] = va;
break;
case MRB_TT_STRING:
switch (mrb_type(vb)) {
case MRB_TT_INTEGER:
case MRB_TT_STRING:
case MRB_TT_RANGE:
va = mrb_str_aref(mrb, va, vb, mrb_undef_value());
regs[a] = va;
break;
default:
goto getidx_fallback;
}
break;
default:
getidx_fallback:
mid = MRB_OPSYM(aref);
goto L_SEND_SYM;
}
NEXT;
}
CASE(OP_SETIDX, B) {
c = 2;
mid = MRB_OPSYM(aset);
SET_NIL_VALUE(regs[a+3]);
goto L_SENDB_SYM;
}
CASE(OP_GETCONST, BB) {
mrb_value v = mrb_vm_const_get(mrb, syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETCONST, BB) {
mrb_vm_const_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETMCNST, BB) {
mrb_value v = mrb_const_get(mrb, regs[a], syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETMCNST, BB) {
mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETUPVAR, BBB) {
mrb_value *regs_a = regs + a;
struct REnv *e = uvenv(mrb, c);
if (e && b < MRB_ENV_LEN(e)) {
*regs_a = e->stack[b];
}
else {
*regs_a = mrb_nil_value();
}
NEXT;
}
CASE(OP_SETUPVAR, BBB) {
struct REnv *e = uvenv(mrb, c);
if (e) {
mrb_value *regs_a = regs + a;
if (b < MRB_ENV_LEN(e)) {
e->stack[b] = *regs_a;
mrb_write_barrier(mrb, (struct RBasic*)e);
}
}
NEXT;
}
CASE(OP_JMP, S) {
pc += (int16_t)a;
JUMP;
}
CASE(OP_JMPIF, BS) {
if (mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNOT, BS) {
if (!mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNIL, BS) {
if (mrb_nil_p(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPUW, S) {
a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
struct RBreak *brk = (struct RBreak*)mrb->exc;
mrb_value target = mrb_break_value_get(brk);
mrb_assert(mrb_integer_p(target));
a = (uint32_t)mrb_integer(target);
mrb_assert(a >= 0 && a < irep->ilen);
}
CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE);
if (ch) {
/* avoiding a jump from a catch handler into the same handler */
if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a));
}
}
}
CHECKPOINT_END(RBREAK_TAG_JUMP);
mrb->exc = NULL; /* clear break object */
pc = irep->iseq + a;
JUMP;
}
CASE(OP_EXCEPT, B) {
mrb_value exc;
if (mrb->exc == NULL) {
exc = mrb_nil_value();
}
else {
switch (mrb->exc->tt) {
case MRB_TT_BREAK:
case MRB_TT_EXCEPTION:
exc = mrb_obj_value(mrb->exc);
break;
default:
mrb_assert(!"bad mrb_type");
exc = mrb_nil_value();
break;
}
mrb->exc = NULL;
}
regs[a] = exc;
NEXT;
}
CASE(OP_RESCUE, BB) {
mrb_value exc = regs[a]; /* exc on stack */
mrb_value e = regs[b];
struct RClass *ec;
switch (mrb_type(e)) {
case MRB_TT_CLASS:
case MRB_TT_MODULE:
break;
default:
{
mrb_value exc;
exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"class or module required for rescue clause");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
}
ec = mrb_class_ptr(e);
regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));
NEXT;
}
CASE(OP_RAISEIF, B) {
mrb_value exc = regs[a];
if (mrb_break_p(exc)) {
mrb->exc = mrb_obj_ptr(exc);
goto L_BREAK;
}
mrb_exc_set(mrb, exc);
if (mrb->exc) {
goto L_RAISE;
}
NEXT;
}
CASE(OP_SSEND, BBB) {
regs[a] = regs[0];
insn = OP_SEND;
}
goto L_SENDB;
CASE(OP_SSENDB, BBB) {
regs[a] = regs[0];
}
goto L_SENDB;
CASE(OP_SEND, BBB)
goto L_SENDB;
L_SEND_SYM:
c = 1;
/* push nil after arguments */
SET_NIL_VALUE(regs[a+2]);
goto L_SENDB_SYM;
CASE(OP_SENDB, BBB)
L_SENDB:
mid = syms[b];
L_SENDB_SYM:
{
mrb_callinfo *ci = mrb->c->ci;
mrb_method_t m;
struct RClass *cls;
mrb_value recv, blk;
ARGUMENT_NORMALIZE(a, &c, insn);
recv = regs[a];
cls = mrb_class(mrb, recv);
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, c);
if (MRB_METHOD_CFUNC_P(m)) {
if (MRB_METHOD_PROC_P(m)) {
struct RProc *p = MRB_METHOD_PROC(m);
mrb_vm_ci_proc_set(ci, p);
recv = p->body.func(mrb, recv);
}
else {
if (MRB_METHOD_NOARG_P(m)) {
check_method_noarg(mrb, ci);
}
recv = MRB_METHOD_FUNC(m)(mrb, recv);
}
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return recv;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
ci->stack[0] = recv;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
}
JUMP;
CASE(OP_CALL, Z) {
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv = ci->stack[0];
struct RProc *m = mrb_proc_ptr(recv);
/* replace callinfo */
ci->u.target_class = MRB_PROC_TARGET_CLASS(m);
mrb_vm_ci_proc_set(ci, m);
if (MRB_PROC_ENV_P(m)) {
ci->mid = MRB_PROC_ENV(m)->mid;
}
/* prepare stack */
if (MRB_PROC_CFUNC_P(m)) {
recv = MRB_PROC_CFUNC(m)(mrb, recv);
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
ci[1].stack[0] = recv;
irep = mrb->c->ci->proc->body.irep;
}
else {
/* setup environment for calling method */
proc = m;
irep = m->body.irep;
if (!irep) {
mrb->c->ci->stack[0] = mrb_nil_value();
a = 0;
c = OP_R_NORMAL;
goto L_OP_RETURN_BODY;
}
mrb_int nargs = mrb_ci_bidx(ci)+1;
if (nargs < irep->nregs) {
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+nargs, irep->nregs-nargs);
}
if (MRB_PROC_ENV_P(m)) {
regs[0] = MRB_PROC_ENV(m)->stack[0];
}
pc = irep->iseq;
}
pool = irep->pool;
syms = irep->syms;
JUMP;
}
CASE(OP_SUPER, BB) {
mrb_method_t m;
struct RClass *cls;
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv, blk;
const struct RProc *p = ci->proc;
mrb_sym mid = ci->mid;
struct RClass* target_class = MRB_PROC_TARGET_CLASS(p);
if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */
mid = p->e.env->mid; /* restore old mid */
}
if (mid == 0 || !target_class) {
mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (target_class->flags & MRB_FL_CLASS_IS_PREPENDED) {
target_class = mrb_vm_ci_target_class(ci);
}
else if (target_class->tt == MRB_TT_MODULE) {
target_class = mrb_vm_ci_target_class(ci);
if (!target_class || target_class->tt != MRB_TT_ICLASS) {
goto super_typeerror;
}
}
recv = regs[0];
if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
super_typeerror: ;
mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"self has wrong type to call super in this context");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
ARGUMENT_NORMALIZE(a, &b, OP_SUPER);
cls = target_class->super;
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &b, blk, 1);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, b);
/* prepare stack */
ci->stack[0] = recv;
if (MRB_METHOD_CFUNC_P(m)) {
mrb_value v;
if (MRB_METHOD_PROC_P(m)) {
mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m));
}
v = MRB_METHOD_CFUNC(m)(mrb, recv);
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
mrb_assert(!mrb_break_p(v));
if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return v;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
mrb->c->ci->stack[0] = v;
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
JUMP;
}
CASE(OP_ARGARY, BS) {
mrb_int m1 = (b>>11)&0x3f;
mrb_int r = (b>>10)&0x1;
mrb_int m2 = (b>>5)&0x1f;
mrb_int kd = (b>>4)&0x1;
mrb_int lv = (b>>0)&0xf;
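      /* Editorial note: the 16-bit operand packs the enclosing method's
         argument shape as m1(6 bits) | r(1) | m2(5) | kd(1) | lv(4), read off
         the shifts above; lv counts how many lexical scopes up the forwarded
         arguments live. */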
mrb_value *stack;
if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) {
mrb_value exc;
L_NOSUPER:
exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e) goto L_NOSUPER;
if (MRB_ENV_LEN(e) <= m1+r+m2+1)
goto L_NOSUPER;
stack = e->stack + 1;
}
if (r == 0) {
regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
}
else {
mrb_value *pp = NULL;
struct RArray *rest;
mrb_int len = 0;
if (mrb_array_p(stack[m1])) {
struct RArray *ary = mrb_ary_ptr(stack[m1]);
pp = ARY_PTR(ary);
len = ARY_LEN(ary);
}
regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
rest = mrb_ary_ptr(regs[a]);
if (m1 > 0) {
stack_copy(ARY_PTR(rest), stack, m1);
}
if (len > 0) {
stack_copy(ARY_PTR(rest)+m1, pp, len);
}
if (m2 > 0) {
stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
}
ARY_SET_LEN(rest, m1+len+m2);
}
if (kd) {
regs[a+1] = stack[m1+r+m2];
regs[a+2] = stack[m1+r+m2+1];
}
else {
regs[a+1] = stack[m1+r+m2];
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ENTER, W) {
mrb_int m1 = MRB_ASPEC_REQ(a);
mrb_int o = MRB_ASPEC_OPT(a);
mrb_int r = MRB_ASPEC_REST(a);
mrb_int m2 = MRB_ASPEC_POST(a);
mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
/* unused
int b = MRB_ASPEC_BLOCK(a);
*/
mrb_int const len = m1 + o + r + m2;
mrb_callinfo *ci = mrb->c->ci;
mrb_int argc = ci->n;
mrb_value *argv = regs+1;
mrb_value * const argv0 = argv;
mrb_int const kw_pos = len + kd; /* where kwhash should be */
mrb_int const blk_pos = kw_pos + 1; /* where block should be */
mrb_value blk = regs[mrb_ci_bidx(ci)];
mrb_value kdict = mrb_nil_value();
/* keyword arguments */
if (ci->nk > 0) {
mrb_int kidx = mrb_ci_kidx(ci);
kdict = regs[kidx];
if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) {
kdict = mrb_nil_value();
ci->nk = 0;
}
}
if (!kd && !mrb_nil_p(kdict)) {
if (argc < 14) {
ci->n++;
argc++; /* include kdict in normal arguments */
}
else if (argc == 14) {
/* pack arguments and kdict */
          regs[1] = mrb_ary_new_from_values(mrb, argc+1, &regs[1]);
argc = ci->n = 15;
}
else {/* argc == 15 */
/* push kdict to packed arguments */
mrb_ary_push(mrb, regs[1], regs[2]);
}
ci->nk = 0;
}
if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) {
kdict = mrb_hash_dup(mrb, kdict);
}
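      /* Editorial note (inferred from the packing code above and the unpacking
         below): argc == 15 is the saturation value of the 4-bit argument-count
         field and doubles as a sentinel meaning "all arguments were packed
         into a single Array in R(1)". */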
      /* arguments are passed as a single Array */
if (argc == 15) {
struct RArray *ary = mrb_ary_ptr(regs[1]);
argv = ARY_PTR(ary);
argc = (int)ARY_LEN(ary);
mrb_gc_protect(mrb, regs[1]);
}
/* strict argument check */
if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
if (argc < m1 + m2 || (r == 0 && argc > len)) {
argnum_error(mrb, m1+m2);
goto L_RAISE;
}
}
/* extract first argument array to arguments */
else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
mrb_gc_protect(mrb, argv[0]);
argc = (int)RARRAY_LEN(argv[0]);
argv = RARRAY_PTR(argv[0]);
}
/* rest arguments */
mrb_value rest = mrb_nil_value();
if (argc < len) {
mrb_int mlen = m2;
if (argc < m1+m2) {
mlen = m1 < argc ? argc - m1 : 0;
}
/* copy mandatory and optional arguments */
if (argv0 != argv && argv) {
          value_move(&regs[1], argv, argc-mlen); /* m1 + o */
        }
        if (argc < m1) {
          stack_clear(&regs[argc+1], m1-argc);
        }
        /* copy post mandatory arguments */
        if (mlen) {
          value_move(&regs[len-m2+1], &argv[argc-mlen], mlen);
        }
        if (mlen < m2) {
          stack_clear(&regs[len-m2+mlen+1], m2-mlen);
}
/* initialize rest arguments with empty Array */
if (r) {
rest = mrb_ary_new_capa(mrb, 0);
regs[m1+o+1] = rest;
}
/* skip initializer of passed arguments */
if (o > 0 && argc > m1+m2)
pc += (argc - m1 - m2)*3;
}
else {
mrb_int rnum = 0;
if (argv0 != argv) {
          value_move(&regs[1], argv, m1+o);
}
if (r) {
rnum = argc-m1-o-m2;
rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
regs[m1+o+1] = rest;
}
if (m2 > 0 && argc-m2 > m1) {
          value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2);
}
pc += o*3;
}
      /* blk must be updated first, to protect it from GC */
regs[blk_pos] = blk; /* move block */
if (kd) {
if (mrb_nil_p(kdict))
kdict = mrb_hash_new_capa(mrb, 0);
regs[kw_pos] = kdict; /* set kwhash */
}
/* format arguments for generated code */
mrb->c->ci->n = len;
/* clear local (but non-argument) variables */
if (irep->nlocals-blk_pos-1 > 0) {
        stack_clear(&regs[blk_pos+1], irep->nlocals-blk_pos-1);
}
JUMP;
}
CASE(OP_KARG, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict, v;
if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
mrb_value str = mrb_format(mrb, "missing keyword: %v", k);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
v = mrb_hash_get(mrb, kdict, k);
regs[a] = v;
mrb_hash_delete_key(mrb, kdict, k);
NEXT;
}
CASE(OP_KEY_P, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
mrb_bool key_p = FALSE;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
key_p = mrb_hash_key_p(mrb, kdict, k);
}
regs[a] = mrb_bool_value(key_p);
NEXT;
}
CASE(OP_KEYEND, Z) {
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
mrb_value keys = mrb_hash_keys(mrb, kdict);
mrb_value key1 = RARRAY_PTR(keys)[0];
mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
NEXT;
}
CASE(OP_BREAK, B) {
c = OP_R_BREAK;
goto L_RETURN;
}
CASE(OP_RETURN_BLK, B) {
c = OP_R_RETURN;
goto L_RETURN;
}
CASE(OP_RETURN, B)
c = OP_R_NORMAL;
L_RETURN:
{
mrb_callinfo *ci;
ci = mrb->c->ci;
if (ci->mid) {
mrb_value blk = regs[mrb_ci_bidx(ci)];
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (!MRB_PROC_STRICT_P(p) &&
ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
}
if (mrb->exc) {
L_RAISE:
ci = mrb->c->ci;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) goto L_FTOP;
goto L_CATCH;
}
while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) {
ci = cipop(mrb);
if (ci[1].cci == CINFO_SKIP && prev_jmp) {
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
pc = ci[0].pc;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) {
L_FTOP: /* fiber top */
if (mrb->c == mrb->root_c) {
mrb->c->ci->stack = mrb->c->stbase;
goto L_STOP;
}
else {
struct mrb_context *c = mrb->c;
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
c->prev = NULL;
goto L_RAISE;
}
}
break;
}
}
L_CATCH:
if (ch == NULL) goto L_STOP;
if (FALSE) {
L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
ci = mrb->c->ci;
}
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
}
else {
mrb_int acc;
mrb_value v;
ci = mrb->c->ci;
v = regs[a];
mrb_gc_protect(mrb, v);
switch (c) {
case OP_R_RETURN:
/* Fall through to OP_R_NORMAL otherwise */
if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) {
const struct RProc *dst;
mrb_callinfo *cibase;
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
if (MRB_PROC_ENV_P(dst)) {
struct REnv *e = MRB_PROC_ENV(dst);
if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) {
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
}
/* check jump destination */
while (cibase <= ci && ci->proc != dst) {
if (ci->cci > CINFO_NONE) { /* jump cross C boundary */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci--;
}
if (ci <= cibase) { /* no jump destination */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci = mrb->c->ci;
while (cibase <= ci && ci->proc != dst) {
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) {
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK);
ci = cipop(mrb);
pc = ci->pc;
}
proc = ci->proc;
mrb->exc = NULL; /* clear break object */
break;
}
/* fallthrough */
case OP_R_NORMAL:
NORMAL_RETURN:
if (ci == mrb->c->cibase) {
struct mrb_context *c;
c = mrb->c;
if (!c->prev) { /* toplevel return */
regs[irep->nlocals] = v;
goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP);
}
if (!c->vmexec && c->prev->ci == c->prev->cibase) {
mrb_value exc = mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) {
c = mrb->c;
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL);
/* automatic yield at the end */
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
mrb->c->status = MRB_FIBER_RUNNING;
c->prev = NULL;
if (c->vmexec) {
mrb_gc_arena_restore(mrb, ai);
c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
ci = mrb->c->ci;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN);
mrb->exc = NULL; /* clear break object */
break;
case OP_R_BREAK:
if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
if (MRB_PROC_ORPHAN_P(proc)) {
mrb_value exc;
L_BREAK_ERROR:
exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR,
"break from proc-closure");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) {
goto L_BREAK_ERROR;
}
else {
struct REnv *e = MRB_PROC_ENV(proc);
if (e->cxt != mrb->c) {
goto L_BREAK_ERROR;
}
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK);
/* break from fiber block */
if (ci == mrb->c->cibase && ci->pc) {
struct mrb_context *c = mrb->c;
mrb->c = c->prev;
c->prev = NULL;
ci = mrb->c->ci;
}
if (ci->cci > CINFO_NONE) {
ci = cipop(mrb);
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v);
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
if (FALSE) {
struct RBreak *brk;
L_BREAK:
brk = (struct RBreak*)mrb->exc;
proc = mrb_break_proc_get(brk);
v = mrb_break_value_get(brk);
ci = mrb->c->ci;
switch (mrb_break_tag_get(brk)) {
#define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
#undef DISPATCH_CHECKPOINTS
default:
mrb_assert(!"wrong break tag");
}
}
while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) {
if (ci[-1].cci == CINFO_SKIP) {
goto L_BREAK_ERROR;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER);
ci = cipop(mrb);
pc = ci->pc;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET);
if (ci == mrb->c->cibase) {
goto L_BREAK_ERROR;
}
mrb->exc = NULL; /* clear break object */
break;
default:
/* cannot happen */
break;
}
mrb_assert(ci == mrb->c->ci);
mrb_assert(mrb->exc == NULL);
if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) {
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
acc = ci->cci;
ci = cipop(mrb);
if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
mrb_gc_arena_restore(mrb, ai);
mrb->jmp = prev_jmp;
return v;
}
pc = ci->pc;
DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
ci[1].stack[0] = v;
mrb_gc_arena_restore(mrb, ai);
}
JUMP;
}
CASE(OP_BLKPUSH, BS) {
int m1 = (b>>11)&0x3f;
int r = (b>>10)&0x1;
int m2 = (b>>5)&0x1f;
int kd = (b>>4)&0x1;
int lv = (b>>0)&0xf;
mrb_value *stack;
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
MRB_ENV_LEN(e) <= m1+r+m2+1) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
stack = e->stack + 1;
}
if (mrb_nil_p(stack[m1+r+m2+kd])) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
regs[a] = stack[m1+r+m2+kd];
NEXT;
}
L_INT_OVERFLOW:
{
mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow");
mrb_exc_set(mrb, exc);
}
goto L_RAISE;
#define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
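/* Editorial note: TYPES2 packs the two operand type tags into one uint16_t so
   each arithmetic opcode can dispatch on the (lhs, rhs) type pair with a
   single switch, e.g. TYPES2(MRB_TT_INTEGER, MRB_TT_FLOAT) selects the
   integer/float case below. */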
#define OP_MATH(op_name) \
/* need to check if op is overridden */ \
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
OP_MATH_CASE_INTEGER(op_name); \
OP_MATH_CASE_FLOAT(op_name, integer, float); \
OP_MATH_CASE_FLOAT(op_name, float, integer); \
OP_MATH_CASE_FLOAT(op_name, float, float); \
OP_MATH_CASE_STRING_##op_name(); \
default: \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATH_CASE_INTEGER(op_name) \
case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
{ \
mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
#else
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
{ \
mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
#define OP_MATH_OVERFLOW_INT() goto L_INT_OVERFLOW
#define OP_MATH_CASE_STRING_add() \
case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
mrb_gc_arena_restore(mrb, ai); \
break
#define OP_MATH_CASE_STRING_sub() (void)0
#define OP_MATH_CASE_STRING_mul() (void)0
#define OP_MATH_OP_add +
#define OP_MATH_OP_sub -
#define OP_MATH_OP_mul *
#define OP_MATH_TT_integer MRB_TT_INTEGER
#define OP_MATH_TT_float MRB_TT_FLOAT
CASE(OP_ADD, B) {
OP_MATH(add);
}
CASE(OP_SUB, B) {
OP_MATH(sub);
}
CASE(OP_MUL, B) {
OP_MATH(mul);
}
CASE(OP_DIV, B) {
#ifndef MRB_NO_FLOAT
mrb_float x, y, f;
#endif
/* need to check if op is overridden */
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
{
mrb_int x = mrb_integer(regs[a]);
mrb_int y = mrb_integer(regs[a+1]);
mrb_int div = mrb_div_int(mrb, x, y);
SET_INT_VALUE(mrb, regs[a], div);
}
NEXT;
#ifndef MRB_NO_FLOAT
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
x = (mrb_float)mrb_integer(regs[a]);
y = mrb_float(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
x = mrb_float(regs[a]);
y = (mrb_float)mrb_integer(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
x = mrb_float(regs[a]);
y = mrb_float(regs[a+1]);
break;
#endif
default:
mid = MRB_OPSYM(div);
goto L_SEND_SYM;
}
#ifndef MRB_NO_FLOAT
f = mrb_div_float(x, y);
SET_FLOAT_VALUE(mrb, regs[a], f);
#endif
NEXT;
}
#define OP_MATHI(op_name) \
/* need to check if op is overridden */ \
switch (mrb_type(regs[a])) { \
OP_MATHI_CASE_INTEGER(op_name); \
OP_MATHI_CASE_FLOAT(op_name); \
default: \
SET_INT_VALUE(mrb,regs[a+1], b); \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATHI_CASE_INTEGER(op_name) \
case MRB_TT_INTEGER: \
{ \
mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATHI_CASE_FLOAT(op_name) (void)0
#else
#define OP_MATHI_CASE_FLOAT(op_name) \
case MRB_TT_FLOAT: \
{ \
mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
CASE(OP_ADDI, BB) {
OP_MATHI(add);
}
CASE(OP_SUBI, BB) {
OP_MATHI(sub);
}
#define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
#ifdef MRB_NO_FLOAT
#define OP_CMP(op,sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#else
#define OP_CMP(op, sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_float,mrb_float);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#endif
CASE(OP_EQ, B) {
if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
SET_TRUE_VALUE(regs[a]);
}
else {
OP_CMP(==,eq);
}
NEXT;
}
CASE(OP_LT, B) {
OP_CMP(<,lt);
NEXT;
}
CASE(OP_LE, B) {
OP_CMP(<=,le);
NEXT;
}
CASE(OP_GT, B) {
OP_CMP(>,gt);
NEXT;
}
CASE(OP_GE, B) {
OP_CMP(>=,ge);
NEXT;
}
CASE(OP_ARRAY, BB) {
      regs[a] = mrb_ary_new_from_values(mrb, b, &regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARRAY2, BBB) {
      regs[a] = mrb_ary_new_from_values(mrb, c, &regs[b]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYCAT, B) {
mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
if (mrb_nil_p(regs[a])) {
regs[a] = splat;
}
else {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_concat(mrb, regs[a], splat);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYPUSH, BB) {
mrb_assert(mrb_array_p(regs[a]));
for (mrb_int i=0; i<b; i++) {
mrb_ary_push(mrb, regs[a], regs[a+i+1]);
}
NEXT;
}
CASE(OP_ARYDUP, B) {
mrb_value ary = regs[a];
if (mrb_array_p(ary)) {
ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary));
}
else {
ary = mrb_ary_new_from_values(mrb, 1, &ary);
}
regs[a] = ary;
NEXT;
}
CASE(OP_AREF, BBB) {
mrb_value v = regs[b];
if (!mrb_array_p(v)) {
if (c == 0) {
regs[a] = v;
}
else {
SET_NIL_VALUE(regs[a]);
}
}
else {
v = mrb_ary_ref(mrb, v, c);
regs[a] = v;
}
NEXT;
}
CASE(OP_ASET, BBB) {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_set(mrb, regs[b], c, regs[a]);
NEXT;
}
CASE(OP_APOST, BBB) {
mrb_value v = regs[a];
int pre = b;
int post = c;
struct RArray *ary;
int len, idx;
if (!mrb_array_p(v)) {
        v = mrb_ary_new_from_values(mrb, 1, &regs[a]);
}
ary = mrb_ary_ptr(v);
len = (int)ARY_LEN(ary);
if (len > pre + post) {
v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
regs[a++] = v;
while (post--) {
regs[a++] = ARY_PTR(ary)[len-post-1];
}
}
else {
v = mrb_ary_new_capa(mrb, 0);
regs[a++] = v;
for (idx=0; idx+pre<len; idx++) {
regs[a+idx] = ARY_PTR(ary)[pre+idx];
}
while (idx < post) {
SET_NIL_VALUE(regs[a+idx]);
idx++;
}
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_INTERN, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_sym sym = mrb_intern_str(mrb, regs[a]);
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_SYMBOL, BB) {
size_t len;
mrb_sym sym;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
sym = mrb_intern_static(mrb, pool[b].u.str, len);
}
else {
sym = mrb_intern(mrb, pool[b].u.str, len);
}
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_STRING, BB) {
mrb_int len;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
}
else {
regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_STRCAT, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_str_concat(mrb, regs[a], regs[a+1]);
NEXT;
}
CASE(OP_HASH, BB) {
mrb_value hash = mrb_hash_new_capa(mrb, b);
int i;
int lim = a+b*2;
for (i=a; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
regs[a] = hash;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHADD, BB) {
mrb_value hash;
int i;
int lim = a+b*2+1;
hash = regs[a];
mrb_ensure_hash_type(mrb, hash);
for (i=a+1; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHCAT, B) {
mrb_value hash = regs[a];
mrb_assert(mrb_hash_p(hash));
mrb_hash_merge(mrb, hash, regs[a+1]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_LAMBDA, BB)
c = OP_L_LAMBDA;
L_MAKE_LAMBDA:
{
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
if (c & OP_L_CAPTURE) {
p = mrb_closure_new(mrb, nirep);
}
else {
p = mrb_proc_new(mrb, nirep);
p->flags |= MRB_PROC_SCOPE;
}
if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
regs[a] = mrb_obj_value(p);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_BLOCK, BB) {
c = OP_L_BLOCK;
goto L_MAKE_LAMBDA;
}
CASE(OP_METHOD, BB) {
c = OP_L_METHOD;
goto L_MAKE_LAMBDA;
}
CASE(OP_RANGE_INC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_RANGE_EXC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_OCLASS, B) {
regs[a] = mrb_obj_value(mrb->object_class);
NEXT;
}
CASE(OP_CLASS, BB) {
struct RClass *c = 0, *baseclass;
mrb_value base, super;
mrb_sym id = syms[b];
base = regs[a];
super = regs[a+1];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
c = mrb_vm_define_class(mrb, base, super, id);
regs[a] = mrb_obj_value(c);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_MODULE, BB) {
struct RClass *cls = 0, *baseclass;
mrb_value base;
mrb_sym id = syms[b];
base = regs[a];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
cls = mrb_vm_define_module(mrb, base, id);
regs[a] = mrb_obj_value(cls);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_EXEC, BB)
{
mrb_value recv = regs[a];
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
/* prepare closure */
p = mrb_proc_new(mrb, nirep);
p->c = NULL;
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
p->flags |= MRB_PROC_SCOPE;
/* prepare call stack */
cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0);
irep = p->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+1, irep->nregs-1);
pc = irep->iseq;
JUMP;
}
CASE(OP_DEF, BB) {
struct RClass *target = mrb_class_ptr(regs[a]);
struct RProc *p = mrb_proc_ptr(regs[a+1]);
mrb_method_t m;
mrb_sym mid = syms[b];
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, target, mid, m);
mrb_method_added(mrb, target, mid);
mrb_gc_arena_restore(mrb, ai);
regs[a] = mrb_symbol_value(mid);
NEXT;
}
CASE(OP_SCLASS, B) {
regs[a] = mrb_singleton_class(mrb, regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_TCLASS, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
regs[a] = mrb_obj_value(target);
NEXT;
}
CASE(OP_ALIAS, BB) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_alias_method(mrb, target, syms[a], syms[b]);
mrb_method_added(mrb, target, syms[a]);
NEXT;
}
CASE(OP_UNDEF, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_undef_method_id(mrb, target, syms[a]);
NEXT;
}
CASE(OP_DEBUG, Z) {
FETCH_BBB();
#ifdef MRB_USE_DEBUG_HOOK
mrb->debug_op_hook(mrb, irep, pc, regs);
#else
#ifndef MRB_NO_STDIO
printf("OP_DEBUG %d %d %d\n", a, b, c);
#else
abort();
#endif
#endif
NEXT;
}
CASE(OP_ERR, B) {
size_t len = pool[a].tt >> 2;
mrb_value exc;
mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CASE(OP_EXT1, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT2, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT3, Z) {
uint8_t insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_STOP, Z) {
/* stop VM */
CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value());
}
CHECKPOINT_END(RBREAK_TAG_STOP);
L_STOP:
mrb->jmp = prev_jmp;
if (mrb->exc) {
mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION);
return mrb_obj_value(mrb->exc);
}
return regs[irep->nlocals];
}
}
END_DISPATCH;
#undef regs
}
MRB_CATCH(&c_jmp) {
mrb_callinfo *ci = mrb->c->ci;
while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
ci = cipop(mrb);
}
exc_catched = TRUE;
pc = ci->pc;
goto RETRY_TRY_BLOCK;
}
MRB_END_EXC(&c_jmp);
}
cwe: null | big_vul_idx: null | idx: 198,439 | hash: 242922149333710994821156841146649645539 | size: 1,828
message: vm.c: create break object before clearing GC arena. Otherwise it possibly causes a use-after-free.
dataset: other
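An illustrative toy of the ordering rule this message states, in plain C with a stand-in arena (hypothetical names throughout; this is neither the mruby API nor the actual patch):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Toy "arena": tracked allocations are freed when the arena is rolled back. */
static char *slots[8];
static int top = 0;
static char *protect(char *p) { slots[top++] = p; return p; }
static void restore(int ai) { while (top > ai) free(slots[--top]); }

int main(void) {
    int ai = top;                              /* save the arena mark */
    char *v = protect(strdup("break value"));  /* operand kept alive by arena */
    char *brk = strdup(v);                     /* create the object FIRST ... */
    restore(ai);                               /* ... THEN clear the arena */
    assert(strcmp(brk, "break value") == 0);   /* brk owns its copy; no UAF */
    free(brk);
    return 0;
}

Swapping the last two steps would make strdup(v) read freed memory, which is the use-after-free shape the message warns about.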
project: pjproject | commit_id: 450baca94f475345542c6953832650c390889202 | target: 1
PJ_DEF(pj_status_t) pjstun_parse_msg( void *buf, pj_size_t buf_len,
pjstun_msg *msg)
{
pj_uint16_t msg_type, msg_len;
char *p_attr;
PJ_CHECK_STACK();
msg->hdr = (pjstun_msg_hdr*)buf;
msg_type = pj_ntohs(msg->hdr->type);
switch (msg_type) {
case PJSTUN_BINDING_REQUEST:
case PJSTUN_BINDING_RESPONSE:
case PJSTUN_BINDING_ERROR_RESPONSE:
case PJSTUN_SHARED_SECRET_REQUEST:
case PJSTUN_SHARED_SECRET_RESPONSE:
case PJSTUN_SHARED_SECRET_ERROR_RESPONSE:
break;
default:
PJ_LOG(4,(THIS_FILE, "Error: unknown msg type %d", msg_type));
return PJLIB_UTIL_ESTUNINMSGTYPE;
}
msg_len = pj_ntohs(msg->hdr->length);
if (msg_len != buf_len - sizeof(pjstun_msg_hdr)) {
PJ_LOG(4,(THIS_FILE, "Error: invalid msg_len %d (expecting %d)",
msg_len, buf_len - sizeof(pjstun_msg_hdr)));
return PJLIB_UTIL_ESTUNINMSGLEN;
}
msg->attr_count = 0;
p_attr = (char*)buf + sizeof(pjstun_msg_hdr);
while (msg_len > 0) {
pjstun_attr_hdr **attr = &msg->attr[msg->attr_count];
pj_uint32_t len;
pj_uint16_t attr_type;
*attr = (pjstun_attr_hdr*)p_attr;
len = pj_ntohs((pj_uint16_t) ((*attr)->length)) + sizeof(pjstun_attr_hdr);
len = (len + 3) & ~3;
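	/* Editorial note: STUN attribute values are padded to a 32-bit boundary,
	   so the total attribute length is rounded up to a multiple of 4 here
	   before being checked against the remaining message length. */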
if (msg_len < len) {
PJ_LOG(4,(THIS_FILE, "Error: length mismatch in attr %d",
msg->attr_count));
return PJLIB_UTIL_ESTUNINATTRLEN;
}
attr_type = pj_ntohs((*attr)->type);
if (attr_type > PJSTUN_ATTR_REFLECTED_FROM &&
attr_type != PJSTUN_ATTR_XOR_MAPPED_ADDR)
{
PJ_LOG(5,(THIS_FILE, "Warning: unknown attr type %x in attr %d. "
"Attribute was ignored.",
attr_type, msg->attr_count));
}
msg_len = (pj_uint16_t)(msg_len - len);
p_attr += len;
++msg->attr_count;
}
return PJ_SUCCESS;
}
cwe: null | big_vul_idx: null | idx: 198,449 | hash: 135632213457172939945264979223013834460 | size: 65
message: Merge pull request from GHSA-26j7-ww69-c4qj
dataset: other
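A standalone check of the length rounding used in pjstun_parse_msg() above: the expression (len + 3) & ~3 rounds each attribute length up to the next multiple of 4:

#include <assert.h>

int main(void) {
    /* sample attribute lengths and their expected 4-byte-padded totals */
    unsigned lens[] = {0, 1, 3, 4, 5, 8};
    unsigned want[] = {0, 4, 4, 4, 8, 8};
    for (int i = 0; i < 6; i++)
        assert(((lens[i] + 3) & ~3u) == want[i]);
    return 0;
}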
project: tensorflow | commit_id: a989426ee1346693cc015792f11d715f6944f2b8 | target: 1
void ComparisonQuantized(const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output, bool requires_broadcast) {
if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) {
auto input1_offset = -input1->params.zero_point;
auto input2_offset = -input2->params.zero_point;
const int left_shift = 8;
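    // Editorial note (inferred from the calls below, not from TFLite docs):
    // QuantizeMultiplierSmallerThanOneExp splits each input scale into a
    // fixed-point multiplier plus a shift, and left_shift pre-scales the
    // zero-point-adjusted inputs so low-order bits survive the rescaling.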
int32 input1_multiplier;
int input1_shift;
QuantizeMultiplierSmallerThanOneExp(input1->params.scale,
&input1_multiplier, &input1_shift);
int32 input2_multiplier;
int input2_shift;
QuantizeMultiplierSmallerThanOneExp(input2->params.scale,
&input2_multiplier, &input2_shift);
ComparisonParams op_params;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
op_params.input1_shift = input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
op_params.input2_shift = input2_shift;
if (requires_broadcast) {
reference_ops::BroadcastComparison4DSlowWithScaling<input_dtype, opname>(
op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1),
GetTensorShape(input2), GetTensorData<input_dtype>(input2),
GetTensorShape(output), GetTensorData<bool>(output));
} else {
reference_ops::ComparisonWithScaling<input_dtype, opname>(
op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1),
GetTensorShape(input2), GetTensorData<input_dtype>(input2),
GetTensorShape(output), GetTensorData<bool>(output));
}
}
}
cwe: null | big_vul_idx: null | idx: 198,452 | hash: 207684057347604940476422838906061374925 | size: 37
message: Improve to cover scale values greater than one. PiperOrigin-RevId: 433050921
dataset: other
project: njs | commit_id: 6a07c2156a07ef307b6dcf3c2ca8571a5f1af7a6 | target: 1
njs_await_fulfilled(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
njs_index_t unused)
{
njs_int_t ret;
njs_value_t **cur_local, **cur_closures, **cur_temp, *value;
njs_frame_t *frame, *async_frame;
njs_function_t *function;
njs_async_ctx_t *ctx;
njs_native_frame_t *top, *async;
ctx = vm->top_frame->function->context;
value = njs_arg(args, nargs, 1);
if (njs_is_error(value)) {
goto failed;
}
async_frame = ctx->await;
async = &async_frame->native;
async->previous = vm->top_frame;
function = async->function;
cur_local = vm->levels[NJS_LEVEL_LOCAL];
cur_closures = vm->levels[NJS_LEVEL_CLOSURE];
cur_temp = vm->levels[NJS_LEVEL_TEMP];
top = vm->top_frame;
frame = vm->active_frame;
vm->levels[NJS_LEVEL_LOCAL] = async->local;
vm->levels[NJS_LEVEL_CLOSURE] = njs_function_closures(async->function);
vm->levels[NJS_LEVEL_TEMP] = async->temp;
vm->top_frame = async;
vm->active_frame = async_frame;
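    /* Editorial note: the assignments above swap in the suspended async
       frame's scope levels and frames; the cur_* copies taken earlier are
       restored after njs_vmcode_interpreter() returns, so the caller's
       state survives the resumption. */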
*njs_scope_value(vm, ctx->index) = *value;
vm->retval = *value;
vm->top_frame->retval = &vm->retval;
function->context = ctx->capability;
function->await = ctx;
ret = njs_vmcode_interpreter(vm, ctx->pc);
function->context = NULL;
function->await = NULL;
vm->levels[NJS_LEVEL_LOCAL] = cur_local;
vm->levels[NJS_LEVEL_CLOSURE] = cur_closures;
vm->levels[NJS_LEVEL_TEMP] = cur_temp;
vm->top_frame = top;
vm->active_frame = frame;
if (ret == NJS_OK) {
ret = njs_function_call(vm, njs_function(&ctx->capability->resolve),
&njs_value_undefined, &vm->retval, 1, &vm->retval);
njs_async_context_free(vm, ctx);
} else if (ret == NJS_AGAIN) {
ret = NJS_OK;
} else if (ret == NJS_ERROR) {
if (njs_is_memory_error(vm, &vm->retval)) {
return NJS_ERROR;
}
value = &vm->retval;
goto failed;
}
return ret;
failed:
(void) njs_function_call(vm, njs_function(&ctx->capability->reject),
&njs_value_undefined, value, 1, &vm->retval);
njs_async_context_free(vm, ctx);
return NJS_ERROR;
}
cwe: null | big_vul_idx: null | idx: 198,476 | hash: 30140299270589977394449369465621934034 | size: 86
message: Fixed recursive async function calls. Previously, the PromiseCapability record was stored directly in the function object (function->context) during a function invocation. This is not correct, because the PromiseCapability record should be linked to the current execution context. As a result, function->context was overwritten by consecutive recursive calls, which results in a use-after-free. This closes issue #451 on GitHub.
dataset: other
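A minimal plain-C model of the failure mode described above (hypothetical names; this is not njs code): per-invocation state kept on a shared function object is clobbered by re-entry.

#include <stdio.h>

typedef struct { int context; } fn_t;   /* stand-in for the function object */

static void call(fn_t *fn, int cap, int depth) {
    fn->context = cap;                  /* BAD: one slot shared by all frames */
    if (depth > 0)
        call(fn, cap + 1, depth - 1);   /* inner call overwrites fn->context */
    printf("frame cap=%d sees context=%d\n", cap, fn->context);
}

int main(void) {
    fn_t f = {0};
    call(&f, 1, 2);   /* every frame reports context=3, not its own cap */
    return 0;
}

Keying the state to the execution frame instead of the function object, as the message describes, removes the clobbering.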
project: micro-ecc | commit_id: 1b5f5cea5145c96dd8791b9b2c41424fc74c2172 | target: 1
static int uECC_sign_with_k(const uint8_t *private_key,
const uint8_t *message_hash,
unsigned hash_size,
uECC_word_t *k,
uint8_t *signature,
uECC_Curve curve) {
uECC_word_t tmp[uECC_MAX_WORDS];
uECC_word_t s[uECC_MAX_WORDS];
uECC_word_t *k2[2] = {tmp, s};
#if uECC_VLI_NATIVE_LITTLE_ENDIAN
uECC_word_t *p = (uECC_word_t *)signature;
#else
uECC_word_t p[uECC_MAX_WORDS * 2];
#endif
uECC_word_t carry;
wordcount_t num_words = curve->num_words;
wordcount_t num_n_words = BITS_TO_WORDS(curve->num_n_bits);
bitcount_t num_n_bits = curve->num_n_bits;
/* Make sure 0 < k < curve_n */
if (uECC_vli_isZero(k, num_words) || uECC_vli_cmp(curve->n, k, num_n_words) != 1) {
return 0;
}
carry = regularize_k(k, tmp, s, curve);
EccPoint_mult(p, curve->G, k2[!carry], 0, num_n_bits + 1, curve);
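    /* Editorial note (inferred, not an upstream comment): regularize_k()
       yields k+n and k+2n so the point multiply above always walks a fixed
       num_n_bits+1 scalar length; `carry` selects the copy with the top bit
       set, a common constant-time countermeasure. */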
if (uECC_vli_isZero(p, num_words)) {
return 0;
}
/* If an RNG function was specified, get a random number
to prevent side channel analysis of k. */
if (!g_rng_function) {
uECC_vli_clear(tmp, num_n_words);
tmp[0] = 1;
} else if (!uECC_generate_random_int(tmp, curve->n, num_n_words)) {
return 0;
}
/* Prevent side channel analysis of uECC_vli_modInv() to determine
bits of k / the private key by premultiplying by a random number */
uECC_vli_modMult(k, k, tmp, curve->n, num_n_words); /* k' = rand * k */
uECC_vli_modInv(k, k, curve->n, num_n_words); /* k = 1 / k' */
uECC_vli_modMult(k, k, tmp, curve->n, num_n_words); /* k = 1 / k */
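    /* Editorial note on the blinding above: with random r in tmp, k' = r*k,
       so modInv gives 1/(r*k), and one more multiply by r yields 1/k -- the
       inversion never operates on the raw nonce k. */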
#if uECC_VLI_NATIVE_LITTLE_ENDIAN == 0
uECC_vli_nativeToBytes(signature, curve->num_bytes, p); /* store r */
#endif
#if uECC_VLI_NATIVE_LITTLE_ENDIAN
bcopy((uint8_t *) tmp, private_key, BITS_TO_BYTES(curve->num_n_bits));
#else
uECC_vli_bytesToNative(tmp, private_key, BITS_TO_BYTES(curve->num_n_bits)); /* tmp = d */
#endif
s[num_n_words - 1] = 0;
uECC_vli_set(s, p, num_words);
uECC_vli_modMult(s, tmp, s, curve->n, num_n_words); /* s = r*d */
bits2int(tmp, message_hash, hash_size, curve);
uECC_vli_modAdd(s, tmp, s, curve->n, num_n_words); /* s = e + r*d */
uECC_vli_modMult(s, s, k, curve->n, num_n_words); /* s = (e + r*d) / k */
if (uECC_vli_numBits(s, num_n_words) > (bitcount_t)curve->num_bytes * 8) {
return 0;
}
#if uECC_VLI_NATIVE_LITTLE_ENDIAN
bcopy((uint8_t *) signature + curve->num_bytes, (uint8_t *) s, curve->num_bytes);
#else
uECC_vli_nativeToBytes(signature + curve->num_bytes, curve->num_bytes, s);
#endif
return 1;
}
cwe: null | big_vul_idx: null | idx: 198,499 | hash: 75558427469827995413294142200508990286 | size: 73
message: Fix for #168
dataset: other
project: mruby | commit_id: 00acae117da1b45b318dc36531a7b0021b8097ae | target: 1
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
{
/* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
const mrb_irep *irep = proc->body.irep;
const mrb_pool_value *pool = irep->pool;
const mrb_sym *syms = irep->syms;
mrb_code insn;
int ai = mrb_gc_arena_save(mrb);
struct mrb_jmpbuf *prev_jmp = mrb->jmp;
struct mrb_jmpbuf c_jmp;
uint32_t a;
uint16_t b;
uint16_t c;
mrb_sym mid;
const struct mrb_irep_catch_handler *ch;
#ifdef DIRECT_THREADED
static const void * const optable[] = {
#define OPCODE(x,_) &&L_OP_ ## x,
#include "mruby/ops.h"
#undef OPCODE
};
#endif
mrb_bool exc_catched = FALSE;
RETRY_TRY_BLOCK:
MRB_TRY(&c_jmp) {
if (exc_catched) {
exc_catched = FALSE;
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK)
goto L_BREAK;
goto L_RAISE;
}
mrb->jmp = &c_jmp;
mrb_vm_ci_proc_set(mrb->c->ci, proc);
#define regs (mrb->c->ci->stack)
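/* Editorial note (inferred; not an upstream comment): `regs` expands to the
   live callinfo stack pointer rather than a cached local, so register
   accesses remain valid when mrb_stack_extend() reallocates the stack. */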
INIT_DISPATCH {
CASE(OP_NOP, Z) {
/* do nothing */
NEXT;
}
CASE(OP_MOVE, BB) {
regs[a] = regs[b];
NEXT;
}
CASE(OP_LOADL, BB) {
switch (pool[b].tt) { /* number */
case IREP_TT_INT32:
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
break;
case IREP_TT_INT64:
#if defined(MRB_INT64)
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
#else
#if defined(MRB_64BIT)
if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
}
#endif
goto L_INT_OVERFLOW;
#endif
case IREP_TT_BIGINT:
goto L_INT_OVERFLOW;
#ifndef MRB_NO_FLOAT
case IREP_TT_FLOAT:
regs[a] = mrb_float_value(mrb, pool[b].u.f);
break;
#endif
default:
/* should not happen (tt:string) */
regs[a] = mrb_nil_value();
break;
}
NEXT;
}
CASE(OP_LOADI, BB) {
SET_FIXNUM_VALUE(regs[a], b);
NEXT;
}
CASE(OP_LOADINEG, BB) {
SET_FIXNUM_VALUE(regs[a], -b);
NEXT;
}
CASE(OP_LOADI__1,B) goto L_LOADI;
CASE(OP_LOADI_0,B) goto L_LOADI;
CASE(OP_LOADI_1,B) goto L_LOADI;
CASE(OP_LOADI_2,B) goto L_LOADI;
CASE(OP_LOADI_3,B) goto L_LOADI;
CASE(OP_LOADI_4,B) goto L_LOADI;
CASE(OP_LOADI_5,B) goto L_LOADI;
CASE(OP_LOADI_6,B) goto L_LOADI;
CASE(OP_LOADI_7, B) {
L_LOADI:
SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);
NEXT;
}
CASE(OP_LOADI16, BS) {
SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);
NEXT;
}
CASE(OP_LOADI32, BSS) {
SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));
NEXT;
}
CASE(OP_LOADSYM, BB) {
SET_SYM_VALUE(regs[a], syms[b]);
NEXT;
}
CASE(OP_LOADNIL, B) {
SET_NIL_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADSELF, B) {
regs[a] = regs[0];
NEXT;
}
CASE(OP_LOADT, B) {
SET_TRUE_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADF, B) {
SET_FALSE_VALUE(regs[a]);
NEXT;
}
CASE(OP_GETGV, BB) {
mrb_value val = mrb_gv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETGV, BB) {
mrb_gv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETSV, BB) {
mrb_value val = mrb_vm_special_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETSV, BB) {
mrb_vm_special_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIV, BB) {
regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);
NEXT;
}
CASE(OP_SETIV, BB) {
mrb_iv_set(mrb, regs[0], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETCV, BB) {
mrb_value val;
val = mrb_vm_cv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETCV, BB) {
mrb_vm_cv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIDX, B) {
mrb_value va = regs[a], vb = regs[a+1];
switch (mrb_type(va)) {
case MRB_TT_ARRAY:
if (!mrb_integer_p(vb)) goto getidx_fallback;
regs[a] = mrb_ary_entry(va, mrb_integer(vb));
break;
case MRB_TT_HASH:
va = mrb_hash_get(mrb, va, vb);
regs[a] = va;
break;
case MRB_TT_STRING:
switch (mrb_type(vb)) {
case MRB_TT_INTEGER:
case MRB_TT_STRING:
case MRB_TT_RANGE:
va = mrb_str_aref(mrb, va, vb, mrb_undef_value());
regs[a] = va;
break;
default:
goto getidx_fallback;
}
break;
default:
getidx_fallback:
mid = MRB_OPSYM(aref);
goto L_SEND_SYM;
}
NEXT;
}
CASE(OP_SETIDX, B) {
c = 2;
mid = MRB_OPSYM(aset);
SET_NIL_VALUE(regs[a+3]);
goto L_SENDB_SYM;
}
CASE(OP_GETCONST, BB) {
mrb_value v = mrb_vm_const_get(mrb, syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETCONST, BB) {
mrb_vm_const_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETMCNST, BB) {
mrb_value v = mrb_const_get(mrb, regs[a], syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETMCNST, BB) {
mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETUPVAR, BBB) {
mrb_value *regs_a = regs + a;
struct REnv *e = uvenv(mrb, c);
if (e && b < MRB_ENV_LEN(e)) {
*regs_a = e->stack[b];
}
else {
*regs_a = mrb_nil_value();
}
NEXT;
}
CASE(OP_SETUPVAR, BBB) {
struct REnv *e = uvenv(mrb, c);
if (e) {
mrb_value *regs_a = regs + a;
if (b < MRB_ENV_LEN(e)) {
e->stack[b] = *regs_a;
mrb_write_barrier(mrb, (struct RBasic*)e);
}
}
NEXT;
}
CASE(OP_JMP, S) {
pc += (int16_t)a;
JUMP;
}
CASE(OP_JMPIF, BS) {
if (mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNOT, BS) {
if (!mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNIL, BS) {
if (mrb_nil_p(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPUW, S) {
a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
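      /* Editorial note (inferred from usage here and in OP_RETURN below): the
         CHECKPOINT_MAIN body runs first and may throw a tagged break so that
         ensure handlers can run; when execution later re-enters this opcode
         via the L_BREAK dispatcher, the CHECKPOINT_RESTORE body recovers the
         saved state. */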
CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
struct RBreak *brk = (struct RBreak*)mrb->exc;
mrb_value target = mrb_break_value_get(brk);
mrb_assert(mrb_integer_p(target));
a = (uint32_t)mrb_integer(target);
mrb_assert(a >= 0 && a < irep->ilen);
}
CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE);
if (ch) {
/* avoiding a jump from a catch handler into the same handler */
if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a));
}
}
}
CHECKPOINT_END(RBREAK_TAG_JUMP);
mrb->exc = NULL; /* clear break object */
pc = irep->iseq + a;
JUMP;
}
CASE(OP_EXCEPT, B) {
mrb_value exc;
if (mrb->exc == NULL) {
exc = mrb_nil_value();
}
else {
switch (mrb->exc->tt) {
case MRB_TT_BREAK:
case MRB_TT_EXCEPTION:
exc = mrb_obj_value(mrb->exc);
break;
default:
mrb_assert(!"bad mrb_type");
exc = mrb_nil_value();
break;
}
mrb->exc = NULL;
}
regs[a] = exc;
NEXT;
}
CASE(OP_RESCUE, BB) {
mrb_value exc = regs[a]; /* exc on stack */
mrb_value e = regs[b];
struct RClass *ec;
switch (mrb_type(e)) {
case MRB_TT_CLASS:
case MRB_TT_MODULE:
break;
default:
{
mrb_value exc;
exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"class or module required for rescue clause");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
}
ec = mrb_class_ptr(e);
regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));
NEXT;
}
CASE(OP_RAISEIF, B) {
mrb_value exc = regs[a];
if (mrb_break_p(exc)) {
mrb->exc = mrb_obj_ptr(exc);
goto L_BREAK;
}
mrb_exc_set(mrb, exc);
if (mrb->exc) {
goto L_RAISE;
}
NEXT;
}
CASE(OP_SSEND, BBB) {
regs[a] = regs[0];
insn = OP_SEND;
}
goto L_SENDB;
CASE(OP_SSENDB, BBB) {
regs[a] = regs[0];
}
goto L_SENDB;
CASE(OP_SEND, BBB)
goto L_SENDB;
L_SEND_SYM:
c = 1;
/* push nil after arguments */
SET_NIL_VALUE(regs[a+2]);
goto L_SENDB_SYM;
CASE(OP_SENDB, BBB)
L_SENDB:
mid = syms[b];
L_SENDB_SYM:
{
mrb_callinfo *ci = mrb->c->ci;
mrb_method_t m;
struct RClass *cls;
mrb_value recv, blk;
ARGUMENT_NORMALIZE(a, &c, insn);
recv = regs[a];
cls = mrb_class(mrb, recv);
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, c);
if (MRB_METHOD_CFUNC_P(m)) {
if (MRB_METHOD_PROC_P(m)) {
struct RProc *p = MRB_METHOD_PROC(m);
mrb_vm_ci_proc_set(ci, p);
recv = p->body.func(mrb, recv);
}
else {
if (MRB_METHOD_NOARG_P(m)) {
check_method_noarg(mrb, ci);
}
recv = MRB_METHOD_FUNC(m)(mrb, recv);
}
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return recv;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
ci->stack[0] = recv;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
}
JUMP;
CASE(OP_CALL, Z) {
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv = ci->stack[0];
struct RProc *m = mrb_proc_ptr(recv);
/* replace callinfo */
ci->u.target_class = MRB_PROC_TARGET_CLASS(m);
mrb_vm_ci_proc_set(ci, m);
if (MRB_PROC_ENV_P(m)) {
ci->mid = MRB_PROC_ENV(m)->mid;
}
/* prepare stack */
if (MRB_PROC_CFUNC_P(m)) {
recv = MRB_PROC_CFUNC(m)(mrb, recv);
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
ci[1].stack[0] = recv;
irep = mrb->c->ci->proc->body.irep;
}
else {
/* setup environment for calling method */
proc = m;
irep = m->body.irep;
if (!irep) {
mrb->c->ci->stack[0] = mrb_nil_value();
a = 0;
c = OP_R_NORMAL;
goto L_OP_RETURN_BODY;
}
mrb_int nargs = mrb_ci_bidx(ci)+1;
if (nargs < irep->nregs) {
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+nargs, irep->nregs-nargs);
}
if (MRB_PROC_ENV_P(m)) {
regs[0] = MRB_PROC_ENV(m)->stack[0];
}
pc = irep->iseq;
}
pool = irep->pool;
syms = irep->syms;
JUMP;
}
CASE(OP_SUPER, BB) {
mrb_method_t m;
struct RClass *cls;
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv, blk;
const struct RProc *p = ci->proc;
mrb_sym mid = ci->mid;
struct RClass* target_class = MRB_PROC_TARGET_CLASS(p);
if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */
mid = p->e.env->mid; /* restore old mid */
}
if (mid == 0 || !target_class) {
mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (target_class->flags & MRB_FL_CLASS_IS_PREPENDED) {
target_class = mrb_vm_ci_target_class(ci);
}
else if (target_class->tt == MRB_TT_MODULE) {
target_class = mrb_vm_ci_target_class(ci);
if (target_class->tt != MRB_TT_ICLASS) {
goto super_typeerror;
}
}
recv = regs[0];
if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
super_typeerror: ;
mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"self has wrong type to call super in this context");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
ARGUMENT_NORMALIZE(a, &b, OP_SUPER);
cls = target_class->super;
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &b, blk, 1);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, b);
/* prepare stack */
ci->stack[0] = recv;
if (MRB_METHOD_CFUNC_P(m)) {
mrb_value v;
if (MRB_METHOD_PROC_P(m)) {
mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m));
}
v = MRB_METHOD_CFUNC(m)(mrb, recv);
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
mrb_assert(!mrb_break_p(v));
if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return v;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
mrb->c->ci->stack[0] = v;
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
JUMP;
}
CASE(OP_ARGARY, BS) {
mrb_int m1 = (b>>11)&0x3f;
mrb_int r = (b>>10)&0x1;
mrb_int m2 = (b>>5)&0x1f;
mrb_int kd = (b>>4)&0x1;
mrb_int lv = (b>>0)&0xf;
mrb_value *stack;
if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) {
mrb_value exc;
L_NOSUPER:
exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e) goto L_NOSUPER;
if (MRB_ENV_LEN(e) <= m1+r+m2+1)
goto L_NOSUPER;
stack = e->stack + 1;
}
if (r == 0) {
regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
}
else {
mrb_value *pp = NULL;
struct RArray *rest;
mrb_int len = 0;
if (mrb_array_p(stack[m1])) {
struct RArray *ary = mrb_ary_ptr(stack[m1]);
pp = ARY_PTR(ary);
len = ARY_LEN(ary);
}
regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
rest = mrb_ary_ptr(regs[a]);
if (m1 > 0) {
stack_copy(ARY_PTR(rest), stack, m1);
}
if (len > 0) {
stack_copy(ARY_PTR(rest)+m1, pp, len);
}
if (m2 > 0) {
stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
}
ARY_SET_LEN(rest, m1+len+m2);
}
if (kd) {
regs[a+1] = stack[m1+r+m2];
regs[a+2] = stack[m1+r+m2+1];
}
else {
regs[a+1] = stack[m1+r+m2];
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ENTER, W) {
mrb_int m1 = MRB_ASPEC_REQ(a);
mrb_int o = MRB_ASPEC_OPT(a);
mrb_int r = MRB_ASPEC_REST(a);
mrb_int m2 = MRB_ASPEC_POST(a);
mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
/* unused
int b = MRB_ASPEC_BLOCK(a);
*/
mrb_int const len = m1 + o + r + m2;
mrb_callinfo *ci = mrb->c->ci;
mrb_int argc = ci->n;
mrb_value *argv = regs+1;
mrb_value * const argv0 = argv;
mrb_int const kw_pos = len + kd; /* where kwhash should be */
mrb_int const blk_pos = kw_pos + 1; /* where block should be */
mrb_value blk = regs[mrb_ci_bidx(ci)];
mrb_value kdict = mrb_nil_value();
/* keyword arguments */
if (ci->nk > 0) {
mrb_int kidx = mrb_ci_kidx(ci);
kdict = regs[kidx];
if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) {
kdict = mrb_nil_value();
ci->nk = 0;
}
}
if (!kd && !mrb_nil_p(kdict)) {
if (argc < 14) {
ci->n++;
argc++; /* include kdict in normal arguments */
}
else if (argc == 14) {
/* pack arguments and kdict */
          regs[1] = mrb_ary_new_from_values(mrb, argc+1, &regs[1]);
argc = ci->n = 15;
}
else {/* argc == 15 */
/* push kdict to packed arguments */
mrb_ary_push(mrb, regs[1], regs[2]);
}
ci->nk = 0;
}
if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) {
kdict = mrb_hash_dup(mrb, kdict);
}
      /* arguments are passed as a single Array */
if (argc == 15) {
struct RArray *ary = mrb_ary_ptr(regs[1]);
argv = ARY_PTR(ary);
argc = (int)ARY_LEN(ary);
mrb_gc_protect(mrb, regs[1]);
}
/* strict argument check */
if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
if (argc < m1 + m2 || (r == 0 && argc > len)) {
argnum_error(mrb, m1+m2);
goto L_RAISE;
}
}
/* extract first argument array to arguments */
else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
mrb_gc_protect(mrb, argv[0]);
argc = (int)RARRAY_LEN(argv[0]);
argv = RARRAY_PTR(argv[0]);
}
/* rest arguments */
mrb_value rest = mrb_nil_value();
if (argc < len) {
mrb_int mlen = m2;
if (argc < m1+m2) {
mlen = m1 < argc ? argc - m1 : 0;
}
/* copy mandatory and optional arguments */
if (argv0 != argv && argv) {
          value_move(&regs[1], argv, argc-mlen); /* m1 + o */
        }
        if (argc < m1) {
          stack_clear(&regs[argc+1], m1-argc);
        }
        /* copy post mandatory arguments */
        if (mlen) {
          value_move(&regs[len-m2+1], &argv[argc-mlen], mlen);
        }
        if (mlen < m2) {
          stack_clear(&regs[len-m2+mlen+1], m2-mlen);
}
/* initialize rest arguments with empty Array */
if (r) {
rest = mrb_ary_new_capa(mrb, 0);
regs[m1+o+1] = rest;
}
/* skip initializer of passed arguments */
if (o > 0 && argc > m1+m2)
pc += (argc - m1 - m2)*3;
}
else {
mrb_int rnum = 0;
if (argv0 != argv) {
          value_move(&regs[1], argv, m1+o);
}
if (r) {
rnum = argc-m1-o-m2;
rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
regs[m1+o+1] = rest;
}
if (m2 > 0 && argc-m2 > m1) {
          value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2);
}
pc += o*3;
}
      /* blk must be updated first, to protect it from GC */
regs[blk_pos] = blk; /* move block */
if (kd) {
if (mrb_nil_p(kdict))
kdict = mrb_hash_new_capa(mrb, 0);
regs[kw_pos] = kdict; /* set kwhash */
}
/* format arguments for generated code */
mrb->c->ci->n = len;
/* clear local (but non-argument) variables */
if (irep->nlocals-blk_pos-1 > 0) {
        stack_clear(&regs[blk_pos+1], irep->nlocals-blk_pos-1);
}
JUMP;
}
CASE(OP_KARG, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict, v;
if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
mrb_value str = mrb_format(mrb, "missing keyword: %v", k);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
v = mrb_hash_get(mrb, kdict, k);
regs[a] = v;
mrb_hash_delete_key(mrb, kdict, k);
NEXT;
}
CASE(OP_KEY_P, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
mrb_bool key_p = FALSE;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
key_p = mrb_hash_key_p(mrb, kdict, k);
}
regs[a] = mrb_bool_value(key_p);
NEXT;
}
CASE(OP_KEYEND, Z) {
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
mrb_value keys = mrb_hash_keys(mrb, kdict);
mrb_value key1 = RARRAY_PTR(keys)[0];
mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
NEXT;
}
CASE(OP_BREAK, B) {
c = OP_R_BREAK;
goto L_RETURN;
}
CASE(OP_RETURN_BLK, B) {
c = OP_R_RETURN;
goto L_RETURN;
}
CASE(OP_RETURN, B)
c = OP_R_NORMAL;
L_RETURN:
{
mrb_callinfo *ci;
ci = mrb->c->ci;
if (ci->mid) {
mrb_value blk = regs[mrb_ci_bidx(ci)];
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (!MRB_PROC_STRICT_P(p) &&
ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
}
if (mrb->exc) {
L_RAISE:
ci = mrb->c->ci;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) goto L_FTOP;
goto L_CATCH;
}
while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) {
ci = cipop(mrb);
if (ci[1].cci == CINFO_SKIP && prev_jmp) {
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
pc = ci[0].pc;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) {
L_FTOP: /* fiber top */
if (mrb->c == mrb->root_c) {
mrb->c->ci->stack = mrb->c->stbase;
goto L_STOP;
}
else {
struct mrb_context *c = mrb->c;
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
c->prev = NULL;
goto L_RAISE;
}
}
break;
}
}
L_CATCH:
if (ch == NULL) goto L_STOP;
if (FALSE) {
L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
ci = mrb->c->ci;
}
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
}
else {
mrb_int acc;
mrb_value v;
ci = mrb->c->ci;
v = regs[a];
mrb_gc_protect(mrb, v);
switch (c) {
case OP_R_RETURN:
/* Fall through to OP_R_NORMAL otherwise */
if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) {
const struct RProc *dst;
mrb_callinfo *cibase;
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
if (MRB_PROC_ENV_P(dst)) {
struct REnv *e = MRB_PROC_ENV(dst);
if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) {
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
}
/* check jump destination */
while (cibase <= ci && ci->proc != dst) {
if (ci->cci > CINFO_NONE) { /* jump cross C boundary */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci--;
}
if (ci <= cibase) { /* no jump destination */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci = mrb->c->ci;
while (cibase <= ci && ci->proc != dst) {
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) {
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK);
ci = cipop(mrb);
pc = ci->pc;
}
proc = ci->proc;
mrb->exc = NULL; /* clear break object */
break;
}
/* fallthrough */
case OP_R_NORMAL:
NORMAL_RETURN:
if (ci == mrb->c->cibase) {
struct mrb_context *c;
c = mrb->c;
if (!c->prev) { /* toplevel return */
regs[irep->nlocals] = v;
goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP);
}
if (!c->vmexec && c->prev->ci == c->prev->cibase) {
mrb_value exc = mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) {
c = mrb->c;
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL);
/* automatic yield at the end */
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
mrb->c->status = MRB_FIBER_RUNNING;
c->prev = NULL;
if (c->vmexec) {
mrb_gc_arena_restore(mrb, ai);
c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
ci = mrb->c->ci;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN);
mrb->exc = NULL; /* clear break object */
break;
case OP_R_BREAK:
if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
if (MRB_PROC_ORPHAN_P(proc)) {
mrb_value exc;
L_BREAK_ERROR:
exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR,
"break from proc-closure");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) {
goto L_BREAK_ERROR;
}
else {
struct REnv *e = MRB_PROC_ENV(proc);
if (e->cxt != mrb->c) {
goto L_BREAK_ERROR;
}
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK);
/* break from fiber block */
if (ci == mrb->c->cibase && ci->pc) {
struct mrb_context *c = mrb->c;
mrb->c = c->prev;
c->prev = NULL;
ci = mrb->c->ci;
}
if (ci->cci > CINFO_NONE) {
ci = cipop(mrb);
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v);
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
if (FALSE) {
struct RBreak *brk;
L_BREAK:
brk = (struct RBreak*)mrb->exc;
proc = mrb_break_proc_get(brk);
v = mrb_break_value_get(brk);
ci = mrb->c->ci;
switch (mrb_break_tag_get(brk)) {
#define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
#undef DISPATCH_CHECKPOINTS
default:
mrb_assert(!"wrong break tag");
}
}
while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) {
if (ci[-1].cci == CINFO_SKIP) {
goto L_BREAK_ERROR;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER);
ci = cipop(mrb);
pc = ci->pc;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET);
if (ci == mrb->c->cibase) {
goto L_BREAK_ERROR;
}
mrb->exc = NULL; /* clear break object */
break;
default:
/* cannot happen */
break;
}
mrb_assert(ci == mrb->c->ci);
mrb_assert(mrb->exc == NULL);
if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) {
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
acc = ci->cci;
ci = cipop(mrb);
if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
mrb_gc_arena_restore(mrb, ai);
mrb->jmp = prev_jmp;
return v;
}
pc = ci->pc;
DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
ci[1].stack[0] = v;
mrb_gc_arena_restore(mrb, ai);
}
JUMP;
}
CASE(OP_BLKPUSH, BS) {
int m1 = (b>>11)&0x3f;
int r = (b>>10)&0x1;
int m2 = (b>>5)&0x1f;
int kd = (b>>4)&0x1;
int lv = (b>>0)&0xf;
mrb_value *stack;
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
MRB_ENV_LEN(e) <= m1+r+m2+1) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
stack = e->stack + 1;
}
if (mrb_nil_p(stack[m1+r+m2+kd])) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
regs[a] = stack[m1+r+m2+kd];
NEXT;
}
L_INT_OVERFLOW:
{
mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow");
mrb_exc_set(mrb, exc);
}
goto L_RAISE;
#define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
#define OP_MATH(op_name) \
/* need to check if op is overridden */ \
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
OP_MATH_CASE_INTEGER(op_name); \
OP_MATH_CASE_FLOAT(op_name, integer, float); \
OP_MATH_CASE_FLOAT(op_name, float, integer); \
OP_MATH_CASE_FLOAT(op_name, float, float); \
OP_MATH_CASE_STRING_##op_name(); \
default: \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATH_CASE_INTEGER(op_name) \
case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
{ \
mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
#else
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
{ \
mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
#define OP_MATH_OVERFLOW_INT() goto L_INT_OVERFLOW
#define OP_MATH_CASE_STRING_add() \
case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
mrb_gc_arena_restore(mrb, ai); \
break
#define OP_MATH_CASE_STRING_sub() (void)0
#define OP_MATH_CASE_STRING_mul() (void)0
#define OP_MATH_OP_add +
#define OP_MATH_OP_sub -
#define OP_MATH_OP_mul *
#define OP_MATH_TT_integer MRB_TT_INTEGER
#define OP_MATH_TT_float MRB_TT_FLOAT
CASE(OP_ADD, B) {
OP_MATH(add);
}
CASE(OP_SUB, B) {
OP_MATH(sub);
}
CASE(OP_MUL, B) {
OP_MATH(mul);
}
CASE(OP_DIV, B) {
#ifndef MRB_NO_FLOAT
mrb_float x, y, f;
#endif
/* need to check if op is overridden */
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
{
mrb_int x = mrb_integer(regs[a]);
mrb_int y = mrb_integer(regs[a+1]);
mrb_int div = mrb_div_int(mrb, x, y);
SET_INT_VALUE(mrb, regs[a], div);
}
NEXT;
#ifndef MRB_NO_FLOAT
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
x = (mrb_float)mrb_integer(regs[a]);
y = mrb_float(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
x = mrb_float(regs[a]);
y = (mrb_float)mrb_integer(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
x = mrb_float(regs[a]);
y = mrb_float(regs[a+1]);
break;
#endif
default:
mid = MRB_OPSYM(div);
goto L_SEND_SYM;
}
#ifndef MRB_NO_FLOAT
f = mrb_div_float(x, y);
SET_FLOAT_VALUE(mrb, regs[a], f);
#endif
NEXT;
}
#define OP_MATHI(op_name) \
/* need to check if op is overridden */ \
switch (mrb_type(regs[a])) { \
OP_MATHI_CASE_INTEGER(op_name); \
OP_MATHI_CASE_FLOAT(op_name); \
default: \
SET_INT_VALUE(mrb,regs[a+1], b); \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATHI_CASE_INTEGER(op_name) \
case MRB_TT_INTEGER: \
{ \
mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATHI_CASE_FLOAT(op_name) (void)0
#else
#define OP_MATHI_CASE_FLOAT(op_name) \
case MRB_TT_FLOAT: \
{ \
mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
CASE(OP_ADDI, BB) {
OP_MATHI(add);
}
CASE(OP_SUBI, BB) {
OP_MATHI(sub);
}
#define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
#ifdef MRB_NO_FLOAT
#define OP_CMP(op,sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#else
#define OP_CMP(op, sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_float,mrb_float);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#endif
CASE(OP_EQ, B) {
if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
SET_TRUE_VALUE(regs[a]);
}
else {
OP_CMP(==,eq);
}
NEXT;
}
CASE(OP_LT, B) {
OP_CMP(<,lt);
NEXT;
}
CASE(OP_LE, B) {
OP_CMP(<=,le);
NEXT;
}
CASE(OP_GT, B) {
OP_CMP(>,gt);
NEXT;
}
CASE(OP_GE, B) {
OP_CMP(>=,ge);
NEXT;
}
CASE(OP_ARRAY, BB) {
regs[a] = mrb_ary_new_from_values(mrb, b, &regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARRAY2, BBB) {
regs[a] = mrb_ary_new_from_values(mrb, c, &regs[b]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYCAT, B) {
mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
if (mrb_nil_p(regs[a])) {
regs[a] = splat;
}
else {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_concat(mrb, regs[a], splat);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYPUSH, BB) {
mrb_assert(mrb_array_p(regs[a]));
for (mrb_int i=0; i<b; i++) {
mrb_ary_push(mrb, regs[a], regs[a+i+1]);
}
NEXT;
}
CASE(OP_ARYDUP, B) {
mrb_value ary = regs[a];
if (mrb_array_p(ary)) {
ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary));
}
else {
ary = mrb_ary_new_from_values(mrb, 1, &ary);
}
regs[a] = ary;
NEXT;
}
CASE(OP_AREF, BBB) {
mrb_value v = regs[b];
if (!mrb_array_p(v)) {
if (c == 0) {
regs[a] = v;
}
else {
SET_NIL_VALUE(regs[a]);
}
}
else {
v = mrb_ary_ref(mrb, v, c);
regs[a] = v;
}
NEXT;
}
CASE(OP_ASET, BBB) {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_set(mrb, regs[b], c, regs[a]);
NEXT;
}
CASE(OP_APOST, BBB) {
mrb_value v = regs[a];
int pre = b;
int post = c;
struct RArray *ary;
int len, idx;
if (!mrb_array_p(v)) {
v = mrb_ary_new_from_values(mrb, 1, &regs[a]);
}
ary = mrb_ary_ptr(v);
len = (int)ARY_LEN(ary);
if (len > pre + post) {
v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
regs[a++] = v;
while (post--) {
regs[a++] = ARY_PTR(ary)[len-post-1];
}
}
else {
v = mrb_ary_new_capa(mrb, 0);
regs[a++] = v;
for (idx=0; idx+pre<len; idx++) {
regs[a+idx] = ARY_PTR(ary)[pre+idx];
}
while (idx < post) {
SET_NIL_VALUE(regs[a+idx]);
idx++;
}
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_INTERN, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_sym sym = mrb_intern_str(mrb, regs[a]);
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_SYMBOL, BB) {
size_t len;
mrb_sym sym;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
sym = mrb_intern_static(mrb, pool[b].u.str, len);
}
else {
sym = mrb_intern(mrb, pool[b].u.str, len);
}
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_STRING, BB) {
mrb_int len;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
}
else {
regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_STRCAT, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_str_concat(mrb, regs[a], regs[a+1]);
NEXT;
}
CASE(OP_HASH, BB) {
mrb_value hash = mrb_hash_new_capa(mrb, b);
int i;
int lim = a+b*2;
for (i=a; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
regs[a] = hash;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHADD, BB) {
mrb_value hash;
int i;
int lim = a+b*2+1;
hash = regs[a];
mrb_ensure_hash_type(mrb, hash);
for (i=a+1; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHCAT, B) {
mrb_value hash = regs[a];
mrb_assert(mrb_hash_p(hash));
mrb_hash_merge(mrb, hash, regs[a+1]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_LAMBDA, BB)
c = OP_L_LAMBDA;
L_MAKE_LAMBDA:
{
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
if (c & OP_L_CAPTURE) {
p = mrb_closure_new(mrb, nirep);
}
else {
p = mrb_proc_new(mrb, nirep);
p->flags |= MRB_PROC_SCOPE;
}
if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
regs[a] = mrb_obj_value(p);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_BLOCK, BB) {
c = OP_L_BLOCK;
goto L_MAKE_LAMBDA;
}
CASE(OP_METHOD, BB) {
c = OP_L_METHOD;
goto L_MAKE_LAMBDA;
}
CASE(OP_RANGE_INC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_RANGE_EXC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_OCLASS, B) {
regs[a] = mrb_obj_value(mrb->object_class);
NEXT;
}
CASE(OP_CLASS, BB) {
struct RClass *c = 0, *baseclass;
mrb_value base, super;
mrb_sym id = syms[b];
base = regs[a];
super = regs[a+1];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
c = mrb_vm_define_class(mrb, base, super, id);
regs[a] = mrb_obj_value(c);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_MODULE, BB) {
struct RClass *cls = 0, *baseclass;
mrb_value base;
mrb_sym id = syms[b];
base = regs[a];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
cls = mrb_vm_define_module(mrb, base, id);
regs[a] = mrb_obj_value(cls);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_EXEC, BB)
{
mrb_value recv = regs[a];
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
/* prepare closure */
p = mrb_proc_new(mrb, nirep);
p->c = NULL;
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
p->flags |= MRB_PROC_SCOPE;
/* prepare call stack */
cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0);
irep = p->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+1, irep->nregs-1);
pc = irep->iseq;
JUMP;
}
CASE(OP_DEF, BB) {
struct RClass *target = mrb_class_ptr(regs[a]);
struct RProc *p = mrb_proc_ptr(regs[a+1]);
mrb_method_t m;
mrb_sym mid = syms[b];
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, target, mid, m);
mrb_method_added(mrb, target, mid);
mrb_gc_arena_restore(mrb, ai);
regs[a] = mrb_symbol_value(mid);
NEXT;
}
CASE(OP_SCLASS, B) {
regs[a] = mrb_singleton_class(mrb, regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_TCLASS, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
regs[a] = mrb_obj_value(target);
NEXT;
}
CASE(OP_ALIAS, BB) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_alias_method(mrb, target, syms[a], syms[b]);
mrb_method_added(mrb, target, syms[a]);
NEXT;
}
CASE(OP_UNDEF, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_undef_method_id(mrb, target, syms[a]);
NEXT;
}
CASE(OP_DEBUG, Z) {
FETCH_BBB();
#ifdef MRB_USE_DEBUG_HOOK
mrb->debug_op_hook(mrb, irep, pc, regs);
#else
#ifndef MRB_NO_STDIO
printf("OP_DEBUG %d %d %d\n", a, b, c);
#else
abort();
#endif
#endif
NEXT;
}
CASE(OP_ERR, B) {
size_t len = pool[a].tt >> 2;
mrb_value exc;
mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CASE(OP_EXT1, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT2, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT3, Z) {
uint8_t insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_STOP, Z) {
/* stop VM */
CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value());
}
CHECKPOINT_END(RBREAK_TAG_STOP);
L_STOP:
mrb->jmp = prev_jmp;
if (mrb->exc) {
mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION);
return mrb_obj_value(mrb->exc);
}
return regs[irep->nlocals];
}
}
END_DISPATCH;
#undef regs
}
MRB_CATCH(&c_jmp) {
mrb_callinfo *ci = mrb->c->ci;
while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
ci = cipop(mrb);
}
exc_catched = TRUE;
pc = ci->pc;
goto RETRY_TRY_BLOCK;
}
MRB_END_EXC(&c_jmp);
}
| null | null | 198,512 | 251133800800873458523255728173671147403 | 1,828 | vm.c: target class may be NULL. | other |
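The one-line message above is terse, but the function body already funnels OP_TCLASS, OP_ALIAS and OP_UNDEF through check_target_class(). A minimal C sketch of such a guard, with the exact exception class as an assumption:

    /* Sketch: set an exception and return NULL instead of letting the
     * opcode handlers dereference a missing target class. */
    static struct RClass*
    check_target_class(mrb_state *mrb)
    {
      struct RClass *target = mrb_vm_ci_target_class(mrb->c->ci);
      if (!target) {
        mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
                                        "no class/module to add method");
        mrb_exc_set(mrb, exc);
      }
      return target;
    }

Each caller then only needs "if (!target) goto L_RAISE;", which is exactly the pattern visible in the opcode handlers above.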
tensorflow | 5ecec9c6fbdbc6be03295685190a45e7eee726ab | 1 |
void Compute(OpKernelContext* context) override {
// Get the stamp token.
const Tensor* stamp_token_t;
OP_REQUIRES_OK(context, context->input("stamp_token", &stamp_token_t));
int64_t stamp_token = stamp_token_t->scalar<int64>()();
// Get the tree ensemble proto.
const Tensor* tree_ensemble_serialized_t;
OP_REQUIRES_OK(context, context->input("tree_ensemble_serialized",
&tree_ensemble_serialized_t));
std::unique_ptr<BoostedTreesEnsembleResource> result(
new BoostedTreesEnsembleResource());
if (!result->InitFromSerialized(
tree_ensemble_serialized_t->scalar<tstring>()(), stamp_token)) {
result->Unref();
OP_REQUIRES(
context, false,
errors::InvalidArgument("Unable to parse tree ensemble proto."));
}
// Only create one, if one does not exist already. Report status for all
// other exceptions.
auto status =
CreateResource(context, HandleFromInput(context, 0), result.release());
if (status.code() != tensorflow::error::ALREADY_EXISTS) {
OP_REQUIRES_OK(context, status);
}
}
| null | null | 198,523 | 201044185104134614142321846533666874996 | 28 |
Prevent use after free.
A very old version of the code used `result` as a simple pointer to a resource. Two years later, the pointer was changed to a `unique_ptr`, but the author forgot to remove the call to `Unref`. Three years after that, we finally uncovered the UAF.
PiperOrigin-RevId: 387924872
Change-Id: I70fb6f199164de49fac20c168132a07b84903f9b
| other |
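The message pins the bug to a leftover manual release on an object that had since gained an owning smart pointer. A language-neutral C sketch of the same double-release pattern (names hypothetical, not the TensorFlow API):

    #include <stdlib.h>

    /* Hypothetical refcounted resource; the bug class is releasing it twice. */
    typedef struct { int refcount; } resource_t;

    static void resource_unref(resource_t *r) {
      if (--r->refcount == 0) free(r);
    }

    static int parse_or_fail(resource_t *r, int parsed_ok) {
      if (!parsed_ok) {
        resource_unref(r);  /* manual release on the error path...            */
        return -1;          /* ...while an owner also releases on scope exit: */
      }                     /* exactly one of the two releases must go.       */
      return 0;
    }

In the C++ code above, the std::unique_ptr destructor plays the role of the scope-exit owner, so the fix is simply to drop the result->Unref() call.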
u-boot | 8f8c04bf1ebbd2f72f1643e7ad9617dafa6e5409 | 1 |
static int do_i2c_md(struct cmd_tbl *cmdtp, int flag, int argc,
char *const argv[])
{
uint chip;
uint addr, length;
int alen;
int j, nbytes, linebytes;
int ret;
#if CONFIG_IS_ENABLED(DM_I2C)
struct udevice *dev;
#endif
/* We use the last specified parameters, unless new ones are
* entered.
*/
chip = i2c_dp_last_chip;
addr = i2c_dp_last_addr;
alen = i2c_dp_last_alen;
length = i2c_dp_last_length;
if (argc < 3)
return CMD_RET_USAGE;
if ((flag & CMD_FLAG_REPEAT) == 0) {
/*
* New command specified.
*/
/*
* I2C chip address
*/
chip = hextoul(argv[1], NULL);
/*
* I2C data address within the chip. This can be 1 or
* 2 bytes long. Some day it might be 3 bytes long :-).
*/
addr = hextoul(argv[2], NULL);
alen = get_alen(argv[2], DEFAULT_ADDR_LEN);
if (alen > 3)
return CMD_RET_USAGE;
/*
* If another parameter, it is the length to display.
* Length is the number of objects, not number of bytes.
*/
if (argc > 3)
length = hextoul(argv[3], NULL);
}
#if CONFIG_IS_ENABLED(DM_I2C)
ret = i2c_get_cur_bus_chip(chip, &dev);
if (!ret && alen != -1)
ret = i2c_set_chip_offset_len(dev, alen);
if (ret)
return i2c_report_err(ret, I2C_ERR_READ);
#endif
/*
* Print the lines.
*
* We buffer all read data, so we can make sure data is read only
* once.
*/
nbytes = length;
do {
unsigned char linebuf[DISP_LINE_LEN];
unsigned char *cp;
linebytes = (nbytes > DISP_LINE_LEN) ? DISP_LINE_LEN : nbytes;
#if CONFIG_IS_ENABLED(DM_I2C)
ret = dm_i2c_read(dev, addr, linebuf, linebytes);
#else
ret = i2c_read(chip, addr, alen, linebuf, linebytes);
#endif
if (ret)
return i2c_report_err(ret, I2C_ERR_READ);
else {
printf("%04x:", addr);
cp = linebuf;
for (j=0; j<linebytes; j++) {
printf(" %02x", *cp++);
addr++;
}
puts (" ");
cp = linebuf;
for (j=0; j<linebytes; j++) {
if ((*cp < 0x20) || (*cp > 0x7e))
puts (".");
else
printf("%c", *cp);
cp++;
}
putc ('\n');
}
nbytes -= linebytes;
} while (nbytes > 0);
i2c_dp_last_chip = chip;
i2c_dp_last_addr = addr;
i2c_dp_last_alen = alen;
i2c_dp_last_length = length;
return 0;
}
| null | null | 198,545 | 316505070418912776648735019136484744565 | 106 |
i2c: fix stack buffer overflow vulnerability in i2c md command
When running "i2c md 0 0 80000100", the function do_i2c_md parses the
length into an unsigned int variable named length. The value is then
moved to a signed variable:
    int nbytes = length;
    #define DISP_LINE_LEN 16
    int linebytes = (nbytes > DISP_LINE_LEN) ? DISP_LINE_LEN : nbytes;
    ret = dm_i2c_read(dev, addr, linebuf, linebytes);
On systems where integers are 32 bits wide, 0x80000100 is a negative
value, so "nbytes > DISP_LINE_LEN" is false and linebytes gets assigned
0x80000100 instead of 16.
The consequence is that the function which reads from the i2c device
(dm_i2c_read or i2c_read) is called with a 16-byte stack buffer to fill
but with a size parameter which is too large. In some cases, this could
trigger a crash. But with some i2c drivers, such as drivers/i2c/nx_i2c.c
(used with "nexell,s5pxx18-i2c" bus), the size is actually truncated to
a 16-bit integer. This is because function i2c_transfer expects an
unsigned short length. In such a case, an attacker who can control the
response of an i2c device can overwrite the return address of a function
and execute arbitrary code through Return-Oriented Programming.
Fix this issue by using unsigned integer types in do_i2c_md. While at
it, also make alen unsigned, as signed sizes can cause vulnerabilities
when people forget to check that they can be negative.
Signed-off-by: Nicolas Iooss <[email protected]>
Reviewed-by: Heiko Schocher <[email protected]>
| other |
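Following the message, a sketch of the type change inside do_i2c_md, keeping the same locals as the function above (only the declarations change):

    /* Sketch: keep byte counts unsigned so 0x80000100 cannot become a
     * negative int and defeat the DISP_LINE_LEN clamp. */
    uint nbytes = length;          /* was: int nbytes = length; */
    do {
        unsigned char linebuf[DISP_LINE_LEN];
        uint linebytes = (nbytes > DISP_LINE_LEN) ? DISP_LINE_LEN : nbytes;
        /* dm_i2c_read()/i2c_read() now receive at most DISP_LINE_LEN */
        nbytes -= linebytes;
    } while (nbytes > 0);

With unsigned arithmetic the comparison 0x80000100 > 16 is true, so linebytes is clamped to 16 and the 16-byte stack buffer can no longer be overrun.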
engine | 7df766124f87768b43b9e8947c5a01e17545772c | 1 |
static int pkey_GOST_ECcp_encrypt(EVP_PKEY_CTX *pctx, unsigned char *out,
size_t *out_len, const unsigned char *key,
size_t key_len)
{
GOST_KEY_TRANSPORT *gkt = NULL;
EVP_PKEY *pubk = EVP_PKEY_CTX_get0_pkey(pctx);
struct gost_pmeth_data *data = EVP_PKEY_CTX_get_data(pctx);
int pkey_nid = EVP_PKEY_base_id(pubk);
ASN1_OBJECT *crypt_params_obj = (pkey_nid == NID_id_GostR3410_2001 || pkey_nid == NID_id_GostR3410_2001DH) ?
OBJ_nid2obj(NID_id_Gost28147_89_CryptoPro_A_ParamSet) :
OBJ_nid2obj(NID_id_tc26_gost_28147_param_Z);
const struct gost_cipher_info *param =
get_encryption_params(crypt_params_obj);
unsigned char ukm[8], shared_key[32], crypted_key[44];
int ret = 0;
int key_is_ephemeral = 1;
gost_ctx cctx;
EVP_PKEY *sec_key = EVP_PKEY_CTX_get0_peerkey(pctx);
if (data->shared_ukm_size) {
memcpy(ukm, data->shared_ukm, 8);
} else {
if (RAND_bytes(ukm, 8) <= 0) {
GOSTerr(GOST_F_PKEY_GOST_ECCP_ENCRYPT, GOST_R_RNG_ERROR);
return 0;
}
}
if (!param)
goto err;
/* Check for private key in the peer_key of context */
if (sec_key) {
key_is_ephemeral = 0;
if (!gost_get0_priv_key(sec_key)) {
GOSTerr(GOST_F_PKEY_GOST_ECCP_ENCRYPT,
GOST_R_NO_PRIVATE_PART_OF_NON_EPHEMERAL_KEYPAIR);
goto err;
}
} else {
key_is_ephemeral = 1;
if (out) {
sec_key = EVP_PKEY_new();
if (!EVP_PKEY_assign(sec_key, EVP_PKEY_base_id(pubk), EC_KEY_new())
|| !EVP_PKEY_copy_parameters(sec_key, pubk)
|| !gost_ec_keygen(EVP_PKEY_get0(sec_key))) {
GOSTerr(GOST_F_PKEY_GOST_ECCP_ENCRYPT,
GOST_R_ERROR_COMPUTING_SHARED_KEY);
goto err;
}
}
}
if (out) {
int dgst_nid = NID_undef;
EVP_PKEY_get_default_digest_nid(pubk, &dgst_nid);
if (dgst_nid == NID_id_GostR3411_2012_512)
dgst_nid = NID_id_GostR3411_2012_256;
if (!VKO_compute_key(shared_key,
EC_KEY_get0_public_key(EVP_PKEY_get0(pubk)),
EVP_PKEY_get0(sec_key), ukm, 8, dgst_nid)) {
GOSTerr(GOST_F_PKEY_GOST_ECCP_ENCRYPT,
GOST_R_ERROR_COMPUTING_SHARED_KEY);
goto err;
}
gost_init(&cctx, param->sblock);
keyWrapCryptoPro(&cctx, shared_key, ukm, key, crypted_key);
}
gkt = GOST_KEY_TRANSPORT_new();
if (!gkt) {
goto err;
}
if (!ASN1_OCTET_STRING_set(gkt->key_agreement_info->eph_iv, ukm, 8)) {
goto err;
}
if (!ASN1_OCTET_STRING_set(gkt->key_info->imit, crypted_key + 40, 4)) {
goto err;
}
if (!ASN1_OCTET_STRING_set
(gkt->key_info->encrypted_key, crypted_key + 8, 32)) {
goto err;
}
if (key_is_ephemeral) {
if (!X509_PUBKEY_set
(&gkt->key_agreement_info->ephem_key, out ? sec_key : pubk)) {
GOSTerr(GOST_F_PKEY_GOST_ECCP_ENCRYPT,
GOST_R_CANNOT_PACK_EPHEMERAL_KEY);
goto err;
}
}
ASN1_OBJECT_free(gkt->key_agreement_info->cipher);
gkt->key_agreement_info->cipher = OBJ_nid2obj(param->nid);
if (key_is_ephemeral)
EVP_PKEY_free(sec_key);
if (!key_is_ephemeral) {
/* Set control "public key from client certificate used" */
if (EVP_PKEY_CTX_ctrl(pctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 3, NULL)
<= 0) {
GOSTerr(GOST_F_PKEY_GOST_ECCP_ENCRYPT, GOST_R_CTRL_CALL_FAILED);
goto err;
}
}
if ((*out_len = i2d_GOST_KEY_TRANSPORT(gkt, out ? &out : NULL)) > 0)
ret = 1;
OPENSSL_cleanse(shared_key, sizeof(shared_key));
GOST_KEY_TRANSPORT_free(gkt);
return ret;
err:
OPENSSL_cleanse(shared_key, sizeof(shared_key));
if (key_is_ephemeral)
EVP_PKEY_free(sec_key);
GOST_KEY_TRANSPORT_free(gkt);
return -1;
}
| null | null | 198,552 | 119311370633297214893271826756266062107 | 111 |
Fix buffer overrun in creating key transport blob according to RFC 9189, 4.2.4.2
Resolves: CVE-2022-29242
| other |
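The message names only the overrun site (the key transport blob of RFC 9189, 4.2.4.2). In the function above, the obvious fixed-size sink is the 8-byte ukm buffer filled from a caller-supplied shared UKM, so a defensive sketch could look as follows; the exact length check in the real patch is an assumption:

    /* Sketch: never copy into ukm[8] from a shared UKM of another size. */
    unsigned char ukm[8];
    if (data->shared_ukm_size) {
        if (data->shared_ukm_size != sizeof(ukm))
            return 0;                       /* reject instead of over-reading */
        memcpy(ukm, data->shared_ukm, sizeof(ukm));
    }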
Bento4 | 33331ce2d35d45d855af7441db6116b4a9e2b70f | 1 |
main(int argc, char** argv)
{
if (argc < 2) {
PrintUsageAndExit();
}
// default options
Options.input = NULL;
Options.verbose = false;
Options.hls_version = 0;
Options.pmt_pid = 0x100;
Options.audio_pid = 0x101;
Options.video_pid = 0x102;
Options.audio_track_id = -1;
Options.video_track_id = -1;
Options.audio_format = AUDIO_FORMAT_TS;
Options.output_single_file = false;
Options.show_info = false;
Options.index_filename = "stream.m3u8";
Options.iframe_index_filename = NULL;
Options.segment_filename_template = NULL;
Options.segment_url_template = NULL;
Options.segment_duration = 6;
Options.segment_duration_threshold = DefaultSegmentDurationThreshold;
Options.allow_cache = NULL;
Options.encryption_key_hex = NULL;
Options.encryption_mode = ENCRYPTION_MODE_NONE;
Options.encryption_iv_mode = ENCRYPTION_IV_MODE_NONE;
Options.encryption_key_uri = "key.bin";
Options.encryption_key_format = NULL;
Options.encryption_key_format_versions = NULL;
Options.pcr_offset = AP4_MPEG2_TS_DEFAULT_PCR_OFFSET;
AP4_SetMemory(Options.encryption_key, 0, sizeof(Options.encryption_key));
AP4_SetMemory(Options.encryption_iv, 0, sizeof(Options.encryption_iv));
AP4_SetMemory(&Stats, 0, sizeof(Stats));
// parse command line
AP4_Result result;
char** args = argv+1;
while (const char* arg = *args++) {
if (!strcmp(arg, "--verbose")) {
Options.verbose = true;
} else if (!strcmp(arg, "--hls-version")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --hls-version requires a number\n");
return 1;
}
Options.hls_version = (unsigned int)strtoul(*args++, NULL, 10);
if (Options.hls_version ==0) {
fprintf(stderr, "ERROR: --hls-version requires number > 0\n");
return 1;
}
} else if (!strcmp(arg, "--segment-duration")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --segment-duration requires a number\n");
return 1;
}
Options.segment_duration = (unsigned int)strtoul(*args++, NULL, 10);
} else if (!strcmp(arg, "--segment-duration-threshold")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --segment-duration-threshold requires a number\n");
return 1;
}
Options.segment_duration_threshold = (unsigned int)strtoul(*args++, NULL, 10);
} else if (!strcmp(arg, "--segment-filename-template")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --segment-filename-template requires an argument\n");
return 1;
}
Options.segment_filename_template = *args++;
} else if (!strcmp(arg, "--segment-url-template")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --segment-url-template requires an argument\n");
return 1;
}
Options.segment_url_template = *args++;
} else if (!strcmp(arg, "--allow-cache")) {
if (*args == NULL || (strcmp(*args, "NO") && strcmp(*args, "YES"))) {
fprintf(stderr, "ERROR: --allow-cache requires a YES or NO argument\n");
return 1;
}
Options.allow_cache = *args++;
} else if (!strcmp(arg, "--pmt-pid")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --pmt-pid requires a number\n");
return 1;
}
Options.pmt_pid = (unsigned int)strtoul(*args++, NULL, 10);
} else if (!strcmp(arg, "--audio-pid")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --audio-pid requires a number\n");
return 1;
}
Options.audio_pid = (unsigned int)strtoul(*args++, NULL, 10);
} else if (!strcmp(arg, "--video-pid")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --video-pid requires a number\n");
return 1;
}
Options.video_pid = (unsigned int)strtoul(*args++, NULL, 10);
} else if (!strcmp(arg, "--audio-track-id")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --audio-track-id requires a number\n");
return 1;
}
Options.audio_track_id = (unsigned int)strtoul(*args++, NULL, 10);
} else if (!strcmp(arg, "--audio-format")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --audio-format requires an argument\n");
return 1;
}
const char* format = *args++;
if (!strcmp(format, "ts")) {
Options.audio_format = AUDIO_FORMAT_TS;
} else if (!strcmp(format, "packed")) {
Options.audio_format = AUDIO_FORMAT_PACKED;
} else {
fprintf(stderr, "ERROR: unknown audio format\n");
return 1;
}
} else if (!strcmp(arg, "--video-track-id")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --video-track-id requires a number\n");
return 1;
}
Options.video_track_id = (unsigned int)strtoul(*args++, NULL, 10);
} else if (!strcmp(arg, "--pcr-offset")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --pcr-offset requires a number\n");
return 1;
}
Options.pcr_offset = (unsigned int)strtoul(*args++, NULL, 10);
} else if (!strcmp(arg, "--output-single-file")) {
Options.output_single_file = true;
} else if (!strcmp(arg, "--index-filename")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --index-filename requires a filename\n");
return 1;
}
Options.index_filename = *args++;
} else if (!strcmp(arg, "--iframe-index-filename")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --iframe-index-filename requires a filename\n");
return 1;
}
Options.iframe_index_filename = *args++;
} else if (!strcmp(arg, "--show-info")) {
Options.show_info = true;
} else if (!strcmp(arg, "--encryption-key")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --encryption-key requires an argument\n");
return 1;
}
Options.encryption_key_hex = *args++;
result = AP4_ParseHex(Options.encryption_key_hex, Options.encryption_key, 16);
if (AP4_FAILED(result)) {
fprintf(stderr, "ERROR: invalid hex key\n");
return 1;
}
if (Options.encryption_mode == ENCRYPTION_MODE_NONE) {
Options.encryption_mode = ENCRYPTION_MODE_AES_128;
}
} else if (!strcmp(arg, "--encryption-mode")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --encryption-mode requires an argument\n");
return 1;
}
if (strncmp(*args, "AES-128", 7) == 0) {
Options.encryption_mode = ENCRYPTION_MODE_AES_128;
} else if (strncmp(*args, "SAMPLE-AES", 10) == 0) {
Options.encryption_mode = ENCRYPTION_MODE_SAMPLE_AES;
} else {
fprintf(stderr, "ERROR: unknown encryption mode\n");
return 1;
}
++args;
} else if (!strcmp(arg, "--encryption-iv-mode")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --encryption-iv-mode requires an argument\n");
return 1;
}
if (strncmp(*args, "sequence", 8) == 0) {
Options.encryption_iv_mode = ENCRYPTION_IV_MODE_SEQUENCE;
} else if (strncmp(*args, "random", 6) == 0) {
Options.encryption_iv_mode = ENCRYPTION_IV_MODE_RANDOM;
} else if (strncmp(*args, "fps", 3) == 0) {
Options.encryption_iv_mode = ENCRYPTION_IV_MODE_FPS;
} else {
fprintf(stderr, "ERROR: unknown encryption IV mode\n");
return 1;
}
++args;
} else if (!strcmp(arg, "--encryption-key-uri")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --encryption-key-uri requires an argument\n");
return 1;
}
Options.encryption_key_uri = *args++;
} else if (!strcmp(arg, "--encryption-key-format")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --encryption-key-format requires an argument\n");
return 1;
}
Options.encryption_key_format = *args++;
} else if (!strcmp(arg, "--encryption-key-format-versions")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --encryption-key-format-versions requires an argument\n");
return 1;
}
Options.encryption_key_format_versions = *args++;
} else if (!strcmp(arg, "--encryption-key-line")) {
if (*args == NULL) {
fprintf(stderr, "ERROR: --encryption-key-line requires an argument\n");
return 1;
}
Options.encryption_key_lines.Append(*args++);
} else if (Options.input == NULL) {
Options.input = arg;
} else {
fprintf(stderr, "ERROR: unexpected argument: %s\n", arg);
return 1;
}
}
// check args
if (Options.input == NULL) {
fprintf(stderr, "ERROR: missing input file name\n");
return 1;
}
if (Options.encryption_mode == ENCRYPTION_MODE_NONE && Options.encryption_key_lines.ItemCount() != 0) {
fprintf(stderr, "ERROR: --encryption-key-line requires --encryption-key and --encryption-key-mode\n");
return 1;
}
if (Options.encryption_mode != ENCRYPTION_MODE_NONE && Options.encryption_key_hex == NULL) {
fprintf(stderr, "ERROR: no encryption key specified\n");
return 1;
}
if (Options.encryption_mode == ENCRYPTION_MODE_SAMPLE_AES && Options.hls_version > 0 && Options.hls_version < 5) {
Options.hls_version = 5;
fprintf(stderr, "WARNING: forcing version to 5 in order to support SAMPLE-AES encryption\n");
}
if (Options.iframe_index_filename && Options.hls_version > 0 && Options.hls_version < 4) {
fprintf(stderr, "WARNING: forcing version to 4 in order to support I-FRAME-ONLY playlists\n");
Options.hls_version = 4;
}
if (Options.encryption_iv_mode == ENCRYPTION_IV_MODE_NONE && Options.encryption_mode != ENCRYPTION_MODE_NONE) {
if (Options.encryption_mode == ENCRYPTION_MODE_SAMPLE_AES) {
// sequence-mode IVs don't work well with i-frame only playlists, use random instead
Options.encryption_iv_mode = ENCRYPTION_IV_MODE_RANDOM;
} else {
Options.encryption_iv_mode = ENCRYPTION_IV_MODE_SEQUENCE;
}
}
if ((Options.encryption_key_format || Options.encryption_key_format_versions) && Options.hls_version > 0 && Options.hls_version < 5) {
Options.hls_version = 5;
fprintf(stderr, "WARNING: forcing version to 5 in order to support KEYFORMAT and/or KEYFORMATVERSIONS\n");
}
if (Options.output_single_file && Options.hls_version > 0 && Options.hls_version < 4) {
Options.hls_version = 4;
fprintf(stderr, "WARNING: forcing version to 4 in order to support single file output\n");
}
if (Options.hls_version == 0) {
// default version is 3 for cleartext or AES-128 encryption, and 5 for SAMPLE-AES
if (Options.encryption_mode == ENCRYPTION_MODE_SAMPLE_AES) {
Options.hls_version = 5;
} else if (Options.output_single_file || Options.iframe_index_filename) {
Options.hls_version = 4;
} else {
Options.hls_version = 3;
}
}
if (Options.verbose && Options.show_info) {
fprintf(stderr, "WARNING: --verbose will be ignored because --show-info is selected\n");
Options.verbose = false;
}
// compute some derived values
if (Options.iframe_index_filename == NULL) {
if (Options.hls_version >= 4) {
Options.iframe_index_filename = "iframes.m3u8";
}
}
if (Options.audio_format == AUDIO_FORMAT_TS) {
if (Options.segment_filename_template == NULL) {
if (Options.output_single_file) {
Options.segment_filename_template = "stream.ts";
} else {
Options.segment_filename_template = "segment-%d.ts";
}
}
if (Options.segment_url_template == NULL) {
if (Options.output_single_file) {
Options.segment_url_template = "stream.ts";
} else {
Options.segment_url_template = "segment-%d.ts";
}
}
}
if (Options.encryption_iv_mode == ENCRYPTION_IV_MODE_FPS) {
if (AP4_StringLength(Options.encryption_key_hex) != 64) {
fprintf(stderr, "ERROR: 'fps' IV mode requires a 32 byte key value (64 characters in hex)\n");
return 1;
}
result = AP4_ParseHex(Options.encryption_key_hex+32, Options.encryption_iv, 16);
if (AP4_FAILED(result)) {
fprintf(stderr, "ERROR: invalid hex IV\n");
return 1;
}
} else if (Options.encryption_iv_mode == ENCRYPTION_IV_MODE_RANDOM) {
result = AP4_System_GenerateRandomBytes(Options.encryption_iv, sizeof(Options.encryption_iv));
if (AP4_FAILED(result)) {
fprintf(stderr, "ERROR: failed to get random IV (%d)\n", result);
return 1;
}
}
// create the input stream
AP4_ByteStream* input = NULL;
result = AP4_FileByteStream::Create(Options.input, AP4_FileByteStream::STREAM_MODE_READ, input);
if (AP4_FAILED(result)) {
fprintf(stderr, "ERROR: cannot open input (%d)\n", result);
return 1;
}
// open the file
AP4_File* input_file = new AP4_File(*input, true);
// get the movie
AP4_SampleDescription* sample_description;
AP4_Movie* movie = input_file->GetMovie();
if (movie == NULL) {
fprintf(stderr, "ERROR: no movie in file\n");
return 1;
}
// get the audio and video tracks
AP4_Track* audio_track = NULL;
if (Options.audio_track_id == -1) {
audio_track = movie->GetTrack(AP4_Track::TYPE_AUDIO);
} else if (Options.audio_track_id > 0) {
audio_track = movie->GetTrack((AP4_UI32)Options.audio_track_id);
if (audio_track == NULL) {
fprintf(stderr, "ERROR: audio track ID %d not found\n", Options.audio_track_id);
return 1;
}
if (audio_track->GetType() != AP4_Track::TYPE_AUDIO) {
fprintf(stderr, "ERROR: track ID %d is not an audio track\n", Options.audio_track_id);
return 1;
}
}
AP4_Track* video_track = NULL;
if (Options.video_track_id == -1) {
video_track = movie->GetTrack(AP4_Track::TYPE_VIDEO);
} else if (Options.video_track_id > 0) {
video_track = movie->GetTrack((AP4_UI32)Options.video_track_id);
if (video_track == NULL) {
fprintf(stderr, "ERROR: video track ID %d not found\n", Options.video_track_id);
return 1;
}
if (video_track->GetType() != AP4_Track::TYPE_VIDEO) {
fprintf(stderr, "ERROR: track ID %d is not a video track\n", Options.video_track_id);
return 1;
}
}
if (audio_track == NULL && video_track == NULL) {
fprintf(stderr, "ERROR: no suitable tracks found\n");
delete input_file;
input->Release();
return 1;
}
if (Options.audio_format == AUDIO_FORMAT_PACKED && video_track != NULL) {
if (audio_track == NULL) {
fprintf(stderr, "ERROR: packed audio format requires an audio track\n");
return 1;
}
fprintf(stderr, "WARNING: ignoring video track because of the packed audio format\n");
video_track = NULL;
}
if (video_track == NULL) {
Options.segment_duration_threshold = 0;
}
// create the appropriate readers
AP4_LinearReader* linear_reader = NULL;
SampleReader* audio_reader = NULL;
SampleReader* video_reader = NULL;
if (movie->HasFragments()) {
// create a linear reader to get the samples
linear_reader = new AP4_LinearReader(*movie, input);
if (audio_track) {
linear_reader->EnableTrack(audio_track->GetId());
audio_reader = new FragmentedSampleReader(*linear_reader, audio_track->GetId());
}
if (video_track) {
linear_reader->EnableTrack(video_track->GetId());
video_reader = new FragmentedSampleReader(*linear_reader, video_track->GetId());
}
} else {
if (audio_track) {
audio_reader = new TrackSampleReader(*audio_track);
}
if (video_track) {
video_reader = new TrackSampleReader(*video_track);
}
}
AP4_Mpeg2TsWriter* ts_writer = NULL;
AP4_Mpeg2TsWriter::SampleStream* audio_stream = NULL;
AP4_Mpeg2TsWriter::SampleStream* video_stream = NULL;
AP4_UI08 nalu_length_size = 0;
PackedAudioWriter* packed_writer = NULL;
if (Options.audio_format == AUDIO_FORMAT_PACKED) {
packed_writer = new PackedAudioWriter();
// figure out the file extensions if needed
sample_description = audio_track->GetSampleDescription(0);
if (sample_description == NULL) {
fprintf(stderr, "ERROR: unable to parse audio sample description\n");
goto end;
}
if (Options.segment_filename_template == NULL || Options.segment_url_template == NULL) {
const char* default_stream_name = "stream.es";
const char* default_stream_pattern = "segment-%d.es";
if (sample_description->GetFormat() == AP4_SAMPLE_FORMAT_MP4A) {
AP4_MpegAudioSampleDescription* mpeg_audio_desc = AP4_DYNAMIC_CAST(AP4_MpegAudioSampleDescription, sample_description);
if (mpeg_audio_desc == NULL ||
!(mpeg_audio_desc->GetObjectTypeId() == AP4_OTI_MPEG4_AUDIO ||
mpeg_audio_desc->GetObjectTypeId() == AP4_OTI_MPEG2_AAC_AUDIO_LC ||
mpeg_audio_desc->GetObjectTypeId() == AP4_OTI_MPEG2_AAC_AUDIO_MAIN)) {
fprintf(stderr, "ERROR: only AAC audio is supported\n");
return 1;
}
default_stream_name = "stream.aac";
default_stream_pattern = "segment-%d.aac";
} else if (sample_description->GetFormat() == AP4_SAMPLE_FORMAT_AC_3) {
default_stream_name = "stream.ac3";
default_stream_pattern = "segment-%d.ac3";
} else if (sample_description->GetFormat() == AP4_SAMPLE_FORMAT_EC_3) {
default_stream_name = "stream.ec3";
default_stream_pattern = "segment-%d.ec3";
} else if (sample_description->GetFormat() == AP4_SAMPLE_FORMAT_AC_4) {
default_stream_name = "stream.ac4";
default_stream_pattern = "segment-%d.ac4";
}
// override the segment names
if (Options.segment_filename_template == NULL) {
if (Options.output_single_file) {
Options.segment_filename_template = default_stream_name;
} else {
Options.segment_filename_template = default_stream_pattern;
}
}
if (Options.segment_url_template == NULL) {
if (Options.output_single_file) {
Options.segment_url_template = default_stream_name;
} else {
Options.segment_url_template = default_stream_pattern;
}
}
}
} else {
// create an MPEG2 TS Writer
ts_writer = new AP4_Mpeg2TsWriter(Options.pmt_pid);
// add the audio stream
if (audio_track) {
sample_description = audio_track->GetSampleDescription(0);
if (sample_description == NULL) {
fprintf(stderr, "ERROR: unable to parse audio sample description\n");
goto end;
}
unsigned int stream_type = 0;
unsigned int stream_id = 0;
if (sample_description->GetFormat() == AP4_SAMPLE_FORMAT_MP4A) {
if (Options.encryption_mode == ENCRYPTION_MODE_SAMPLE_AES) {
stream_type = AP4_MPEG2_STREAM_TYPE_SAMPLE_AES_ISO_IEC_13818_7;
} else {
stream_type = AP4_MPEG2_STREAM_TYPE_ISO_IEC_13818_7;
}
stream_id = AP4_MPEG2_TS_DEFAULT_STREAM_ID_AUDIO;
} else if (sample_description->GetFormat() == AP4_SAMPLE_FORMAT_AC_3) {
if (Options.encryption_mode == ENCRYPTION_MODE_SAMPLE_AES) {
stream_type = AP4_MPEG2_STREAM_TYPE_SAMPLE_AES_ATSC_AC3;
} else {
stream_type = AP4_MPEG2_STREAM_TYPE_ATSC_AC3;
}
stream_id = AP4_MPEG2_TS_STREAM_ID_PRIVATE_STREAM_1;
} else if (sample_description->GetFormat() == AP4_SAMPLE_FORMAT_EC_3) {
if (Options.encryption_mode == ENCRYPTION_MODE_SAMPLE_AES) {
stream_type = AP4_MPEG2_STREAM_TYPE_SAMPLE_AES_ATSC_EAC3;
} else {
stream_type = AP4_MPEG2_STREAM_TYPE_ATSC_EAC3;
}
stream_id = AP4_MPEG2_TS_STREAM_ID_PRIVATE_STREAM_1;
} else {
fprintf(stderr, "ERROR: audio codec not supported\n");
return 1;
}
if (stream_type == AP4_MPEG2_STREAM_TYPE_ATSC_EAC3) {
// E-AC-3 descriptor
unsigned int number_of_channels = 0;
AP4_String track_language;
AP4_Dec3Atom* dec3 = AP4_DYNAMIC_CAST(AP4_Dec3Atom, sample_description->GetDetails().GetChild(AP4_ATOM_TYPE_DEC3));
AP4_BitWriter bits(8);
bits.Write(0xCC, 8);
bits.Write(0x06, 8); // fixed value
bits.Write(0xC0, 8); // reserved, bsid_flag, mainid_flag, asvc_flag, mixinfoexists, substream1_flag, substream2_flag and substream3_flag
bits.Write(24, 5); // reserved, full_service_flag and service_type
if (dec3->GetSubStreams()[0].acmod == 0) {
number_of_channels = 1;
} else if (dec3->GetSubStreams()[0].acmod == 1) {
number_of_channels = 0;
} else if (dec3->GetSubStreams()[0].acmod == 2) {
number_of_channels = 2;
} else {
number_of_channels = 4;
}
if (dec3->GetSubStreams()[0].num_dep_sub > 0) {
number_of_channels = 5;
}
bits.Write(number_of_channels, 3); // number_of_channels
bits.Write(4, 3); // language_flag, language_flag_2, reserved
bits.Write(dec3->GetSubStreams()[0].bsid, 5); // bsid
track_language = audio_track->GetTrackLanguage();
if (track_language.GetLength() == 3) {
bits.Write(track_language.GetChars()[0], 8);
bits.Write(track_language.GetChars()[1], 8);
bits.Write(track_language.GetChars()[2], 8);
} else {
bits.Write(0x75, 8);
bits.Write(0x6E, 8);
bits.Write(0x64, 8);
}
// setup the audio stream
result = ts_writer->SetAudioStream(audio_track->GetMediaTimeScale(),
stream_type,
stream_id,
audio_stream,
Options.audio_pid,
bits.GetData(), 8,
Options.pcr_offset);
} else {
// setup the audio stream
result = ts_writer->SetAudioStream(audio_track->GetMediaTimeScale(),
stream_type,
stream_id,
audio_stream,
Options.audio_pid,
NULL, 0,
Options.pcr_offset);
}
if (AP4_FAILED(result)) {
fprintf(stderr, "could not create audio stream (%d)\n", result);
goto end;
}
}
// add the video stream
if (video_track) {
sample_description = video_track->GetSampleDescription(0);
if (sample_description == NULL) {
fprintf(stderr, "ERROR: unable to parse video sample description\n");
goto end;
}
// decide on the stream type
unsigned int stream_type = 0;
unsigned int stream_id = AP4_MPEG2_TS_DEFAULT_STREAM_ID_VIDEO;
if (sample_description->GetFormat() == AP4_SAMPLE_FORMAT_AVC1 ||
sample_description->GetFormat() == AP4_SAMPLE_FORMAT_AVC2 ||
sample_description->GetFormat() == AP4_SAMPLE_FORMAT_AVC3 ||
sample_description->GetFormat() == AP4_SAMPLE_FORMAT_AVC4 ||
sample_description->GetFormat() == AP4_SAMPLE_FORMAT_DVAV ||
sample_description->GetFormat() == AP4_SAMPLE_FORMAT_DVA1) {
if (Options.encryption_mode == ENCRYPTION_MODE_SAMPLE_AES) {
stream_type = AP4_MPEG2_STREAM_TYPE_SAMPLE_AES_AVC;
AP4_AvcSampleDescription* avc_desc = AP4_DYNAMIC_CAST(AP4_AvcSampleDescription, sample_description);
if (avc_desc == NULL) {
fprintf(stderr, "ERROR: not a proper AVC track\n");
return 1;
}
nalu_length_size = avc_desc->GetNaluLengthSize();
} else {
stream_type = AP4_MPEG2_STREAM_TYPE_AVC;
}
} else if (sample_description->GetFormat() == AP4_SAMPLE_FORMAT_HEV1 ||
sample_description->GetFormat() == AP4_SAMPLE_FORMAT_HVC1 ||
sample_description->GetFormat() == AP4_SAMPLE_FORMAT_DVHE ||
sample_description->GetFormat() == AP4_SAMPLE_FORMAT_DVH1) {
stream_type = AP4_MPEG2_STREAM_TYPE_HEVC;
} else {
fprintf(stderr, "ERROR: video codec not supported\n");
return 1;
}
if (Options.encryption_mode == ENCRYPTION_MODE_SAMPLE_AES) {
if (stream_type != AP4_MPEG2_STREAM_TYPE_SAMPLE_AES_AVC) {
fprintf(stderr, "ERROR: AES-SAMPLE encryption can only be used with H.264 video\n");
return 1;
}
}
// setup the video stream
result = ts_writer->SetVideoStream(video_track->GetMediaTimeScale(),
stream_type,
stream_id,
video_stream,
Options.video_pid,
NULL, 0,
Options.pcr_offset);
if (AP4_FAILED(result)) {
fprintf(stderr, "could not create video stream (%d)\n", result);
goto end;
}
}
}
result = WriteSamples(ts_writer, packed_writer,
audio_track, audio_reader, audio_stream,
video_track, video_reader, video_stream,
Options.segment_duration_threshold,
nalu_length_size);
if (AP4_FAILED(result)) {
fprintf(stderr, "ERROR: failed to write samples (%d)\n", result);
}
if (Options.show_info) {
double average_segment_bitrate = 0.0;
if (Stats.segments_total_duration != 0.0) {
average_segment_bitrate = 8.0*(double)Stats.segments_total_size/Stats.segments_total_duration;
}
double average_iframe_bitrate = 0.0;
if (Stats.segments_total_duration != 0.0) {
average_iframe_bitrate = 8.0*(double)Stats.iframes_total_size/Stats.segments_total_duration;
}
double frame_rate = 0.0;
if (video_track && (Stats.segments_total_duration != 0.0)) {
double sample_count = (double)video_track->GetSampleCount();
double media_duration = (double)video_track->GetMediaDuration();
double timescale = (double)video_track->GetMediaTimeScale();
if (media_duration > 0.0) {
frame_rate = sample_count/(media_duration/timescale);
}
}
printf(
"{\n"
);
printf(
" \"stats\": {\n"
" \"duration\": %f,\n"
" \"avg_segment_bitrate\": %f,\n"
" \"max_segment_bitrate\": %f,\n"
" \"avg_iframe_bitrate\": %f,\n"
" \"max_iframe_bitrate\": %f,\n"
" \"frame_rate\": %f\n"
" }",
(double)movie->GetDurationMs()/1000.0,
average_segment_bitrate,
Stats.max_segment_bitrate,
average_iframe_bitrate,
Stats.max_iframe_bitrate,
frame_rate
);
if (audio_track) {
AP4_String codec;
AP4_SampleDescription* sdesc = audio_track->GetSampleDescription(0);
if (sdesc) {
sdesc->GetCodecString(codec);
}
printf(
",\n"
" \"audio\": {\n"
" \"codec\": \"%s\"\n"
" }",
codec.GetChars()
);
}
if (video_track) {
AP4_String codec;
AP4_UI16 width = (AP4_UI16)(video_track->GetWidth()/65536.0);
AP4_UI16 height = (AP4_UI16)(video_track->GetHeight()/65536.0);
AP4_SampleDescription* sdesc = video_track->GetSampleDescription(0);
if (sdesc) {
sdesc->GetCodecString(codec);
AP4_VideoSampleDescription* video_desc = AP4_DYNAMIC_CAST(AP4_VideoSampleDescription, sdesc);
if (video_desc) {
width = video_desc->GetWidth();
height = video_desc->GetHeight();
}
}
printf(
",\n"
" \"video\": {\n"
" \"codec\": \"%s\",\n"
" \"width\": %d,\n"
" \"height\": %d\n"
" }",
codec.GetChars(),
width,
height
);
}
printf(
"\n"
"}\n"
);
}
end:
delete ts_writer;
delete packed_writer;
delete input_file;
input->Release();
delete linear_reader;
delete audio_reader;
delete video_reader;
return result == AP4_SUCCESS?0:1;
}
| null | null | 198,555 | 309905552433608479525781569947809221165 | 725 | fix #691 | other |
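The message gives no detail beyond the issue number. In the function above, the one unchecked dynamic cast is dec3 in the E-AC-3 descriptor branch, which is dereferenced immediately afterwards; a purely inferred sketch of the missing guard:

    /* Sketch: bail out if the sample description carries no dec3 atom
     * instead of dereferencing a NULL pointer a few lines later. */
    if (dec3 == NULL) {
        fprintf(stderr, "ERROR: invalid E-AC-3 sample description\n");
        return 1;
    }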
mruby | da48e7dbb20024c198493b8724adae1b842083aa | 1 |
fiber_switch(mrb_state *mrb, mrb_value self, mrb_int len, const mrb_value *a, mrb_bool resume, mrb_bool vmexec)
{
struct mrb_context *c = fiber_check(mrb, self);
struct mrb_context *old_c = mrb->c;
enum mrb_fiber_state status;
mrb_value value;
fiber_check_cfunc(mrb, c);
status = c->status;
switch (status) {
case MRB_FIBER_TRANSFERRED:
if (resume) {
mrb_raise(mrb, E_FIBER_ERROR, "resuming transferred fiber");
}
break;
case MRB_FIBER_RUNNING:
case MRB_FIBER_RESUMED:
mrb_raise(mrb, E_FIBER_ERROR, "double resume");
break;
case MRB_FIBER_TERMINATED:
mrb_raise(mrb, E_FIBER_ERROR, "resuming dead fiber");
break;
default:
break;
}
old_c->status = resume ? MRB_FIBER_RESUMED : MRB_FIBER_TRANSFERRED;
c->prev = resume ? mrb->c : (c->prev ? c->prev : mrb->root_c);
fiber_switch_context(mrb, c);
if (status == MRB_FIBER_CREATED) {
mrb_value *b, *e;
if (!c->ci->proc) {
mrb_raise(mrb, E_FIBER_ERROR, "double resume (current)");
}
mrb_stack_extend(mrb, len+2); /* for receiver and (optional) block */
b = c->stbase+1;
e = b + len;
while (b<e) {
*b++ = *a++;
}
if (vmexec) {
c->ci--; /* pop dummy callinfo */
}
c->cibase->n = len;
value = c->stbase[0] = MRB_PROC_ENV(c->cibase->proc)->stack[0];
}
else {
value = fiber_result(mrb, a, len);
if (vmexec) {
c->ci[1].stack[0] = value;
}
}
if (vmexec) {
c->vmexec = TRUE;
value = mrb_vm_exec(mrb, c->ci->proc, c->ci->pc);
mrb->c = old_c;
}
else {
MARK_CONTEXT_MODIFY(c);
}
return value;
}
| null | null | 198,556 | 271948785129900052928977498798833276567 | 63 | fiber.c: should pack 15+ arguments in an array. | other |
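A sketch of the packing the message describes, placed where fiber_switch copies arguments into the new fiber's stack; the 15-argument threshold and the "packed" marker value are assumptions about mruby's calling convention:

    /* Sketch: with 15 or more arguments, pass one array plus a marker count
     * instead of overflowing the callinfo's small argument-count field. */
    if (len >= 15) {
      c->stbase[1] = mrb_ary_new_from_values(mrb, len, a); /* pack into array */
      c->cibase->n = 15;                                   /* packed marker */
    }
    else {
      mrb_value *b = c->stbase+1, *e = b + len;
      while (b < e) *b++ = *a++;
      c->cibase->n = len;
    }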
libmobi | eafc415bc6067e72577f70d6dd5acbf057ce6e6f | 1 |
MOBI_RET mobi_decode_infl(unsigned char *decoded, int *decoded_size, const unsigned char *rule) {
int pos = *decoded_size;
char mod = 'i';
char dir = '<';
char olddir;
unsigned char c;
while ((c = *rule++)) {
if (c <= 4) {
mod = (c <= 2) ? 'i' : 'd'; /* insert, delete */
olddir = dir;
dir = (c & 2) ? '<' : '>'; /* left, right */
if (olddir != dir && olddir) {
pos = (c & 2) ? *decoded_size : 0;
}
}
else if (c > 10 && c < 20) {
if (dir == '>') {
pos = *decoded_size;
}
pos -= c - 10;
dir = 0;
if (pos < 0 || pos > *decoded_size) {
debug_print("Position setting failed (%s)\n", decoded);
return MOBI_DATA_CORRUPT;
}
}
else {
if (mod == 'i') {
const unsigned char *s = decoded + pos;
unsigned char *d = decoded + pos + 1;
const int l = *decoded_size - pos;
if (l < 0 || d + l > decoded + INDX_INFLBUF_SIZEMAX) {
debug_print("Out of buffer in %s at pos: %i\n", decoded, pos);
return MOBI_DATA_CORRUPT;
}
memmove(d, s, (size_t) l);
decoded[pos] = c;
(*decoded_size)++;
if (dir == '>') { pos++; }
} else {
if (dir == '<') { pos--; }
const unsigned char *s = decoded + pos + 1;
unsigned char *d = decoded + pos;
const int l = *decoded_size - pos;
if (l < 0 || d + l > decoded + INDX_INFLBUF_SIZEMAX) {
debug_print("Out of buffer in %s at pos: %i\n", decoded, pos);
return MOBI_DATA_CORRUPT;
}
if (decoded[pos] != c) {
debug_print("Character mismatch in %s at pos: %i (%c != %c)\n", decoded, pos, decoded[pos], c);
return MOBI_DATA_CORRUPT;
}
memmove(d, s, (size_t) l);
(*decoded_size)--;
}
}
}
return MOBI_SUCCESS;
}
| null | null | 198,566 | 211795113703656920067010021209431193626 | 59 | Fix wrong boundary checks in inflections parser resulting in stack buffer over-read with corrupt input | other |
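The message points at wrong boundary checks. A sketch of a tightened check before each indexed access in mobi_decode_infl, using the same locals; the exact inequalities in the real patch may differ:

    /* Sketch: pos must stay inside the decoded buffer, and an insert must
     * leave room for one more byte plus the terminating NUL. */
    if (pos < 0 || pos > *decoded_size ||
        *decoded_size + 1 >= INDX_INFLBUF_SIZEMAX) {
        debug_print("Position setting failed (%s)\n", decoded);
        return MOBI_DATA_CORRUPT;
    }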
libjpeg-turbo | 6709e4a0cfa44d4f54ee8ad05753d4aa9260cb91 | 1 |
get_text_gray_row (j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
/* This version is for reading text-format PGM files with any maxval */
{
ppm_source_ptr source = (ppm_source_ptr) sinfo;
FILE * infile = source->pub.input_file;
register JSAMPROW ptr;
register JSAMPLE *rescale = source->rescale;
JDIMENSION col;
ptr = source->pub.buffer[0];
for (col = cinfo->image_width; col > 0; col--) {
*ptr++ = rescale[read_pbm_integer(cinfo, infile)];
}
return 1;
}
| null | null | 198,576 | 90454294641612113179390747862471467555 | 15 |
Check range of integer values in PPM text file
Add checks to ensure values are within the specified range.
Fixes mozilla/mozjpeg#141, closes #8
| other |
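A sketch of the added range check at the call site in get_text_gray_row; the source->maxval field name and the reuse of JERR_PPM_NONNUMERIC are assumptions:

    /* Sketch: reject text-PGM sample values above the declared maxval so
     * the rescale[] lookup cannot index past the end of the table. */
    for (col = cinfo->image_width; col > 0; col--) {
      unsigned int val = read_pbm_integer(cinfo, infile);
      if (val > source->maxval)               /* assumed field name */
        ERREXIT(cinfo, JERR_PPM_NONNUMERIC);  /* assumed error code */
      *ptr++ = rescale[val];
    }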
vim | 0e8e938d497260dd57be67b4966cb27a5f72376f | 1 |
get_lisp_indent(void)
{
pos_T *pos, realpos, paren;
int amount;
char_u *that;
colnr_T col;
colnr_T firsttry;
int parencount, quotecount;
int vi_lisp;
// Set vi_lisp to use the vi-compatible method
vi_lisp = (vim_strchr(p_cpo, CPO_LISP) != NULL);
realpos = curwin->w_cursor;
curwin->w_cursor.col = 0;
if ((pos = findmatch(NULL, '(')) == NULL)
pos = findmatch(NULL, '[');
else
{
paren = *pos;
pos = findmatch(NULL, '[');
if (pos == NULL || LT_POSP(pos, &paren))
pos = &paren;
}
if (pos != NULL)
{
// Extra trick: Take the indent of the first previous non-white
// line that is at the same () level.
amount = -1;
parencount = 0;
while (--curwin->w_cursor.lnum >= pos->lnum)
{
if (linewhite(curwin->w_cursor.lnum))
continue;
for (that = ml_get_curline(); *that != NUL; ++that)
{
if (*that == ';')
{
while (*(that + 1) != NUL)
++that;
continue;
}
if (*that == '\\')
{
if (*(that + 1) != NUL)
++that;
continue;
}
if (*that == '"' && *(that + 1) != NUL)
{
while (*++that && *that != '"')
{
// skipping escaped characters in the string
if (*that == '\\')
{
if (*++that == NUL)
break;
if (that[1] == NUL)
{
++that;
break;
}
}
}
}
if (*that == '(' || *that == '[')
++parencount;
else if (*that == ')' || *that == ']')
--parencount;
}
if (parencount == 0)
{
amount = get_indent();
break;
}
}
if (amount == -1)
{
curwin->w_cursor.lnum = pos->lnum;
curwin->w_cursor.col = pos->col;
col = pos->col;
that = ml_get_curline();
if (vi_lisp && get_indent() == 0)
amount = 2;
else
{
char_u *line = that;
amount = 0;
while (*that && col)
{
amount += lbr_chartabsize_adv(line, &that, (colnr_T)amount);
col--;
}
// Some keywords require "body" indenting rules (the
// non-standard-lisp ones are Scheme special forms):
//
// (let ((a 1)) instead (let ((a 1))
// (...)) of (...))
if (!vi_lisp && (*that == '(' || *that == '[')
&& lisp_match(that + 1))
amount += 2;
else
{
that++;
amount++;
firsttry = amount;
while (VIM_ISWHITE(*that))
{
amount += lbr_chartabsize(line, that, (colnr_T)amount);
++that;
}
if (*that && *that != ';') // not a comment line
{
// test *that != '(' to accommodate first let/do
// argument if it is more than one line
if (!vi_lisp && *that != '(' && *that != '[')
firsttry++;
parencount = 0;
quotecount = 0;
if (vi_lisp
|| (*that != '"'
&& *that != '\''
&& *that != '#'
&& (*that < '0' || *that > '9')))
{
while (*that
&& (!VIM_ISWHITE(*that)
|| quotecount
|| parencount)
&& (!((*that == '(' || *that == '[')
&& !quotecount
&& !parencount
&& vi_lisp)))
{
if (*that == '"')
quotecount = !quotecount;
if ((*that == '(' || *that == '[')
&& !quotecount)
++parencount;
if ((*that == ')' || *that == ']')
&& !quotecount)
--parencount;
if (*that == '\\' && *(that+1) != NUL)
amount += lbr_chartabsize_adv(
line, &that, (colnr_T)amount);
amount += lbr_chartabsize_adv(
line, &that, (colnr_T)amount);
}
}
while (VIM_ISWHITE(*that))
{
amount += lbr_chartabsize(
line, that, (colnr_T)amount);
that++;
}
if (!*that || *that == ';')
amount = firsttry;
}
}
}
}
}
else
amount = 0; // no matching '(' or '[' found, use zero indent
curwin->w_cursor = realpos;
return amount;
}
| null | null | 198,588 | 327268708570235606305447273326836046337 | 181 |
patch 8.2.5122: lisp indenting may run over the end of the line
Problem: Lisp indenting may run over the end of the line.
Solution: Check for NUL earlier.
| other |
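The solution line says only "check for NUL earlier". A sketch of the kind of guard that keeps the string-skipping loop in get_lisp_indent from stepping past the terminating NUL; placement and exact condition are assumptions:

    /* Sketch: when skipping an escaped character inside a "..." string,
     * make sure the backslash is not the last byte on the line. */
    if (*that == '\\')
    {
        if (*(that + 1) == NUL)
            break;          /* line ends in a backslash: stop here */
        ++that;             /* safe to skip the escaped character */
    }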
vim | dc5490e2cbc8c16022a23b449b48c1bd0083f366 | 1 |
ex_copy(linenr_T line1, linenr_T line2, linenr_T n)
{
linenr_T count;
char_u *p;
count = line2 - line1 + 1;
if ((cmdmod.cmod_flags & CMOD_LOCKMARKS) == 0)
{
curbuf->b_op_start.lnum = n + 1;
curbuf->b_op_end.lnum = n + count;
curbuf->b_op_start.col = curbuf->b_op_end.col = 0;
}
/*
* there are three situations:
* 1. destination is above line1
* 2. destination is between line1 and line2
* 3. destination is below line2
*
* n = destination (when starting)
* curwin->w_cursor.lnum = destination (while copying)
* line1 = start of source (while copying)
* line2 = end of source (while copying)
*/
if (u_save(n, n + 1) == FAIL)
return;
curwin->w_cursor.lnum = n;
while (line1 <= line2)
{
// need to use vim_strsave() because the line will be unlocked within
// ml_append()
p = vim_strsave(ml_get(line1));
if (p != NULL)
{
ml_append(curwin->w_cursor.lnum, p, (colnr_T)0, FALSE);
vim_free(p);
}
// situation 2: skip already copied lines
if (line1 == n)
line1 = curwin->w_cursor.lnum;
++line1;
if (curwin->w_cursor.lnum < line1)
++line1;
if (curwin->w_cursor.lnum < line2)
++line2;
++curwin->w_cursor.lnum;
}
appended_lines_mark(n, count);
msgmore((long)count);
}
| null | null | 198,662
|
23515640853533718736246272320506725321
| 53
|
patch 8.2.4215: illegal memory access when copying lines in Visual mode
Problem: Illegal memory access when copying lines in Visual mode.
Solution: Adjust the Visual position after copying lines.
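One way to realize the adjustment, assuming vim's check_pos() helper that clamps a pos_T to valid buffer bounds; placing it at the end of ex_copy() is an assumption inferred from the message:

    // A Visual selection recorded inside the shifted range may now
    // reference a line or column that no longer matches the buffer;
    // clamp it so later accesses stay in bounds.  (hedged sketch)
    if (VIsual_active)
	check_pos(curbuf, &VIsual);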
|
other
|
ipsec
|
7bab09631c2a303f87a7eb7e3d69e888673b9b7e
| 1
|
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_migrate,
struct xfrm_kmaddress *k, struct net *net,
struct xfrm_encap_tmpl *encap)
{
int i, err, nx_cur = 0, nx_new = 0;
struct xfrm_policy *pol = NULL;
struct xfrm_state *x, *xc;
struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
struct xfrm_state *x_new[XFRM_MAX_DEPTH];
struct xfrm_migrate *mp;
if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
goto out;
/* Stage 1 - find policy */
if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
err = -ENOENT;
goto out;
}
/* Stage 2 - find and update state(s) */
for (i = 0, mp = m; i < num_migrate; i++, mp++) {
if ((x = xfrm_migrate_state_find(mp, net))) {
x_cur[nx_cur] = x;
nx_cur++;
xc = xfrm_state_migrate(x, mp, encap);
if (xc) {
x_new[nx_new] = xc;
nx_new++;
} else {
err = -ENODATA;
goto restore_state;
}
}
}
/* Stage 3 - update policy */
if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
goto restore_state;
/* Stage 4 - delete old state(s) */
if (nx_cur) {
xfrm_states_put(x_cur, nx_cur);
xfrm_states_delete(x_cur, nx_cur);
}
/* Stage 5 - announce */
km_migrate(sel, dir, type, m, num_migrate, k, encap);
xfrm_pol_put(pol);
return 0;
out:
return err;
restore_state:
if (pol)
xfrm_pol_put(pol);
if (nx_cur)
xfrm_states_put(x_cur, nx_cur);
if (nx_new)
xfrm_states_delete(x_new, nx_new);
return err;
}
| null | null | 198,692
|
251209948525286249756852973192467594641
| 66
|
xfrm: policy: check policy direction value
The 'dir' parameter in xfrm_migrate() is a user-controlled byte which is used
as an array index. This can lead to an out-of-bounds access, kernel lockup and
DoS. Add a check for the 'dir' value.
This fixes CVE-2017-11600.
References: https://bugzilla.redhat.com/show_bug.cgi?id=1474928
Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)")
Cc: <[email protected]> # v2.6.21-rc1
Reported-by: "bo Zhang" <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
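The fix amounts to validating the user-supplied direction before it indexes any per-direction table. A sketch of the check, placed at the top of xfrm_migrate() (placement assumed from the message):

	/* 'dir' comes from userspace and is used as an index into
	 * fixed-size per-direction arrays; reject out-of-range values
	 * before doing anything else. */
	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}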
|
other
|
MilkyTracker
|
fd607a3439fcdd0992e5efded3c16fc79c804e34
| 1
|
mp_sint32 LoaderS3M::load(XMFileBase& f, XModule* module)
{
module->cleanUp();
// this will make code much easier to read
TXMHeader* header = &module->header;
TXMInstrument* instr = module->instr;
TXMSample* smp = module->smp;
TXMPattern* phead = module->phead;
// we're already out of memory here
if (!phead || !instr || !smp)
return MP_OUT_OF_MEMORY;
f.read(&header->name,1,28);
header->whythis1a = f.readByte();
if (f.readByte() != 16)
return MP_LOADER_FAILED; // no ST3 module
f.readByte(); // skip something
f.readByte(); // skip something
header->ordnum = f.readWord(); // number of positions in order list (songlength)
mp_ubyte* orders = new mp_ubyte[header->ordnum];
if (orders == NULL)
return MP_OUT_OF_MEMORY;
header->insnum = f.readWord(); // number of instruments
header->patnum = f.readWord(); // number of patterns
mp_sint32 flags = f.readWord(); // st3 flags
mp_sint32 Cvt = f.readWord();
header->flags = XModule::MODULE_ST3NEWINSTRUMENT | XModule::MODULE_ST3DUALCOMMANDS;
if (Cvt == 0x1300 || (flags & 64))
header->flags |= module->MODULE_OLDS3MVOLSLIDES;
header->flags |= module->MODULE_ST3NOTECUT;
/*mp_uword Ffi = */f.readWord();
f.read(header->sig,1,4);
header->mainvol = module->vol64to255(f.readByte()); // initial main volume
header->tempo = f.readByte(); // tempo
header->speed = f.readByte(); // speed
f.readByte(); // global volume? skipped...
f.readByte(); // ignore GUS click removal
/*mp_ubyte dp = */f.readByte();
f.readDword(); // skip something
f.readDword(); // skip something
f.readWord(); // skip some more...
mp_ubyte channelSettings[32];
f.read(channelSettings,1,32);
mp_sint32 numChannels = 0;
for (numChannels = 0; numChannels < 32; numChannels++)
if (channelSettings[numChannels] == 255)
break;
header->channum = numChannels; // number of channels
f.read(orders,1,header->ordnum);
mp_sint32 j = 0, i = 0;
for (i = 0; i < header->ordnum; i++)
{
if (orders[i] == 255)
break;
header->ord[j++] = orders[i];
}
header->ordnum = j; // final songlength
delete[] orders;
mp_uword* insParaPtrs = new mp_uword[header->insnum];
if (insParaPtrs == NULL)
return MP_OUT_OF_MEMORY;
f.readWords(insParaPtrs,header->insnum);
mp_uword* patParaPtrs = new mp_uword[header->patnum];
if (patParaPtrs == NULL)
{
delete[] insParaPtrs;
return MP_OUT_OF_MEMORY;
}
f.readWords(patParaPtrs,header->patnum);
//for (i = 0; i < header->insnum; i++)
//{
// printf("%x\n",insParaPtrs[i]*16);
//}
//////////////////////
// read instruments //
//////////////////////
mp_uint32* samplePtrs = new mp_uint32[header->insnum];
if (samplePtrs == NULL)
{
delete[] insParaPtrs;
delete[] patParaPtrs;
return MP_OUT_OF_MEMORY;
}
memset(samplePtrs,0,sizeof(mp_uint32)*header->insnum);
mp_sint32 s = 0;
for (i = 0; i < header->insnum; i++)
{
mp_uint32 insOffs = insParaPtrs[i]*16;
if (insOffs)
{
f.seekWithBaseOffset(insOffs);
// We can only read that if it's a sample
mp_ubyte type = f.readByte();
if (type == 1)
{
f.read(smp[s].name,1,12); // read dos filename
mp_ubyte bOffs = f.readByte();
mp_uword wOffs = f.readWord();
// stupid fileoffsets
samplePtrs[i] = (((mp_uint32)bOffs<<16)+(mp_uint32)wOffs)*16;
smp[s].flags = 1;
smp[s].pan = 0x80;
smp[s].samplen = f.readDword();
smp[s].loopstart = f.readDword();
mp_sint32 looplen = ((mp_sint32)f.readDword() - (mp_sint32)smp[s].loopstart);
if (looplen < 0)
looplen = 0;
smp[s].looplen = looplen;
smp[s].vol = module->vol64to255(f.readByte());
f.readByte(); // skip something
smp[s].res = f.readByte() == 0x04 ? 0xAD : 0; // packing
mp_ubyte flags = f.readByte();
// looping
if (flags & 1)
{
smp[s].type = 1;
}
// 16 bit sample
if (flags & 4)
{
smp[s].type |= 16;
smp[s].samplen >>= 1;
smp[s].loopstart >>= 1;
smp[s].looplen >>= 1;
}
mp_uint32 c4spd = f.readDword();
XModule::convertc4spd(c4spd,&smp[s].finetune,&smp[s].relnote);
#ifdef VERBOSE
printf("%i, %i\n",c4spd,module->getc4spd(smp[s].relnote,smp[s].finetune));
#endif
f.readDword(); // skip something
f.readDword(); // skip two internal words
f.readDword(); // skip internal dword
f.read(instr[i].name,1,28); // instrument name
f.readDword(); // skip signature
if (samplePtrs[i] && smp[s].samplen)
{
instr[i].samp=1;
for (j=0;j<120;j++)
instr[i].snum[j] = s;
s++;
}
}
else if (type == 0)
{
samplePtrs[i] = 0;
mp_ubyte buffer[12];
f.read(buffer,1,12); // read dos filename
f.readByte();
f.readWord();
f.readDword();
f.readDword();
f.readDword();
f.readByte();
f.readByte(); // skip something
f.readByte(); // skip packing
f.readByte();
f.readDword();
f.readDword(); // skip something
f.readDword(); // skip two internal words
f.readDword(); // skip internal dword
f.read(instr[i].name,1,28); // instrument name
f.readDword(); // skip signature
}
else
{
samplePtrs[i] = 0;
}
}
}
//////////////////////
// read patterns //
//////////////////////
mp_ubyte* pattern = new mp_ubyte[64*32*5];
if (pattern == NULL)
{
delete[] insParaPtrs;
delete[] patParaPtrs;
delete[] samplePtrs;
return MP_OUT_OF_MEMORY;
}
mp_uint32 songMaxChannels = 1;
for (i = 0; i < header->patnum; i++)
{
for (j = 0; j < 32*64; j++)
{
pattern[j*5] = 0xFF;
pattern[j*5+1] = 0;
pattern[j*5+2] = 0xFF;
pattern[j*5+3] = 0xFF;
pattern[j*5+4] = 0;
}
mp_uint32 patOffs = patParaPtrs[i]*16;
mp_uint32 maxChannels = 1;
if (patOffs)
{
f.seekWithBaseOffset(patOffs);
mp_uint32 size = f.readWord();
if (size > 2)
{
size-=2;
mp_ubyte* packed = new mp_ubyte[size+5];
if (packed == NULL)
{
delete[] insParaPtrs;
delete[] patParaPtrs;
delete[] samplePtrs;
delete[] pattern;
return MP_OUT_OF_MEMORY;
}
memset(packed, 0, size);
f.read(packed, 1, size);
mp_uint32 index = 0;
mp_uint32 row = 0;
while (index<size)
{
mp_ubyte pi = safeRead(packed, index, size);
if (pi == 0)
{
row++;
// one more safety net for incorrectly saved pattern sizes
if (row >= 64)
{
int i = 0;
i++;
i--;
break;
}
continue;
}
mp_uint32 chn = pi&31;
if (chn>maxChannels && (pi & (32+64+128)))
{
maxChannels = chn;
}
mp_ubyte* slot = pattern+(row*32*5)+chn*5;
if (pi & 32)
{
slot[0] = safeRead(packed, index, size, 0xFF);
slot[1] = safeRead(packed, index, size);
}
if (pi & 64)
{
slot[2] = safeRead(packed, index, size, 0xFF);
}
if (pi & 128)
{
slot[3] = safeRead(packed, index, size, 0xFF);
slot[4] = safeRead(packed, index, size);
}
}
maxChannels++;
if (maxChannels > header->channum)
maxChannels = header->channum;
delete[] packed;
}
if (maxChannels > songMaxChannels)
songMaxChannels = maxChannels;
}
convertS3MPattern(&phead[i], pattern, maxChannels, i);
}
if (header->channum > songMaxChannels)
header->channum = songMaxChannels;
delete[] pattern;
delete[] insParaPtrs;
delete[] patParaPtrs;
s = 0;
for (i = 0; i < header->insnum; i++)
{
mp_uint32 smpOffs = samplePtrs[i];
if (smpOffs)
{
f.seekWithBaseOffset(smpOffs);
if (!smp[s].samplen)
continue;
bool adpcm = (smp[s].res == 0xAD);
mp_sint32 result = module->loadModuleSample(f, s,
adpcm ? XModule::ST_PACKING_ADPCM : XModule::ST_UNSIGNED,
adpcm ? (XModule::ST_16BIT | XModule::ST_PACKING_ADPCM) : (XModule::ST_16BIT | XModule::ST_UNSIGNED));
if (result != MP_OK)
{
delete[] samplePtrs;
return result;
}
if (adpcm)
// no longer needed
smp[s].res = 0;
s++;
}
}
delete[] samplePtrs;
header->smpnum = s;
strcpy(header->tracker,"Screamtracker 3");
module->setDefaultPanning();
module->postProcessSamples();
return MP_OK;
}
| null | null | 198,695
|
232151339946137971220513489493449251730
| 415
|
Fix #184: Heap overflow in S3M loader
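The loader above already funnels every pattern-data read through safeRead(). A sketch of what such a bounds-checked helper can look like; the exact upstream signature may differ, and the reference parameter and default argument here are inferred from the call sites:

// Returns the next byte from 'buffer' and advances 'index', or returns
// 'fallback' without advancing once 'index' has reached 'size'.
static mp_ubyte safeRead(const mp_ubyte* buffer, mp_uint32& index,
			 mp_uint32 size, mp_ubyte fallback = 0)
{
	if (index >= size)
		return fallback;
	return buffer[index++];
}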
|
other
|
LibRaw
|
4606c28f494a750892c5c1ac7903e62dd1c6fdb5
| 1
|
int CLASS ljpeg_start (struct jhead *jh, int info_only)
{
int c, tag, len;
uchar data[0x10000];
const uchar *dp;
memset (jh, 0, sizeof *jh);
jh->restart = INT_MAX;
fread (data, 2, 1, ifp);
if (data[1] != 0xd8) return 0;
do {
fread (data, 2, 2, ifp);
tag = data[0] << 8 | data[1];
len = (data[2] << 8 | data[3]) - 2;
if (tag <= 0xff00) return 0;
fread (data, 1, len, ifp);
switch (tag) {
case 0xffc3:
jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3;
case 0xffc0:
jh->bits = data[0];
jh->high = data[1] << 8 | data[2];
jh->wide = data[3] << 8 | data[4];
jh->clrs = data[5] + jh->sraw;
if (len == 9 && !dng_version) getc(ifp);
break;
case 0xffc4:
if (info_only) break;
for (dp = data; dp < data+len && (c = *dp++) < 4; )
jh->free[c] = jh->huff[c] = make_decoder_ref (&dp);
break;
case 0xffda:
jh->psv = data[1+data[0]*2];
jh->bits -= data[3+data[0]*2] & 15;
break;
case 0xffdd:
jh->restart = data[0] << 8 | data[1];
}
} while (tag != 0xffda);
if (info_only) return 1;
if (jh->clrs > 6 || !jh->huff[0]) return 0;
FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c];
if (jh->sraw) {
FORC(4) jh->huff[2+c] = jh->huff[1];
FORC(jh->sraw) jh->huff[1+c] = jh->huff[0];
}
jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4);
merror (jh->row, "ljpeg_start()");
return zero_after_ff = 1;
}
| null | null | 198,703
|
12868455872231710696102155978892600552
| 50
|
0.16.1: fix for dcraw ljpeg_start() vulnerability
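The crux is that the marker length is attacker-controlled: a declared length below 2 makes len negative, and the subsequent fread() then receives a huge size_t. A hedged sketch of the kind of validation the fix calls for, not the verbatim upstream patch:

    len = (data[2] << 8 | data[3]) - 2;
    if (len < 0 || len > (int) sizeof data)
      return 0;			/* corrupt marker length */
    fread (data, 1, len, ifp);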
|
other
|
linux
|
d563131ef23cbc756026f839a82598c8445bc45f
| 1
|
static int rsi_send_beacon(struct rsi_common *common)
{
struct sk_buff *skb = NULL;
u8 dword_align_bytes = 0;
skb = dev_alloc_skb(MAX_MGMT_PKT_SIZE);
if (!skb)
return -ENOMEM;
memset(skb->data, 0, MAX_MGMT_PKT_SIZE);
dword_align_bytes = ((unsigned long)skb->data & 0x3f);
if (dword_align_bytes)
skb_pull(skb, (64 - dword_align_bytes));
if (rsi_prepare_beacon(common, skb)) {
rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n");
return -EINVAL;
}
skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb);
rsi_set_event(&common->tx_thread.event);
rsi_dbg(DATA_TX_ZONE, "%s: Added to beacon queue\n", __func__);
return 0;
}
| null | null | 198,736
|
175741261833627073099821425757531326769
| 24
|
rsi: release skb if rsi_prepare_beacon fails
In rsi_send_beacon, if rsi_prepare_beacon fails, the allocated skb should
be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
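The leaked buffer is the one allocated with dev_alloc_skb() at the top of the function; the error path just needs to free it before returning. A sketch consistent with the message (the actual commit may differ in detail):

	if (rsi_prepare_beacon(common, skb)) {
		rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n");
		dev_kfree_skb(skb);	/* don't leak the beacon skb */
		return -EINVAL;
	}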
|
other
|
LuaJIT
|
53f82e6e2e858a0a62fd1a2ff47e9866693382e6
| 1
|
static ptrdiff_t finderrfunc(lua_State *L)
{
cTValue *frame = L->base-1, *bot = tvref(L->stack);
void *cf = L->cframe;
while (frame > bot && cf) {
while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */
if (frame >= restorestack(L, -cframe_nres(cf)))
break;
if (cframe_errfunc(cf) >= 0) /* Error handler not inherited (-1)? */
return cframe_errfunc(cf);
cf = cframe_prev(cf); /* Else unwind cframe and continue searching. */
if (cf == NULL)
return 0;
}
switch (frame_typep(frame)) {
case FRAME_LUA:
case FRAME_LUAP:
frame = frame_prevl(frame);
break;
case FRAME_C:
cf = cframe_prev(cf);
/* fallthrough */
case FRAME_VARG:
frame = frame_prevd(frame);
break;
case FRAME_CONT:
#if LJ_HASFFI
if ((frame-1)->u32.lo == LJ_CONT_FFI_CALLBACK)
cf = cframe_prev(cf);
#endif
frame = frame_prevd(frame);
break;
case FRAME_CP:
if (cframe_canyield(cf)) return 0;
if (cframe_errfunc(cf) >= 0)
return cframe_errfunc(cf);
frame = frame_prevd(frame);
break;
case FRAME_PCALL:
case FRAME_PCALLH:
if (frame_ftsz(frame) >= (ptrdiff_t)(2*sizeof(TValue))) /* xpcall? */
return savestack(L, frame-1); /* Point to xpcall's errorfunc. */
return 0;
default:
lua_assert(0);
return 0;
}
}
return 0;
}
| null | null | 198,743
|
103249297125021918182204154082572508861
| 50
|
Fix frame traversal for __gc handler frames.
Reported by Changochen.
|
other
|
qemu
|
98a8cc741dad9cb4738f81a994bcf8d77d619152
| 1
|
static uint64_t zynq_slcr_compute_pll(uint64_t input, uint32_t ctrl_reg)
{
uint32_t mult = ((ctrl_reg & R_xxx_PLL_CTRL_PLL_FPDIV_MASK) >>
R_xxx_PLL_CTRL_PLL_FPDIV_SHIFT);
/* first, check if pll is bypassed */
if (ctrl_reg & R_xxx_PLL_CTRL_PLL_BYPASS_FORCE_MASK) {
return input;
}
/* is pll disabled ? */
if (ctrl_reg & (R_xxx_PLL_CTRL_PLL_RESET_MASK |
R_xxx_PLL_CTRL_PLL_PWRDWN_MASK)) {
return 0;
}
/* frequency multiplier -> period division */
return input / mult;
}
| null | null | 198,798
|
268699829089390686182525661231401074307
| 19
|
hw/misc/zynq_slcr: Avoid #DIV/0! error
A malicious user can set the feedback divisor for the PLLs
to zero, triggering a floating-point exception (SIGFPE).
As the datasheet [*] is not clear how hardware behaves
when these bits are zeroes, use the maximum divisor
possible (128) to avoid the software FPE.
[*] Zynq-7000 TRM, UG585 (v1.12.2)
B.28 System Level Control Registers (slcr)
-> "Register (slcr) ARM_PLL_CTRL"
25.10.4 PLLs
-> "Software-Controlled PLL Update"
Fixes: 38867cb7ec9 ("hw/misc/zynq_slcr: add clock generation for uarts")
Reported-by: Gaoning Pan <[email protected]>
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Reviewed-by: Alistair Francis <[email protected]>
Reviewed-by: Edgar E. Iglesias <[email protected]>
Reviewed-by: Damien Hedde <[email protected]>
Message-id: [email protected]
Signed-off-by: Peter Maydell <[email protected]>
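Since the guest controls the feedback-divisor field, the division needs a guard. A sketch that substitutes the maximum ratio the 7-bit field can express, as the message suggests; the field-length macro name is an assumption:

    /* A zero feedback divisor would trap with SIGFPE below; treat it
     * as the largest ratio representable in the 7-bit FPDIV field.
     * (hedged sketch; macro name assumed) */
    if (!mult) {
        mult = 1 << R_xxx_PLL_CTRL_PLL_FPDIV_LENGTH;    /* i.e. 128 */
    }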
|
other
|
radare2
|
0a557045476a2969c7079aec9eeb29d02f2809c6
| 1
|
RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) {
if (!bin->entry_table) {
return NULL;
}
RList *entries = r_list_newf (free);
if (!entries) {
return NULL;
}
RList *segments = r_bin_ne_get_segments (bin);
if (!segments) {
r_list_free (entries);
return NULL;
}
if (bin->ne_header->csEntryPoint) {
RBinAddr *entry = R_NEW0 (RBinAddr);
if (!entry) {
r_list_free (entries);
return NULL;
}
entry->bits = 16;
ut32 entry_cs = bin->ne_header->csEntryPoint;
RBinSection *s = r_list_get_n (segments, entry_cs - 1);
entry->paddr = bin->ne_header->ipEntryPoint + (s? s->paddr: 0);
r_list_append (entries, entry);
}
int off = 0;
size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset;
while (off < bin->ne_header->EntryTableLength) {
if (tableat + off >= r_buf_size (bin->buf)) {
break;
}
ut8 bundle_length = *(ut8 *)(bin->entry_table + off);
if (!bundle_length) {
break;
}
off++;
ut8 bundle_type = *(ut8 *)(bin->entry_table + off);
off++;
int i;
for (i = 0; i < bundle_length; i++) {
if (tableat + off + 4 >= r_buf_size (bin->buf)) {
break;
}
RBinAddr *entry = R_NEW0 (RBinAddr);
if (!entry) {
r_list_free (entries);
return NULL;
}
off++;
if (!bundle_type) { // Skip
off--;
free (entry);
break;
} else if (bundle_type == 0xff) { // moveable
off += 2;
ut8 segnum = *(bin->entry_table + off);
off++;
ut16 segoff = *(ut16 *)(bin->entry_table + off);
if (segnum > 0) {
entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff;
}
} else { // Fixed
if (bundle_type < bin->ne_header->SegCount) {
entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset
* bin->alignment + *(ut16 *)(bin->entry_table + off);
}
}
off += 2;
r_list_append (entries, entry);
}
}
r_list_free (segments);
bin->entries = entries;
return entries;
}
| null | null | 198,927
|
231378592395081525233262099532340458175
| 76
|
Fix oobread and unaligned casts in the NE entrypoint logic ##crash
* Reported by @hmsec via huntr.dev
* Reproducer: nepocaligns
* BountyID: ec538fa4-06c6-4050-a141-f60153ddeaac
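The unaligned *(ut16 *) casts and the unchecked off arithmetic above are exactly what the fix targets. A sketch of the hardened read for the moveable-bundle case, going through r_util's r_read_le16(); placement and the surrounding check are assumptions:

		// Refuse to read past the entry table, and avoid an
		// unaligned ut16 load by using r_read_le16().
		if (off + 2 > bin->ne_header->EntryTableLength) {
			free (entry);
			break;
		}
		ut16 segoff = r_read_le16 (bin->entry_table + off);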
|
other
|
swtpm
|
9f740868fc36761de27df3935513bdebf8852d19
| 1
|
SWTPM_NVRAM_CheckHeader(unsigned char *data, uint32_t length,
uint32_t *dataoffset, uint16_t *hdrflags,
uint8_t *hdrversion, bool quiet)
{
blobheader *bh = (blobheader *)data;
if (length < sizeof(bh)) {
if (!quiet)
logprintf(STDERR_FILENO,
"not enough bytes for header: %u\n", length);
return TPM_BAD_PARAMETER;
}
if (ntohl(bh->totlen) != length) {
if (!quiet)
logprintf(STDERR_FILENO,
"broken header: bh->totlen %u != %u\n",
htonl(bh->totlen), length);
return TPM_BAD_PARAMETER;
}
if (bh->min_version > BLOB_HEADER_VERSION) {
if (!quiet)
logprintf(STDERR_FILENO,
"Minimum required version for the blob is %d, we "
"only support version %d\n", bh->min_version,
BLOB_HEADER_VERSION);
return TPM_BAD_VERSION;
}
*hdrversion = bh->version;
*dataoffset = ntohs(bh->hdrsize);
*hdrflags = ntohs(bh->flags);
return TPM_SUCCESS;
}
| null | null | 198,983
|
13496292045022689295589483997516441609
| 36
|
swtpm: Check header size indicator against expected size (CID 375869)
This fix addresses Coverity issue CID 375869.
Check the header size indicated in the header of the state against the
expected size and return an error code in case the header size indicator
is different. There was only one header size so far since blobheader was
introduced, so we don't need to deal with different sizes.
Without this fix, a specially crafted header could have caused out-of-bounds
accesses on the byte array containing the swtpm's state.
Signed-off-by: Stefan Berger <[email protected]>
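A sketch of the added comparison, reusing the error-reporting style of the function above; the error-code choice and message wording are assumptions:

    /* Only one header layout has ever existed, so the size the blob
     * declares for its own header must match it exactly. */
    if (ntohs(bh->hdrsize) != sizeof(blobheader)) {
        if (!quiet)
            logprintf(STDERR_FILENO,
                      "unexpected header size: %u != %zu\n",
                      ntohs(bh->hdrsize), sizeof(blobheader));
        return TPM_BAD_PARAMETER;
    }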
|
other
|
linux
|
8423f0b6d513b259fdab9c9bf4aaa6188d054c2d
| 1
|
static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
{
int err = 0;
unsigned int saved_f_flags;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
snd_pcm_format_t format;
unsigned long width;
size_t size;
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
if (substream != NULL) {
runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
goto __direct;
err = snd_pcm_oss_make_ready(substream);
if (err < 0)
return err;
atomic_inc(&runtime->oss.rw_ref);
if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
atomic_dec(&runtime->oss.rw_ref);
return -ERESTARTSYS;
}
format = snd_pcm_oss_format_from(runtime->oss.format);
width = snd_pcm_format_physical_width(format);
if (runtime->oss.buffer_used > 0) {
#ifdef OSS_DEBUG
pcm_dbg(substream->pcm, "sync: buffer_used\n");
#endif
size = (8 * (runtime->oss.period_bytes - runtime->oss.buffer_used) + 7) / width;
snd_pcm_format_set_silence(format,
runtime->oss.buffer + runtime->oss.buffer_used,
size);
err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
if (err < 0)
goto unlock;
} else if (runtime->oss.period_ptr > 0) {
#ifdef OSS_DEBUG
pcm_dbg(substream->pcm, "sync: period_ptr\n");
#endif
size = runtime->oss.period_bytes - runtime->oss.period_ptr;
snd_pcm_format_set_silence(format,
runtime->oss.buffer,
size * 8 / width);
err = snd_pcm_oss_sync1(substream, size);
if (err < 0)
goto unlock;
}
/*
	 * The ALSA period might be a bit larger than the OSS one.
	 * Fill the remaining portion of the ALSA period with zeros.
*/
size = runtime->control->appl_ptr % runtime->period_size;
if (size > 0) {
size = runtime->period_size - size;
if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED)
snd_pcm_lib_write(substream, NULL, size);
else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
snd_pcm_lib_writev(substream, NULL, size);
}
unlock:
mutex_unlock(&runtime->oss.params_lock);
atomic_dec(&runtime->oss.rw_ref);
if (err < 0)
return err;
/*
* finish sync: drain the buffer
*/
__direct:
saved_f_flags = substream->f_flags;
substream->f_flags &= ~O_NONBLOCK;
err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
substream->f_flags = saved_f_flags;
if (err < 0)
return err;
mutex_lock(&runtime->oss.params_lock);
runtime->oss.prepare = 1;
mutex_unlock(&runtime->oss.params_lock);
}
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
if (substream != NULL) {
err = snd_pcm_oss_make_ready(substream);
if (err < 0)
return err;
runtime = substream->runtime;
err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
if (err < 0)
return err;
mutex_lock(&runtime->oss.params_lock);
runtime->oss.buffer_used = 0;
runtime->oss.prepare = 1;
mutex_unlock(&runtime->oss.params_lock);
}
return 0;
}
| null | null | 199,159
|
77235514643827032938109375026312253290
| 96
|
ALSA: pcm: oss: Fix race at SNDCTL_DSP_SYNC
There is a small race window at snd_pcm_oss_sync() that is called from
OSS PCM SNDCTL_DSP_SYNC ioctl; namely the function calls
snd_pcm_oss_make_ready() at first, then takes the params_lock mutex
for the rest. When the stream is set up again by another thread
between them, it leads to inconsistency, and may result in unexpected
results such as NULL dereference of OSS buffer as a fuzzer spotted
recently.
The fix is simply to cover snd_pcm_oss_make_ready() call into the same
params_lock mutex with snd_pcm_oss_make_ready_locked() variant.
Reported-and-tested-by: butt3rflyh4ck <[email protected]>
Reviewed-by: Jaroslav Kysela <[email protected]>
Cc: <[email protected]>
Link: https://lore.kernel.org/r/CAFcO6XN7JDM4xSXGhtusQfS2mSBcx50VJKwQpCq=WeLt57aaZA@mail.gmail.com
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]>
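The message names the remedy: snd_pcm_oss_make_ready_locked(). A sketch of the reordered playback path, taking params_lock first and then calling the _locked variant, so no other thread can re-set-up the stream in between:

		atomic_inc(&runtime->oss.rw_ref);
		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
			atomic_dec(&runtime->oss.rw_ref);
			return -ERESTARTSYS;
		}
		/* now guaranteed to see a consistent stream setup */
		err = snd_pcm_oss_make_ready_locked(substream);
		if (err < 0)
			goto unlock;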
|
other
|
linux
|
233087ca063686964a53c829d547c7571e3f67bf
| 1
|
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long param)
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
int i;
int ret;
int size;
union inparam {
struct floppy_struct g; /* geometry */
struct format_descr f;
struct floppy_max_errors max_errors;
struct floppy_drive_params dp;
} inparam; /* parameters coming from user space */
const void *outparam; /* parameters passed back to user space */
/* convert compatibility eject ioctls into floppy eject ioctl.
* We do this in order to provide a means to eject floppy disks before
* installing the new fdutils package */
if (cmd == CDROMEJECT || /* CD-ROM eject */
cmd == 0x6470) { /* SunOS floppy eject */
DPRINT("obsolete eject ioctl\n");
DPRINT("please use floppycontrol --eject\n");
cmd = FDEJECT;
}
if (!((cmd & 0xff00) == 0x0200))
return -EINVAL;
/* convert the old style command into a new style command */
ret = normalize_ioctl(&cmd, &size);
if (ret)
return ret;
/* permission checks */
if (((cmd & 0x40) && !(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) ||
((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
if (WARN_ON(size < 0 || size > sizeof(inparam)))
return -EINVAL;
/* copyin */
memset(&inparam, 0, sizeof(inparam));
if (_IOC_DIR(cmd) & _IOC_WRITE) {
ret = fd_copyin((void __user *)param, &inparam, size);
if (ret)
return ret;
}
switch (cmd) {
case FDEJECT:
if (drive_state[drive].fd_ref != 1)
/* somebody else has this drive open */
return -EBUSY;
if (lock_fdc(drive))
return -EINTR;
/* do the actual eject. Fails on
* non-Sparc architectures */
ret = fd_eject(UNIT(drive));
set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags);
set_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
process_fd_request();
return ret;
case FDCLRPRM:
if (lock_fdc(drive))
return -EINTR;
current_type[drive] = NULL;
floppy_sizes[drive] = MAX_DISK_SIZE << 1;
drive_state[drive].keep_data = 0;
return invalidate_drive(bdev);
case FDSETPRM:
case FDDEFPRM:
return set_geometry(cmd, &inparam.g, drive, type, bdev);
case FDGETPRM:
ret = get_floppy_geometry(drive, type,
(struct floppy_struct **)&outparam);
if (ret)
return ret;
memcpy(&inparam.g, outparam,
offsetof(struct floppy_struct, name));
outparam = &inparam.g;
break;
case FDMSGON:
drive_params[drive].flags |= FTD_MSG;
return 0;
case FDMSGOFF:
drive_params[drive].flags &= ~FTD_MSG;
return 0;
case FDFMTBEG:
if (lock_fdc(drive))
return -EINTR;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
ret = drive_state[drive].flags;
process_fd_request();
if (ret & FD_VERIFY)
return -ENODEV;
if (!(ret & FD_DISK_WRITABLE))
return -EROFS;
return 0;
case FDFMTTRK:
if (drive_state[drive].fd_ref != 1)
return -EBUSY;
return do_format(drive, &inparam.f);
case FDFMTEND:
case FDFLUSH:
if (lock_fdc(drive))
return -EINTR;
return invalidate_drive(bdev);
case FDSETEMSGTRESH:
drive_params[drive].max_errors.reporting = (unsigned short)(param & 0x0f);
return 0;
case FDGETMAXERRS:
outparam = &drive_params[drive].max_errors;
break;
case FDSETMAXERRS:
drive_params[drive].max_errors = inparam.max_errors;
break;
case FDGETDRVTYP:
outparam = drive_name(type, drive);
SUPBOUND(size, strlen((const char *)outparam) + 1);
break;
case FDSETDRVPRM:
if (!valid_floppy_drive_params(inparam.dp.autodetect,
inparam.dp.native_format))
return -EINVAL;
drive_params[drive] = inparam.dp;
break;
case FDGETDRVPRM:
outparam = &drive_params[drive];
break;
case FDPOLLDRVSTAT:
if (lock_fdc(drive))
return -EINTR;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
process_fd_request();
fallthrough;
case FDGETDRVSTAT:
outparam = &drive_state[drive];
break;
case FDRESET:
return user_reset_fdc(drive, (int)param, true);
case FDGETFDCSTAT:
outparam = &fdc_state[FDC(drive)];
break;
case FDWERRORCLR:
memset(&write_errors[drive], 0, sizeof(write_errors[drive]));
return 0;
case FDWERRORGET:
outparam = &write_errors[drive];
break;
case FDRAWCMD:
if (type)
return -EINVAL;
if (lock_fdc(drive))
return -EINTR;
set_floppy(drive);
i = raw_cmd_ioctl(cmd, (void __user *)param);
if (i == -EINTR)
return -EINTR;
process_fd_request();
return i;
case FDTWADDLE:
if (lock_fdc(drive))
return -EINTR;
twaddle(current_fdc, current_drive);
process_fd_request();
return 0;
default:
return -EINVAL;
}
if (_IOC_DIR(cmd) & _IOC_READ)
return fd_copyout((void __user *)param, outparam, size);
return 0;
}
| null | null | 199,681
|
78370347980706037992860663978546029725
| 181
|
floppy: disable FDRAWCMD by default
Minh Yuan reported a concurrency use-after-free issue in the floppy code
between raw_cmd_ioctl and seek_interrupt.
[ It turns out this has been around, and that others have reported the
KASAN splats over the years, but Minh Yuan had a reproducer for it and
so gets primary credit for reporting it for this fix - Linus ]
The problem is, this driver tends to break very easily and nowadays,
nobody is expected to use FDRAWCMD anyway since it was used to
manipulate non-standard formats. The risk of breaking the driver is
higher than the risk presented by this race, and accessing the device
requires privileges anyway.
Let's just add a config option to completely disable this ioctl and
leave it disabled by default. Distros shouldn't use it, and only those
running on antique hardware might need to enable it.
Link: https://lore.kernel.org/all/[email protected]/
Link: https://lore.kernel.org/lkml/CAKcFiNC=MfYVW-Jt9A3=FPJpTwCD2PL_ULNCpsCVE5s8ZeBQgQ@mail.gmail.com
Link: https://lore.kernel.org/all/CAEAjamu1FRhz6StCe_55XY5s389ZP_xmCF69k987En+1z53=eg@mail.gmail.com
Reported-by: Minh Yuan <[email protected]>
Reported-by: [email protected]
Reported-by: cruise k <[email protected]>
Reported-by: Kyungtae Kim <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Tested-by: Denis Efremov <[email protected]>
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
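A sketch of gating the ioctl behind the new default-off Kconfig symbol named in this commit; upstream compiles the raw-command handlers out entirely, so the inline IS_ENABLED() form here is a simplification:

	case FDRAWCMD:
		if (!IS_ENABLED(CONFIG_BLK_DEV_FD_RAWCMD))
			return -EOPNOTSUPP;	/* disabled by default */
		if (type)
			return -EINVAL;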
|
other
|