| project (string) | commit_id (string) | target (int64) | func (string) | cwe (string) | big_vul_idx (string) | idx (int64) | hash (string) | size (float64) | message (string) | dataset (string) |
|---|---|---|---|---|---|---|---|---|---|---|
| contiki-ng | a4597001d50a04f4b9c78f323ba731e2f979802c | 1 |
ns_input(void)
{
uint8_t flags = 0;
LOG_INFO("Received NS from ");
LOG_INFO_6ADDR(&UIP_IP_BUF->srcipaddr);
LOG_INFO_(" to ");
LOG_INFO_6ADDR(&UIP_IP_BUF->destipaddr);
LOG_INFO_(" with target address ");
LOG_INFO_6ADDR((uip_ipaddr_t *) (&UIP_ND6_NS_BUF->tgtipaddr));
LOG_INFO_("\n");
UIP_STAT(++uip_stat.nd6.recv);
#if UIP_CONF_IPV6_CHECKS
if((UIP_IP_BUF->ttl != UIP_ND6_HOP_LIMIT) ||
(uip_is_addr_mcast(&UIP_ND6_NS_BUF->tgtipaddr)) ||
(UIP_ICMP_BUF->icode != 0)) {
LOG_ERR("NS received is bad\n");
goto discard;
}
#endif /* UIP_CONF_IPV6_CHECKS */
/* Options processing */
nd6_opt_llao = NULL;
nd6_opt_offset = UIP_ND6_NS_LEN;
while(uip_l3_icmp_hdr_len + nd6_opt_offset < uip_len) {
#if UIP_CONF_IPV6_CHECKS
if(ND6_OPT_HDR_BUF(nd6_opt_offset)->len == 0) {
LOG_ERR("NS received is bad\n");
goto discard;
}
#endif /* UIP_CONF_IPV6_CHECKS */
switch (ND6_OPT_HDR_BUF(nd6_opt_offset)->type) {
case UIP_ND6_OPT_SLLAO:
nd6_opt_llao = &uip_buf[uip_l3_icmp_hdr_len + nd6_opt_offset];
#if UIP_CONF_IPV6_CHECKS
/* There must be NO option in a DAD NS */
if(uip_is_addr_unspecified(&UIP_IP_BUF->srcipaddr)) {
LOG_ERR("NS received is bad\n");
goto discard;
} else {
#endif /*UIP_CONF_IPV6_CHECKS */
uip_lladdr_t lladdr_aligned;
extract_lladdr_from_llao_aligned(&lladdr_aligned);
nbr = uip_ds6_nbr_lookup(&UIP_IP_BUF->srcipaddr);
if(nbr == NULL) {
uip_ds6_nbr_add(&UIP_IP_BUF->srcipaddr, &lladdr_aligned,
0, NBR_STALE, NBR_TABLE_REASON_IPV6_ND, NULL);
} else {
const uip_lladdr_t *lladdr = uip_ds6_nbr_get_ll(nbr);
if(lladdr == NULL) {
goto discard;
}
if(memcmp(&nd6_opt_llao[UIP_ND6_OPT_DATA_OFFSET],
lladdr, UIP_LLADDR_LEN) != 0) {
if(uip_ds6_nbr_update_ll(&nbr,
(const uip_lladdr_t *)&lladdr_aligned)
< 0) {
/* failed to update the lladdr */
goto discard;
}
nbr->state = NBR_STALE;
} else {
if(nbr->state == NBR_INCOMPLETE) {
nbr->state = NBR_STALE;
}
}
}
#if UIP_CONF_IPV6_CHECKS
}
#endif /*UIP_CONF_IPV6_CHECKS */
break;
default:
LOG_WARN("ND option not supported in NS");
break;
}
nd6_opt_offset += (ND6_OPT_HDR_BUF(nd6_opt_offset)->len << 3);
}
addr = uip_ds6_addr_lookup(&UIP_ND6_NS_BUF->tgtipaddr);
if(addr != NULL) {
if(uip_is_addr_unspecified(&UIP_IP_BUF->srcipaddr)) {
/* DAD CASE */
#if UIP_ND6_DEF_MAXDADNS > 0
#if UIP_CONF_IPV6_CHECKS
if(!uip_is_addr_solicited_node(&UIP_IP_BUF->destipaddr)) {
LOG_ERR("NS received is bad\n");
goto discard;
}
#endif /* UIP_CONF_IPV6_CHECKS */
if(addr->state != ADDR_TENTATIVE) {
uip_create_linklocal_allnodes_mcast(&UIP_IP_BUF->destipaddr);
uip_ds6_select_src(&UIP_IP_BUF->srcipaddr, &UIP_IP_BUF->destipaddr);
flags = UIP_ND6_NA_FLAG_OVERRIDE;
goto create_na;
} else {
/** \todo if I sent a NS before him, I win */
uip_ds6_dad_failed(addr);
goto discard;
}
#else /* UIP_ND6_DEF_MAXDADNS > 0 */
goto discard; /* DAD CASE */
#endif /* UIP_ND6_DEF_MAXDADNS > 0 */
}
#if UIP_CONF_IPV6_CHECKS
if(uip_ds6_is_my_addr(&UIP_IP_BUF->srcipaddr)) {
/**
* \NOTE do we do something here? we both are using the same address.
* If we are doing dad, we could cancel it, though we should receive a
* NA in response of DAD NS we sent, hence DAD will fail anyway. If we
* were not doing DAD, it means there is a duplicate in the network!
*/
LOG_ERR("NS received is bad\n");
goto discard;
}
#endif /*UIP_CONF_IPV6_CHECKS */
/* Address resolution case */
if(uip_is_addr_solicited_node(&UIP_IP_BUF->destipaddr)) {
uip_ipaddr_copy(&UIP_IP_BUF->destipaddr, &UIP_IP_BUF->srcipaddr);
uip_ipaddr_copy(&UIP_IP_BUF->srcipaddr, &UIP_ND6_NS_BUF->tgtipaddr);
flags = UIP_ND6_NA_FLAG_SOLICITED | UIP_ND6_NA_FLAG_OVERRIDE;
goto create_na;
}
/* NUD CASE */
if(uip_ds6_addr_lookup(&UIP_IP_BUF->destipaddr) == addr) {
uip_ipaddr_copy(&UIP_IP_BUF->destipaddr, &UIP_IP_BUF->srcipaddr);
uip_ipaddr_copy(&UIP_IP_BUF->srcipaddr, &UIP_ND6_NS_BUF->tgtipaddr);
flags = UIP_ND6_NA_FLAG_SOLICITED | UIP_ND6_NA_FLAG_OVERRIDE;
goto create_na;
} else {
#if UIP_CONF_IPV6_CHECKS
LOG_ERR("NS received is bad\n");
goto discard;
#endif /* UIP_CONF_IPV6_CHECKS */
}
} else {
goto discard;
}
create_na:
/* If the node is a router it should set R flag in NAs */
#if UIP_CONF_ROUTER
flags = flags | UIP_ND6_NA_FLAG_ROUTER;
#endif
uipbuf_clear();
UIP_IP_BUF->vtc = 0x60;
UIP_IP_BUF->tcflow = 0;
UIP_IP_BUF->flow = 0;
uipbuf_set_len_field(UIP_IP_BUF, UIP_ICMPH_LEN + UIP_ND6_NA_LEN + UIP_ND6_OPT_LLAO_LEN);
UIP_IP_BUF->proto = UIP_PROTO_ICMP6;
UIP_IP_BUF->ttl = UIP_ND6_HOP_LIMIT;
UIP_ICMP_BUF->type = ICMP6_NA;
UIP_ICMP_BUF->icode = 0;
UIP_ND6_NA_BUF->flagsreserved = flags;
memcpy(&UIP_ND6_NA_BUF->tgtipaddr, &addr->ipaddr, sizeof(uip_ipaddr_t));
create_llao(&uip_buf[uip_l3_icmp_hdr_len + UIP_ND6_NA_LEN],
UIP_ND6_OPT_TLLAO);
UIP_ICMP_BUF->icmpchksum = 0;
UIP_ICMP_BUF->icmpchksum = ~uip_icmp6chksum();
uipbuf_set_len(UIP_IPH_LEN + UIP_ICMPH_LEN + UIP_ND6_NA_LEN + UIP_ND6_OPT_LLAO_LEN);
UIP_STAT(++uip_stat.nd6.sent);
LOG_INFO("Sending NA to ");
LOG_INFO_6ADDR(&UIP_IP_BUF->destipaddr);
LOG_INFO_(" from ");
LOG_INFO_6ADDR(&UIP_IP_BUF->srcipaddr);
LOG_INFO_(" with target address ");
LOG_INFO_6ADDR(&UIP_ND6_NA_BUF->tgtipaddr);
LOG_INFO_("\n");
return;
discard:
uipbuf_clear();
return;
}
| null | null | 195,674 | 84843070962167005407140386244838304136 | 183 | Check whether there is enough space for ND6 option headers when processing incoming packets. | other |
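The message above describes the patch for the option-processing loop in `ns_input()` shown in this row's `func` cell. A minimal sketch of such a check, reusing names from that listing (`UIP_ND6_OPT_HDR_LEN` and the exact placement are assumptions, not the verbatim patch):

```c
/* Sketch: before dereferencing an option header at nd6_opt_offset, make
 * sure a complete header actually fits in the received packet.
 * UIP_ND6_OPT_HDR_LEN is assumed to be the option header size. */
while(uip_l3_icmp_hdr_len + nd6_opt_offset < uip_len) {
  if(uip_len < uip_l3_icmp_hdr_len + nd6_opt_offset + UIP_ND6_OPT_HDR_LEN) {
    LOG_ERR("NS received is bad\n");
    goto discard;
  }
  /* ... option parsing as in the listing above ... */
}
```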
| libde265 | 697aa4f7c774abd6374596e6707a6f4f54265355 | 1 |
void generate_inter_prediction_samples(base_context* ctx,
const slice_segment_header* shdr,
de265_image* img,
int xC,int yC,
int xB,int yB,
int nCS, int nPbW,int nPbH,
const PBMotion* vi)
{
int xP = xC+xB;
int yP = yC+yB;
void* pixels[3];
int stride[3];
const pic_parameter_set* pps = shdr->pps.get();
const seq_parameter_set* sps = pps->sps.get();
const int SubWidthC = sps->SubWidthC;
const int SubHeightC = sps->SubHeightC;
pixels[0] = img->get_image_plane_at_pos_any_depth(0,xP,yP);
stride[0] = img->get_image_stride(0);
pixels[1] = img->get_image_plane_at_pos_any_depth(1,xP/SubWidthC,yP/SubHeightC);
stride[1] = img->get_image_stride(1);
pixels[2] = img->get_image_plane_at_pos_any_depth(2,xP/SubWidthC,yP/SubHeightC);
stride[2] = img->get_image_stride(2);
ALIGNED_16(int16_t) predSamplesL [2 /* LX */][MAX_CU_SIZE* MAX_CU_SIZE];
ALIGNED_16(int16_t) predSamplesC[2 /* chroma */ ][2 /* LX */][MAX_CU_SIZE* MAX_CU_SIZE];
//int xP = xC+xB;
//int yP = yC+yB;
int predFlag[2];
predFlag[0] = vi->predFlag[0];
predFlag[1] = vi->predFlag[1];
const int bit_depth_L = sps->BitDepth_Y;
const int bit_depth_C = sps->BitDepth_C;
// Some encoders use bi-prediction with two similar MVs.
// Identify this case and use only one MV.
// do this only without weighted prediction, because the weights/offsets may be different
if (pps->weighted_pred_flag==0) {
if (predFlag[0] && predFlag[1]) {
if (vi->mv[0].x == vi->mv[1].x &&
vi->mv[0].y == vi->mv[1].y &&
shdr->RefPicList[0][vi->refIdx[0]] ==
shdr->RefPicList[1][vi->refIdx[1]]) {
predFlag[1] = 0;
}
}
}
for (int l=0;l<2;l++) {
if (predFlag[l]) {
// 8.5.3.2.1
if (vi->refIdx[l] >= MAX_NUM_REF_PICS) {
img->integrity = INTEGRITY_DECODING_ERRORS;
ctx->add_warning(DE265_WARNING_NONEXISTING_REFERENCE_PICTURE_ACCESSED, false);
return;
}
const de265_image* refPic = ctx->get_image(shdr->RefPicList[l][vi->refIdx[l]]);
logtrace(LogMotion, "refIdx: %d -> dpb[%d]\n", vi->refIdx[l], shdr->RefPicList[l][vi->refIdx[l]]);
if (!refPic || refPic->PicState == UnusedForReference) {
img->integrity = INTEGRITY_DECODING_ERRORS;
ctx->add_warning(DE265_WARNING_NONEXISTING_REFERENCE_PICTURE_ACCESSED, false);
// TODO: fill predSamplesC with black or grey
}
else {
// 8.5.3.2.2
logtrace(LogMotion,"do MC: L%d,MV=%d;%d RefPOC=%d\n",
l,vi->mv[l].x,vi->mv[l].y,refPic->PicOrderCntVal);
// TODO: must the predSamples stride really be nCS, or can it be something smaller like nPbW?
if (img->high_bit_depth(0)) {
mc_luma(ctx, sps, vi->mv[l].x, vi->mv[l].y, xP,yP,
predSamplesL[l],nCS,
(const uint16_t*)refPic->get_image_plane(0),
refPic->get_luma_stride(), nPbW,nPbH, bit_depth_L);
}
else {
mc_luma(ctx, sps, vi->mv[l].x, vi->mv[l].y, xP,yP,
predSamplesL[l],nCS,
(const uint8_t*)refPic->get_image_plane(0),
refPic->get_luma_stride(), nPbW,nPbH, bit_depth_L);
}
if (img->high_bit_depth(0)) {
mc_chroma(ctx, sps, vi->mv[l].x, vi->mv[l].y, xP,yP,
predSamplesC[0][l],nCS, (const uint16_t*)refPic->get_image_plane(1),
refPic->get_chroma_stride(), nPbW/SubWidthC,nPbH/SubHeightC, bit_depth_C);
mc_chroma(ctx, sps, vi->mv[l].x, vi->mv[l].y, xP,yP,
predSamplesC[1][l],nCS, (const uint16_t*)refPic->get_image_plane(2),
refPic->get_chroma_stride(), nPbW/SubWidthC,nPbH/SubHeightC, bit_depth_C);
}
else {
mc_chroma(ctx, sps, vi->mv[l].x, vi->mv[l].y, xP,yP,
predSamplesC[0][l],nCS, (const uint8_t*)refPic->get_image_plane(1),
refPic->get_chroma_stride(), nPbW/SubWidthC,nPbH/SubHeightC, bit_depth_C);
mc_chroma(ctx, sps, vi->mv[l].x, vi->mv[l].y, xP,yP,
predSamplesC[1][l],nCS, (const uint8_t*)refPic->get_image_plane(2),
refPic->get_chroma_stride(), nPbW/SubWidthC,nPbH/SubHeightC, bit_depth_C);
}
}
}
}
// weighted sample prediction (8.5.3.2.3)
const int shift1_L = libde265_max(2,14-sps->BitDepth_Y);
const int offset_shift1_L = img->get_sps().WpOffsetBdShiftY;
const int shift1_C = libde265_max(2,14-sps->BitDepth_C);
const int offset_shift1_C = img->get_sps().WpOffsetBdShiftC;
/*
const int shift1_L = 14-img->sps.BitDepth_Y;
const int offset_shift1_L = img->sps.BitDepth_Y-8;
const int shift1_C = 14-img->sps.BitDepth_C;
const int offset_shift1_C = img->sps.BitDepth_C-8;
*/
/*
if (0)
printf("%d/%d %d/%d %d/%d %d/%d\n",
shift1_L,
Nshift1_L,
offset_shift1_L,
Noffset_shift1_L,
shift1_C,
Nshift1_C,
offset_shift1_C,
Noffset_shift1_C);
assert(shift1_L==
Nshift1_L);
assert(offset_shift1_L==
Noffset_shift1_L);
assert(shift1_C==
Nshift1_C);
assert(offset_shift1_C==
Noffset_shift1_C);
*/
logtrace(LogMotion,"predFlags (modified): %d %d\n", predFlag[0], predFlag[1]);
if (shdr->slice_type == SLICE_TYPE_P) {
if (pps->weighted_pred_flag==0) {
if (predFlag[0]==1 && predFlag[1]==0) {
ctx->acceleration.put_unweighted_pred(pixels[0], stride[0],
predSamplesL[0],nCS, nPbW,nPbH, bit_depth_L);
ctx->acceleration.put_unweighted_pred(pixels[1], stride[1],
predSamplesC[0][0],nCS,
nPbW/SubWidthC,nPbH/SubHeightC, bit_depth_C);
ctx->acceleration.put_unweighted_pred(pixels[2], stride[2],
predSamplesC[1][0],nCS,
nPbW/SubWidthC,nPbH/SubHeightC, bit_depth_C);
}
else {
ctx->add_warning(DE265_WARNING_BOTH_PREDFLAGS_ZERO, false);
img->integrity = INTEGRITY_DECODING_ERRORS;
}
}
else {
// weighted prediction
if (predFlag[0]==1 && predFlag[1]==0) {
int refIdx0 = vi->refIdx[0];
int luma_log2WD = shdr->luma_log2_weight_denom + shift1_L;
int chroma_log2WD = shdr->ChromaLog2WeightDenom + shift1_C;
int luma_w0 = shdr->LumaWeight[0][refIdx0];
int luma_o0 = shdr->luma_offset[0][refIdx0] * (1<<(offset_shift1_L));
int chroma0_w0 = shdr->ChromaWeight[0][refIdx0][0];
int chroma0_o0 = shdr->ChromaOffset[0][refIdx0][0] * (1<<(offset_shift1_C));
int chroma1_w0 = shdr->ChromaWeight[0][refIdx0][1];
int chroma1_o0 = shdr->ChromaOffset[0][refIdx0][1] * (1<<(offset_shift1_C));
logtrace(LogMotion,"weighted-0 [%d] %d %d %d %dx%d\n", refIdx0, luma_log2WD-6,luma_w0,luma_o0,nPbW,nPbH);
ctx->acceleration.put_weighted_pred(pixels[0], stride[0],
predSamplesL[0],nCS, nPbW,nPbH,
luma_w0, luma_o0, luma_log2WD, bit_depth_L);
ctx->acceleration.put_weighted_pred(pixels[1], stride[1],
predSamplesC[0][0],nCS, nPbW/SubWidthC,nPbH/SubHeightC,
chroma0_w0, chroma0_o0, chroma_log2WD, bit_depth_C);
ctx->acceleration.put_weighted_pred(pixels[2], stride[2],
predSamplesC[1][0],nCS, nPbW/SubWidthC,nPbH/SubHeightC,
chroma1_w0, chroma1_o0, chroma_log2WD, bit_depth_C);
}
else {
ctx->add_warning(DE265_WARNING_BOTH_PREDFLAGS_ZERO, false);
img->integrity = INTEGRITY_DECODING_ERRORS;
}
}
}
else {
assert(shdr->slice_type == SLICE_TYPE_B);
if (predFlag[0]==1 && predFlag[1]==1) {
if (pps->weighted_bipred_flag==0) {
//const int shift2 = 15-8; // TODO: real bit depth
//const int offset2 = 1<<(shift2-1);
int16_t* in0 = predSamplesL[0];
int16_t* in1 = predSamplesL[1];
ctx->acceleration.put_weighted_pred_avg(pixels[0], stride[0],
in0,in1, nCS, nPbW, nPbH, bit_depth_L);
int16_t* in00 = predSamplesC[0][0];
int16_t* in01 = predSamplesC[0][1];
int16_t* in10 = predSamplesC[1][0];
int16_t* in11 = predSamplesC[1][1];
ctx->acceleration.put_weighted_pred_avg(pixels[1], stride[1],
in00,in01, nCS,
nPbW/SubWidthC, nPbH/SubHeightC, bit_depth_C);
ctx->acceleration.put_weighted_pred_avg(pixels[2], stride[2],
in10,in11, nCS,
nPbW/SubWidthC, nPbH/SubHeightC, bit_depth_C);
}
else {
// weighted prediction
int refIdx0 = vi->refIdx[0];
int refIdx1 = vi->refIdx[1];
int luma_log2WD = shdr->luma_log2_weight_denom + shift1_L;
int chroma_log2WD = shdr->ChromaLog2WeightDenom + shift1_C;
int luma_w0 = shdr->LumaWeight[0][refIdx0];
int luma_o0 = shdr->luma_offset[0][refIdx0] * (1<<(offset_shift1_L));
int luma_w1 = shdr->LumaWeight[1][refIdx1];
int luma_o1 = shdr->luma_offset[1][refIdx1] * (1<<(offset_shift1_L));
int chroma0_w0 = shdr->ChromaWeight[0][refIdx0][0];
int chroma0_o0 = shdr->ChromaOffset[0][refIdx0][0] * (1<<(offset_shift1_C));
int chroma1_w0 = shdr->ChromaWeight[0][refIdx0][1];
int chroma1_o0 = shdr->ChromaOffset[0][refIdx0][1] * (1<<(offset_shift1_C));
int chroma0_w1 = shdr->ChromaWeight[1][refIdx1][0];
int chroma0_o1 = shdr->ChromaOffset[1][refIdx1][0] * (1<<(offset_shift1_C));
int chroma1_w1 = shdr->ChromaWeight[1][refIdx1][1];
int chroma1_o1 = shdr->ChromaOffset[1][refIdx1][1] * (1<<(offset_shift1_C));
logtrace(LogMotion,"weighted-BI-0 [%d] %d %d %d %dx%d\n", refIdx0, luma_log2WD-6,luma_w0,luma_o0,nPbW,nPbH);
logtrace(LogMotion,"weighted-BI-1 [%d] %d %d %d %dx%d\n", refIdx1, luma_log2WD-6,luma_w1,luma_o1,nPbW,nPbH);
int16_t* in0 = predSamplesL[0];
int16_t* in1 = predSamplesL[1];
ctx->acceleration.put_weighted_bipred(pixels[0], stride[0],
in0,in1, nCS, nPbW, nPbH,
luma_w0,luma_o0,
luma_w1,luma_o1,
luma_log2WD, bit_depth_L);
int16_t* in00 = predSamplesC[0][0];
int16_t* in01 = predSamplesC[0][1];
int16_t* in10 = predSamplesC[1][0];
int16_t* in11 = predSamplesC[1][1];
ctx->acceleration.put_weighted_bipred(pixels[1], stride[1],
in00,in01, nCS, nPbW/SubWidthC, nPbH/SubHeightC,
chroma0_w0,chroma0_o0,
chroma0_w1,chroma0_o1,
chroma_log2WD, bit_depth_C);
ctx->acceleration.put_weighted_bipred(pixels[2], stride[2],
in10,in11, nCS, nPbW/SubWidthC, nPbH/SubHeightC,
chroma1_w0,chroma1_o0,
chroma1_w1,chroma1_o1,
chroma_log2WD, bit_depth_C);
}
}
else if (predFlag[0]==1 || predFlag[1]==1) {
int l = predFlag[0] ? 0 : 1;
if (pps->weighted_bipred_flag==0) {
ctx->acceleration.put_unweighted_pred(pixels[0], stride[0],
predSamplesL[l],nCS, nPbW,nPbH, bit_depth_L);
ctx->acceleration.put_unweighted_pred(pixels[1], stride[1],
predSamplesC[0][l],nCS,
nPbW/SubWidthC,nPbH/SubHeightC, bit_depth_C);
ctx->acceleration.put_unweighted_pred(pixels[2], stride[2],
predSamplesC[1][l],nCS,
nPbW/SubWidthC,nPbH/SubHeightC, bit_depth_C);
}
else {
int refIdx = vi->refIdx[l];
int luma_log2WD = shdr->luma_log2_weight_denom + shift1_L;
int chroma_log2WD = shdr->ChromaLog2WeightDenom + shift1_C;
int luma_w = shdr->LumaWeight[l][refIdx];
int luma_o = shdr->luma_offset[l][refIdx] * (1<<(offset_shift1_L));
int chroma0_w = shdr->ChromaWeight[l][refIdx][0];
int chroma0_o = shdr->ChromaOffset[l][refIdx][0] * (1<<(offset_shift1_C));
int chroma1_w = shdr->ChromaWeight[l][refIdx][1];
int chroma1_o = shdr->ChromaOffset[l][refIdx][1] * (1<<(offset_shift1_C));
logtrace(LogMotion,"weighted-B-L%d [%d] %d %d %d %dx%d\n", l, refIdx, luma_log2WD-6,luma_w,luma_o,nPbW,nPbH);
ctx->acceleration.put_weighted_pred(pixels[0], stride[0],
predSamplesL[l],nCS, nPbW,nPbH,
luma_w, luma_o, luma_log2WD, bit_depth_L);
ctx->acceleration.put_weighted_pred(pixels[1], stride[1],
predSamplesC[0][l],nCS,
nPbW/SubWidthC,nPbH/SubHeightC,
chroma0_w, chroma0_o, chroma_log2WD, bit_depth_C);
ctx->acceleration.put_weighted_pred(pixels[2], stride[2],
predSamplesC[1][l],nCS,
nPbW/SubWidthC,nPbH/SubHeightC,
chroma1_w, chroma1_o, chroma_log2WD, bit_depth_C);
}
}
else {
// TODO: check why it can actually happen that both predFlags[] are false.
// For now, we ignore this and continue decoding.
ctx->add_warning(DE265_WARNING_BOTH_PREDFLAGS_ZERO, false);
img->integrity = INTEGRITY_DECODING_ERRORS;
}
}
#if defined(DE265_LOG_TRACE) && 0
logtrace(LogTransform,"MC pixels (luma), position %d %d:\n", xP,yP);
for (int y=0;y<nPbH;y++) {
logtrace(LogTransform,"MC-y-%d-%d ",xP,yP+y);
for (int x=0;x<nPbW;x++) {
logtrace(LogTransform,"*%02x ", pixels[0][x+y*stride[0]]);
}
logtrace(LogTransform,"*\n");
}
logtrace(LogTransform,"MC pixels (chroma cb), position %d %d:\n", xP/2,yP/2);
for (int y=0;y<nPbH/2;y++) {
logtrace(LogTransform,"MC-cb-%d-%d ",xP/2,yP/2+y);
for (int x=0;x<nPbW/2;x++) {
logtrace(LogTransform,"*%02x ", pixels[1][x+y*stride[1]]);
}
logtrace(LogTransform,"*\n");
}
logtrace(LogTransform,"MC pixels (chroma cr), position %d %d:\n", xP/2,yP/2);
for (int y=0;y<nPbH/2;y++) {
logtrace(LogTransform,"MC-cr-%d-%d ",xP/2,yP/2+y);
for (int x=0;x<nPbW/2;x++) {
logtrace(LogTransform,"*%02x ", pixels[2][x+y*stride[2]]);
}
logtrace(LogTransform,"*\n");
}
#endif
}
| null | null | 195,682 | 274813485195215554464834940462882687019 | 383 | fix MC with HDR chroma, but SDR luma (#301) | other |
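The #301 title refers to the chroma branch in `generate_inter_prediction_samples()` above, which selects the 16-bit chroma motion-compensation path with `img->high_bit_depth(0)` — the luma plane's depth. A one-line sketch of the described fix, assuming plane index 1 stands for the chroma planes (not the verbatim patch):

```c
/* Sketch: pick the 16-bit chroma motion-compensation path from the
 * chroma plane's bit depth (plane 1), not the luma plane's (plane 0). */
if (img->high_bit_depth(1)) {
  /* 16-bit mc_chroma() calls as in the listing above */
} else {
  /* 8-bit mc_chroma() calls */
}
```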
| mruby | a4d97934d51cb88954cc49161dc1d151f64afb6b | 1 |
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
{
/* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
const mrb_irep *irep = proc->body.irep;
const mrb_pool_value *pool = irep->pool;
const mrb_sym *syms = irep->syms;
mrb_code insn;
int ai = mrb_gc_arena_save(mrb);
struct mrb_jmpbuf *prev_jmp = mrb->jmp;
struct mrb_jmpbuf c_jmp;
uint32_t a;
uint16_t b;
uint16_t c;
mrb_sym mid;
const struct mrb_irep_catch_handler *ch;
#ifdef DIRECT_THREADED
static const void * const optable[] = {
#define OPCODE(x,_) &&L_OP_ ## x,
#include "mruby/ops.h"
#undef OPCODE
};
#endif
mrb_bool exc_catched = FALSE;
RETRY_TRY_BLOCK:
MRB_TRY(&c_jmp) {
if (exc_catched) {
exc_catched = FALSE;
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK)
goto L_BREAK;
goto L_RAISE;
}
mrb->jmp = &c_jmp;
mrb_vm_ci_proc_set(mrb->c->ci, proc);
#define regs (mrb->c->ci->stack)
INIT_DISPATCH {
CASE(OP_NOP, Z) {
/* do nothing */
NEXT;
}
CASE(OP_MOVE, BB) {
regs[a] = regs[b];
NEXT;
}
CASE(OP_LOADL, BB) {
switch (pool[b].tt) { /* number */
case IREP_TT_INT32:
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
break;
case IREP_TT_INT64:
#if defined(MRB_INT64)
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
#else
#if defined(MRB_64BIT)
if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
}
#endif
goto L_INT_OVERFLOW;
#endif
case IREP_TT_BIGINT:
#ifdef MRB_USE_BIGINT
{
const char *s = pool[b].u.str;
regs[a] = mrb_bint_new_str(mrb, s+2, (mrb_int)s[0], (mrb_int)s[1]);
}
break;
#else
goto L_INT_OVERFLOW;
#endif
#ifndef MRB_NO_FLOAT
case IREP_TT_FLOAT:
regs[a] = mrb_float_value(mrb, pool[b].u.f);
break;
#endif
default:
/* should not happen (tt:string) */
regs[a] = mrb_nil_value();
break;
}
NEXT;
}
CASE(OP_LOADI, BB) {
SET_FIXNUM_VALUE(regs[a], b);
NEXT;
}
CASE(OP_LOADINEG, BB) {
SET_FIXNUM_VALUE(regs[a], -b);
NEXT;
}
CASE(OP_LOADI__1,B) goto L_LOADI;
CASE(OP_LOADI_0,B) goto L_LOADI;
CASE(OP_LOADI_1,B) goto L_LOADI;
CASE(OP_LOADI_2,B) goto L_LOADI;
CASE(OP_LOADI_3,B) goto L_LOADI;
CASE(OP_LOADI_4,B) goto L_LOADI;
CASE(OP_LOADI_5,B) goto L_LOADI;
CASE(OP_LOADI_6,B) goto L_LOADI;
CASE(OP_LOADI_7, B) {
L_LOADI:
SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);
NEXT;
}
CASE(OP_LOADI16, BS) {
SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);
NEXT;
}
CASE(OP_LOADI32, BSS) {
SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));
NEXT;
}
CASE(OP_LOADSYM, BB) {
SET_SYM_VALUE(regs[a], syms[b]);
NEXT;
}
CASE(OP_LOADNIL, B) {
SET_NIL_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADSELF, B) {
regs[a] = regs[0];
NEXT;
}
CASE(OP_LOADT, B) {
SET_TRUE_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADF, B) {
SET_FALSE_VALUE(regs[a]);
NEXT;
}
CASE(OP_GETGV, BB) {
mrb_value val = mrb_gv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETGV, BB) {
mrb_gv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETSV, BB) {
mrb_value val = mrb_vm_special_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETSV, BB) {
mrb_vm_special_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIV, BB) {
regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);
NEXT;
}
CASE(OP_SETIV, BB) {
mrb_iv_set(mrb, regs[0], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETCV, BB) {
mrb_value val;
val = mrb_vm_cv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETCV, BB) {
mrb_vm_cv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIDX, B) {
mrb_value va = regs[a], vb = regs[a+1];
switch (mrb_type(va)) {
case MRB_TT_ARRAY:
if (!mrb_integer_p(vb)) goto getidx_fallback;
regs[a] = mrb_ary_entry(va, mrb_integer(vb));
break;
case MRB_TT_HASH:
va = mrb_hash_get(mrb, va, vb);
regs[a] = va;
break;
case MRB_TT_STRING:
switch (mrb_type(vb)) {
case MRB_TT_INTEGER:
case MRB_TT_STRING:
case MRB_TT_RANGE:
va = mrb_str_aref(mrb, va, vb, mrb_undef_value());
regs[a] = va;
break;
default:
goto getidx_fallback;
}
break;
default:
getidx_fallback:
mid = MRB_OPSYM(aref);
goto L_SEND_SYM;
}
NEXT;
}
CASE(OP_SETIDX, B) {
c = 2;
mid = MRB_OPSYM(aset);
SET_NIL_VALUE(regs[a+3]);
goto L_SENDB_SYM;
}
CASE(OP_GETCONST, BB) {
mrb_value v = mrb_vm_const_get(mrb, syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETCONST, BB) {
mrb_vm_const_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETMCNST, BB) {
mrb_value v = mrb_const_get(mrb, regs[a], syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETMCNST, BB) {
mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETUPVAR, BBB) {
mrb_value *regs_a = regs + a;
struct REnv *e = uvenv(mrb, c);
if (e && b < MRB_ENV_LEN(e)) {
*regs_a = e->stack[b];
}
else {
*regs_a = mrb_nil_value();
}
NEXT;
}
CASE(OP_SETUPVAR, BBB) {
struct REnv *e = uvenv(mrb, c);
if (e) {
mrb_value *regs_a = regs + a;
if (b < MRB_ENV_LEN(e)) {
e->stack[b] = *regs_a;
mrb_write_barrier(mrb, (struct RBasic*)e);
}
}
NEXT;
}
CASE(OP_JMP, S) {
pc += (int16_t)a;
JUMP;
}
CASE(OP_JMPIF, BS) {
if (mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNOT, BS) {
if (!mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNIL, BS) {
if (mrb_nil_p(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPUW, S) {
a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
struct RBreak *brk = (struct RBreak*)mrb->exc;
mrb_value target = mrb_break_value_get(brk);
mrb_assert(mrb_integer_p(target));
a = (uint32_t)mrb_integer(target);
mrb_assert(a >= 0 && a < irep->ilen);
}
CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE);
if (ch) {
/* avoiding a jump from a catch handler into the same handler */
if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a));
}
}
}
CHECKPOINT_END(RBREAK_TAG_JUMP);
mrb->exc = NULL; /* clear break object */
pc = irep->iseq + a;
JUMP;
}
CASE(OP_EXCEPT, B) {
mrb_value exc;
if (mrb->exc == NULL) {
exc = mrb_nil_value();
}
else {
switch (mrb->exc->tt) {
case MRB_TT_BREAK:
case MRB_TT_EXCEPTION:
exc = mrb_obj_value(mrb->exc);
break;
default:
mrb_assert(!"bad mrb_type");
exc = mrb_nil_value();
break;
}
mrb->exc = NULL;
}
regs[a] = exc;
NEXT;
}
CASE(OP_RESCUE, BB) {
mrb_value exc = regs[a]; /* exc on stack */
mrb_value e = regs[b];
struct RClass *ec;
switch (mrb_type(e)) {
case MRB_TT_CLASS:
case MRB_TT_MODULE:
break;
default:
{
mrb_value exc;
exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"class or module required for rescue clause");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
}
ec = mrb_class_ptr(e);
regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));
NEXT;
}
CASE(OP_RAISEIF, B) {
mrb_value exc = regs[a];
if (mrb_break_p(exc)) {
mrb->exc = mrb_obj_ptr(exc);
goto L_BREAK;
}
mrb_exc_set(mrb, exc);
if (mrb->exc) {
goto L_RAISE;
}
NEXT;
}
CASE(OP_SSEND, BBB) {
regs[a] = regs[0];
insn = OP_SEND;
}
goto L_SENDB;
CASE(OP_SSENDB, BBB) {
regs[a] = regs[0];
}
goto L_SENDB;
CASE(OP_SEND, BBB)
goto L_SENDB;
L_SEND_SYM:
c = 1;
/* push nil after arguments */
SET_NIL_VALUE(regs[a+2]);
goto L_SENDB_SYM;
CASE(OP_SENDB, BBB)
L_SENDB:
mid = syms[b];
L_SENDB_SYM:
{
mrb_callinfo *ci = mrb->c->ci;
mrb_method_t m;
struct RClass *cls;
mrb_value recv, blk;
ARGUMENT_NORMALIZE(a, &c, insn);
recv = regs[a];
cls = mrb_class(mrb, recv);
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, c);
if (MRB_METHOD_CFUNC_P(m)) {
if (MRB_METHOD_PROC_P(m)) {
struct RProc *p = MRB_METHOD_PROC(m);
mrb_vm_ci_proc_set(ci, p);
recv = p->body.func(mrb, recv);
}
else {
if (MRB_METHOD_NOARG_P(m)) {
check_method_noarg(mrb, ci);
}
recv = MRB_METHOD_FUNC(m)(mrb, recv);
}
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return recv;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
ci->stack[0] = recv;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
}
JUMP;
CASE(OP_CALL, Z) {
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv = ci->stack[0];
struct RProc *m = mrb_proc_ptr(recv);
/* replace callinfo */
ci->u.target_class = MRB_PROC_TARGET_CLASS(m);
mrb_vm_ci_proc_set(ci, m);
if (MRB_PROC_ENV_P(m)) {
ci->mid = MRB_PROC_ENV(m)->mid;
}
/* prepare stack */
if (MRB_PROC_CFUNC_P(m)) {
recv = MRB_PROC_CFUNC(m)(mrb, recv);
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
ci[1].stack[0] = recv;
irep = mrb->c->ci->proc->body.irep;
}
else {
/* setup environment for calling method */
proc = m;
irep = m->body.irep;
if (!irep) {
mrb->c->ci->stack[0] = mrb_nil_value();
a = 0;
c = OP_R_NORMAL;
goto L_OP_RETURN_BODY;
}
mrb_int nargs = mrb_ci_bidx(ci)+1;
if (nargs < irep->nregs) {
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+nargs, irep->nregs-nargs);
}
if (MRB_PROC_ENV_P(m)) {
regs[0] = MRB_PROC_ENV(m)->stack[0];
}
pc = irep->iseq;
}
pool = irep->pool;
syms = irep->syms;
JUMP;
}
CASE(OP_SUPER, BB) {
mrb_method_t m;
struct RClass *cls;
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv, blk;
const struct RProc *p = ci->proc;
mrb_sym mid = ci->mid;
struct RClass* target_class = MRB_PROC_TARGET_CLASS(p);
if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */
mid = p->e.env->mid; /* restore old mid */
}
if (mid == 0 || !target_class) {
mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (target_class->flags & MRB_FL_CLASS_IS_PREPENDED) {
target_class = mrb_vm_ci_target_class(ci);
}
else if (target_class->tt == MRB_TT_MODULE) {
target_class = mrb_vm_ci_target_class(ci);
if (!target_class || target_class->tt != MRB_TT_ICLASS) {
goto super_typeerror;
}
}
recv = regs[0];
if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
super_typeerror: ;
mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"self has wrong type to call super in this context");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
ARGUMENT_NORMALIZE(a, &b, OP_SUPER);
cls = target_class->super;
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &b, blk, 1);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, b);
/* prepare stack */
ci->stack[0] = recv;
if (MRB_METHOD_CFUNC_P(m)) {
mrb_value v;
if (MRB_METHOD_PROC_P(m)) {
mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m));
}
v = MRB_METHOD_CFUNC(m)(mrb, recv);
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
mrb_assert(!mrb_break_p(v));
if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return v;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
mrb->c->ci->stack[0] = v;
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
JUMP;
}
CASE(OP_ARGARY, BS) {
mrb_int m1 = (b>>11)&0x3f;
mrb_int r = (b>>10)&0x1;
mrb_int m2 = (b>>5)&0x1f;
mrb_int kd = (b>>4)&0x1;
mrb_int lv = (b>>0)&0xf;
mrb_value *stack;
if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) {
mrb_value exc;
L_NOSUPER:
exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e) goto L_NOSUPER;
if (MRB_ENV_LEN(e) <= m1+r+m2+1)
goto L_NOSUPER;
stack = e->stack + 1;
}
if (r == 0) {
regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
}
else {
mrb_value *pp = NULL;
struct RArray *rest;
mrb_int len = 0;
if (mrb_array_p(stack[m1])) {
struct RArray *ary = mrb_ary_ptr(stack[m1]);
pp = ARY_PTR(ary);
len = ARY_LEN(ary);
}
regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
rest = mrb_ary_ptr(regs[a]);
if (m1 > 0) {
stack_copy(ARY_PTR(rest), stack, m1);
}
if (len > 0) {
stack_copy(ARY_PTR(rest)+m1, pp, len);
}
if (m2 > 0) {
stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
}
ARY_SET_LEN(rest, m1+len+m2);
}
if (kd) {
regs[a+1] = stack[m1+r+m2];
regs[a+2] = stack[m1+r+m2+1];
}
else {
regs[a+1] = stack[m1+r+m2];
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ENTER, W) {
mrb_int m1 = MRB_ASPEC_REQ(a);
mrb_int o = MRB_ASPEC_OPT(a);
mrb_int r = MRB_ASPEC_REST(a);
mrb_int m2 = MRB_ASPEC_POST(a);
mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
/* unused
int b = MRB_ASPEC_BLOCK(a);
*/
mrb_int const len = m1 + o + r + m2;
mrb_callinfo *ci = mrb->c->ci;
mrb_int argc = ci->n;
mrb_value *argv = regs+1;
mrb_value * const argv0 = argv;
mrb_int const kw_pos = len + kd; /* where kwhash should be */
mrb_int const blk_pos = kw_pos + 1; /* where block should be */
mrb_value blk = regs[mrb_ci_bidx(ci)];
mrb_value kdict = mrb_nil_value();
/* keyword arguments */
if (ci->nk > 0) {
mrb_int kidx = mrb_ci_kidx(ci);
kdict = regs[kidx];
if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) {
kdict = mrb_nil_value();
ci->nk = 0;
}
}
if (!kd && !mrb_nil_p(kdict)) {
if (argc < 14) {
ci->n++;
argc++; /* include kdict in normal arguments */
}
else if (argc == 14) {
/* pack arguments and kdict */
regs[1] = mrb_ary_new_from_values(mrb, argc+1, &regs[1]);
argc = ci->n = 15;
}
else {/* argc == 15 */
/* push kdict to packed arguments */
mrb_ary_push(mrb, regs[1], regs[2]);
}
ci->nk = 0;
}
if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) {
kdict = mrb_hash_dup(mrb, kdict);
}
/* arguments are passed packed in an Array */
if (argc == 15) {
struct RArray *ary = mrb_ary_ptr(regs[1]);
argv = ARY_PTR(ary);
argc = (int)ARY_LEN(ary);
mrb_gc_protect(mrb, regs[1]);
}
/* strict argument check */
if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
if (argc < m1 + m2 || (r == 0 && argc > len)) {
argnum_error(mrb, m1+m2);
goto L_RAISE;
}
}
/* extract first argument array to arguments */
else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
mrb_gc_protect(mrb, argv[0]);
argc = (int)RARRAY_LEN(argv[0]);
argv = RARRAY_PTR(argv[0]);
}
/* rest arguments */
mrb_value rest = mrb_nil_value();
if (argc < len) {
mrb_int mlen = m2;
if (argc < m1+m2) {
mlen = m1 < argc ? argc - m1 : 0;
}
/* copy mandatory and optional arguments */
if (argv0 != argv && argv) {
value_move(&regs[1], argv, argc-mlen); /* m1 + o */
}
if (argc < m1) {
stack_clear(&regs[argc+1], m1-argc);
}
/* copy post mandatory arguments */
if (mlen) {
value_move(&regs[len-m2+1], &argv[argc-mlen], mlen);
}
if (mlen < m2) {
stack_clear(&regs[len-m2+mlen+1], m2-mlen);
}
/* initialize rest arguments with empty Array */
if (r) {
rest = mrb_ary_new_capa(mrb, 0);
regs[m1+o+1] = rest;
}
/* skip initializer of passed arguments */
if (o > 0 && argc > m1+m2)
pc += (argc - m1 - m2)*3;
}
else {
mrb_int rnum = 0;
if (argv0 != argv) {
value_move(&regs[1], argv, m1+o);
}
if (r) {
rnum = argc-m1-o-m2;
rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
regs[m1+o+1] = rest;
}
if (m2 > 0 && argc-m2 > m1) {
value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2);
}
pc += o*3;
}
/* blk needs to be updated first to protect it from GC */
regs[blk_pos] = blk; /* move block */
if (kd) {
if (mrb_nil_p(kdict))
kdict = mrb_hash_new_capa(mrb, 0);
regs[kw_pos] = kdict; /* set kwhash */
}
/* format arguments for generated code */
mrb->c->ci->n = (uint8_t)len;
/* clear local (but non-argument) variables */
if (irep->nlocals-blk_pos-1 > 0) {
stack_clear(&regs[blk_pos+1], irep->nlocals-blk_pos-1);
}
JUMP;
}
CASE(OP_KARG, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict, v;
if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
mrb_value str = mrb_format(mrb, "missing keyword: %v", k);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
v = mrb_hash_get(mrb, kdict, k);
regs[a] = v;
mrb_hash_delete_key(mrb, kdict, k);
NEXT;
}
CASE(OP_KEY_P, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
mrb_bool key_p = FALSE;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
key_p = mrb_hash_key_p(mrb, kdict, k);
}
regs[a] = mrb_bool_value(key_p);
NEXT;
}
CASE(OP_KEYEND, Z) {
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
mrb_value keys = mrb_hash_keys(mrb, kdict);
mrb_value key1 = RARRAY_PTR(keys)[0];
mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
NEXT;
}
CASE(OP_BREAK, B) {
c = OP_R_BREAK;
goto L_RETURN;
}
CASE(OP_RETURN_BLK, B) {
c = OP_R_RETURN;
goto L_RETURN;
}
CASE(OP_RETURN, B)
c = OP_R_NORMAL;
L_RETURN:
{
mrb_callinfo *ci;
ci = mrb->c->ci;
if (ci->mid) {
mrb_value blk = regs[mrb_ci_bidx(ci)];
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (!MRB_PROC_STRICT_P(p) &&
ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
}
if (mrb->exc) {
L_RAISE:
ci = mrb->c->ci;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) goto L_FTOP;
goto L_CATCH;
}
while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) {
ci = cipop(mrb);
if (ci[1].cci == CINFO_SKIP && prev_jmp) {
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
pc = ci[0].pc;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) {
L_FTOP: /* fiber top */
if (mrb->c == mrb->root_c) {
mrb->c->ci->stack = mrb->c->stbase;
goto L_STOP;
}
else {
struct mrb_context *c = mrb->c;
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
c->prev = NULL;
goto L_RAISE;
}
}
break;
}
}
L_CATCH:
if (ch == NULL) goto L_STOP;
if (FALSE) {
L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
ci = mrb->c->ci;
}
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
}
else {
mrb_int acc;
mrb_value v;
ci = mrb->c->ci;
v = regs[a];
mrb_gc_protect(mrb, v);
switch (c) {
case OP_R_RETURN:
/* Fall through to OP_R_NORMAL otherwise */
if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) {
const struct RProc *dst;
mrb_callinfo *cibase;
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
if (MRB_PROC_ENV_P(dst)) {
struct REnv *e = MRB_PROC_ENV(dst);
if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) {
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
}
/* check jump destination */
while (cibase <= ci && ci->proc != dst) {
if (ci->cci > CINFO_NONE) { /* jump cross C boundary */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci--;
}
if (ci <= cibase) { /* no jump destination */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci = mrb->c->ci;
while (cibase <= ci && ci->proc != dst) {
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) {
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK);
ci = cipop(mrb);
pc = ci->pc;
}
proc = ci->proc;
mrb->exc = NULL; /* clear break object */
break;
}
/* fallthrough */
case OP_R_NORMAL:
NORMAL_RETURN:
if (ci == mrb->c->cibase) {
struct mrb_context *c;
c = mrb->c;
if (!c->prev) { /* toplevel return */
regs[irep->nlocals] = v;
goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP);
}
if (!c->vmexec && c->prev->ci == c->prev->cibase) {
mrb_value exc = mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) {
c = mrb->c;
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL);
/* automatic yield at the end */
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
mrb->c->status = MRB_FIBER_RUNNING;
c->prev = NULL;
if (c->vmexec) {
mrb_gc_arena_restore(mrb, ai);
c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
ci = mrb->c->ci;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN);
mrb->exc = NULL; /* clear break object */
break;
case OP_R_BREAK:
if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
if (MRB_PROC_ORPHAN_P(proc)) {
mrb_value exc;
L_BREAK_ERROR:
exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR,
"break from proc-closure");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) {
goto L_BREAK_ERROR;
}
else {
struct REnv *e = MRB_PROC_ENV(proc);
if (e->cxt != mrb->c) {
goto L_BREAK_ERROR;
}
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK);
/* break from fiber block */
if (ci == mrb->c->cibase && ci->pc) {
struct mrb_context *c = mrb->c;
mrb->c = c->prev;
c->prev = NULL;
ci = mrb->c->ci;
}
if (ci->cci > CINFO_NONE) {
ci = cipop(mrb);
mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v);
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
if (FALSE) {
struct RBreak *brk;
L_BREAK:
brk = (struct RBreak*)mrb->exc;
proc = mrb_break_proc_get(brk);
v = mrb_break_value_get(brk);
ci = mrb->c->ci;
switch (mrb_break_tag_get(brk)) {
#define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
#undef DISPATCH_CHECKPOINTS
default:
mrb_assert(!"wrong break tag");
}
}
while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) {
if (ci[-1].cci == CINFO_SKIP) {
goto L_BREAK_ERROR;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER);
ci = cipop(mrb);
pc = ci->pc;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET);
if (ci == mrb->c->cibase) {
goto L_BREAK_ERROR;
}
mrb->exc = NULL; /* clear break object */
break;
default:
/* cannot happen */
break;
}
mrb_assert(ci == mrb->c->ci);
mrb_assert(mrb->exc == NULL);
if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) {
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
acc = ci->cci;
ci = cipop(mrb);
if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
mrb_gc_arena_restore(mrb, ai);
mrb->jmp = prev_jmp;
return v;
}
pc = ci->pc;
DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
ci[1].stack[0] = v;
mrb_gc_arena_restore(mrb, ai);
}
JUMP;
}
CASE(OP_BLKPUSH, BS) {
int m1 = (b>>11)&0x3f;
int r = (b>>10)&0x1;
int m2 = (b>>5)&0x1f;
int kd = (b>>4)&0x1;
int lv = (b>>0)&0xf;
mrb_value *stack;
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
MRB_ENV_LEN(e) <= m1+r+m2+1) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
stack = e->stack + 1;
}
if (mrb_nil_p(stack[m1+r+m2+kd])) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
regs[a] = stack[m1+r+m2+kd];
NEXT;
}
#if !defined(MRB_USE_BIGINT) || defined(MRB_INT32)
L_INT_OVERFLOW:
{
mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow");
mrb_exc_set(mrb, exc);
}
goto L_RAISE;
#endif
#define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
#define OP_MATH(op_name) \
/* need to check if op is overridden */ \
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
OP_MATH_CASE_INTEGER(op_name); \
OP_MATH_CASE_FLOAT(op_name, integer, float); \
OP_MATH_CASE_FLOAT(op_name, float, integer); \
OP_MATH_CASE_FLOAT(op_name, float, float); \
OP_MATH_CASE_STRING_##op_name(); \
default: \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATH_CASE_INTEGER(op_name) \
case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
{ \
mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) { \
OP_MATH_OVERFLOW_INT(op_name,x,y); \
} \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
#else
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
{ \
mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
#ifdef MRB_USE_BIGINT
#define OP_MATH_OVERFLOW_INT(op,x,y) regs[a] = mrb_bint_##op##_ii(mrb,x,y)
#else
#define OP_MATH_OVERFLOW_INT(op,x,y) goto L_INT_OVERFLOW
#endif
#define OP_MATH_CASE_STRING_add() \
case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
mrb_gc_arena_restore(mrb, ai); \
break
#define OP_MATH_CASE_STRING_sub() (void)0
#define OP_MATH_CASE_STRING_mul() (void)0
#define OP_MATH_OP_add +
#define OP_MATH_OP_sub -
#define OP_MATH_OP_mul *
#define OP_MATH_TT_integer MRB_TT_INTEGER
#define OP_MATH_TT_float MRB_TT_FLOAT
CASE(OP_ADD, B) {
OP_MATH(add);
}
CASE(OP_SUB, B) {
OP_MATH(sub);
}
CASE(OP_MUL, B) {
OP_MATH(mul);
}
CASE(OP_DIV, B) {
#ifndef MRB_NO_FLOAT
mrb_float x, y, f;
#endif
/* need to check if op is overridden */
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
{
mrb_int x = mrb_integer(regs[a]);
mrb_int y = mrb_integer(regs[a+1]);
mrb_int div = mrb_div_int(mrb, x, y);
SET_INT_VALUE(mrb, regs[a], div);
}
NEXT;
#ifndef MRB_NO_FLOAT
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
x = (mrb_float)mrb_integer(regs[a]);
y = mrb_float(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
x = mrb_float(regs[a]);
y = (mrb_float)mrb_integer(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
x = mrb_float(regs[a]);
y = mrb_float(regs[a+1]);
break;
#endif
default:
mid = MRB_OPSYM(div);
goto L_SEND_SYM;
}
#ifndef MRB_NO_FLOAT
f = mrb_div_float(x, y);
SET_FLOAT_VALUE(mrb, regs[a], f);
#endif
NEXT;
}
#define OP_MATHI(op_name) \
/* need to check if op is overridden */ \
switch (mrb_type(regs[a])) { \
OP_MATHI_CASE_INTEGER(op_name); \
OP_MATHI_CASE_FLOAT(op_name); \
default: \
SET_INT_VALUE(mrb,regs[a+1], b); \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATHI_CASE_INTEGER(op_name) \
case MRB_TT_INTEGER: \
{ \
mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) { \
OP_MATH_OVERFLOW_INT(op_name,x,y); \
} \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATHI_CASE_FLOAT(op_name) (void)0
#else
#define OP_MATHI_CASE_FLOAT(op_name) \
case MRB_TT_FLOAT: \
{ \
mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
CASE(OP_ADDI, BB) {
OP_MATHI(add);
}
CASE(OP_SUBI, BB) {
OP_MATHI(sub);
}
#define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
#ifdef MRB_NO_FLOAT
#define OP_CMP(op,sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#else
#define OP_CMP(op, sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_float,mrb_float);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#endif
CASE(OP_EQ, B) {
if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
SET_TRUE_VALUE(regs[a]);
}
else {
OP_CMP(==,eq);
}
NEXT;
}
CASE(OP_LT, B) {
OP_CMP(<,lt);
NEXT;
}
CASE(OP_LE, B) {
OP_CMP(<=,le);
NEXT;
}
CASE(OP_GT, B) {
OP_CMP(>,gt);
NEXT;
}
CASE(OP_GE, B) {
OP_CMP(>=,ge);
NEXT;
}
CASE(OP_ARRAY, BB) {
regs[a] = mrb_ary_new_from_values(mrb, b, &regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARRAY2, BBB) {
regs[a] = mrb_ary_new_from_values(mrb, c, &regs[b]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYCAT, B) {
mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
if (mrb_nil_p(regs[a])) {
regs[a] = splat;
}
else {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_concat(mrb, regs[a], splat);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYPUSH, BB) {
mrb_assert(mrb_array_p(regs[a]));
for (mrb_int i=0; i<b; i++) {
mrb_ary_push(mrb, regs[a], regs[a+i+1]);
}
NEXT;
}
CASE(OP_ARYDUP, B) {
mrb_value ary = regs[a];
if (mrb_array_p(ary)) {
ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary));
}
else {
ary = mrb_ary_new_from_values(mrb, 1, &ary);
}
regs[a] = ary;
NEXT;
}
CASE(OP_AREF, BBB) {
mrb_value v = regs[b];
if (!mrb_array_p(v)) {
if (c == 0) {
regs[a] = v;
}
else {
SET_NIL_VALUE(regs[a]);
}
}
else {
v = mrb_ary_ref(mrb, v, c);
regs[a] = v;
}
NEXT;
}
CASE(OP_ASET, BBB) {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_set(mrb, regs[b], c, regs[a]);
NEXT;
}
CASE(OP_APOST, BBB) {
mrb_value v = regs[a];
int pre = b;
int post = c;
struct RArray *ary;
int len, idx;
if (!mrb_array_p(v)) {
v = mrb_ary_new_from_values(mrb, 1, &regs[a]);
}
ary = mrb_ary_ptr(v);
len = (int)ARY_LEN(ary);
if (len > pre + post) {
v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
regs[a++] = v;
while (post--) {
regs[a++] = ARY_PTR(ary)[len-post-1];
}
}
else {
v = mrb_ary_new_capa(mrb, 0);
regs[a++] = v;
for (idx=0; idx+pre<len; idx++) {
regs[a+idx] = ARY_PTR(ary)[pre+idx];
}
while (idx < post) {
SET_NIL_VALUE(regs[a+idx]);
idx++;
}
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_INTERN, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_sym sym = mrb_intern_str(mrb, regs[a]);
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_SYMBOL, BB) {
size_t len;
mrb_sym sym;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
sym = mrb_intern_static(mrb, pool[b].u.str, len);
}
else {
sym = mrb_intern(mrb, pool[b].u.str, len);
}
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_STRING, BB) {
mrb_int len;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
}
else {
regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_STRCAT, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_str_concat(mrb, regs[a], regs[a+1]);
NEXT;
}
CASE(OP_HASH, BB) {
mrb_value hash = mrb_hash_new_capa(mrb, b);
int i;
int lim = a+b*2;
for (i=a; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
regs[a] = hash;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHADD, BB) {
mrb_value hash;
int i;
int lim = a+b*2+1;
hash = regs[a];
mrb_ensure_hash_type(mrb, hash);
for (i=a+1; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHCAT, B) {
mrb_value hash = regs[a];
mrb_assert(mrb_hash_p(hash));
mrb_hash_merge(mrb, hash, regs[a+1]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_LAMBDA, BB)
c = OP_L_LAMBDA;
L_MAKE_LAMBDA:
{
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
if (c & OP_L_CAPTURE) {
p = mrb_closure_new(mrb, nirep);
}
else {
p = mrb_proc_new(mrb, nirep);
p->flags |= MRB_PROC_SCOPE;
}
if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
regs[a] = mrb_obj_value(p);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_BLOCK, BB) {
c = OP_L_BLOCK;
goto L_MAKE_LAMBDA;
}
CASE(OP_METHOD, BB) {
c = OP_L_METHOD;
goto L_MAKE_LAMBDA;
}
CASE(OP_RANGE_INC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_RANGE_EXC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_OCLASS, B) {
regs[a] = mrb_obj_value(mrb->object_class);
NEXT;
}
CASE(OP_CLASS, BB) {
struct RClass *c = 0, *baseclass;
mrb_value base, super;
mrb_sym id = syms[b];
base = regs[a];
super = regs[a+1];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
c = mrb_vm_define_class(mrb, base, super, id);
regs[a] = mrb_obj_value(c);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_MODULE, BB) {
struct RClass *cls = 0, *baseclass;
mrb_value base;
mrb_sym id = syms[b];
base = regs[a];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
cls = mrb_vm_define_module(mrb, base, id);
regs[a] = mrb_obj_value(cls);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_EXEC, BB)
{
mrb_value recv = regs[a];
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
/* prepare closure */
p = mrb_proc_new(mrb, nirep);
p->c = NULL;
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
p->flags |= MRB_PROC_SCOPE;
/* prepare call stack */
cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0);
irep = p->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+1, irep->nregs-1);
pc = irep->iseq;
JUMP;
}
CASE(OP_DEF, BB) {
struct RClass *target = mrb_class_ptr(regs[a]);
struct RProc *p = mrb_proc_ptr(regs[a+1]);
mrb_method_t m;
mrb_sym mid = syms[b];
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, target, mid, m);
mrb_method_added(mrb, target, mid);
mrb_gc_arena_restore(mrb, ai);
regs[a] = mrb_symbol_value(mid);
NEXT;
}
CASE(OP_SCLASS, B) {
regs[a] = mrb_singleton_class(mrb, regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_TCLASS, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
regs[a] = mrb_obj_value(target);
NEXT;
}
CASE(OP_ALIAS, BB) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_alias_method(mrb, target, syms[a], syms[b]);
mrb_method_added(mrb, target, syms[a]);
NEXT;
}
CASE(OP_UNDEF, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_undef_method_id(mrb, target, syms[a]);
NEXT;
}
CASE(OP_DEBUG, Z) {
FETCH_BBB();
#ifdef MRB_USE_DEBUG_HOOK
mrb->debug_op_hook(mrb, irep, pc, regs);
#else
#ifndef MRB_NO_STDIO
printf("OP_DEBUG %d %d %d\n", a, b, c);
#else
abort();
#endif
#endif
NEXT;
}
CASE(OP_ERR, B) {
size_t len = pool[a].tt >> 2;
mrb_value exc;
mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CASE(OP_EXT1, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT2, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT3, Z) {
uint8_t insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_STOP, Z) {
/* stop VM */
CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value());
}
CHECKPOINT_END(RBREAK_TAG_STOP);
L_STOP:
mrb->jmp = prev_jmp;
if (mrb->exc) {
mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION);
return mrb_obj_value(mrb->exc);
}
return regs[irep->nlocals];
}
}
END_DISPATCH;
#undef regs
}
MRB_CATCH(&c_jmp) {
mrb_callinfo *ci = mrb->c->ci;
while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
ci = cipop(mrb);
}
exc_catched = TRUE;
pc = ci->pc;
goto RETRY_TRY_BLOCK;
}
MRB_END_EXC(&c_jmp);
}
| null | null | 195,691 | 147780436991899729545603935182301496324 | 1,844 | vm.c: check if target_class is NULL (when prepended). | other |
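The message refers to the `OP_SUPER` handler in the `mrb_vm_exec()` listing: for a class with `MRB_FL_CLASS_IS_PREPENDED` set, `target_class` is re-read from the callinfo without checking the result. A minimal sketch of the described guard, reusing the listing's `super_typeerror` label (placement is an assumption, not the verbatim patch):

```c
/* Sketch: mrb_vm_ci_target_class() can return NULL for a prepended
 * class; route that case to the existing type-error path. */
if (target_class->flags & MRB_FL_CLASS_IS_PREPENDED) {
  target_class = mrb_vm_ci_target_class(ci);
  if (!target_class) goto super_typeerror;
}
```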
| FreeRTOS-Kernel | 47338393f1f79558f6144213409f09f81d7c4837 | 1 |
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
const UBaseType_t uxItemSize,
const uint8_t ucQueueType )
{
Queue_t * pxNewQueue;
size_t xQueueSizeInBytes;
uint8_t * pucQueueStorage;
configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
/* Allocate enough space to hold the maximum number of items that
* can be in the queue at any time. It is valid for uxItemSize to be
* zero in the case the queue is used as a semaphore. */
xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
/* Check for multiplication overflow. */
configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );
/* Allocate the queue and storage area. Justification for MISRA
* deviation as follows: pvPortMalloc() always ensures returned memory
* blocks are aligned per the requirements of the MCU stack. In this case
* pvPortMalloc() must return a pointer that is guaranteed to meet the
* alignment requirements of the Queue_t structure - which in this case
* is an int8_t *. Therefore, whenever the stack alignment requirements
* are greater than or equal to the pointer to char requirements the cast
* is safe. In other cases alignment requirements are not strict (one or
* two bytes). */
pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */
if( pxNewQueue != NULL )
{
/* Jump past the queue structure to find the location of the queue
* storage area. */
pucQueueStorage = ( uint8_t * ) pxNewQueue;
pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
{
/* Queues can be created either statically or dynamically, so
* note this queue was created dynamically in case it is later
* deleted. */
pxNewQueue->ucStaticallyAllocated = pdFALSE;
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
}
else
{
traceQUEUE_CREATE_FAILED( ucQueueType );
mtCOVERAGE_TEST_MARKER();
}
return pxNewQueue;
}
| null | null | 195,692
|
274786410457574681394426849117761189698
| 55
|
add assert for addition overflow on queue creation (#225)
|
other
|
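The FreeRTOS function above already asserts that the multiplication does not overflow; the commit message says the fix adds a matching assert for the addition. A minimal standalone sketch of that check (invented helper name, not the actual FreeRTOS patch):

#include <cassert>
#include <cstddef>

// Sketch only: detect wrap-around in the unsigned addition of the
// queue header size and the storage size before calling the allocator.
static size_t checked_queue_alloc_size(size_t header, size_t storage)
{
    size_t total = header + storage;  // unsigned add may wrap
    assert(total >= header);          // wrapped if the sum got smaller
    return total;
}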
glibc
|
23e0e8f5f1fb5ed150253d986ecccdc90c2dcd5e
| 1
|
__getcwd_generic (char *buf, size_t size)
{
/* Lengths of big file name components and entire file names, and a
deep level of file name nesting. These numbers are not upper
bounds; they are merely large values suitable for initial
allocations, designed to be large enough for most real-world
uses. */
enum
{
BIG_FILE_NAME_COMPONENT_LENGTH = 255,
BIG_FILE_NAME_LENGTH = MIN (4095, PATH_MAX - 1),
DEEP_NESTING = 100
};
#if HAVE_OPENAT_SUPPORT
int fd = AT_FDCWD;
bool fd_needs_closing = false;
#else
char dots[DEEP_NESTING * sizeof ".." + BIG_FILE_NAME_COMPONENT_LENGTH + 1];
char *dotlist = dots;
size_t dotsize = sizeof dots;
size_t dotlen = 0;
#endif
DIR *dirstream = NULL;
dev_t rootdev, thisdev;
ino_t rootino, thisino;
char *dir;
register char *dirp;
struct __stat64_t64 st;
size_t allocated = size;
size_t used;
#if HAVE_MINIMALLY_WORKING_GETCWD
/* If AT_FDCWD is not defined, the algorithm below is O(N**2) and
this is much slower than the system getcwd (at least on
GNU/Linux). So trust the system getcwd's results unless they
look suspicious.
Use the system getcwd even if we have openat support, since the
system getcwd works even when a parent is unreadable, while the
openat-based approach does not.
But on AIX 5.1..7.1, the system getcwd is not even minimally
working: If the current directory name is slightly longer than
PATH_MAX, it omits the first directory component and returns
this wrong result with errno = 0. */
# undef getcwd
dir = getcwd_system (buf, size);
if (dir || (size && errno == ERANGE))
return dir;
/* Solaris getcwd (NULL, 0) fails with errno == EINVAL, but it has
internal magic that lets it work even if an ancestor directory is
inaccessible, which is better in many cases. So in this case try
again with a buffer that's almost always big enough. */
if (errno == EINVAL && buf == NULL && size == 0)
{
char big_buffer[BIG_FILE_NAME_LENGTH + 1];
dir = getcwd_system (big_buffer, sizeof big_buffer);
if (dir)
return strdup (dir);
}
# if HAVE_PARTLY_WORKING_GETCWD
/* The system getcwd works, except it sometimes fails when it
shouldn't, setting errno to ERANGE, ENAMETOOLONG, or ENOENT. */
if (errno != ERANGE && errno != ENAMETOOLONG && errno != ENOENT)
return NULL;
# endif
#endif
if (size == 0)
{
if (buf != NULL)
{
__set_errno (EINVAL);
return NULL;
}
allocated = BIG_FILE_NAME_LENGTH + 1;
}
if (buf == NULL)
{
dir = malloc (allocated);
if (dir == NULL)
return NULL;
}
else
dir = buf;
dirp = dir + allocated;
*--dirp = '\0';
if (__lstat64_time64 (".", &st) < 0)
goto lose;
thisdev = st.st_dev;
thisino = st.st_ino;
if (__lstat64_time64 ("/", &st) < 0)
goto lose;
rootdev = st.st_dev;
rootino = st.st_ino;
while (!(thisdev == rootdev && thisino == rootino))
{
struct dirent64 *d;
dev_t dotdev;
ino_t dotino;
bool mount_point;
int parent_status;
size_t dirroom;
size_t namlen;
bool use_d_ino = true;
/* Look at the parent directory. */
#if HAVE_OPENAT_SUPPORT
fd = __openat64 (fd, "..", O_RDONLY);
if (fd < 0)
goto lose;
fd_needs_closing = true;
parent_status = __fstat64_time64 (fd, &st);
#else
dotlist[dotlen++] = '.';
dotlist[dotlen++] = '.';
dotlist[dotlen] = '\0';
parent_status = __lstat64_time64 (dotlist, &st);
#endif
if (parent_status != 0)
goto lose;
if (dirstream && __closedir (dirstream) != 0)
{
dirstream = NULL;
goto lose;
}
/* Figure out if this directory is a mount point. */
dotdev = st.st_dev;
dotino = st.st_ino;
mount_point = dotdev != thisdev;
/* Search for the last directory. */
#if HAVE_OPENAT_SUPPORT
dirstream = __fdopendir (fd);
if (dirstream == NULL)
goto lose;
fd_needs_closing = false;
#else
dirstream = __opendir (dotlist);
if (dirstream == NULL)
goto lose;
dotlist[dotlen++] = '/';
#endif
for (;;)
{
/* Clear errno to distinguish EOF from error if readdir returns
NULL. */
__set_errno (0);
d = __readdir64 (dirstream);
/* When we've iterated through all directory entries without finding
one with a matching d_ino, rewind the stream and consider each
name again, but this time, using lstat. This is necessary in a
chroot on at least one system (glibc-2.3.6 + linux 2.6.12), where
.., ../.., ../../.., etc. all had the same device number, yet the
d_ino values for entries in / did not match those obtained
via lstat. */
if (d == NULL && errno == 0 && use_d_ino)
{
use_d_ino = false;
__rewinddir (dirstream);
d = __readdir64 (dirstream);
}
if (d == NULL)
{
if (errno == 0)
/* EOF on dirstream, which can mean e.g., that the current
directory has been removed. */
__set_errno (ENOENT);
goto lose;
}
if (d->d_name[0] == '.' &&
(d->d_name[1] == '\0' ||
(d->d_name[1] == '.' && d->d_name[2] == '\0')))
continue;
if (use_d_ino)
{
bool match = (MATCHING_INO (d, thisino) || mount_point);
if (! match)
continue;
}
{
int entry_status;
#if HAVE_OPENAT_SUPPORT
entry_status = __fstatat64_time64 (fd, d->d_name, &st,
AT_SYMLINK_NOFOLLOW);
#else
/* Compute size needed for this file name, or for the file
name ".." in the same directory, whichever is larger.
Room for ".." might be needed the next time through
the outer loop. */
size_t name_alloc = _D_ALLOC_NAMLEN (d);
size_t filesize = dotlen + MAX (sizeof "..", name_alloc);
if (filesize < dotlen)
goto memory_exhausted;
if (dotsize < filesize)
{
/* My, what a deep directory tree you have, Grandma. */
size_t newsize = MAX (filesize, dotsize * 2);
size_t i;
if (newsize < dotsize)
goto memory_exhausted;
if (dotlist != dots)
free (dotlist);
dotlist = malloc (newsize);
if (dotlist == NULL)
goto lose;
dotsize = newsize;
i = 0;
do
{
dotlist[i++] = '.';
dotlist[i++] = '.';
dotlist[i++] = '/';
}
while (i < dotlen);
}
memcpy (dotlist + dotlen, d->d_name, _D_ALLOC_NAMLEN (d));
entry_status = __lstat64_time64 (dotlist, &st);
#endif
/* We don't fail here if we cannot stat() a directory entry.
This can happen when (network) file systems fail. If this
entry is in fact the one we are looking for, we will find
out as soon as we reach the end of the directory without
having found anything. */
if (entry_status == 0 && S_ISDIR (st.st_mode)
&& st.st_dev == thisdev && st.st_ino == thisino)
break;
}
}
dirroom = dirp - dir;
namlen = _D_EXACT_NAMLEN (d);
if (dirroom <= namlen)
{
if (size != 0)
{
__set_errno (ERANGE);
goto lose;
}
else
{
char *tmp;
size_t oldsize = allocated;
allocated += MAX (allocated, namlen);
if (allocated < oldsize
|| ! (tmp = realloc (dir, allocated)))
goto memory_exhausted;
/* Move current contents up to the end of the buffer.
This is guaranteed to be non-overlapping. */
dirp = memcpy (tmp + allocated - (oldsize - dirroom),
tmp + dirroom,
oldsize - dirroom);
dir = tmp;
}
}
dirp -= namlen;
memcpy (dirp, d->d_name, namlen);
*--dirp = '/';
thisdev = dotdev;
thisino = dotino;
}
if (dirstream && __closedir (dirstream) != 0)
{
dirstream = NULL;
goto lose;
}
if (dirp == &dir[allocated - 1])
*--dirp = '/';
#if ! HAVE_OPENAT_SUPPORT
if (dotlist != dots)
free (dotlist);
#endif
used = dir + allocated - dirp;
memmove (dir, dirp, used);
if (size == 0)
/* Ensure that the buffer is only as large as necessary. */
buf = (used < allocated ? realloc (dir, used) : dir);
if (buf == NULL)
/* Either buf was NULL all along, or 'realloc' failed but
we still have the original string. */
buf = dir;
return buf;
memory_exhausted:
__set_errno (ENOMEM);
lose:
{
int save = errno;
if (dirstream)
__closedir (dirstream);
#if HAVE_OPENAT_SUPPORT
if (fd_needs_closing)
__close_nocancel_nostatus (fd);
#else
if (dotlist != dots)
free (dotlist);
#endif
if (buf == NULL)
free (dir);
__set_errno (save);
}
return NULL;
}
| null | null | 195,716
|
204737856749159993041789176797064964819
| 333
|
getcwd: Set errno to ERANGE for size == 1 (CVE-2021-3999)
No valid path returned by getcwd would fit into 1 byte, so reject the
size early and return NULL with errno set to ERANGE. This change is
prompted by CVE-2021-3999, which describes a single byte buffer
underflow and overflow when all of the following conditions are met:
- The buffer size (i.e. the second argument of getcwd) is 1 byte
- The current working directory is too long
- '/' is also mounted on the current working directory
Sequence of events:
- In sysdeps/unix/sysv/linux/getcwd.c, the syscall returns ENAMETOOLONG
because the linux kernel checks for name length before it checks
buffer size
- The code falls back to the generic getcwd in sysdeps/posix
- In the generic func, the buf[0] is set to '\0' on line 250
- this while loop on line 262 is bypassed:
while (!(thisdev == rootdev && thisino == rootino))
since the rootfs (/) is bind mounted onto the directory and the flow
goes on to line 449, where it puts a '/' in the byte before the
buffer.
- Finally on line 458, it moves 2 bytes (the underflowed byte and the
'\0') to the buf[0] and buf[1], resulting in a 1 byte buffer overflow.
- buf is returned on line 469 and errno is not set.
This resolves BZ #28769.
Reviewed-by: Andreas Schwab <[email protected]>
Reviewed-by: Adhemerval Zanella <[email protected]>
Signed-off-by: Qualys Security Advisory <[email protected]>
Signed-off-by: Siddhesh Poyarekar <[email protected]>
|
other
|
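The commit message above spells out the CVE-2021-3999 fix: no valid getcwd result fits in one byte, so a size of 1 is rejected early with ERANGE. A hedged standalone sketch of that early rejection (invented helper name, not glibc's actual code):

#include <cerrno>
#include <cstddef>

// Sketch only: the shortest valid result ("/" plus the terminating
// NUL) needs two bytes, so a caller-supplied buffer of size 1 can be
// refused up front instead of reaching the vulnerable fallback path.
static char *getcwd_reject_tiny_size(char *buf, size_t size)
{
    if (buf != nullptr && size == 1) {
        errno = ERANGE;
        return nullptr;
    }
    return buf;  // caller proceeds with the real lookup
}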
mvfst
|
a67083ff4b8dcbb7ee2839da6338032030d712b0
| 1
|
void updateHandshakeState(QuicServerConnectionState& conn) {
// Zero RTT read cipher is available after chlo is processed with the
// condition that early data attempt is accepted.
auto handshakeLayer = conn.serverHandshakeLayer;
auto zeroRttReadCipher = handshakeLayer->getZeroRttReadCipher();
auto zeroRttHeaderCipher = handshakeLayer->getZeroRttReadHeaderCipher();
// One RTT write cipher is available at Fizz layer after chlo is processed.
// However, the cipher is only exported to QUIC if early data attempt is
// accepted. Otherwise, the cipher will be available after cfin is
// processed.
auto oneRttWriteCipher = handshakeLayer->getOneRttWriteCipher();
// One RTT read cipher is available after cfin is processed.
auto oneRttReadCipher = handshakeLayer->getOneRttReadCipher();
auto oneRttWriteHeaderCipher = handshakeLayer->getOneRttWriteHeaderCipher();
auto oneRttReadHeaderCipher = handshakeLayer->getOneRttReadHeaderCipher();
if (zeroRttReadCipher) {
if (conn.qLogger) {
conn.qLogger->addTransportStateUpdate(kDerivedZeroRttReadCipher);
}
QUIC_TRACE(fst_trace, conn, "derived 0-rtt read cipher");
conn.readCodec->setZeroRttReadCipher(std::move(zeroRttReadCipher));
}
if (zeroRttHeaderCipher) {
conn.readCodec->setZeroRttHeaderCipher(std::move(zeroRttHeaderCipher));
}
if (oneRttWriteHeaderCipher) {
conn.oneRttWriteHeaderCipher = std::move(oneRttWriteHeaderCipher);
}
if (oneRttReadHeaderCipher) {
conn.readCodec->setOneRttHeaderCipher(std::move(oneRttReadHeaderCipher));
}
if (oneRttWriteCipher) {
if (conn.qLogger) {
conn.qLogger->addTransportStateUpdate(kDerivedOneRttWriteCipher);
}
QUIC_TRACE(fst_trace, conn, "derived 1-rtt write cipher");
CHECK(!conn.oneRttWriteCipher.get());
conn.oneRttWriteCipher = std::move(oneRttWriteCipher);
updatePacingOnKeyEstablished(conn);
// We negotiate the transport parameters whenever we have the 1-RTT write
// keys available.
auto clientParams = handshakeLayer->getClientTransportParams();
if (!clientParams) {
throw QuicTransportException(
"No client transport params",
TransportErrorCode::TRANSPORT_PARAMETER_ERROR);
}
processClientInitialParams(conn, std::move(*clientParams));
}
if (oneRttReadCipher) {
if (conn.qLogger) {
conn.qLogger->addTransportStateUpdate(kDerivedOneRttReadCipher);
}
QUIC_TRACE(fst_trace, conn, "derived 1-rtt read cipher");
// Clear limit because CFIN is received at this point
conn.writableBytesLimit = folly::none;
conn.readCodec->setOneRttReadCipher(std::move(oneRttReadCipher));
}
auto handshakeReadCipher = handshakeLayer->getHandshakeReadCipher();
auto handshakeReadHeaderCipher =
handshakeLayer->getHandshakeReadHeaderCipher();
if (handshakeReadCipher) {
CHECK(handshakeReadHeaderCipher);
conn.readCodec->setHandshakeReadCipher(std::move(handshakeReadCipher));
conn.readCodec->setHandshakeHeaderCipher(
std::move(handshakeReadHeaderCipher));
}
if (handshakeLayer->isHandshakeDone()) {
CHECK(conn.oneRttWriteCipher);
if (conn.version != QuicVersion::MVFST_D24 && !conn.sentHandshakeDone) {
sendSimpleFrame(conn, HandshakeDoneFrame());
conn.sentHandshakeDone = true;
}
}
}
| null | null | 195,720
|
181509801354210897629105034468408496700
| 80
|
Close connection if we derive an extra 1-rtt write cipher
Summary: Fixes CVE-2021-24029
Reviewed By: mjoras, lnicco
Differential Revision: D26613890
fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945
|
other
|
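The mvfst function above crashes via CHECK(!conn.oneRttWriteCipher.get()) when a second 1-RTT write cipher appears; per the commit message, the fix closes the connection instead. A hedged sketch of that hardening, with stand-in types rather than mvfst's:

#include <stdexcept>

struct Cipher {};

// Sketch only: raise an error (so the connection can be closed
// gracefully) rather than CHECK-crashing on a duplicate cipher.
static void installOneRttWriteCipher(Cipher *&slot, Cipher *fresh)
{
    if (slot != nullptr)
        throw std::runtime_error("duplicate 1-rtt write cipher derived");
    slot = fresh;
}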
libjpeg
|
4746b577931e926a49e50de9720a4946de3069a7
| 1
|
bool SampleInterleavedLSScan::ParseMCU(void)
{
#if ACCUSOFT_CODE
int lines = m_ulRemaining[0]; // total number of MCU lines processed.
UBYTE preshift = m_ucLowBit + FractionalColorBitsOf();
struct Line *line[4];
UBYTE cx;
//
// If a DNL marker is present, the number of remaining lines is zero. Fix it.
if (m_pFrame->HeightOf() == 0) {
assert(lines == 0);
lines = 8;
}
//
// A "MCU" in respect to the code organization is eight lines.
if (lines > 8) {
lines = 8;
}
if (m_pFrame->HeightOf() > 0)
m_ulRemaining[0] -= lines;
assert(lines > 0);
assert(m_ucCount < 4);
//
// Fill the line pointers.
for(cx = 0;cx < m_ucCount;cx++) {
line[cx] = CurrentLine(cx);
}
// Loop over lines and columns
do {
LONG length = m_ulWidth[0];
LONG *lp[4];
// Get the line pointers and initialize the internal backup lines.
for(cx = 0;cx < m_ucCount;cx++) {
lp[cx] = line[cx]->m_pData;
StartLine(cx);
}
if (BeginReadMCU(m_Stream.ByteStreamOf())) {
// No error handling strategy. No RST in scans. Bummer!
do {
LONG a[4],b[4],c[4],d[4]; // neighbouring values.
LONG d1[4],d2[4],d3[4]; // local gradients.
bool isrun = true;
for(cx = 0;cx < m_ucCount;cx++) {
GetContext(cx,a[cx],b[cx],c[cx],d[cx]);
d1[cx] = d[cx] - b[cx]; // compute local gradients
d2[cx] = b[cx] - c[cx];
d3[cx] = c[cx] - a[cx];
//
// Run mode only if the run condition is met for all components
if (isrun && !isRunMode(d1[cx],d2[cx],d3[cx]))
isrun = false;
}
if (isrun) {
LONG run = DecodeRun(length,m_lRunIndex[0]);
//
// Now fill the data.
while(run) {
// Update so that the next process gets the correct value.
// There is one sample per component.
for(cx = 0;cx < m_ucCount;cx++) {
UpdateContext(cx,a[cx]);
// And insert the value into the target line as well.
*lp[cx]++ = a[cx] << preshift;
}
run--,length--;
// As long as there are pixels on the line.
}
//
// More data on the line? I.e. the run did not cover the full m_lJ samples?
// Now decode the run interruption sample. The rtype is here always zero.
if (length) {
bool negative; // the sign variable
LONG errval; // the prediction error
LONG merr; // the mapped error (symbol)
LONG rx; // the reconstructed value
UBYTE k; // golomb parameter
//
// Decode the interrupting pixels.
for(cx = 0;cx < m_ucCount;cx++) {
// Get the neighbourhood.
GetContext(cx,a[cx],b[cx],c[cx],d[cx]);
// The prediction mode is always false, but the sign information
// is required.
negative = a[cx] > b[cx];
// Get the golomb parameter for run interruption coding.
k = GolombParameter(false);
// Golomb-decode the error symbol. It is always using the common
// run index.
merr = GolombDecode(k,m_lLimit - m_lJ[m_lRunIndex[0]] - 1);
// Inverse the error mapping procedure.
errval = InverseErrorMapping(merr,ErrorMappingOffset(false,merr != 0,k));
// Compute the reconstructed value.
rx = Reconstruct(negative,b[cx],errval);
// Update so that the next process gets the correct value.
UpdateContext(cx,rx);
// Fill in the value into the line
*lp[cx]++ = rx << preshift;
// Update the variables of the run mode.
UpdateState(false,errval);
}
// Update the run index now. This is not part of
// EncodeRun because the non-reduced run-index is
// required for the golomb coder length limit.
if (m_lRunIndex[0] > 0)
m_lRunIndex[0]--;
} else break; // end of line.
} else {
UWORD ctxt;
bool negative; // the sign variable.
LONG px; // the predicted variable.
LONG rx; // the reconstructed value.
LONG errval; // the error value.
LONG merr; // the mapped error value.
UBYTE k; // the Golomb parameter.
//
for(cx = 0;cx < m_ucCount;cx++) {
// Quantize the gradients.
d1[cx] = QuantizedGradient(d1[cx]);
d2[cx] = QuantizedGradient(d2[cx]);
d3[cx] = QuantizedGradient(d3[cx]);
// Compute the context.
ctxt = Context(negative,d1[cx],d2[cx],d3[cx]);
// Compute the predicted value.
px = Predict(a[cx],b[cx],c[cx]);
// Correct the prediction.
px = CorrectPrediction(ctxt,negative,px);
// Compute the golomb parameter k from the context.
k = GolombParameter(ctxt);
// Decode the error symbol.
merr = GolombDecode(k,m_lLimit);
// Inverse the error symbol into an error value.
errval = InverseErrorMapping(merr,ErrorMappingOffset(ctxt,k));
// Update the variables.
UpdateState(ctxt,errval);
// Compute the reconstructed value.
rx = Reconstruct(negative,px,errval);
// Update so that the next process gets the correct value.
UpdateContext(cx,rx);
// And insert the value into the target line as well.
*lp[cx]++ = rx << preshift;
}
}
} while(--length);
} // No error handling here.
//
// Advance the line pointers.
for(cx = 0;cx < m_ucCount;cx++) {
EndLine(cx);
line[cx] = line[cx]->m_pNext;
}
//
} while(--lines);
//
// If this is the last line, gobble up all the bits
// that bitstuffing the last byte may have left.
// As SkipStuffing is idempotent, we can also do that
// all the time.
m_Stream.SkipStuffing();
#endif
return false;
}
| null | null | 195,740
|
337627729530744243995392841896493457223
| 170
|
Fixed handling of empty JPEG-LS scans.
|
other
|
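The function above asserts lines > 0 and then runs a do { ... } while(--lines) loop, so an empty JPEG-LS scan (lines == 0) would wrap the counter in a non-assert build. A hedged sketch of the kind of guard the commit title implies (invented function name, not libjpeg's code):

// Sketch only: check before entering the loop so an empty scan
// decodes nothing instead of wrapping the line counter.
static bool parse_mcu_lines(int lines)
{
    if (lines <= 0)
        return false;  // empty scan: nothing to decode
    do {
        // ... decode one line here ...
    } while (--lines);
    return true;
}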
libjpeg
|
4746b577931e926a49e50de9720a4946de3069a7
| 1
|
bool SingleComponentLSScan::ParseMCU(void)
{
#if ACCUSOFT_CODE
int lines = m_ulRemaining[0]; // total number of MCU lines processed.
UBYTE preshift = m_ucLowBit + FractionalColorBitsOf();
struct Line *line = CurrentLine(0);
//
// If a DNL marker is present, the number of remaining lines is zero. Fix it.
if (m_pFrame->HeightOf() == 0) {
assert(lines == 0);
lines = 8;
}
assert(m_ucCount == 1);
//
// A "MCU" in respect to the code organization is eight lines.
if (lines > 8) {
lines = 8;
}
if (m_pFrame->HeightOf() > 0)
m_ulRemaining[0] -= lines;
assert(lines > 0);
// Loop over lines and columns
do {
LONG length = m_ulWidth[0];
LONG *lp = line->m_pData;
#ifdef DEBUG_LS
int xpos = 0;
static int linenumber = 0;
printf("\n%4d : ",++linenumber);
#endif
StartLine(0);
if (BeginReadMCU(m_Stream.ByteStreamOf())) { // No error handling strategy. No RST in scans. Bummer!
do {
LONG a,b,c,d; // neighbouring values.
LONG d1,d2,d3; // local gradients.
GetContext(0,a,b,c,d);
d1 = d - b; // compute local gradients
d2 = b - c;
d3 = c - a;
if (isRunMode(d1,d2,d3)) {
LONG run = DecodeRun(length,m_lRunIndex[0]);
//
// Now fill the data.
while(run) {
// Update so that the next process gets the correct value.
UpdateContext(0,a);
// And insert the value into the target line as well.
*lp++ = a << preshift;
#ifdef DEBUG_LS
printf("%4d:<%2x> ",xpos++,a);
#endif
run--,length--;
// As long as there are pixels on the line.
}
//
// More data on the line? I.e. the run did not cover the full m_lJ samples?
// Now decode the run interruption sample.
if (length) {
bool negative; // the sign variable
bool rtype; // run interruption type
LONG errval; // the prediction error
LONG merr; // the mapped error (symbol)
LONG rx; // the reconstructed value
UBYTE k; // golomb parameter
// Get the neighbourhood.
GetContext(0,a,b,c,d);
// Get the prediction mode.
rtype = InterruptedPredictionMode(negative,a,b);
// Get the golomb parameter for run interruption coding.
k = GolombParameter(rtype);
// Golomb-decode the error symbol.
merr = GolombDecode(k,m_lLimit - m_lJ[m_lRunIndex[0]] - 1);
// Inverse the error mapping procedure.
errval = InverseErrorMapping(merr + rtype,ErrorMappingOffset(rtype,rtype || merr,k));
// Compute the reconstructed value.
rx = Reconstruct(negative,rtype?a:b,errval);
// Update so that the next process gets the correct value.
UpdateContext(0,rx);
// Fill in the value into the line
*lp = rx << preshift;
#ifdef DEBUG_LS
printf("%4d:<%2x> ",xpos++,*lp);
#endif
// Update the variables of the run mode.
UpdateState(rtype,errval);
// Update the run index now. This is not part of
// EncodeRun because the non-reduced run-index is
// required for the golomb coder length limit.
if (m_lRunIndex[0] > 0)
m_lRunIndex[0]--;
} else break; // end of line.
} else {
UWORD ctxt;
bool negative; // the sign variable.
LONG px; // the predicted variable.
LONG rx; // the reconstructed value.
LONG errval; // the error value.
LONG merr; // the mapped error value.
UBYTE k; // the Golomb parameter.
// Quantize the gradients.
d1 = QuantizedGradient(d1);
d2 = QuantizedGradient(d2);
d3 = QuantizedGradient(d3);
// Compute the context.
ctxt = Context(negative,d1,d2,d3);
// Compute the predicted value.
px = Predict(a,b,c);
// Correct the prediction.
px = CorrectPrediction(ctxt,negative,px);
// Compute the golomb parameter k from the context.
k = GolombParameter(ctxt);
// Decode the error symbol.
merr = GolombDecode(k,m_lLimit);
// Inverse the error symbol into an error value.
errval = InverseErrorMapping(merr,ErrorMappingOffset(ctxt,k));
// Update the variables.
UpdateState(ctxt,errval);
// Compute the reconstructed value.
rx = Reconstruct(negative,px,errval);
// Update so that the next process gets the correct value.
UpdateContext(0,rx);
// And insert the value into the target line as well.
*lp = rx << preshift;
#ifdef DEBUG_LS
printf("%4d:<%2x> ",xpos++,*lp);
#endif
}
} while(++lp,--length);
} // No error handling here.
EndLine(0);
line = line->m_pNext;
} while(--lines);
//
// If this is the last line, gobble up all the bits
// that bitstuffing the last byte may have left.
// As SkipStuffing is idempotent, we can also do that
// all the time.
m_Stream.SkipStuffing();
#endif
return false;
}
| null | null | 195,741
|
246493896187105005444818505983819497172
| 150
|
Fixed handling of empty JPEG-LS scans.
|
other
|
gpac
|
37592ad86c6ca934d34740012213e467acc4a3b0
| 1
|
static GF_Err gf_isom_parse_movie_boxes_internal(GF_ISOFile *mov, u32 *boxType, u64 *bytesMissing, Bool progressive_mode)
{
GF_Box *a;
u64 totSize, mdat_end=0;
GF_Err e = GF_OK;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (mov->single_moof_mode && mov->single_moof_state == 2) {
return e;
}
/*restart from where we stopped last*/
totSize = mov->current_top_box_start;
if (mov->bytes_removed) {
assert(totSize >= mov->bytes_removed);
totSize -= mov->bytes_removed;
}
gf_bs_seek(mov->movieFileMap->bs, totSize);
#endif
/*while we have some data, parse our boxes*/
while (gf_bs_available(mov->movieFileMap->bs)) {
*bytesMissing = 0;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
mov->current_top_box_start = gf_bs_get_position(mov->movieFileMap->bs) + mov->bytes_removed;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[iso file] Parsing a top-level box at position %d\n", mov->current_top_box_start));
#endif
e = gf_isom_parse_root_box(&a, mov->movieFileMap->bs, boxType, bytesMissing, progressive_mode);
if (e >= 0) {
} else if (e == GF_ISOM_INCOMPLETE_FILE) {
/*our mdat is incomplete, only valid for READ ONLY files...*/
if (mov->openMode != GF_ISOM_OPEN_READ) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Incomplete MDAT while file is not read-only\n"));
return GF_ISOM_INVALID_FILE;
}
if ((mov->openMode == GF_ISOM_OPEN_READ) && !progressive_mode) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Incomplete file while reading for dump - aborting parsing\n"));
break;
}
return e;
} else {
return e;
}
switch (a->type) {
/*MOOV box*/
case GF_ISOM_BOX_TYPE_MOOV:
if (mov->moov) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate MOOV detected!\n"));
gf_isom_box_del(a);
return GF_ISOM_INVALID_FILE;
}
mov->moov = (GF_MovieBox *)a;
mov->original_moov_offset = mov->current_top_box_start;
/*set our pointer to the movie*/
mov->moov->mov = mov;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (mov->moov->mvex) mov->moov->mvex->mov = mov;
#ifdef GF_ENABLE_CTRN
if (! (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) {
gf_isom_setup_traf_inheritance(mov);
}
#endif
#endif
e = gf_list_add(mov->TopBoxes, a);
if (e) return e;
totSize += a->size;
if (!mov->moov->mvhd) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MovieHeaderBox\n"));
return GF_ISOM_INVALID_FILE;
}
if (mov->meta) {
gf_isom_meta_restore_items_ref(mov, mov->meta);
}
//dump senc info in dump mode
if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) {
u32 k;
for (k=0; k<gf_list_count(mov->moov->trackList); k++) {
GF_TrackBox *trak = (GF_TrackBox *)gf_list_get(mov->moov->trackList, k);
if (trak->sample_encryption) {
e = senc_Parse(mov->movieFileMap->bs, trak, NULL, trak->sample_encryption);
if (e) return e;
}
}
} else {
u32 k;
for (k=0; k<gf_list_count(mov->moov->trackList); k++) {
GF_TrackBox *trak = (GF_TrackBox *)gf_list_get(mov->moov->trackList, k);
if (trak->Media->information->sampleTable->sampleGroups) {
convert_compact_sample_groups(trak->Media->information->sampleTable->child_boxes, trak->Media->information->sampleTable->sampleGroups);
}
}
}
if (mdat_end && mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) ) {
gf_isom_push_mdat_end(mov, mdat_end);
mdat_end=0;
}
break;
/*META box*/
case GF_ISOM_BOX_TYPE_META:
if (mov->meta) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate META detected!\n"));
gf_isom_box_del(a);
return GF_ISOM_INVALID_FILE;
}
mov->meta = (GF_MetaBox *)a;
mov->original_meta_offset = mov->current_top_box_start;
e = gf_list_add(mov->TopBoxes, a);
if (e) {
return e;
}
totSize += a->size;
gf_isom_meta_restore_items_ref(mov, mov->meta);
break;
/*we only keep the MDAT in READ for dump purposes*/
case GF_ISOM_BOX_TYPE_MDAT:
if (!mov->first_data_toplevel_offset) {
mov->first_data_toplevel_offset = mov->current_top_box_start;
mov->first_data_toplevel_size = a->size;
}
totSize += a->size;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (mov->emsgs) {
gf_isom_box_array_del(mov->emsgs);
mov->emsgs = NULL;
}
#endif
if (mov->openMode == GF_ISOM_OPEN_READ) {
if (!mov->mdat) {
mov->mdat = (GF_MediaDataBox *) a;
e = gf_list_add(mov->TopBoxes, mov->mdat);
if (e) {
return e;
}
}
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
else if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) gf_list_add(mov->TopBoxes, a);
#endif
else gf_isom_box_del(a); //in other modes we don't care
if (mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) ) {
mdat_end = gf_bs_get_position(mov->movieFileMap->bs);
if (mov->moov) {
gf_isom_push_mdat_end(mov, mdat_end);
mdat_end=0;
}
}
}
/*if we don't have any MDAT yet, create one (edit-write mode)
We only work with one mdat, but we're putting it at the place
of the first mdat found when opening a file for editing*/
else if (!mov->mdat && (mov->openMode != GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS)) {
gf_isom_box_del(a);
mov->mdat = (GF_MediaDataBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_MDAT);
if (!mov->mdat) return GF_OUT_OF_MEM;
e = gf_list_add(mov->TopBoxes, mov->mdat);
if (e) {
return e;
}
} else {
gf_isom_box_del(a);
}
break;
case GF_ISOM_BOX_TYPE_FTYP:
/*ONE AND ONLY ONE FTYP*/
if (mov->brand) {
gf_isom_box_del(a);
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 'ftyp' detected!\n"));
return GF_ISOM_INVALID_FILE;
}
mov->brand = (GF_FileTypeBox *)a;
totSize += a->size;
e = gf_list_add(mov->TopBoxes, a);
if (e) return e;
break;
case GF_ISOM_BOX_TYPE_OTYP:
/*ONE AND ONLY ONE OTYP*/
if (mov->otyp) {
gf_isom_box_del(a);
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 'otyp' detected!\n"));
return GF_ISOM_INVALID_FILE;
}
if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) {
mov->otyp = (GF_Box *)a;
totSize += a->size;
e = gf_list_add(mov->TopBoxes, a);
if (e) return e;
} else {
GF_FileTypeBox *brand = (GF_FileTypeBox *) gf_isom_box_find_child(a->child_boxes, GF_ISOM_BOX_TYPE_FTYP);
if (brand) {
s32 pos;
gf_list_del_item(a->child_boxes, brand);
pos = gf_list_del_item(mov->TopBoxes, mov->brand);
gf_isom_box_del((GF_Box *) mov->brand);
mov->brand = brand;
if (pos<0) pos=0;
gf_list_insert(mov->TopBoxes, brand, pos);
}
gf_isom_box_del(a);
}
break;
case GF_ISOM_BOX_TYPE_PDIN:
/*ONE AND ONLY ONE PDIN*/
if (mov->pdin) {
gf_isom_box_del(a);
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 'pdin'' detected!\n"));
return GF_ISOM_INVALID_FILE;
}
mov->pdin = (GF_ProgressiveDownloadBox *) a;
totSize += a->size;
e = gf_list_add(mov->TopBoxes, a);
if (e) return e;
break;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
case GF_ISOM_BOX_TYPE_STYP:
{
u32 brand = ((GF_FileTypeBox *)a)->majorBrand;
switch (brand) {
case GF_ISOM_BRAND_SISX:
case GF_ISOM_BRAND_RISX:
case GF_ISOM_BRAND_SSSS:
mov->is_index_segment = GF_TRUE;
break;
default:
break;
}
}
/*fall-through*/
case GF_ISOM_BOX_TYPE_SIDX:
case GF_ISOM_BOX_TYPE_SSIX:
if (mov->moov && !mov->first_data_toplevel_offset) {
mov->first_data_toplevel_offset = mov->current_top_box_start;
mov->first_data_toplevel_size = a->size;
}
totSize += a->size;
if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) {
e = gf_list_add(mov->TopBoxes, a);
if (e) return e;
} else if (mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) && (mov->openMode!=GF_ISOM_OPEN_KEEP_FRAGMENTS)
) {
if (a->type==GF_ISOM_BOX_TYPE_SIDX) {
if (mov->root_sidx) gf_isom_box_del( (GF_Box *) mov->root_sidx);
mov->root_sidx = (GF_SegmentIndexBox *) a;
mov->sidx_start_offset = mov->current_top_box_start;
mov->sidx_end_offset = gf_bs_get_position(mov->movieFileMap->bs);
}
else if (a->type==GF_ISOM_BOX_TYPE_STYP) {
mov->styp_start_offset = mov->current_top_box_start;
if (mov->seg_styp) gf_isom_box_del(mov->seg_styp);
mov->seg_styp = a;
} else if (a->type==GF_ISOM_BOX_TYPE_SSIX) {
if (mov->seg_ssix) gf_isom_box_del(mov->seg_ssix);
mov->seg_ssix = a;
} else {
gf_isom_box_del(a);
}
gf_isom_push_mdat_end(mov, mov->current_top_box_start);
} else if (!mov->NextMoofNumber && (a->type==GF_ISOM_BOX_TYPE_SIDX)) {
if (mov->main_sidx) gf_isom_box_del( (GF_Box *) mov->main_sidx);
mov->main_sidx = (GF_SegmentIndexBox *) a;
mov->main_sidx_end_pos = mov->current_top_box_start + a->size;
} else {
gf_isom_box_del(a);
}
break;
case GF_ISOM_BOX_TYPE_MOOF:
//no support for inplace rewrite for fragmented files
gf_isom_disable_inplace_rewrite(mov);
if (!mov->moov) {
GF_LOG(mov->moof ? GF_LOG_DEBUG : GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Movie fragment but no moov (yet) - possibly broken parsing!\n"));
}
if (mov->single_moof_mode) {
mov->single_moof_state++;
if (mov->single_moof_state > 1) {
gf_isom_box_del(a);
return GF_OK;
}
}
((GF_MovieFragmentBox *)a)->mov = mov;
totSize += a->size;
mov->moof = (GF_MovieFragmentBox *) a;
/*some smooth streaming streams contain a SDTP under the TRAF: this is incorrect, convert it*/
FixTrackID(mov);
if (! (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) {
FixSDTPInTRAF(mov->moof);
} else {
u32 k;
for (k=0; k<gf_list_count(mov->moof->TrackList); k++) {
GF_TrackFragmentBox *traf = (GF_TrackFragmentBox *)gf_list_get(mov->moof->TrackList, k);
if (traf->sampleGroups) {
convert_compact_sample_groups(traf->child_boxes, traf->sampleGroups);
}
}
}
/*read & debug: store at root level*/
if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) {
u32 k;
gf_list_add(mov->TopBoxes, a);
/*also update pointers to trex for debug*/
if (mov->moov) {
for (k=0; k<gf_list_count(mov->moof->TrackList); k++) {
GF_TrackFragmentBox *traf = gf_list_get(mov->moof->TrackList, k);
if (traf->tfhd && mov->moov->mvex && mov->moov->mvex->TrackExList) {
GF_TrackBox *trak = gf_isom_get_track_from_id(mov->moov, traf->tfhd->trackID);
u32 j=0;
while ((traf->trex = (GF_TrackExtendsBox*)gf_list_enum(mov->moov->mvex->TrackExList, &j))) {
if (traf->trex->trackID == traf->tfhd->trackID) {
if (!traf->trex->track) traf->trex->track = trak;
break;
}
traf->trex = NULL;
}
}
//we should only parse senc/psec when no saiz/saio is present, otherwise we fetch the info directly
if (traf->trex && traf->tfhd && traf->trex->track && traf->sample_encryption) {
GF_TrackBox *trak = GetTrackbyID(mov->moov, traf->tfhd->trackID);
if (trak) {
trak->current_traf_stsd_idx = traf->tfhd->sample_desc_index ? traf->tfhd->sample_desc_index : traf->trex->def_sample_desc_index;
e = senc_Parse(mov->movieFileMap->bs, trak, traf, traf->sample_encryption);
if (e) return e;
trak->current_traf_stsd_idx = 0;
}
}
}
} else {
for (k=0; k<gf_list_count(mov->moof->TrackList); k++) {
GF_TrackFragmentBox *traf = gf_list_get(mov->moof->TrackList, k);
if (traf->sample_encryption) {
e = senc_Parse(mov->movieFileMap->bs, NULL, traf, traf->sample_encryption);
if (e) return e;
}
}
}
} else if (mov->openMode==GF_ISOM_OPEN_KEEP_FRAGMENTS) {
mov->NextMoofNumber = mov->moof->mfhd->sequence_number+1;
mov->moof = NULL;
gf_isom_box_del(a);
} else {
/*merge all info*/
e = MergeFragment((GF_MovieFragmentBox *)a, mov);
gf_isom_box_del(a);
if (e) return e;
}
//done with moov
if (mov->root_sidx) {
gf_isom_box_del((GF_Box *) mov->root_sidx);
mov->root_sidx = NULL;
}
if (mov->root_ssix) {
gf_isom_box_del(mov->seg_ssix);
mov->root_ssix = NULL;
}
if (mov->seg_styp) {
gf_isom_box_del(mov->seg_styp);
mov->seg_styp = NULL;
}
mov->sidx_start_offset = 0;
mov->sidx_end_offset = 0;
mov->styp_start_offset = 0;
break;
#endif
case GF_ISOM_BOX_TYPE_UNKNOWN:
{
GF_UnknownBox *box = (GF_UnknownBox*)a;
if (box->original_4cc == GF_ISOM_BOX_TYPE_JP) {
u8 *c = (u8 *) box->data;
if ((box->dataSize==4) && (GF_4CC(c[0],c[1],c[2],c[3])==(u32)0x0D0A870A))
mov->is_jp2 = 1;
gf_isom_box_del(a);
} else {
e = gf_list_add(mov->TopBoxes, a);
if (e) return e;
}
}
break;
case GF_ISOM_BOX_TYPE_PRFT:
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (!(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) {
//keep the last one read
if (mov->last_producer_ref_time)
gf_isom_box_del(a);
else
mov->last_producer_ref_time = (GF_ProducerReferenceTimeBox *)a;
break;
}
#endif
//fallthrough
case GF_ISOM_BOX_TYPE_EMSG:
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (! (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) {
if (!mov->emsgs) mov->emsgs = gf_list_new();
gf_list_add(mov->emsgs, a);
break;
}
#endif
case GF_ISOM_BOX_TYPE_MFRA:
case GF_ISOM_BOX_TYPE_MFRO:
//only keep for dump mode, otherwise we ignore these boxes and don't want to carry them over in a non-fragmented file
if (! (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) {
totSize += a->size;
gf_isom_box_del(a);
break;
}
default:
totSize += a->size;
e = gf_list_add(mov->TopBoxes, a);
if (e) return e;
break;
}
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
/*remember where we left, in case we append an entire number of movie fragments*/
mov->current_top_box_start = gf_bs_get_position(mov->movieFileMap->bs) + mov->bytes_removed;
#endif
}
/*we need at least moov or meta*/
if (!mov->moov && !mov->meta
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
&& !mov->moof && !mov->is_index_segment
#endif
) {
return GF_ISOM_INCOMPLETE_FILE;
}
/*we MUST have movie header*/
if (!gf_opts_get_bool("core", "no-check")) {
if (mov->moov && !mov->moov->mvhd) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MVHD in MOOV!\n"));
return GF_ISOM_INVALID_FILE;
}
/*we MUST have meta handler*/
if (mov->meta && !mov->meta->handler) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing handler in META!\n"));
return GF_ISOM_INVALID_FILE;
}
}
#ifndef GPAC_DISABLE_ISOM_WRITE
if (mov->moov) {
/*set the default interleaving time*/
mov->interleavingTime = mov->moov->mvhd->timeScale;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
/*in edit mode with successfully loaded fragments, delete all fragment signaling since
file is no longer fragmented*/
if ((mov->openMode > GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS) && mov->moov->mvex) {
gf_isom_box_del_parent(&mov->moov->child_boxes, (GF_Box *)mov->moov->mvex);
mov->moov->mvex = NULL;
}
#endif
}
//create a default mdat if none was found
if (!mov->mdat && (mov->openMode != GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS)) {
mov->mdat = (GF_MediaDataBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_MDAT);
if (!mov->mdat) return GF_OUT_OF_MEM;
e = gf_list_add(mov->TopBoxes, mov->mdat);
if (e) return e;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/
return GF_OK;
}
| null | null | 195,742
|
320809447116813169681723225620850413792
| 498
|
fixed #2163
|
other
|
tensorflow
|
02cc160e29d20631de3859c6653184e3f876b9d7
| 1
|
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
// Create a new SparseTensorSliceDatasetOp::Dataset, insert it in
// the step container, and return it as the output.
const Tensor* indices;
OP_REQUIRES_OK(ctx, ctx->input("indices", &indices));
const Tensor* values;
OP_REQUIRES_OK(ctx, ctx->input("values", &values));
const Tensor* dense_shape;
OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
indices->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
values->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
dense_shape->shape().DebugString()));
// We currently ensure that `sparse_tensor` is ordered in the
// batch dimension.
// TODO(mrry): Investigate ways to avoid this unconditional check
// if we can be sure that the sparse tensor was produced in an
// appropriate order (e.g. by `tf.parse_example()` or a Dataset
// that batches elements into rows of a SparseTensor).
int64_t previous_batch_index = -1;
for (int64_t i = 0; i < indices->dim_size(0); ++i) {
int64_t next_batch_index = indices->matrix<int64>()(i, 0);
OP_REQUIRES(
ctx, next_batch_index >= previous_batch_index,
errors::Unimplemented("The SparseTensor must be ordered in the batch "
"dimension; handling arbitrarily ordered input "
"is not currently supported."));
previous_batch_index = next_batch_index;
}
gtl::InlinedVector<int64, 8> std_order(dense_shape->NumElements(), 0);
sparse::SparseTensor tensor;
OP_REQUIRES_OK(
ctx, sparse::SparseTensor::Create(
*indices, *values, TensorShape(dense_shape->vec<int64>()),
std_order, &tensor));
*output = new Dataset<T>(ctx, std::move(tensor));
}
| null | null | 195,752
|
171363723210961322415209119251133937799
| 47
|
Prevent nullptr deref in SparseTensorSliceDataset
The arguments must determine a valid sparse tensor. This means that when the indices are empty, the values must be empty too (and the reverse).
Also added a test by modifying the existing empty-sparse-tensor test to run with an invalid sparse tensor input.
PiperOrigin-RevId: 388562757
Change-Id: Id8b54cd7c2316025b4f9a77292c8fb5344d17609
|
other
|
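A hedged sketch of the consistency check the commit message describes, with plain sizes standing in for the real Tensor arguments (the helper name is invented):

#include <cstddef>
#include <stdexcept>

// Sketch only: an empty indices matrix must come with an empty
// values vector, and vice versa, or later code dereferences null.
static void validate_sparse_counts(size_t num_index_rows, size_t num_values)
{
    if ((num_index_rows == 0) != (num_values == 0))
        throw std::invalid_argument(
            "indices and values must be both empty or both non-empty");
}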
tensorflow
|
8a84f7a2b5a2b27ecf88d25bad9ac777cd2f7992
| 1
|
void Compute(OpKernelContext* context) override {
// Only create one if one does not already exist. Report status for all
// other exceptions. If one already exists, it unrefs the new one.
// An epsilon value of zero could cause performance issues and is
// therefore disallowed.
const Tensor* epsilon_t;
OP_REQUIRES_OK(context, context->input(kEpsilonName, &epsilon_t));
float epsilon = epsilon_t->scalar<float>()();
OP_REQUIRES(
context, epsilon > 0,
errors::InvalidArgument("An epsilon value of zero is not allowed."));
const Tensor* num_streams_t;
OP_REQUIRES_OK(context, context->input(kNumStreamsName, &num_streams_t));
int64_t num_streams = num_streams_t->scalar<int64>()();
auto result =
new QuantileStreamResource(epsilon, max_elements_, num_streams);
auto status = CreateResource(context, HandleFromInput(context, 0), result);
if (!status.ok() && status.code() != tensorflow::error::ALREADY_EXISTS) {
OP_REQUIRES(context, false, status);
}
}
| null | null | 195,768
|
89054104973263516632079780397171243314
| 23
|
Ensure num_streams >= 0 in tf.raw_ops.BoostedTreesCreateQuantileStreamResource
PiperOrigin-RevId: 387452765
Change-Id: I9990c760e177fabca6a3b9b4612ceeaeeba51495
|
other
|
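The function above validates epsilon but uses num_streams unchecked; the commit message says the fix rejects negative values. A hedged standalone sketch of that validation (invented stand-in for the OP_REQUIRES check):

#include <cstdint>
#include <stdexcept>

// Sketch only: reject a negative stream count before it is used
// to size allocations.
static void check_num_streams(int64_t num_streams)
{
    if (num_streams < 0)
        throw std::invalid_argument("num_streams must be >= 0");
}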
curl
|
3a09fbb7f264c67c438d01a30669ce325aa508e2
| 1
|
CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
const char *enclist, int maybechunked)
{
struct SingleRequest *k = &data->req;
do {
const char *name;
size_t namelen;
/* Parse a single encoding name. */
while(ISSPACE(*enclist) || *enclist == ',')
enclist++;
name = enclist;
for(namelen = 0; *enclist && *enclist != ','; enclist++)
if(!ISSPACE(*enclist))
namelen = enclist - name + 1;
/* Special case: chunked encoding is handled at the reader level. */
if(maybechunked && namelen == 7 && strncasecompare(name, "chunked", 7)) {
k->chunk = TRUE; /* chunks coming our way. */
Curl_httpchunk_init(data); /* init our chunky engine. */
}
else if(namelen) {
const struct content_encoding *encoding = find_encoding(name, namelen);
struct contenc_writer *writer;
if(!k->writer_stack) {
k->writer_stack = new_unencoding_writer(data, &client_encoding, NULL);
if(!k->writer_stack)
return CURLE_OUT_OF_MEMORY;
}
if(!encoding)
encoding = &error_encoding; /* Defer error at stack use. */
/* Stack the unencoding stage. */
writer = new_unencoding_writer(data, encoding, k->writer_stack);
if(!writer)
return CURLE_OUT_OF_MEMORY;
k->writer_stack = writer;
}
} while(*enclist);
return CURLE_OK;
}
| null | null | 195,774
|
249376959292636853981047045335373252879
| 48
|
content_encoding: return error on too many compression steps
The max allowed steps is arbitrarily set to 5.
Bug: https://curl.se/docs/CVE-2022-32206.html
CVE-2022-32206
Reported-by: Harry Sintonen
Closes #9049
|
other
|
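The curl function above stacks one unencoding writer per listed encoding with no upper bound; per the commit message, the CVE-2022-32206 fix caps the stack at 5 steps. A hedged sketch of such a depth check, with a stand-in struct rather than curl's real types:

#include <stdexcept>

struct writer { writer *downstream; };

// Sketch only: walk the writer stack and refuse to grow it past a
// small fixed limit (the commit picks 5).
static void check_stack_depth(const writer *top, int max_steps = 5)
{
    int depth = 0;
    for (const writer *w = top; w != nullptr; w = w->downstream)
        if (++depth > max_steps)
            throw std::runtime_error("too many compression steps");
}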
deark
|
62acb7753b0e3c0d3ab3c15057b0a65222313334
| 1
|
void fmtutil_macbitmap_read_pixmap_only_fields(deark *c, dbuf *f, struct fmtutil_macbitmap_info *bi,
i64 pos)
{
i64 pixmap_version;
i64 pack_size;
i64 plane_bytes;
i64 n;
de_dbg(c, "additional PixMap header fields, at %d", (int)pos);
de_dbg_indent(c, 1);
pixmap_version = dbuf_getu16be(f, pos+0);
de_dbg(c, "pixmap version: %d", (int)pixmap_version);
bi->packing_type = dbuf_getu16be(f, pos+2);
de_dbg(c, "packing type: %d", (int)bi->packing_type);
pack_size = dbuf_getu32be(f, pos+4);
de_dbg(c, "pixel data length: %d", (int)pack_size);
bi->hdpi = pict_read_fixed(f, pos+8);
bi->vdpi = pict_read_fixed(f, pos+12);
de_dbg(c, "dpi: %.2f"DE_CHAR_TIMES"%.2f", bi->hdpi, bi->vdpi);
bi->pixeltype = dbuf_getu16be(f, pos+16);
bi->pixelsize = dbuf_getu16be(f, pos+18);
bi->cmpcount = dbuf_getu16be(f, pos+20);
bi->cmpsize = dbuf_getu16be(f, pos+22);
de_dbg(c, "pixel type=%d, bits/pixel=%d, components/pixel=%d, bits/comp=%d",
(int)bi->pixeltype, (int)bi->pixelsize, (int)bi->cmpcount, (int)bi->cmpsize);
bi->pdwidth = (bi->rowbytes*8)/bi->pixelsize;
if(bi->pdwidth < bi->npwidth) {
bi->pdwidth = bi->npwidth;
}
plane_bytes = dbuf_getu32be(f, pos+24);
de_dbg(c, "plane bytes: %d", (int)plane_bytes);
bi->pmTable = (u32)dbuf_getu32be(f, pos+28);
de_dbg(c, "pmTable: 0x%08x", (unsigned int)bi->pmTable);
n = dbuf_getu32be(f, pos+32);
de_dbg(c, "pmReserved: 0x%08x", (unsigned int)n);
de_dbg_indent(c, -1);
}
| null | null | 195,800
|
91134474765832554215432155878217095059
| 47
|
pict,macrsrc: Fixed a bug that could cause division by 0
Found by F. Çelik.
|
other
|
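In the deark function above, pixelsize is read straight from the file and then used as a divisor for pdwidth, which the commit message identifies as a division-by-zero bug. A hedged sketch of the kind of guard it implies (invented helper name):

// Sketch only: validate the untrusted divisor before dividing,
// falling back to the nominal width otherwise.
static long safe_pdwidth(long rowbytes, long pixelsize, long npwidth)
{
    if (pixelsize <= 0)
        return npwidth;  // untrusted divisor: fall back instead of dividing
    long pdwidth = (rowbytes * 8) / pixelsize;
    return (pdwidth < npwidth) ? npwidth : pdwidth;
}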
php-src
|
0c8a2a2cd1056b7dc403eacb5d2c0eec6ce47c6f
| 1
|
*/
static void php_wddx_pop_element(void *user_data, const XML_Char *name)
{
st_entry *ent1, *ent2;
wddx_stack *stack = (wddx_stack *)user_data;
HashTable *target_hash;
zend_class_entry *pce;
zval obj;
/* OBJECTS_FIXME */
if (stack->top == 0) {
return;
}
if (!strcmp((char *)name, EL_STRING) || !strcmp((char *)name, EL_NUMBER) ||
!strcmp((char *)name, EL_BOOLEAN) || !strcmp((char *)name, EL_NULL) ||
!strcmp((char *)name, EL_ARRAY) || !strcmp((char *)name, EL_STRUCT) ||
!strcmp((char *)name, EL_RECORDSET) || !strcmp((char *)name, EL_BINARY) ||
!strcmp((char *)name, EL_DATETIME)) {
wddx_stack_top(stack, (void**)&ent1);
if (Z_TYPE(ent1->data) == IS_UNDEF) {
if (stack->top > 1) {
stack->top--;
} else {
stack->done = 1;
}
efree(ent1);
return;
}
if (!strcmp((char *)name, EL_BINARY)) {
zend_string *new_str = php_base64_decode(
(unsigned char *)Z_STRVAL(ent1->data), Z_STRLEN(ent1->data));
zval_ptr_dtor(&ent1->data);
ZVAL_STR(&ent1->data, new_str);
}
/* Call __wakeup() method on the object. */
if (Z_TYPE(ent1->data) == IS_OBJECT) {
zval fname, retval;
ZVAL_STRING(&fname, "__wakeup");
call_user_function_ex(NULL, &ent1->data, &fname, &retval, 0, 0, 0, NULL);
zval_ptr_dtor(&fname);
zval_ptr_dtor(&retval);
}
if (stack->top > 1) {
stack->top--;
wddx_stack_top(stack, (void**)&ent2);
/* if non-existent field */
if (ent2->type == ST_FIELD && Z_ISUNDEF(ent2->data)) {
zval_ptr_dtor(&ent1->data);
efree(ent1);
return;
}
if (Z_TYPE(ent2->data) == IS_ARRAY || Z_TYPE(ent2->data) == IS_OBJECT) {
target_hash = HASH_OF(&ent2->data);
if (ent1->varname) {
if (!strcmp(ent1->varname, PHP_CLASS_NAME_VAR) &&
Z_TYPE(ent1->data) == IS_STRING && Z_STRLEN(ent1->data) &&
ent2->type == ST_STRUCT && Z_TYPE(ent2->data) == IS_ARRAY) {
zend_bool incomplete_class = 0;
zend_str_tolower(Z_STRVAL(ent1->data), Z_STRLEN(ent1->data));
zend_string_forget_hash_val(Z_STR(ent1->data));
if ((pce = zend_hash_find_ptr(EG(class_table), Z_STR(ent1->data))) == NULL) {
incomplete_class = 1;
pce = PHP_IC_ENTRY;
}
/* Initialize target object */
object_init_ex(&obj, pce);
/* Merge current hashtable with object's default properties */
zend_hash_merge(Z_OBJPROP(obj),
Z_ARRVAL(ent2->data),
zval_add_ref, 0);
if (incomplete_class) {
php_store_class_name(&obj, Z_STRVAL(ent1->data), Z_STRLEN(ent1->data));
}
/* Clean up old array entry */
zval_ptr_dtor(&ent2->data);
/* Set stack entry to point to the newly created object */
ZVAL_COPY_VALUE(&ent2->data, &obj);
/* Clean up class name var entry */
zval_ptr_dtor(&ent1->data);
} else if (Z_TYPE(ent2->data) == IS_OBJECT) {
zend_class_entry *old_scope = EG(scope);
EG(scope) = Z_OBJCE(ent2->data);
add_property_zval(&ent2->data, ent1->varname, &ent1->data);
if Z_REFCOUNTED(ent1->data) Z_DELREF(ent1->data);
EG(scope) = old_scope;
} else {
zend_symtable_str_update(target_hash, ent1->varname, strlen(ent1->varname), &ent1->data);
}
efree(ent1->varname);
} else {
zend_hash_next_index_insert(target_hash, &ent1->data);
}
}
efree(ent1);
} else {
stack->done = 1;
}
} else if (!strcmp((char *)name, EL_VAR) && stack->varname) {
efree(stack->varname);
stack->varname = NULL;
} else if (!strcmp((char *)name, EL_FIELD)) {
st_entry *ent;
wddx_stack_top(stack, (void **)&ent);
efree(ent);
stack->top--;
}
| null | null | 195,801
|
281179289723329197214540489283702639538
| 125
|
Fix for bug #72790 and bug #72799
(cherry picked from commit a14fdb9746262549bbbb96abb87338bacd147e1b)
Conflicts:
ext/wddx/wddx.c
|
other
|
mongo
|
6518b22420c5bbd92c42caf907671c3a2b140bb6
| 1
|
DocumentSource::GetNextResult DocumentSourceUnionWith::doGetNext() {
if (!_pipeline) {
// We must have already been disposed, so we're finished.
return GetNextResult::makeEOF();
}
if (_executionState == ExecutionProgress::kIteratingSource) {
auto nextInput = pSource->getNext();
if (!nextInput.isEOF()) {
return nextInput;
}
_executionState = ExecutionProgress::kStartingSubPipeline;
// All documents from the base collection have been returned, switch to iterating the sub-
// pipeline by falling through below.
}
if (_executionState == ExecutionProgress::kStartingSubPipeline) {
auto serializedPipe = _pipeline->serializeToBson();
LOGV2_DEBUG(23869,
1,
"$unionWith attaching cursor to pipeline {pipeline}",
"pipeline"_attr = serializedPipe);
// $$SEARCH_META can be set during runtime earlier in the pipeline, and therefore must be
// copied to the subpipeline manually.
if (pExpCtx->variables.hasConstantValue(Variables::kSearchMetaId)) {
_pipeline->getContext()->variables.setReservedValue(
Variables::kSearchMetaId,
pExpCtx->variables.getValue(Variables::kSearchMetaId, Document()),
true);
}
try {
_pipeline =
pExpCtx->mongoProcessInterface->attachCursorSourceToPipeline(_pipeline.release());
_executionState = ExecutionProgress::kIteratingSubPipeline;
} catch (const ExceptionFor<ErrorCodes::CommandOnShardedViewNotSupportedOnMongod>& e) {
_pipeline = buildPipelineFromViewDefinition(
pExpCtx,
ExpressionContext::ResolvedNamespace{e->getNamespace(), e->getPipeline()},
serializedPipe);
LOGV2_DEBUG(4556300,
3,
"$unionWith found view definition. ns: {ns}, pipeline: {pipeline}. New "
"$unionWith sub-pipeline: {new_pipe}",
"ns"_attr = e->getNamespace(),
"pipeline"_attr = Value(e->getPipeline()),
"new_pipe"_attr = _pipeline->serializeToBson());
return doGetNext();
}
}
auto res = _pipeline->getNext();
if (res)
return std::move(*res);
// Record the plan summary stats after $unionWith operation is done.
recordPlanSummaryStats(*_pipeline);
_executionState = ExecutionProgress::kFinished;
return GetNextResult::makeEOF();
}
| null | null | 195,804
|
77189124809087292115725641020447994671
| 60
|
SERVER-58203 factor out logging statements into helper functions
|
other
|
binutils-gdb
|
e3e5ae049371a27fd1737aba946fe26d06e029b5
| 1
|
display_debug_names (struct dwarf_section *section, void *file)
{
unsigned char *hdrptr = section->start;
dwarf_vma unit_length;
unsigned char *unit_start;
const unsigned char *const section_end = section->start + section->size;
unsigned char *unit_end;
introduce (section, false);
load_debug_section_with_follow (str, file);
for (; hdrptr < section_end; hdrptr = unit_end)
{
unsigned int offset_size;
uint16_t dwarf_version, padding;
uint32_t comp_unit_count, local_type_unit_count, foreign_type_unit_count;
uint64_t bucket_count, name_count, abbrev_table_size;
uint32_t augmentation_string_size;
unsigned int i;
bool augmentation_printable;
const char *augmentation_string;
size_t total;
unit_start = hdrptr;
/* Get and check the length of the block. */
SAFE_BYTE_GET_AND_INC (unit_length, hdrptr, 4, section_end);
if (unit_length == 0xffffffff)
{
/* This section is 64-bit DWARF. */
SAFE_BYTE_GET_AND_INC (unit_length, hdrptr, 8, section_end);
offset_size = 8;
}
else
offset_size = 4;
if (unit_length > (size_t) (section_end - hdrptr)
|| unit_length < 2 + 2 + 4 * 7)
{
too_short:
warn (_("Debug info is corrupted, %s header at %#lx has length %s\n"),
section->name,
(unsigned long) (unit_start - section->start),
dwarf_vmatoa ("x", unit_length));
return 0;
}
unit_end = hdrptr + unit_length;
/* Get and check the version number. */
SAFE_BYTE_GET_AND_INC (dwarf_version, hdrptr, 2, unit_end);
printf (_("Version %ld\n"), (long) dwarf_version);
/* Prior versions did not exist, and future versions may not be
backwards compatible. */
if (dwarf_version != 5)
{
warn (_("Only DWARF version 5 .debug_names "
"is currently supported.\n"));
return 0;
}
SAFE_BYTE_GET_AND_INC (padding, hdrptr, 2, unit_end);
if (padding != 0)
warn (_("Padding field of .debug_names must be 0 (found 0x%x)\n"),
padding);
SAFE_BYTE_GET_AND_INC (comp_unit_count, hdrptr, 4, unit_end);
if (comp_unit_count == 0)
warn (_("Compilation unit count must be >= 1 in .debug_names\n"));
SAFE_BYTE_GET_AND_INC (local_type_unit_count, hdrptr, 4, unit_end);
SAFE_BYTE_GET_AND_INC (foreign_type_unit_count, hdrptr, 4, unit_end);
SAFE_BYTE_GET_AND_INC (bucket_count, hdrptr, 4, unit_end);
SAFE_BYTE_GET_AND_INC (name_count, hdrptr, 4, unit_end);
SAFE_BYTE_GET_AND_INC (abbrev_table_size, hdrptr, 4, unit_end);
SAFE_BYTE_GET_AND_INC (augmentation_string_size, hdrptr, 4, unit_end);
if (augmentation_string_size % 4 != 0)
{
warn (_("Augmentation string length %u must be rounded up "
"to a multiple of 4 in .debug_names.\n"),
augmentation_string_size);
augmentation_string_size += (-augmentation_string_size) & 3;
}
if (augmentation_string_size > (size_t) (unit_end - hdrptr))
goto too_short;
printf (_("Augmentation string:"));
augmentation_printable = true;
augmentation_string = (const char *) hdrptr;
for (i = 0; i < augmentation_string_size; i++)
{
unsigned char uc;
SAFE_BYTE_GET_AND_INC (uc, hdrptr, 1, unit_end);
printf (" %02x", uc);
if (uc != 0 && !ISPRINT (uc))
augmentation_printable = false;
}
if (augmentation_printable)
{
printf (" (\"");
for (i = 0;
i < augmentation_string_size && augmentation_string[i];
++i)
putchar (augmentation_string[i]);
printf ("\")");
}
putchar ('\n');
printf (_("CU table:\n"));
if (_mul_overflow (comp_unit_count, offset_size, &total)
|| total > (size_t) (unit_end - hdrptr))
goto too_short;
for (i = 0; i < comp_unit_count; i++)
{
uint64_t cu_offset;
SAFE_BYTE_GET_AND_INC (cu_offset, hdrptr, offset_size, unit_end);
printf (_("[%3u] 0x%lx\n"), i, (unsigned long) cu_offset);
}
putchar ('\n');
printf (_("TU table:\n"));
if (_mul_overflow (local_type_unit_count, offset_size, &total)
|| total > (size_t) (unit_end - hdrptr))
goto too_short;
for (i = 0; i < local_type_unit_count; i++)
{
uint64_t tu_offset;
SAFE_BYTE_GET_AND_INC (tu_offset, hdrptr, offset_size, unit_end);
printf (_("[%3u] 0x%lx\n"), i, (unsigned long) tu_offset);
}
putchar ('\n');
printf (_("Foreign TU table:\n"));
if (_mul_overflow (foreign_type_unit_count, 8, &total)
|| total > (size_t) (unit_end - hdrptr))
goto too_short;
for (i = 0; i < foreign_type_unit_count; i++)
{
uint64_t signature;
SAFE_BYTE_GET_AND_INC (signature, hdrptr, 8, unit_end);
printf (_("[%3u] "), i);
print_dwarf_vma (signature, 8);
putchar ('\n');
}
putchar ('\n');
uint64_t xtra = (bucket_count * sizeof (uint32_t)
+ name_count * (sizeof (uint32_t) + 2 * offset_size)
+ abbrev_table_size);
if (xtra > (size_t) (unit_end - hdrptr))
{
warn (_("Entry pool offset (0x%lx) exceeds unit size 0x%lx "
"for unit 0x%lx in the debug_names\n"),
(long) xtra,
(long) (unit_end - unit_start),
(long) (unit_start - section->start));
return 0;
}
const uint32_t *const hash_table_buckets = (uint32_t *) hdrptr;
hdrptr += bucket_count * sizeof (uint32_t);
const uint32_t *const hash_table_hashes = (uint32_t *) hdrptr;
hdrptr += name_count * sizeof (uint32_t);
unsigned char *const name_table_string_offsets = hdrptr;
hdrptr += name_count * offset_size;
unsigned char *const name_table_entry_offsets = hdrptr;
hdrptr += name_count * offset_size;
unsigned char *const abbrev_table = hdrptr;
hdrptr += abbrev_table_size;
const unsigned char *const abbrev_table_end = hdrptr;
unsigned char *const entry_pool = hdrptr;
size_t buckets_filled = 0;
size_t bucketi;
for (bucketi = 0; bucketi < bucket_count; bucketi++)
{
const uint32_t bucket = hash_table_buckets[bucketi];
if (bucket != 0)
++buckets_filled;
}
printf (ngettext ("Used %zu of %lu bucket.\n",
"Used %zu of %lu buckets.\n",
bucket_count),
buckets_filled, (unsigned long) bucket_count);
if (bucket_count != 0)
{
uint32_t hash_prev = 0;
size_t hash_clash_count = 0;
size_t longest_clash = 0;
size_t this_length = 0;
size_t hashi;
for (hashi = 0; hashi < name_count; hashi++)
{
const uint32_t hash_this = hash_table_hashes[hashi];
if (hashi > 0)
{
if (hash_prev % bucket_count == hash_this % bucket_count)
{
++hash_clash_count;
++this_length;
longest_clash = MAX (longest_clash, this_length);
}
else
this_length = 0;
}
hash_prev = hash_this;
}
printf (_("Out of %lu items there are %zu bucket clashes"
" (longest of %zu entries).\n"),
(unsigned long) name_count, hash_clash_count, longest_clash);
assert (name_count == buckets_filled + hash_clash_count);
}
struct abbrev_lookup_entry
{
dwarf_vma abbrev_tag;
unsigned char *abbrev_lookup_ptr;
};
struct abbrev_lookup_entry *abbrev_lookup = NULL;
size_t abbrev_lookup_used = 0;
size_t abbrev_lookup_allocated = 0;
unsigned char *abbrevptr = abbrev_table;
for (;;)
{
dwarf_vma abbrev_tag;
READ_ULEB (abbrev_tag, abbrevptr, abbrev_table_end);
if (abbrev_tag == 0)
break;
if (abbrev_lookup_used == abbrev_lookup_allocated)
{
abbrev_lookup_allocated = MAX (0x100,
abbrev_lookup_allocated * 2);
abbrev_lookup = xrealloc (abbrev_lookup,
(abbrev_lookup_allocated
* sizeof (*abbrev_lookup)));
}
assert (abbrev_lookup_used < abbrev_lookup_allocated);
struct abbrev_lookup_entry *entry;
for (entry = abbrev_lookup;
entry < abbrev_lookup + abbrev_lookup_used;
entry++)
if (entry->abbrev_tag == abbrev_tag)
{
warn (_("Duplicate abbreviation tag %lu "
"in unit 0x%lx in the debug_names\n"),
(long) abbrev_tag, (long) (unit_start - section->start));
break;
}
entry = &abbrev_lookup[abbrev_lookup_used++];
entry->abbrev_tag = abbrev_tag;
entry->abbrev_lookup_ptr = abbrevptr;
/* Skip DWARF tag. */
SKIP_ULEB (abbrevptr, abbrev_table_end);
for (;;)
{
dwarf_vma xindex, form;
READ_ULEB (xindex, abbrevptr, abbrev_table_end);
READ_ULEB (form, abbrevptr, abbrev_table_end);
if (xindex == 0 && form == 0)
break;
}
}
printf (_("\nSymbol table:\n"));
uint32_t namei;
for (namei = 0; namei < name_count; ++namei)
{
uint64_t string_offset, entry_offset;
unsigned char *p;
p = name_table_string_offsets + namei * offset_size;
SAFE_BYTE_GET (string_offset, p, offset_size, unit_end);
p = name_table_entry_offsets + namei * offset_size;
SAFE_BYTE_GET (entry_offset, p, offset_size, unit_end);
printf ("[%3u] #%08x %s:", namei, hash_table_hashes[namei],
fetch_indirect_string (string_offset));
unsigned char *entryptr = entry_pool + entry_offset;
/* We need to scan first whether there is a single or multiple
entries. TAGNO is -2 for the first entry, it is -1 for the
initial tag read of the second entry, then it becomes 0 for the
first entry for real printing etc. */
int tagno = -2;
/* Initialize it due to a false compiler warning. */
dwarf_vma second_abbrev_tag = -1;
for (;;)
{
dwarf_vma abbrev_tag;
dwarf_vma dwarf_tag;
const struct abbrev_lookup_entry *entry;
READ_ULEB (abbrev_tag, entryptr, unit_end);
if (tagno == -1)
{
second_abbrev_tag = abbrev_tag;
tagno = 0;
entryptr = entry_pool + entry_offset;
continue;
}
if (abbrev_tag == 0)
break;
if (tagno >= 0)
printf ("%s<%lu>",
(tagno == 0 && second_abbrev_tag == 0 ? " " : "\n\t"),
(unsigned long) abbrev_tag);
for (entry = abbrev_lookup;
entry < abbrev_lookup + abbrev_lookup_used;
entry++)
if (entry->abbrev_tag == abbrev_tag)
break;
if (entry >= abbrev_lookup + abbrev_lookup_used)
{
warn (_("Undefined abbreviation tag %lu "
"in unit 0x%lx in the debug_names\n"),
(long) abbrev_tag,
(long) (unit_start - section->start));
break;
}
abbrevptr = entry->abbrev_lookup_ptr;
READ_ULEB (dwarf_tag, abbrevptr, abbrev_table_end);
if (tagno >= 0)
printf (" %s", get_TAG_name (dwarf_tag));
for (;;)
{
dwarf_vma xindex, form;
READ_ULEB (xindex, abbrevptr, abbrev_table_end);
READ_ULEB (form, abbrevptr, abbrev_table_end);
if (xindex == 0 && form == 0)
break;
if (tagno >= 0)
printf (" %s", get_IDX_name (xindex));
entryptr = read_and_display_attr_value (0, form, 0,
unit_start, entryptr, unit_end,
0, 0, offset_size,
dwarf_version, NULL,
(tagno < 0), section,
NULL, '=', -1);
}
++tagno;
}
if (tagno <= 0)
printf (_(" <no entries>"));
putchar ('\n');
}
free (abbrev_lookup);
}
return 1;
}
| null | null | 195,824
|
45344829135475210734172934971132402350
| 372
|
Replace a run-time assertion failure with a warning message when parsing corrupt DWARF data.
PR 29289
* dwarf.c (display_debug_names): Replace assert with a warning
message.
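A minimal sketch of the change this message describes, applied at the assert following the hash-clash statistics in the function above; the exact wording of the warning is an assumption:

      /* Corrupt input can make this identity fail; warn instead of
         aborting, per PR 29289.  */
      if (name_count != buckets_filled + hash_clash_count)
	warn (_("The name_count (%lu) is not the same as the used "
		"bucket_count (%lu) + the hash clash count (%lu)"),
	      (unsigned long) name_count,
	      (unsigned long) buckets_filled,
	      (unsigned long) hash_clash_count);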
|
other
|
linux
|
e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091
| 1
|
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
u64 devid)
{
struct btrfs_device *device;
struct btrfs_fs_devices *cur_devices;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
u64 num_devices;
int ret = 0;
mutex_lock(&uuid_mutex);
num_devices = btrfs_num_devices(fs_info);
ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
if (ret)
goto out;
device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
if (IS_ERR(device)) {
if (PTR_ERR(device) == -ENOENT &&
strcmp(device_path, "missing") == 0)
ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
else
ret = PTR_ERR(device);
goto out;
}
if (btrfs_pinned_by_swapfile(fs_info, device)) {
btrfs_warn_in_rcu(fs_info,
"cannot remove device %s (devid %llu) due to active swapfile",
rcu_str_deref(device->name), device->devid);
ret = -ETXTBSY;
goto out;
}
if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = BTRFS_ERROR_DEV_TGT_REPLACE;
goto out;
}
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
fs_info->fs_devices->rw_devices == 1) {
ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
goto out;
}
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_del_init(&device->dev_alloc_list);
device->fs_devices->rw_devices--;
mutex_unlock(&fs_info->chunk_mutex);
}
mutex_unlock(&uuid_mutex);
ret = btrfs_shrink_device(device, 0);
if (!ret)
btrfs_reada_remove_dev(device);
mutex_lock(&uuid_mutex);
if (ret)
goto error_undo;
/*
* TODO: the superblock still includes this device in its num_devices
* counter although write_all_supers() is not locked out. This
* could give a filesystem state which requires a degraded mount.
*/
ret = btrfs_rm_dev_item(device);
if (ret)
goto error_undo;
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
btrfs_scrub_cancel_dev(device);
/*
* the device list mutex makes sure that we don't change
* the device list while someone else is writing out all
* the device supers. Whoever is writing all supers, should
* lock the device list mutex before getting the number of
* devices in the super block (super_copy). Conversely,
* whoever updates the number of devices in the super block
* (super_copy) should hold the device list mutex.
*/
/*
* In normal cases the cur_devices == fs_devices. But in case
* of deleting a seed device, the cur_devices should point to
* its own fs_devices listed under the fs_devices->seed.
*/
cur_devices = device->fs_devices;
mutex_lock(&fs_devices->device_list_mutex);
list_del_rcu(&device->dev_list);
cur_devices->num_devices--;
cur_devices->total_devices--;
/* Update total_devices of the parent fs_devices if it's seed */
if (cur_devices != fs_devices)
fs_devices->total_devices--;
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
cur_devices->missing_devices--;
btrfs_assign_next_active_device(device, NULL);
if (device->bdev) {
cur_devices->open_devices--;
/* remove sysfs entry */
btrfs_sysfs_remove_device(device);
}
num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
mutex_unlock(&fs_devices->device_list_mutex);
/*
* at this point, the device is zero sized and detached from
* the devices list. All that's left is to zero out the old
* supers and free the device.
*/
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
btrfs_scratch_superblocks(fs_info, device->bdev,
device->name->str);
btrfs_close_bdev(device);
synchronize_rcu();
btrfs_free_device(device);
if (cur_devices->open_devices == 0) {
list_del_init(&cur_devices->seed_list);
close_fs_devices(cur_devices);
free_fs_devices(cur_devices);
}
out:
mutex_unlock(&uuid_mutex);
return ret;
error_undo:
btrfs_reada_undo_remove_dev(device);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_add(&device->dev_alloc_list,
&fs_devices->alloc_list);
device->fs_devices->rw_devices++;
mutex_unlock(&fs_info->chunk_mutex);
}
goto out;
}
| null | null | 195,908
|
319350513243799661932841427777686896592
| 148
|
btrfs: fix NULL pointer dereference when deleting device by invalid id
[BUG]
It's easy to trigger a NULL pointer dereference just by removing a
non-existent device id:
# mkfs.btrfs -f -m single -d single /dev/test/scratch1 \
/dev/test/scratch2
# mount /dev/test/scratch1 /mnt/btrfs
# btrfs device remove 3 /mnt/btrfs
Then we have the following kernel NULL pointer dereference:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP NOPTI
CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs]
btrfs_ioctl+0x18bb/0x3190 [btrfs]
? lock_is_held_type+0xa5/0x120
? find_held_lock.constprop.0+0x2b/0x80
? do_user_addr_fault+0x201/0x6a0
? lock_release+0xd2/0x2d0
? __x64_sys_ioctl+0x83/0xb0
__x64_sys_ioctl+0x83/0xb0
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
[CAUSE]
Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return
btrfs_device directly") moves the "missing" device path check into
btrfs_rm_device().
But btrfs_rm_device() itself can have a case where it only receives
@devid, with NULL as @device_path.
In that case, calling strcmp() on NULL will trigger the NULL pointer
dereference.
Before that commit, we handled the "missing" case inside
btrfs_find_device_by_devspec(), which does not check @device_path at all
if @devid is provided, so there was no way to trigger the bug.
[FIX]
Before calling strcmp(), also make sure @device_path is not NULL.
Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly")
CC: [email protected] # 5.4+
Reported-by: butt3rflyh4ck <[email protected]>
Reviewed-by: Anand Jain <[email protected]>
Signed-off-by: Qu Wenruo <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
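A minimal sketch of the guard described under [FIX], applied to the error path of btrfs_rm_device() above; only the added device_path NULL check is new, the rest mirrors the existing code:

	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    device_path && strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}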
|
other
|
ImageMagick
|
d072ed6aff835c174e856ce3a428163c0da9e8f4
| 1
|
static Image *ReadOneMNGImage(MngInfo* mng_info, const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
page_geometry[MagickPathExtent];
Image
*image;
MagickBooleanType
logging;
volatile int
first_mng_object,
object_id,
term_chunk_found,
skip_to_iend;
volatile ssize_t
image_count=0;
MagickBooleanType
status;
MagickOffsetType
offset;
MngBox
default_fb,
fb,
previous_fb;
#if defined(MNG_INSERT_LAYERS)
PixelInfo
mng_background_color;
#endif
register unsigned char
*p;
register ssize_t
i;
size_t
count;
ssize_t
loop_level;
volatile short
skipping_loop;
#if defined(MNG_INSERT_LAYERS)
unsigned int
mandatory_back=0;
#endif
volatile unsigned int
#ifdef MNG_OBJECT_BUFFERS
mng_background_object=0,
#endif
mng_type=0; /* 0: PNG or JNG; 1: MNG; 2: MNG-LC; 3: MNG-VLC */
size_t
default_frame_timeout,
frame_timeout,
#if defined(MNG_INSERT_LAYERS)
image_height,
image_width,
#endif
length;
/* These delays are all measured in image ticks_per_second,
* not in MNG ticks_per_second
*/
volatile size_t
default_frame_delay,
final_delay,
final_image_delay,
frame_delay,
#if defined(MNG_INSERT_LAYERS)
insert_layers,
#endif
mng_iterations=1,
simplicity=0,
subframe_height=0,
subframe_width=0;
previous_fb.top=0;
previous_fb.bottom=0;
previous_fb.left=0;
previous_fb.right=0;
default_fb.top=0;
default_fb.bottom=0;
default_fb.left=0;
default_fb.right=0;
logging=LogMagickEvent(CoderEvent,GetMagickModule(),
" Enter ReadOneMNGImage()");
image=mng_info->image;
if (LocaleCompare(image_info->magick,"MNG") == 0)
{
char
magic_number[MagickPathExtent];
/* Verify MNG signature. */
count=(size_t) ReadBlob(image,8,(unsigned char *) magic_number);
if (memcmp(magic_number,"\212MNG\r\n\032\n",8) != 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/* Initialize some nonzero members of the MngInfo structure. */
for (i=0; i < MNG_MAX_OBJECTS; i++)
{
mng_info->object_clip[i].right=(ssize_t) PNG_UINT_31_MAX;
mng_info->object_clip[i].bottom=(ssize_t) PNG_UINT_31_MAX;
}
mng_info->exists[0]=MagickTrue;
}
skipping_loop=(-1);
first_mng_object=MagickTrue;
mng_type=0;
#if defined(MNG_INSERT_LAYERS)
insert_layers=MagickFalse; /* should be False during convert or mogrify */
#endif
default_frame_delay=0;
default_frame_timeout=0;
frame_delay=0;
final_delay=1;
mng_info->ticks_per_second=1UL*image->ticks_per_second;
object_id=0;
skip_to_iend=MagickFalse;
term_chunk_found=MagickFalse;
mng_info->framing_mode=1;
#if defined(MNG_INSERT_LAYERS)
mandatory_back=MagickFalse;
#endif
#if defined(MNG_INSERT_LAYERS)
mng_background_color=image->background_color;
#endif
default_fb=mng_info->frame;
previous_fb=mng_info->frame;
do
{
char
type[MagickPathExtent];
if (LocaleCompare(image_info->magick,"MNG") == 0)
{
unsigned char
*chunk;
/*
Read a new chunk.
*/
type[0]='\0';
(void) ConcatenateMagickString(type,"errr",MagickPathExtent);
length=ReadBlobMSBLong(image);
count=(size_t) ReadBlob(image,4,(unsigned char *) type);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading MNG chunk type %c%c%c%c, length: %.20g",
type[0],type[1],type[2],type[3],(double) length);
if (length > PNG_UINT_31_MAX)
{
status=MagickFalse;
break;
}
if (count == 0)
ThrowReaderException(CorruptImageError,"CorruptImage");
p=NULL;
chunk=(unsigned char *) NULL;
if (length != 0)
{
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
chunk=(unsigned char *) AcquireQuantumMemory(length+
MagickPathExtent,sizeof(*chunk));
if (chunk == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) length; i++)
{
int
c;
c=ReadBlobByte(image);
if (c == EOF)
break;
chunk[i]=(unsigned char) c;
}
p=chunk;
}
(void) ReadBlobMSBLong(image); /* read crc word */
#if !defined(JNG_SUPPORTED)
if (memcmp(type,mng_JHDR,4) == 0)
{
skip_to_iend=MagickTrue;
if (mng_info->jhdr_warning == 0)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"JNGCompressNotSupported","`%s'",image->filename);
mng_info->jhdr_warning++;
}
#endif
if (memcmp(type,mng_DHDR,4) == 0)
{
skip_to_iend=MagickTrue;
if (mng_info->dhdr_warning == 0)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"DeltaPNGNotSupported","`%s'",image->filename);
mng_info->dhdr_warning++;
}
if (memcmp(type,mng_MEND,4) == 0)
break;
if (skip_to_iend)
{
if (memcmp(type,mng_IEND,4) == 0)
skip_to_iend=MagickFalse;
if (length != 0)
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Skip to IEND.");
continue;
}
if (memcmp(type,mng_MHDR,4) == 0)
{
if (length != 28)
{
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
ThrowReaderException(CorruptImageError,"CorruptImage");
}
mng_info->mng_width=(size_t) ((p[0] << 24) | (p[1] << 16) |
(p[2] << 8) | p[3]);
mng_info->mng_height=(size_t) ((p[4] << 24) | (p[5] << 16) |
(p[6] << 8) | p[7]);
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" MNG width: %.20g",(double) mng_info->mng_width);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" MNG height: %.20g",(double) mng_info->mng_height);
}
p+=8;
mng_info->ticks_per_second=(size_t) mng_get_long(p);
if (mng_info->ticks_per_second == 0)
default_frame_delay=0;
else
default_frame_delay=1UL*image->ticks_per_second/
mng_info->ticks_per_second;
frame_delay=default_frame_delay;
simplicity=0;
p+=16;
simplicity=(size_t) mng_get_long(p);
mng_type=1; /* Full MNG */
if ((simplicity != 0) && ((simplicity | 11) == 11))
mng_type=2; /* LC */
if ((simplicity != 0) && ((simplicity | 9) == 9))
mng_type=3; /* VLC */
#if defined(MNG_INSERT_LAYERS)
if (mng_type != 3)
insert_layers=MagickTrue;
#endif
if (GetAuthenticPixelQueue(image) != (Quantum *) NULL)
{
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return((Image *) NULL);
image=SyncNextImageInList(image);
mng_info->image=image;
}
if ((mng_info->mng_width > 65535L) ||
(mng_info->mng_height > 65535L))
{
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit");
}
(void) FormatLocaleString(page_geometry,MagickPathExtent,
"%.20gx%.20g+0+0",(double) mng_info->mng_width,(double)
mng_info->mng_height);
mng_info->frame.left=0;
mng_info->frame.right=(ssize_t) mng_info->mng_width;
mng_info->frame.top=0;
mng_info->frame.bottom=(ssize_t) mng_info->mng_height;
mng_info->clip=default_fb=previous_fb=mng_info->frame;
for (i=0; i < MNG_MAX_OBJECTS; i++)
mng_info->object_clip[i]=mng_info->frame;
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_TERM,4) == 0)
{
int
repeat=0;
if (length != 0)
repeat=p[0];
if (repeat == 3)
{
final_delay=(png_uint_32) mng_get_long(&p[2]);
mng_iterations=(png_uint_32) mng_get_long(&p[6]);
if (mng_iterations == PNG_UINT_31_MAX)
mng_iterations=0;
image->iterations=mng_iterations;
term_chunk_found=MagickTrue;
}
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" repeat=%d, final_delay=%.20g, iterations=%.20g",
repeat,(double) final_delay, (double) image->iterations);
}
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_DEFI,4) == 0)
{
if (mng_type == 3)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"DEFI chunk found in MNG-VLC datastream","`%s'",
image->filename);
if (length < 2)
{
if (chunk)
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
ThrowReaderException(CorruptImageError,"CorruptImage");
}
object_id=(p[0] << 8) | p[1];
if (mng_type == 2 && object_id != 0)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"Nonzero object_id in MNG-LC datastream","`%s'",
image->filename);
if (object_id > MNG_MAX_OBJECTS)
{
/*
Instead of using a warning we should allocate a larger
MngInfo structure and continue.
*/
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"object id too large","`%s'",image->filename);
object_id=MNG_MAX_OBJECTS;
}
if (mng_info->exists[object_id])
if (mng_info->frozen[object_id])
{
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
(void) ThrowMagickException(exception,
GetMagickModule(),CoderError,
"DEFI cannot redefine a frozen MNG object","`%s'",
image->filename);
continue;
}
mng_info->exists[object_id]=MagickTrue;
if (length > 2)
mng_info->invisible[object_id]=p[2];
/*
Extract object offset info.
*/
if (length > 11)
{
mng_info->x_off[object_id]=(ssize_t) ((p[4] << 24) |
(p[5] << 16) | (p[6] << 8) | p[7]);
mng_info->y_off[object_id]=(ssize_t) ((p[8] << 24) |
(p[9] << 16) | (p[10] << 8) | p[11]);
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" x_off[%d]: %.20g, y_off[%d]: %.20g",
object_id,(double) mng_info->x_off[object_id],
object_id,(double) mng_info->y_off[object_id]);
}
}
/*
Extract object clipping info.
*/
if (length > 27)
mng_info->object_clip[object_id]=mng_read_box(mng_info->frame,0,
&p[12]);
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_bKGD,4) == 0)
{
mng_info->have_global_bkgd=MagickFalse;
if (length > 5)
{
mng_info->mng_global_bkgd.red=
ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1]));
mng_info->mng_global_bkgd.green=
ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3]));
mng_info->mng_global_bkgd.blue=
ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5]));
mng_info->have_global_bkgd=MagickTrue;
}
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_BACK,4) == 0)
{
#if defined(MNG_INSERT_LAYERS)
if (length > 6)
mandatory_back=p[6];
else
mandatory_back=0;
if (mandatory_back && length > 5)
{
mng_background_color.red=
ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1]));
mng_background_color.green=
ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3]));
mng_background_color.blue=
ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5]));
mng_background_color.alpha=OpaqueAlpha;
}
#ifdef MNG_OBJECT_BUFFERS
if (length > 8)
mng_background_object=(p[7] << 8) | p[8];
#endif
#endif
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_PLTE,4) == 0)
{
/* Read global PLTE. */
if (length && (length < 769))
{
if (mng_info->global_plte == (png_colorp) NULL)
mng_info->global_plte=(png_colorp) AcquireQuantumMemory(256,
sizeof(*mng_info->global_plte));
for (i=0; i < (ssize_t) (length/3); i++)
{
mng_info->global_plte[i].red=p[3*i];
mng_info->global_plte[i].green=p[3*i+1];
mng_info->global_plte[i].blue=p[3*i+2];
}
mng_info->global_plte_length=(unsigned int) (length/3);
}
#ifdef MNG_LOOSE
for ( ; i < 256; i++)
{
mng_info->global_plte[i].red=i;
mng_info->global_plte[i].green=i;
mng_info->global_plte[i].blue=i;
}
if (length != 0)
mng_info->global_plte_length=256;
#endif
else
mng_info->global_plte_length=0;
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_tRNS,4) == 0)
{
/* read global tRNS */
if (length > 0 && length < 257)
for (i=0; i < (ssize_t) length; i++)
mng_info->global_trns[i]=p[i];
#ifdef MNG_LOOSE
for ( ; i < 256; i++)
mng_info->global_trns[i]=255;
#endif
mng_info->global_trns_length=(unsigned int) length;
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_gAMA,4) == 0)
{
if (length == 4)
{
ssize_t
igamma;
igamma=mng_get_long(p);
mng_info->global_gamma=((float) igamma)*0.00001;
mng_info->have_global_gama=MagickTrue;
}
else
mng_info->have_global_gama=MagickFalse;
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_cHRM,4) == 0)
{
/* Read global cHRM */
if (length == 32)
{
mng_info->global_chrm.white_point.x=0.00001*mng_get_long(p);
mng_info->global_chrm.white_point.y=0.00001*mng_get_long(&p[4]);
mng_info->global_chrm.red_primary.x=0.00001*mng_get_long(&p[8]);
mng_info->global_chrm.red_primary.y=0.00001*
mng_get_long(&p[12]);
mng_info->global_chrm.green_primary.x=0.00001*
mng_get_long(&p[16]);
mng_info->global_chrm.green_primary.y=0.00001*
mng_get_long(&p[20]);
mng_info->global_chrm.blue_primary.x=0.00001*
mng_get_long(&p[24]);
mng_info->global_chrm.blue_primary.y=0.00001*
mng_get_long(&p[28]);
mng_info->have_global_chrm=MagickTrue;
}
else
mng_info->have_global_chrm=MagickFalse;
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_sRGB,4) == 0)
{
/*
Read global sRGB.
*/
if (length != 0)
{
mng_info->global_srgb_intent=
Magick_RenderingIntent_from_PNG_RenderingIntent(p[0]);
mng_info->have_global_srgb=MagickTrue;
}
else
mng_info->have_global_srgb=MagickFalse;
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_iCCP,4) == 0)
{
/* To do: */
/*
Read global iCCP.
*/
if (length != 0)
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_FRAM,4) == 0)
{
if (mng_type == 3)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"FRAM chunk found in MNG-VLC datastream","`%s'",
image->filename);
if ((mng_info->framing_mode == 2) || (mng_info->framing_mode == 4))
image->delay=frame_delay;
frame_delay=default_frame_delay;
frame_timeout=default_frame_timeout;
fb=default_fb;
if (length != 0)
if (p[0])
mng_info->framing_mode=p[0];
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Framing_mode=%d",mng_info->framing_mode);
if (length > 6)
{
/* Note the delay and frame clipping boundaries. */
p++; /* framing mode */
while (*p && ((p-chunk) < (ssize_t) length))
p++; /* frame name */
p++; /* frame name terminator */
if ((p-chunk) < (ssize_t) (length-4))
{
int
change_delay,
change_timeout,
change_clipping;
change_delay=(*p++);
change_timeout=(*p++);
change_clipping=(*p++);
p++; /* change_sync */
if (change_delay)
{
frame_delay=1UL*image->ticks_per_second*
mng_get_long(p);
if (mng_info->ticks_per_second != 0)
frame_delay/=mng_info->ticks_per_second;
else
frame_delay=PNG_UINT_31_MAX;
if (change_delay == 2)
default_frame_delay=frame_delay;
p+=4;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Framing_delay=%.20g",(double) frame_delay);
}
if (change_timeout)
{
frame_timeout=1UL*image->ticks_per_second*
mng_get_long(p);
if (mng_info->ticks_per_second != 0)
frame_timeout/=mng_info->ticks_per_second;
else
frame_timeout=PNG_UINT_31_MAX;
if (change_timeout == 2)
default_frame_timeout=frame_timeout;
p+=4;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Framing_timeout=%.20g",(double) frame_timeout);
}
if (change_clipping)
{
fb=mng_read_box(previous_fb,(char) p[0],&p[1]);
p+=17;
previous_fb=fb;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Frame_clip: L=%.20g R=%.20g T=%.20g B=%.20g",
(double) fb.left,(double) fb.right,(double) fb.top,
(double) fb.bottom);
if (change_clipping == 2)
default_fb=fb;
}
}
}
mng_info->clip=fb;
mng_info->clip=mng_minimum_box(fb,mng_info->frame);
subframe_width=(size_t) (mng_info->clip.right
-mng_info->clip.left);
subframe_height=(size_t) (mng_info->clip.bottom
-mng_info->clip.top);
/*
Insert a background layer behind the frame if framing_mode is 4.
*/
#if defined(MNG_INSERT_LAYERS)
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" subframe_width=%.20g, subframe_height=%.20g",(double)
subframe_width,(double) subframe_height);
if (insert_layers && (mng_info->framing_mode == 4) &&
(subframe_width) && (subframe_height))
{
/* Allocate next image structure. */
if (GetAuthenticPixelQueue(image) != (Quantum *) NULL)
{
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
mng_info->image=image;
if (term_chunk_found)
{
image->start_loop=MagickTrue;
image->iterations=mng_iterations;
term_chunk_found=MagickFalse;
}
else
image->start_loop=MagickFalse;
image->columns=subframe_width;
image->rows=subframe_height;
image->page.width=subframe_width;
image->page.height=subframe_height;
image->page.x=mng_info->clip.left;
image->page.y=mng_info->clip.top;
image->background_color=mng_background_color;
image->alpha_trait=UndefinedPixelTrait;
image->delay=0;
(void) SetImageBackgroundColor(image,exception);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Insert backgd layer, L=%.20g, R=%.20g T=%.20g, B=%.20g",
(double) mng_info->clip.left,
(double) mng_info->clip.right,
(double) mng_info->clip.top,
(double) mng_info->clip.bottom);
}
#endif
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_CLIP,4) == 0)
{
unsigned int
first_object,
last_object;
/*
Read CLIP.
*/
if (length > 3)
{
first_object=(p[0] << 8) | p[1];
last_object=(p[2] << 8) | p[3];
p+=4;
for (i=(int) first_object; i <= (int) last_object; i++)
{
if (mng_info->exists[i] && !mng_info->frozen[i])
{
MngBox
box;
box=mng_info->object_clip[i];
if ((p-chunk) < (ssize_t) (length-17))
mng_info->object_clip[i]=
mng_read_box(box,(char) p[0],&p[1]);
}
}
}
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_SAVE,4) == 0)
{
for (i=1; i < MNG_MAX_OBJECTS; i++)
if (mng_info->exists[i])
{
mng_info->frozen[i]=MagickTrue;
#ifdef MNG_OBJECT_BUFFERS
if (mng_info->ob[i] != (MngBuffer *) NULL)
mng_info->ob[i]->frozen=MagickTrue;
#endif
}
if (length != 0)
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if ((memcmp(type,mng_DISC,4) == 0) || (memcmp(type,mng_SEEK,4) == 0))
{
/* Read DISC or SEEK. */
if ((length == 0) || !memcmp(type,mng_SEEK,4))
{
for (i=1; i < MNG_MAX_OBJECTS; i++)
MngInfoDiscardObject(mng_info,i);
}
else
{
register ssize_t
j;
for (j=1; j < (ssize_t) length; j+=2)
{
i=p[j-1] << 8 | p[j];
MngInfoDiscardObject(mng_info,i);
}
}
if (length != 0)
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_MOVE,4) == 0)
{
size_t
first_object,
last_object;
/* read MOVE */
if (length > 3)
{
first_object=(p[0] << 8) | p[1];
last_object=(p[2] << 8) | p[3];
p+=4;
for (i=(ssize_t) first_object; i <= (ssize_t) last_object; i++)
{
if ((i < 0) || (i >= MNG_MAX_OBJECTS))
continue;
if (mng_info->exists[i] && !mng_info->frozen[i] &&
(p-chunk) < (ssize_t) (length-8))
{
MngPair
new_pair;
MngPair
old_pair;
old_pair.a=mng_info->x_off[i];
old_pair.b=mng_info->y_off[i];
new_pair=mng_read_pair(old_pair,(int) p[0],&p[1]);
mng_info->x_off[i]=new_pair.a;
mng_info->y_off[i]=new_pair.b;
}
}
}
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_LOOP,4) == 0)
{
ssize_t loop_iters=1;
if (length > 4)
{
loop_level=chunk[0];
mng_info->loop_active[loop_level]=1; /* mark loop active */
/* Record starting point. */
loop_iters=mng_get_long(&chunk[1]);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" LOOP level %.20g has %.20g iterations ",
(double) loop_level, (double) loop_iters);
if (loop_iters == 0)
skipping_loop=loop_level;
else
{
mng_info->loop_jump[loop_level]=TellBlob(image);
mng_info->loop_count[loop_level]=loop_iters;
}
mng_info->loop_iteration[loop_level]=0;
}
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_ENDL,4) == 0)
{
if (length > 0)
{
loop_level=chunk[0];
if (skipping_loop > 0)
{
if (skipping_loop == loop_level)
{
/*
Found end of zero-iteration loop.
*/
skipping_loop=(-1);
mng_info->loop_active[loop_level]=0;
}
}
else
{
if (mng_info->loop_active[loop_level] == 1)
{
mng_info->loop_count[loop_level]--;
mng_info->loop_iteration[loop_level]++;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" ENDL: LOOP level %.20g has %.20g remaining iters",
(double) loop_level,(double)
mng_info->loop_count[loop_level]);
if (mng_info->loop_count[loop_level] != 0)
{
offset=
SeekBlob(image,mng_info->loop_jump[loop_level],
SEEK_SET);
if (offset < 0)
{
chunk=(unsigned char *) RelinquishMagickMemory(
chunk);
ThrowReaderException(CorruptImageError,
"ImproperImageHeader");
}
}
else
{
short
last_level;
/*
Finished loop.
*/
mng_info->loop_active[loop_level]=0;
last_level=(-1);
for (i=0; i < loop_level; i++)
if (mng_info->loop_active[i] == 1)
last_level=(short) i;
loop_level=last_level;
}
}
}
}
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_CLON,4) == 0)
{
if (mng_info->clon_warning == 0)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"CLON is not implemented yet","`%s'",
image->filename);
mng_info->clon_warning++;
}
if (memcmp(type,mng_MAGN,4) == 0)
{
png_uint_16
magn_first,
magn_last,
magn_mb,
magn_ml,
magn_mr,
magn_mt,
magn_mx,
magn_my,
magn_methx,
magn_methy;
if (length > 1)
magn_first=(p[0] << 8) | p[1];
else
magn_first=0;
if (length > 3)
magn_last=(p[2] << 8) | p[3];
else
magn_last=magn_first;
#ifndef MNG_OBJECT_BUFFERS
if (magn_first || magn_last)
if (mng_info->magn_warning == 0)
{
(void) ThrowMagickException(exception,
GetMagickModule(),CoderError,
"MAGN is not implemented yet for nonzero objects",
"`%s'",image->filename);
mng_info->magn_warning++;
}
#endif
if (length > 4)
magn_methx=p[4];
else
magn_methx=0;
if (length > 6)
magn_mx=(p[5] << 8) | p[6];
else
magn_mx=1;
if (magn_mx == 0)
magn_mx=1;
if (length > 8)
magn_my=(p[7] << 8) | p[8];
else
magn_my=magn_mx;
if (magn_my == 0)
magn_my=1;
if (length > 10)
magn_ml=(p[9] << 8) | p[10];
else
magn_ml=magn_mx;
if (magn_ml == 0)
magn_ml=1;
if (length > 12)
magn_mr=(p[11] << 8) | p[12];
else
magn_mr=magn_mx;
if (magn_mr == 0)
magn_mr=1;
if (length > 14)
magn_mt=(p[13] << 8) | p[14];
else
magn_mt=magn_my;
if (magn_mt == 0)
magn_mt=1;
if (length > 16)
magn_mb=(p[15] << 8) | p[16];
else
magn_mb=magn_my;
if (magn_mb == 0)
magn_mb=1;
if (length > 17)
magn_methy=p[17];
else
magn_methy=magn_methx;
if (magn_methx > 5 || magn_methy > 5)
if (mng_info->magn_warning == 0)
{
(void) ThrowMagickException(exception,
GetMagickModule(),CoderError,
"Unknown MAGN method in MNG datastream","`%s'",
image->filename);
mng_info->magn_warning++;
}
#ifdef MNG_OBJECT_BUFFERS
/* Magnify existing objects in the range magn_first to magn_last */
#endif
if (magn_first == 0 || magn_last == 0)
{
/* Save the magnification factors for object 0 */
mng_info->magn_mb=magn_mb;
mng_info->magn_ml=magn_ml;
mng_info->magn_mr=magn_mr;
mng_info->magn_mt=magn_mt;
mng_info->magn_mx=magn_mx;
mng_info->magn_my=magn_my;
mng_info->magn_methx=magn_methx;
mng_info->magn_methy=magn_methy;
}
}
if (memcmp(type,mng_PAST,4) == 0)
{
if (mng_info->past_warning == 0)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"PAST is not implemented yet","`%s'",
image->filename);
mng_info->past_warning++;
}
if (memcmp(type,mng_SHOW,4) == 0)
{
if (mng_info->show_warning == 0)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"SHOW is not implemented yet","`%s'",
image->filename);
mng_info->show_warning++;
}
if (memcmp(type,mng_sBIT,4) == 0)
{
if (length < 4)
mng_info->have_global_sbit=MagickFalse;
else
{
mng_info->global_sbit.gray=p[0];
mng_info->global_sbit.red=p[0];
mng_info->global_sbit.green=p[1];
mng_info->global_sbit.blue=p[2];
mng_info->global_sbit.alpha=p[3];
mng_info->have_global_sbit=MagickTrue;
}
}
if (memcmp(type,mng_pHYs,4) == 0)
{
if (length > 8)
{
mng_info->global_x_pixels_per_unit=
(size_t) mng_get_long(p);
mng_info->global_y_pixels_per_unit=
(size_t) mng_get_long(&p[4]);
mng_info->global_phys_unit_type=p[8];
mng_info->have_global_phys=MagickTrue;
}
else
mng_info->have_global_phys=MagickFalse;
}
if (memcmp(type,mng_pHYg,4) == 0)
{
if (mng_info->phyg_warning == 0)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"pHYg is not implemented.","`%s'",image->filename);
mng_info->phyg_warning++;
}
if (memcmp(type,mng_BASI,4) == 0)
{
skip_to_iend=MagickTrue;
if (mng_info->basi_warning == 0)
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"BASI is not implemented yet","`%s'",
image->filename);
mng_info->basi_warning++;
#ifdef MNG_BASI_SUPPORTED
basi_width=(size_t) ((p[0] << 24) | (p[1] << 16) |
(p[2] << 8) | p[3]);
basi_height=(size_t) ((p[4] << 24) | (p[5] << 16) |
(p[6] << 8) | p[7]);
basi_color_type=p[8];
basi_compression_method=p[9];
basi_filter_type=p[10];
basi_interlace_method=p[11];
if (length > 11)
basi_red=(p[12] << 8) & p[13];
else
basi_red=0;
if (length > 13)
basi_green=(p[14] << 8) & p[15];
else
basi_green=0;
if (length > 15)
basi_blue=(p[16] << 8) & p[17];
else
basi_blue=0;
if (length > 17)
basi_alpha=(p[18] << 8) & p[19];
else
{
if (basi_sample_depth == 16)
basi_alpha=65535L;
else
basi_alpha=255;
}
if (length > 19)
basi_viewable=p[20];
else
basi_viewable=0;
#endif
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
if (memcmp(type,mng_IHDR,4)
#if defined(JNG_SUPPORTED)
&& memcmp(type,mng_JHDR,4)
#endif
)
{
/* Not an IHDR or JHDR chunk */
if (length != 0)
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
/* Process IHDR */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Processing %c%c%c%c chunk",type[0],type[1],type[2],type[3]);
mng_info->exists[object_id]=MagickTrue;
mng_info->viewable[object_id]=MagickTrue;
if (mng_info->invisible[object_id])
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Skipping invisible object");
skip_to_iend=MagickTrue;
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
continue;
}
#if defined(MNG_INSERT_LAYERS)
if (length < 8)
{
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
image_width=(size_t) mng_get_long(p);
image_height=(size_t) mng_get_long(&p[4]);
#endif
chunk=(unsigned char *) RelinquishMagickMemory(chunk);
/*
Insert a transparent background layer behind the entire animation
if it is not full screen.
*/
#if defined(MNG_INSERT_LAYERS)
if (insert_layers && mng_type && first_mng_object)
{
if ((mng_info->clip.left > 0) || (mng_info->clip.top > 0) ||
(image_width < mng_info->mng_width) ||
(mng_info->clip.right < (ssize_t) mng_info->mng_width) ||
(image_height < mng_info->mng_height) ||
(mng_info->clip.bottom < (ssize_t) mng_info->mng_height))
{
if (GetAuthenticPixelQueue(image) != (Quantum *) NULL)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
mng_info->image=image;
if (term_chunk_found)
{
image->start_loop=MagickTrue;
image->iterations=mng_iterations;
term_chunk_found=MagickFalse;
}
else
image->start_loop=MagickFalse;
/* Make a background rectangle. */
image->delay=0;
image->columns=mng_info->mng_width;
image->rows=mng_info->mng_height;
image->page.width=mng_info->mng_width;
image->page.height=mng_info->mng_height;
image->page.x=0;
image->page.y=0;
image->background_color=mng_background_color;
(void) SetImageBackgroundColor(image,exception);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Inserted transparent background layer, W=%.20g, H=%.20g",
(double) mng_info->mng_width,(double) mng_info->mng_height);
}
}
/*
Insert a background layer behind the upcoming image if
framing_mode is 3, and we haven't already inserted one.
*/
if (insert_layers && (mng_info->framing_mode == 3) &&
(subframe_width) && (subframe_height) && (simplicity == 0 ||
(simplicity & 0x08)))
{
if (GetAuthenticPixelQueue(image) != (Quantum *) NULL)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
mng_info->image=image;
if (term_chunk_found)
{
image->start_loop=MagickTrue;
image->iterations=mng_iterations;
term_chunk_found=MagickFalse;
}
else
image->start_loop=MagickFalse;
image->delay=0;
image->columns=subframe_width;
image->rows=subframe_height;
image->page.width=subframe_width;
image->page.height=subframe_height;
image->page.x=mng_info->clip.left;
image->page.y=mng_info->clip.top;
image->background_color=mng_background_color;
image->alpha_trait=UndefinedPixelTrait;
(void) SetImageBackgroundColor(image,exception);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Insert background layer, L=%.20g, R=%.20g T=%.20g, B=%.20g",
(double) mng_info->clip.left,(double) mng_info->clip.right,
(double) mng_info->clip.top,(double) mng_info->clip.bottom);
}
#endif /* MNG_INSERT_LAYERS */
first_mng_object=MagickFalse;
if (GetAuthenticPixelQueue(image) != (Quantum *) NULL)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
mng_info->image=image;
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
if (term_chunk_found)
{
image->start_loop=MagickTrue;
term_chunk_found=MagickFalse;
}
else
image->start_loop=MagickFalse;
if (mng_info->framing_mode == 1 || mng_info->framing_mode == 3)
{
image->delay=frame_delay;
frame_delay=default_frame_delay;
}
else
image->delay=0;
image->page.width=mng_info->mng_width;
image->page.height=mng_info->mng_height;
image->page.x=mng_info->x_off[object_id];
image->page.y=mng_info->y_off[object_id];
image->iterations=mng_iterations;
/*
Seek back to the beginning of the IHDR or JHDR chunk's length field.
*/
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Seeking back to beginning of %c%c%c%c chunk",type[0],type[1],
type[2],type[3]);
offset=SeekBlob(image,-((ssize_t) length+12),SEEK_CUR);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
mng_info->image=image;
mng_info->mng_type=mng_type;
mng_info->object_id=object_id;
if (memcmp(type,mng_IHDR,4) == 0)
image=ReadOnePNGImage(mng_info,image_info,exception);
#if defined(JNG_SUPPORTED)
else
image=ReadOneJNGImage(mng_info,image_info,exception);
#endif
if (image == (Image *) NULL)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"exit ReadJNGImage() with error");
return((Image *) NULL);
}
if (image->columns == 0 || image->rows == 0)
{
(void) CloseBlob(image);
return(DestroyImageList(image));
}
mng_info->image=image;
if (mng_type)
{
MngBox
crop_box;
if (mng_info->magn_methx || mng_info->magn_methy)
{
png_uint_32
magnified_height,
magnified_width;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Processing MNG MAGN chunk");
if (mng_info->magn_methx == 1)
{
magnified_width=mng_info->magn_ml;
if (image->columns > 1)
magnified_width += mng_info->magn_mr;
if (image->columns > 2)
magnified_width += (png_uint_32)
((image->columns-2)*(mng_info->magn_mx));
}
else
{
magnified_width=(png_uint_32) image->columns;
if (image->columns > 1)
magnified_width += mng_info->magn_ml-1;
if (image->columns > 2)
magnified_width += mng_info->magn_mr-1;
if (image->columns > 3)
magnified_width += (png_uint_32)
((image->columns-3)*(mng_info->magn_mx-1));
}
if (mng_info->magn_methy == 1)
{
magnified_height=mng_info->magn_mt;
if (image->rows > 1)
magnified_height += mng_info->magn_mb;
if (image->rows > 2)
magnified_height += (png_uint_32)
((image->rows-2)*(mng_info->magn_my));
}
else
{
magnified_height=(png_uint_32) image->rows;
if (image->rows > 1)
magnified_height += mng_info->magn_mt-1;
if (image->rows > 2)
magnified_height += mng_info->magn_mb-1;
if (image->rows > 3)
magnified_height += (png_uint_32)
((image->rows-3)*(mng_info->magn_my-1));
}
if (magnified_height > image->rows ||
magnified_width > image->columns)
{
Image
*large_image;
int
yy;
Quantum
*next,
*prev;
png_uint_16
magn_methx,
magn_methy;
ssize_t
m,
y;
register Quantum
*n,
*q;
register ssize_t
x;
/* Allocate next image structure. */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Allocate magnified image");
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
large_image=SyncNextImageInList(image);
large_image->columns=magnified_width;
large_image->rows=magnified_height;
magn_methx=mng_info->magn_methx;
magn_methy=mng_info->magn_methy;
#if (MAGICKCORE_QUANTUM_DEPTH > 16)
#define QM unsigned short
if (magn_methx != 1 || magn_methy != 1)
{
/*
Scale pixels to unsigned shorts to prevent
overflow of intermediate values of interpolations
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
SetPixelRed(image,ScaleQuantumToShort(
GetPixelRed(image,q)),q);
SetPixelGreen(image,ScaleQuantumToShort(
GetPixelGreen(image,q)),q);
SetPixelBlue(image,ScaleQuantumToShort(
GetPixelBlue(image,q)),q);
SetPixelAlpha(image,ScaleQuantumToShort(
GetPixelAlpha(image,q)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
#else
#define QM Quantum
#endif
if (image->alpha_trait != UndefinedPixelTrait)
(void) SetImageBackgroundColor(large_image,exception);
else
{
large_image->background_color.alpha=OpaqueAlpha;
(void) SetImageBackgroundColor(large_image,exception);
if (magn_methx == 4)
magn_methx=2;
if (magn_methx == 5)
magn_methx=3;
if (magn_methy == 4)
magn_methy=2;
if (magn_methy == 5)
magn_methy=3;
}
/* magnify the rows into the right side of the large image */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Magnify the rows to %.20g",
(double) large_image->rows);
m=(ssize_t) mng_info->magn_mt;
yy=0;
length=(size_t) GetPixelChannels(image)*image->columns;
next=(Quantum *) AcquireQuantumMemory(length,sizeof(*next));
prev=(Quantum *) AcquireQuantumMemory(length,sizeof(*prev));
if ((prev == (Quantum *) NULL) ||
(next == (Quantum *) NULL))
{
image=DestroyImageList(image);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
n=GetAuthenticPixels(image,0,0,image->columns,1,exception);
(void) CopyMagickMemory(next,n,length);
for (y=0; y < (ssize_t) image->rows; y++)
{
if (y == 0)
m=(ssize_t) mng_info->magn_mt;
else if (magn_methy > 1 && y == (ssize_t) image->rows-2)
m=(ssize_t) mng_info->magn_mb;
else if (magn_methy <= 1 && y == (ssize_t) image->rows-1)
m=(ssize_t) mng_info->magn_mb;
else if (magn_methy > 1 && y == (ssize_t) image->rows-1)
m=1;
else
m=(ssize_t) mng_info->magn_my;
n=prev;
prev=next;
next=n;
if (y < (ssize_t) image->rows-1)
{
n=GetAuthenticPixels(image,0,y+1,image->columns,1,
exception);
(void) CopyMagickMemory(next,n,length);
}
for (i=0; i < m; i++, yy++)
{
register Quantum
*pixels;
assert(yy < (ssize_t) large_image->rows);
pixels=prev;
n=next;
q=GetAuthenticPixels(large_image,0,yy,large_image->columns,
1,exception);
q+=(large_image->columns-image->columns)*
GetPixelChannels(large_image);
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
/* To do: get color as function of indexes[x] */
/*
if (image->storage_class == PseudoClass)
{
}
*/
if (magn_methy <= 1)
{
/* replicate previous */
SetPixelRed(large_image,GetPixelRed(image,pixels),q);
SetPixelGreen(large_image,GetPixelGreen(image,
pixels),q);
SetPixelBlue(large_image,GetPixelBlue(image,
pixels),q);
SetPixelAlpha(large_image,GetPixelAlpha(image,
pixels),q);
}
else if (magn_methy == 2 || magn_methy == 4)
{
if (i == 0)
{
SetPixelRed(large_image,GetPixelRed(image,
pixels),q);
SetPixelGreen(large_image,GetPixelGreen(image,
pixels),q);
SetPixelBlue(large_image,GetPixelBlue(image,
pixels),q);
SetPixelAlpha(large_image,GetPixelAlpha(image,
pixels),q);
}
else
{
/* Interpolate */
SetPixelRed(large_image,((QM) (((ssize_t)
(2*i*(GetPixelRed(image,n)
-GetPixelRed(image,pixels)+m))/
((ssize_t) (m*2))
+GetPixelRed(image,pixels)))),q);
SetPixelGreen(large_image,((QM) (((ssize_t)
(2*i*(GetPixelGreen(image,n)
-GetPixelGreen(image,pixels)+m))/
((ssize_t) (m*2))
+GetPixelGreen(image,pixels)))),q);
SetPixelBlue(large_image,((QM) (((ssize_t)
(2*i*(GetPixelBlue(image,n)
-GetPixelBlue(image,pixels)+m))/
((ssize_t) (m*2))
+GetPixelBlue(image,pixels)))),q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(large_image, ((QM) (((ssize_t)
(2*i*(GetPixelAlpha(image,n)
-GetPixelAlpha(image,pixels)+m))
/((ssize_t) (m*2))+
GetPixelAlpha(image,pixels)))),q);
}
if (magn_methy == 4)
{
/* Replicate nearest */
if (i <= ((m+1) << 1))
SetPixelAlpha(large_image,GetPixelAlpha(image,
pixels),q);
else
SetPixelAlpha(large_image,GetPixelAlpha(image,
n),q);
}
}
else /* if (magn_methy == 3 || magn_methy == 5) */
{
/* Replicate nearest */
if (i <= ((m+1) << 1))
{
SetPixelRed(large_image,GetPixelRed(image,
pixels),q);
SetPixelGreen(large_image,GetPixelGreen(image,
pixels),q);
SetPixelBlue(large_image,GetPixelBlue(image,
pixels),q);
SetPixelAlpha(large_image,GetPixelAlpha(image,
pixels),q);
}
else
{
SetPixelRed(large_image,GetPixelRed(image,n),q);
SetPixelGreen(large_image,GetPixelGreen(image,n),
q);
SetPixelBlue(large_image,GetPixelBlue(image,n),
q);
SetPixelAlpha(large_image,GetPixelAlpha(image,n),
q);
}
if (magn_methy == 5)
{
SetPixelAlpha(large_image,(QM) (((ssize_t) (2*i*
(GetPixelAlpha(image,n)
-GetPixelAlpha(image,pixels))
+m))/((ssize_t) (m*2))
+GetPixelAlpha(image,pixels)),q);
}
}
n+=GetPixelChannels(image);
q+=GetPixelChannels(large_image);
pixels+=GetPixelChannels(image);
} /* x */
if (SyncAuthenticPixels(large_image,exception) == 0)
break;
} /* i */
} /* y */
prev=(Quantum *) RelinquishMagickMemory(prev);
next=(Quantum *) RelinquishMagickMemory(next);
length=image->columns;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Delete original image");
DeleteImageFromList(&image);
image=large_image;
mng_info->image=image;
/* magnify the columns */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Magnify the columns to %.20g",
(double) image->columns);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*pixels;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
pixels=q+(image->columns-length)*GetPixelChannels(image);
n=pixels+GetPixelChannels(image);
for (x=(ssize_t) (image->columns-length);
x < (ssize_t) image->columns; x++)
{
/* To do: Rewrite using Get/Set***PixelChannel() */
if (x == (ssize_t) (image->columns-length))
m=(ssize_t) mng_info->magn_ml;
else if (magn_methx > 1 && x == (ssize_t) image->columns-2)
m=(ssize_t) mng_info->magn_mr;
else if (magn_methx <= 1 &&
x == (ssize_t) image->columns-1)
m=(ssize_t) mng_info->magn_mr;
else if (magn_methx > 1 && x == (ssize_t) image->columns-1)
m=1;
else
m=(ssize_t) mng_info->magn_mx;
for (i=0; i < m; i++)
{
if (magn_methx <= 1)
{
/* replicate previous */
SetPixelRed(image,GetPixelRed(image,pixels),q);
SetPixelGreen(image,GetPixelGreen(image,pixels),q);
SetPixelBlue(image,GetPixelBlue(image,pixels),q);
SetPixelAlpha(image,GetPixelAlpha(image,pixels),q);
}
else if (magn_methx == 2 || magn_methx == 4)
{
if (i == 0)
{
SetPixelRed(image,GetPixelRed(image,pixels),q);
SetPixelGreen(image,GetPixelGreen(image,pixels),q);
SetPixelBlue(image,GetPixelBlue(image,pixels),q);
SetPixelAlpha(image,GetPixelAlpha(image,pixels),q);
}
/* To do: Rewrite using Get/Set***PixelChannel() */
else
{
/* Interpolate */
SetPixelRed(image,(QM) ((2*i*(
GetPixelRed(image,n)
-GetPixelRed(image,pixels))+m)
/((ssize_t) (m*2))+
GetPixelRed(image,pixels)),q);
SetPixelGreen(image,(QM) ((2*i*(
GetPixelGreen(image,n)
-GetPixelGreen(image,pixels))+m)
/((ssize_t) (m*2))+
GetPixelGreen(image,pixels)),q);
SetPixelBlue(image,(QM) ((2*i*(
GetPixelBlue(image,n)
-GetPixelBlue(image,pixels))+m)
/((ssize_t) (m*2))+
GetPixelBlue(image,pixels)),q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,(QM) ((2*i*(
GetPixelAlpha(image,n)
-GetPixelAlpha(image,pixels))+m)
/((ssize_t) (m*2))+
GetPixelAlpha(image,pixels)),q);
}
if (magn_methx == 4)
{
/* Replicate nearest */
if (i <= ((m+1) << 1))
{
SetPixelAlpha(image,
GetPixelAlpha(image,pixels)+0,q);
}
else
{
SetPixelAlpha(image,
GetPixelAlpha(image,n)+0,q);
}
}
}
else /* if (magn_methx == 3 || magn_methx == 5) */
{
/* Replicate nearest */
if (i <= ((m+1) << 1))
{
SetPixelRed(image,GetPixelRed(image,pixels),q);
SetPixelGreen(image,GetPixelGreen(image,
pixels),q);
SetPixelBlue(image,GetPixelBlue(image,pixels),q);
SetPixelAlpha(image,GetPixelAlpha(image,
pixels),q);
}
else
{
SetPixelRed(image,GetPixelRed(image,n),q);
SetPixelGreen(image,GetPixelGreen(image,n),q);
SetPixelBlue(image,GetPixelBlue(image,n),q);
SetPixelAlpha(image,GetPixelAlpha(image,n),q);
}
if (magn_methx == 5)
{
/* Interpolate */
SetPixelAlpha(image,
(QM) ((2*i*( GetPixelAlpha(image,n)
-GetPixelAlpha(image,pixels))+m)/
((ssize_t) (m*2))
+GetPixelAlpha(image,pixels)),q);
}
}
q+=GetPixelChannels(image);
}
n+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
#if (MAGICKCORE_QUANTUM_DEPTH > 16)
if (magn_methx != 1 || magn_methy != 1)
{
/*
Rescale pixels to Quantum
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
SetPixelRed(image,ScaleShortToQuantum(
GetPixelRed(image,q)),q);
SetPixelGreen(image,ScaleShortToQuantum(
GetPixelGreen(image,q)),q);
SetPixelBlue(image,ScaleShortToQuantum(
GetPixelBlue(image,q)),q);
SetPixelAlpha(image,ScaleShortToQuantum(
GetPixelAlpha(image,q)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
#endif
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Finished MAGN processing");
}
}
/*
Crop_box is with respect to the upper left corner of the MNG.
*/
crop_box.left=mng_info->image_box.left+mng_info->x_off[object_id];
crop_box.right=mng_info->image_box.right+mng_info->x_off[object_id];
crop_box.top=mng_info->image_box.top+mng_info->y_off[object_id];
crop_box.bottom=mng_info->image_box.bottom+mng_info->y_off[object_id];
crop_box=mng_minimum_box(crop_box,mng_info->clip);
crop_box=mng_minimum_box(crop_box,mng_info->frame);
crop_box=mng_minimum_box(crop_box,mng_info->object_clip[object_id]);
if ((crop_box.left != (mng_info->image_box.left
+mng_info->x_off[object_id])) ||
(crop_box.right != (mng_info->image_box.right
+mng_info->x_off[object_id])) ||
(crop_box.top != (mng_info->image_box.top
+mng_info->y_off[object_id])) ||
(crop_box.bottom != (mng_info->image_box.bottom
+mng_info->y_off[object_id])))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Crop the PNG image");
if ((crop_box.left < crop_box.right) &&
(crop_box.top < crop_box.bottom))
{
Image
*im;
RectangleInfo
crop_info;
/*
Crop_info is with respect to the upper left corner of
the image.
*/
crop_info.x=(crop_box.left-mng_info->x_off[object_id]);
crop_info.y=(crop_box.top-mng_info->y_off[object_id]);
crop_info.width=(size_t) (crop_box.right-crop_box.left);
crop_info.height=(size_t) (crop_box.bottom-crop_box.top);
image->page.width=image->columns;
image->page.height=image->rows;
image->page.x=0;
image->page.y=0;
im=CropImage(image,&crop_info,exception);
if (im != (Image *) NULL)
{
image->columns=im->columns;
image->rows=im->rows;
im=DestroyImage(im);
image->page.width=image->columns;
image->page.height=image->rows;
image->page.x=crop_box.left;
image->page.y=crop_box.top;
}
}
else
{
/*
No pixels in crop area. The MNG spec still requires
a layer, though, so make a single transparent pixel in
the top left corner.
*/
image->columns=1;
image->rows=1;
image->colors=2;
(void) SetImageBackgroundColor(image,exception);
image->page.width=1;
image->page.height=1;
image->page.x=0;
image->page.y=0;
}
}
#ifndef PNG_READ_EMPTY_PLTE_SUPPORTED
image=mng_info->image;
#endif
}
#if (MAGICKCORE_QUANTUM_DEPTH > 16)
/* PNG does not handle depths greater than 16 so reduce it even
* if lossy.
*/
if (image->depth > 16)
image->depth=16;
#endif
#if (MAGICKCORE_QUANTUM_DEPTH > 8)
if (image->depth > 8)
{
/* To do: fill low byte properly */
image->depth=16;
}
if (LosslessReduceDepthOK(image,exception) != MagickFalse)
image->depth = 8;
#endif
if (image_info->number_scenes != 0)
{
if (mng_info->scenes_found >
(ssize_t) (image_info->first_scene+image_info->number_scenes))
break;
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Finished reading image datastream.");
} while (LocaleCompare(image_info->magick,"MNG") == 0);
(void) CloseBlob(image);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Finished reading all image datastreams.");
#if defined(MNG_INSERT_LAYERS)
if (insert_layers && !mng_info->image_found && (mng_info->mng_width) &&
(mng_info->mng_height))
{
/*
Insert a background layer if nothing else was found.
*/
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" No images found. Inserting a background layer.");
if (GetAuthenticPixelQueue(image) != (Quantum *) NULL)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Allocation failed, returning NULL.");
              return(DestroyImageList(image));
}
image=SyncNextImageInList(image);
}
image->columns=mng_info->mng_width;
image->rows=mng_info->mng_height;
image->page.width=mng_info->mng_width;
image->page.height=mng_info->mng_height;
image->page.x=0;
image->page.y=0;
image->background_color=mng_background_color;
image->alpha_trait=UndefinedPixelTrait;
if (image_info->ping == MagickFalse)
(void) SetImageBackgroundColor(image,exception);
mng_info->image_found++;
}
#endif
image->iterations=mng_iterations;
if (mng_iterations == 1)
image->start_loop=MagickTrue;
while (GetPreviousImageInList(image) != (Image *) NULL)
{
image_count++;
if (image_count > 10*mng_info->image_found)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," No beginning");
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"Linked list is corrupted, beginning of list not found",
"`%s'",image_info->filename);
return(DestroyImageList(image));
}
image=GetPreviousImageInList(image);
if (GetNextImageInList(image) == (Image *) NULL)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," Corrupt list");
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"Linked list is corrupted; next_image is NULL","`%s'",
image_info->filename);
}
}
if (mng_info->ticks_per_second && mng_info->image_found > 1 &&
GetNextImageInList(image) ==
(Image *) NULL)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" First image null");
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"image->next for first image is NULL but shouldn't be.",
"`%s'",image_info->filename);
}
if (mng_info->image_found == 0)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" No visible images found.");
(void) ThrowMagickException(exception,GetMagickModule(),
CoderError,"No visible images in file","`%s'",image_info->filename);
return(DestroyImageList(image));
}
if (mng_info->ticks_per_second)
final_delay=1UL*MagickMax(image->ticks_per_second,1L)*
final_delay/mng_info->ticks_per_second;
else
image->start_loop=MagickTrue;
/* Find final nonzero image delay */
final_image_delay=0;
while (GetNextImageInList(image) != (Image *) NULL)
{
if (image->delay)
final_image_delay=image->delay;
image=GetNextImageInList(image);
}
if (final_delay < final_image_delay)
final_delay=final_image_delay;
image->delay=final_delay;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->delay=%.20g, final_delay=%.20g",(double) image->delay,
(double) final_delay);
if (logging != MagickFalse)
{
int
scene;
scene=0;
image=GetFirstImageInList(image);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Before coalesce:");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" scene 0 delay=%.20g",(double) image->delay);
while (GetNextImageInList(image) != (Image *) NULL)
{
image=GetNextImageInList(image);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" scene %.20g delay=%.20g",(double) scene++,
(double) image->delay);
}
}
image=GetFirstImageInList(image);
#ifdef MNG_COALESCE_LAYERS
if (insert_layers)
{
Image
*next_image,
*next;
size_t
scene;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Coalesce Images");
scene=image->scene;
next_image=CoalesceImages(image,exception);
if (next_image == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
image=DestroyImageList(image);
image=next_image;
for (next=image; next != (Image *) NULL; next=next_image)
{
next->page.width=mng_info->mng_width;
next->page.height=mng_info->mng_height;
next->page.x=0;
next->page.y=0;
next->scene=scene++;
next_image=GetNextImageInList(next);
if (next_image == (Image *) NULL)
break;
if (next->delay == 0)
{
scene--;
next_image->previous=GetPreviousImageInList(next);
if (GetPreviousImageInList(next) == (Image *) NULL)
image=next_image;
else
next->previous->next=next_image;
next=DestroyImage(next);
}
}
}
#endif
while (GetNextImageInList(image) != (Image *) NULL)
image=GetNextImageInList(image);
image->dispose=BackgroundDispose;
if (logging != MagickFalse)
{
int
scene;
scene=0;
image=GetFirstImageInList(image);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" After coalesce:");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" scene 0 delay=%.20g dispose=%.20g",(double) image->delay,
(double) image->dispose);
while (GetNextImageInList(image) != (Image *) NULL)
{
image=GetNextImageInList(image);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" scene %.20g delay=%.20g dispose=%.20g",(double) scene++,
(double) image->delay,(double) image->dispose);
}
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" exit ReadOneMNGImage();");
return(image);
}
| null | null | 195,909
|
3754765081343933811961558712152130638
| 2,411
|
Skip MNG CLIP chunk with out-of-range object IDs
|
other
|
samba
|
a60863458dc6b60a09aa8d31fada6c36f5043c76
| 1
|
struct torture_suite *torture_raw_write(TALLOC_CTX *mem_ctx)
{
struct torture_suite *suite = torture_suite_create(mem_ctx, "write");
torture_suite_add_1smb_test(suite, "write", test_write);
torture_suite_add_1smb_test(suite, "write unlock", test_writeunlock);
torture_suite_add_1smb_test(suite, "write close", test_writeclose);
torture_suite_add_1smb_test(suite, "writex", test_writex);
return suite;
}
| null | null | 195,927
|
221440379242522067882758642217412717539
| 11
|
CVE-2022-32742: s4: torture: Add raw.write.bad-write test.
Reproduces the test code in:
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15085
Add knownfail.
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: David Disseldorp <[email protected]>
|
other
|
pjproject
|
9fae8f43accef8ea65d4a8ae9cdf297c46cfe29a
| 1
|
static pj_status_t parse_query(pj_dns_parsed_query *q, pj_pool_t *pool,
const pj_uint8_t *pkt, const pj_uint8_t *start,
const pj_uint8_t *max, int *parsed_len)
{
const pj_uint8_t *p = start;
int name_len, name_part_len;
pj_status_t status;
/* Get the length of the name */
status = get_name_len(0, pkt, start, max, &name_part_len, &name_len);
if (status != PJ_SUCCESS)
return status;
/* Allocate memory for the name */
q->name.ptr = (char*) pj_pool_alloc(pool, name_len+4);
q->name.slen = 0;
/* Get the name */
status = get_name(0, pkt, start, max, &q->name);
if (status != PJ_SUCCESS)
return status;
p = (start + name_part_len);
/* Get the type */
pj_memcpy(&q->type, p, 2);
q->type = pj_ntohs(q->type);
p += 2;
/* Get the class */
pj_memcpy(&q->dnsclass, p, 2);
q->dnsclass = pj_ntohs(q->dnsclass);
p += 2;
*parsed_len = (int)(p - start);
return PJ_SUCCESS;
}
| null | null | 195,954
|
126258121414833088809132976389975059096
| 38
|
Merge pull request from GHSA-p6g5-v97c-w5q4
* Prevent heap buffer overflow when parsing DNS packets
* Make sure packet parsing doesn't advance beyond max/end
* Update checks
* Remove check
Co-authored-by: sauwming <[email protected]>
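A minimal sketch of the bound check this fix describes, not the verbatim patch; p and max are the variables of parse_query() above, and PJ_ETOOSMALL is assumed to be a suitable pjlib error code.
/* Verify that the four fixed-size bytes of type and class actually
 * exist between the read pointer and the end of the packet. */
if (p + 4 > max)
    return PJ_ETOOSMALL;
/* only now is it safe to pj_memcpy() the type and class fields */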
|
other
|
tensorflow
|
30721cf564cb029d34535446d6a5a6357bebc8e7
| 1
|
void Compute(OpKernelContext* ctx) override {
const Tensor* hypothesis_indices;
const Tensor* hypothesis_values;
const Tensor* hypothesis_shape;
const Tensor* truth_indices;
const Tensor* truth_values;
const Tensor* truth_shape;
OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices));
OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values));
OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape));
OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices));
OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values));
OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape));
OP_REQUIRES_OK(
ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values,
*hypothesis_shape, *truth_indices, *truth_values,
*truth_shape));
TensorShape hypothesis_st_shape;
OP_REQUIRES_OK(ctx,
TensorShapeUtils::MakeShape(
hypothesis_shape->vec<int64_t>().data(),
hypothesis_shape->NumElements(), &hypothesis_st_shape));
TensorShape truth_st_shape;
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(
truth_shape->vec<int64_t>().data(),
truth_shape->NumElements(), &truth_st_shape));
// Assume indices are sorted in row-major order.
std::vector<int64_t> sorted_order(truth_st_shape.dims());
std::iota(sorted_order.begin(), sorted_order.end(), 0);
sparse::SparseTensor hypothesis;
OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
*hypothesis_indices, *hypothesis_values,
hypothesis_st_shape, sorted_order, &hypothesis));
sparse::SparseTensor truth;
OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
*truth_indices, *truth_values, truth_st_shape,
sorted_order, &truth));
// Group dims 0, 1, ..., RANK - 1. The very last dim is assumed
// to store the variable length sequences.
std::vector<int64_t> group_dims(truth_st_shape.dims() - 1);
std::iota(group_dims.begin(), group_dims.end(), 0);
TensorShape output_shape;
for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) {
output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d),
truth_st_shape.dim_size(d)));
}
const auto output_elements = output_shape.num_elements();
OP_REQUIRES(
ctx, output_elements > 0,
errors::InvalidArgument("Got output shape ", output_shape.DebugString(),
" which has 0 elements"));
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output));
auto output_t = output->flat<float>();
output_t.setZero();
std::vector<int64_t> output_strides(output_shape.dims());
output_strides[output_shape.dims() - 1] = 1;
for (int d = output_shape.dims() - 2; d >= 0; --d) {
output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1);
}
auto hypothesis_grouper = hypothesis.group(group_dims);
auto truth_grouper = truth.group(group_dims);
auto hypothesis_iter = hypothesis_grouper.begin();
auto truth_iter = truth_grouper.begin();
auto cmp = std::equal_to<T>();
while (hypothesis_iter != hypothesis_grouper.end() &&
truth_iter != truth_grouper.end()) {
sparse::Group truth_i = *truth_iter;
sparse::Group hypothesis_j = *hypothesis_iter;
std::vector<int64_t> g_truth = truth_i.group();
std::vector<int64_t> g_hypothesis = hypothesis_j.group();
auto truth_seq = truth_i.values<T>();
auto hypothesis_seq = hypothesis_j.values<T>();
if (g_truth == g_hypothesis) {
auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of "
"the buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) =
gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp);
if (normalize_) output_t(loc) /= truth_seq.size();
++hypothesis_iter;
++truth_iter;
} else if (g_truth > g_hypothesis) { // zero-length truth
auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of "
"the buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = hypothesis_seq.size();
if (normalize_ && output_t(loc) != 0.0f) {
output_t(loc) = std::numeric_limits<float>::infinity();
}
++hypothesis_iter;
} else { // zero-length hypothesis
auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of "
"the buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = (normalize_) ? 1.0 : truth_seq.size();
++truth_iter;
}
}
while (hypothesis_iter != hypothesis_grouper.end()) { // zero-length truths
sparse::Group hypothesis_j = *hypothesis_iter;
std::vector<int64_t> g_hypothesis = hypothesis_j.group();
auto hypothesis_seq = hypothesis_j.values<T>();
auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of the "
"buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = hypothesis_seq.size();
if (normalize_ && output_t(loc) != 0.0f) {
output_t(loc) = std::numeric_limits<float>::infinity();
}
++hypothesis_iter;
}
while (truth_iter != truth_grouper.end()) { // missing hypotheses
sparse::Group truth_i = *truth_iter;
std::vector<int64_t> g_truth = truth_i.group();
auto truth_seq = truth_i.values<T>();
auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of the "
"buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = (normalize_) ? 1.0 : truth_seq.size();
++truth_iter;
}
}
| null | null | 195,965
|
21516249110451770540540545715504410219
| 163
|
Fix tf.raw_ops.EditDistance vulnerability with negative indices.
Check that indices are non-negative. Fix several identical code sites.
Clean up grammar in error message.
PiperOrigin-RevId: 445442017
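In plain-C terms, the missing validation boils down to rejecting negative sparse coordinates before they feed the stride inner product; a self-contained sketch, not the actual TensorFlow C++ patch:
#include <stddef.h>
/* Returns 0 when every coordinate is usable; a negative coordinate
 * would otherwise fold into an offset before the output buffer. */
int validate_indices(const long long *idx, size_t rank)
{
    for (size_t d = 0; d < rank; ++d)
        if (idx[d] < 0)
            return -1;  /* surfaced as InvalidArgument upstream */
    return 0;
}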
|
other
|
gpac
|
3dbe11b37d65c8472faf0654410068e5500b3adb
| 1
|
GF_Err diST_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
char str[1024];
GF_DIMSScriptTypesBox *p = (GF_DIMSScriptTypesBox *)s;
i=0;
str[0]=0;
while (1) {
str[i] = gf_bs_read_u8(bs);
if (!str[i]) break;
i++;
}
ISOM_DECREASE_SIZE(p, i);
p->content_script_types = gf_strdup(str);
return GF_OK;
}
| null | null | 195,984
|
106220362428987220004954577919715973308
| 18
|
fixed #2175
|
other
|
tensorflow
|
b619c6f865715ca3b15ef1842b5b95edbaa710ad
| 1
|
void TensorSliceReader::LoadShard(int shard) const {
CHECK_LT(shard, sss_.size());
if (sss_[shard] || !status_.ok()) {
return; // Already loaded, or invalid.
}
string value;
SavedTensorSlices sts;
const string fname = fnames_[shard];
VLOG(1) << "Reading meta data from file " << fname << "...";
Table* table;
Status s = open_function_(fname, &table);
if (!s.ok()) {
status_ = errors::DataLoss("Unable to open table file ", fname, ": ",
s.ToString());
return;
}
sss_[shard].reset(table);
if (!(table->Get(kSavedTensorSlicesKey, &value) &&
ParseProtoUnlimited(&sts, value))) {
status_ = errors::Internal(
"Failed to find the saved tensor slices at the beginning of the "
"checkpoint file: ",
fname);
return;
}
status_ = CheckVersions(sts.meta().versions(), TF_CHECKPOINT_VERSION,
TF_CHECKPOINT_VERSION_MIN_PRODUCER, "Checkpoint",
"checkpoint");
if (!status_.ok()) return;
for (const SavedSliceMeta& ssm : sts.meta().tensor()) {
TensorShape ssm_shape(ssm.shape());
for (const TensorSliceProto& tsp : ssm.slice()) {
TensorSlice ss_slice(tsp);
status_ = RegisterTensorSlice(ssm.name(), ssm_shape, ssm.type(), fname,
ss_slice, &tensors_);
if (!status_.ok()) return;
}
}
}
| null | null | 196,231
|
144504249104074457348474186663869882665
| 39
|
Use BuildTensorShapeBase when parsing unverified TensorShapes during checkpoint loading.
This avoids crashing when the TensorShape has negative dimensions.
PiperOrigin-RevId: 392769882
Change-Id: Id1f7ae7fcf8142193556af47abfda81b13d3cce4
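The safer construction pattern the message describes, reduced to a hedged C sketch; BuildTensorShapeBase is the real TensorFlow helper, everything else here is illustrative:
#include <stddef.h>
/* Dimension sizes come from an unverified checkpoint proto, so they
 * must be rejected, not crashed on, when negative. */
int build_shape_checked(const long long *dims, size_t ndims)
{
    for (size_t i = 0; i < ndims; ++i)
        if (dims[i] < 0)
            return -1;  /* mapped to an error status by the caller */
    return 0;
}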
|
other
|
rtl_433
|
37455483889bd1c641bdaafc493d1cc236b74904
| 1
|
static int acurite_00275rm_decode(r_device *decoder, bitbuffer_t *bitbuffer)
{
int result = 0;
bitbuffer_invert(bitbuffer);
// This sensor repeats a signal three times. Combine as fallback.
uint8_t *b_rows[3] = {0};
int n_rows = 0;
for (int row = 0; row < bitbuffer->num_rows; ++row) {
if (n_rows < 3 && bitbuffer->bits_per_row[row] == 88) {
b_rows[n_rows] = bitbuffer->bb[row];
n_rows++;
}
}
// Combine signal if exactly three repeats were found
if (n_rows == 3) {
uint8_t *b = bitbuffer->bb[bitbuffer->num_rows];
for (int i = 0; i < 11; ++i) {
// The majority bit count wins
b[i] = (b_rows[0][i] & b_rows[1][i]) |
(b_rows[1][i] & b_rows[2][i]) |
(b_rows[2][i] & b_rows[0][i]);
}
bitbuffer->bits_per_row[bitbuffer->num_rows] = 88;
bitbuffer->num_rows += 1;
}
// Output the first valid row
for (int row = 0; row < bitbuffer->num_rows; ++row) {
if (bitbuffer->bits_per_row[row] != 88) {
result = DECODE_ABORT_LENGTH;
continue; // return DECODE_ABORT_LENGTH;
}
uint8_t *b = bitbuffer->bb[row];
// Check CRC
if (crc16lsb(b, 11, 0x00b2, 0x00d0) != 0) {
decoder_log_bitrow(decoder, 1, __func__, b, 11 * 8, "sensor bad CRC");
result = DECODE_FAIL_MIC;
continue; // return DECODE_FAIL_MIC;
}
// Decode common fields
int id = (b[0] << 16) | (b[1] << 8) | b[3];
int battery_low = (b[2] & 0x40) == 0;
int model_flag = (b[2] & 1);
float tempc = ((b[4] << 4) | (b[5] >> 4)) * 0.1 - 100;
int probe = b[5] & 3;
int humidity = ((b[6] & 0x1f) << 2) | (b[7] >> 6);
// Water probe (detects water leak)
int water = (b[7] & 0x0f) == 15; // valid only if (probe == 1)
// Soil probe (detects temperature)
float ptempc = (((b[7] & 0x0f) << 8) | b[8]) * 0.1 - 100; // valid only if (probe == 2 || probe == 3)
// Spot probe (detects temperature and humidity)
int phumidity = b[9] & 0x7f; // valid only if (probe == 3)
/* clang-format off */
data_t *data = data_make(
"model", "", DATA_STRING, model_flag ? "Acurite-00275rm" : "Acurite-00276rm",
"subtype", "Probe", DATA_INT, probe,
"id", "", DATA_INT, id,
"battery_ok", "Battery", DATA_INT, !battery_low,
"temperature_C", "Celsius", DATA_FORMAT, "%.1f C", DATA_DOUBLE, tempc,
"humidity", "Humidity", DATA_FORMAT, "%u %%", DATA_INT, humidity,
"water", "", DATA_COND, probe == 1, DATA_INT, water,
"temperature_1_C", "Celsius", DATA_COND, probe == 2, DATA_FORMAT, "%.1f C", DATA_DOUBLE, ptempc,
"temperature_1_C", "Celsius", DATA_COND, probe == 3, DATA_FORMAT, "%.1f C", DATA_DOUBLE, ptempc,
"humidity_1", "Humidity", DATA_COND, probe == 3, DATA_FORMAT, "%u %%", DATA_INT, phumidity,
"mic", "Integrity", DATA_STRING, "CRC",
NULL);
/* clang-format on */
decoder_output_data(decoder, data);
return 1;
}
// Only returns the latest result, but better than nothing.
return result;
}
| null | null | 196,273
|
201249563317541586609386246410405181739
| 81
|
Fix overflow in Acurite-00275rm (closes #2012)
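The decoder above synthesizes a majority-vote row into bb[bitbuffer->num_rows] without checking that a free row exists. A sketch of the guard the fix implies, assuming the row-capacity constant is BITBUF_ROWS as in rtl_433's bitbuffer.h:
/* Only combine the three repeats if the bitbuffer can hold one more row. */
if (n_rows == 3 && bitbuffer->num_rows < BITBUF_ROWS) {
    /* ... majority-combine into bb[bitbuffer->num_rows] as above ... */
}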
|
other
|
lsquic
|
a74702c630e108125e71898398737baec8f02238
| 1
|
lsquic_qeh_settings (struct qpack_enc_hdl *qeh, unsigned max_table_size,
unsigned dyn_table_size, unsigned max_risked_streams, int server)
{
enum lsqpack_enc_opts enc_opts;
assert(qeh->qeh_flags & QEH_INITIALIZED);
if (qeh->qeh_flags & QEH_HAVE_SETTINGS)
{
LSQ_WARN("settings already set");
return -1;
}
enc_opts = LSQPACK_ENC_OPT_STAGE_2
| (server ? LSQPACK_ENC_OPT_SERVER : 0);
qeh->qeh_tsu_sz = sizeof(qeh->qeh_tsu_buf);
if (0 != lsqpack_enc_init(&qeh->qeh_encoder, (void *) qeh->qeh_conn,
max_table_size, dyn_table_size, max_risked_streams, enc_opts,
qeh->qeh_tsu_buf, &qeh->qeh_tsu_sz))
{
LSQ_INFO("could not initialize QPACK encoder");
return -1;
}
LSQ_DEBUG("%zu-byte post-init TSU", qeh->qeh_tsu_sz);
qeh->qeh_flags |= QEH_HAVE_SETTINGS;
qeh->qeh_max_prefix_size =
lsqpack_enc_header_block_prefix_size(&qeh->qeh_encoder);
LSQ_DEBUG("have settings: max table size=%u; dyn table size=%u; max risked "
"streams=%u", max_table_size, dyn_table_size, max_risked_streams);
if (qeh->qeh_enc_sm_out)
qeh_begin_out(qeh);
return 0;
}
| null | null | 196,276
|
288925027585735265622903529553530375426
| 33
|
Release 3.1.0
|
other
|
barebox
|
0a9f9a7410681e55362f8311537ebc7be9ad0fbe
| 1
|
int digest_generic_verify(struct digest *d, const unsigned char *md)
{
int ret;
int len = digest_length(d);
unsigned char *tmp;
tmp = xmalloc(len);
ret = digest_final(d, tmp);
if (ret)
goto end;
ret = memcmp(md, tmp, len);
ret = ret ? -EINVAL : 0;
end:
free(tmp);
return ret;
}
| null | null | 196,316
|
78392329714916786850884103185275418998
| 18
|
crypto: digest: use crypto_memneq()
When verifying a digest it is important not to leak timing information
through memcmp(). Use crypto_memneq() instead.
Signed-off-by: Sascha Hauer <[email protected]>
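Why memcmp() leaks: it returns at the first differing byte, so response time reveals how many leading bytes of a forged digest are correct. A minimal constant-time comparison sketch, not barebox's actual crypto_memneq() implementation:
#include <stddef.h>
static unsigned long memneq_sketch(const void *a, const void *b, size_t n)
{
    const unsigned char *pa = a, *pb = b;
    unsigned long diff = 0;
    while (n--)
        diff |= (unsigned long)(*pa++ ^ *pb++); /* no early exit */
    return diff; /* zero iff the buffers are equal */
}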
|
other
|
njs
|
39e8fa1b7db1680654527f8fa0e9ee93b334ecba
| 1
|
njs_function_prototype_apply(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
njs_index_t unused)
{
int64_t i, length;
njs_int_t ret;
njs_frame_t *frame;
njs_value_t *this, *arr_like;
njs_array_t *arr;
njs_function_t *func;
if (!njs_is_function(njs_argument(args, 0))) {
njs_type_error(vm, "\"this\" argument is not a function");
return NJS_ERROR;
}
func = njs_function(njs_argument(args, 0));
this = njs_arg(args, nargs, 1);
arr_like = njs_arg(args, nargs, 2);
if (njs_is_null_or_undefined(arr_like)) {
length = 0;
goto activate;
} else if (njs_is_array(arr_like)) {
arr = arr_like->data.u.array;
args = arr->start;
length = arr->length;
goto activate;
} else if (njs_slow_path(!njs_is_object(arr_like))) {
njs_type_error(vm, "second argument is not an array-like object");
return NJS_ERROR;
}
ret = njs_object_length(vm, arr_like, &length);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
arr = njs_array_alloc(vm, 1, length, NJS_ARRAY_SPARE);
if (njs_slow_path(arr == NULL)) {
return NJS_ERROR;
}
args = arr->start;
for (i = 0; i < length; i++) {
ret = njs_value_property_i64(vm, arr_like, i, &args[i]);
if (njs_slow_path(ret == NJS_ERROR)) {
return ret;
}
}
activate:
/* Skip the "apply" method frame. */
vm->top_frame->skip = 1;
frame = (njs_frame_t *) vm->top_frame;
ret = njs_function_frame(vm, func, this, args, length, 0);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = njs_function_frame_invoke(vm, frame->native.retval);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
return NJS_DECLINED;
}
| null | null | 196,320
|
139959814390637136613861597730885170482
| 75
|
Fixed Function.prototype.apply() with slow arrays.
Previously, the function had two issues:
* array->start was referenced without checking for fast array flag
* the created arguments list was not sanity-checked for its length,
which can be very large.
The fix is to remove the micro-optimization for arrays and introduce a
size limit for the arguments list.
This closes #449 issue in Github.
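A sketch of the second half of the fix: the length read from an array-like object is attacker-controlled (up to 2^53-1), so it must be clamped before njs_array_alloc(). The concrete limit value and where njs defines it are assumptions here:
/* Reject absurd argument counts before allocating storage for them. */
if (length > 1024 /* assumed engine limit */) {
    njs_type_error(vm, "argument list is too long");
    return NJS_ERROR;
}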
|
other
|
linux
|
64620e0a1e712a778095bd35cbb277dc2259281f
| 1
|
static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
struct bpf_call_arg_meta *meta,
const struct bpf_func_proto *fn)
{
u32 regno = BPF_REG_1 + arg;
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_arg_type arg_type = fn->arg_type[arg];
enum bpf_reg_type type = reg->type;
int err = 0;
if (arg_type == ARG_DONTCARE)
return 0;
err = check_reg_arg(env, regno, SRC_OP);
if (err)
return err;
if (arg_type == ARG_ANYTHING) {
if (is_pointer_value(env, regno)) {
verbose(env, "R%d leaks addr into helper function\n",
regno);
return -EACCES;
}
return 0;
}
if (type_is_pkt_pointer(type) &&
!may_access_direct_pkt_data(env, meta, BPF_READ)) {
verbose(env, "helper access to the packet is not allowed\n");
return -EACCES;
}
if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
err = resolve_map_arg_type(env, meta, &arg_type);
if (err)
return err;
}
if (register_is_null(reg) && type_may_be_null(arg_type))
/* A NULL register has a SCALAR_VALUE type, so skip
* type checking.
*/
goto skip_type_check;
err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]);
if (err)
return err;
switch ((u32)type) {
case SCALAR_VALUE:
/* Pointer types where reg offset is explicitly allowed: */
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
case PTR_TO_MAP_KEY:
case PTR_TO_MAP_VALUE:
case PTR_TO_MEM:
case PTR_TO_MEM | MEM_RDONLY:
case PTR_TO_BUF:
case PTR_TO_BUF | MEM_RDONLY:
case PTR_TO_STACK:
break;
/* All the rest must be rejected: */
default:
err = __check_ptr_off_reg(env, reg, regno,
type == PTR_TO_BTF_ID);
if (err < 0)
return err;
break;
}
skip_type_check:
if (reg->ref_obj_id) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id,
meta->ref_obj_id);
return -EFAULT;
}
meta->ref_obj_id = reg->ref_obj_id;
}
if (arg_type == ARG_CONST_MAP_PTR) {
/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
if (meta->map_ptr) {
/* Use map_uid (which is unique id of inner map) to reject:
* inner_map1 = bpf_map_lookup_elem(outer_map, key1)
* inner_map2 = bpf_map_lookup_elem(outer_map, key2)
* if (inner_map1 && inner_map2) {
* timer = bpf_map_lookup_elem(inner_map1);
* if (timer)
* // mismatch would have been allowed
* bpf_timer_init(timer, inner_map2);
* }
*
* Comparing map_ptr is enough to distinguish normal and outer maps.
*/
if (meta->map_ptr != reg->map_ptr ||
meta->map_uid != reg->map_uid) {
verbose(env,
"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
meta->map_uid, reg->map_uid);
return -EINVAL;
}
}
meta->map_ptr = reg->map_ptr;
meta->map_uid = reg->map_uid;
} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
/* bpf_map_xxx(..., map_ptr, ..., key) call:
* check that [key, key + map->key_size) are within
* stack limits and initialized
*/
if (!meta->map_ptr) {
/* in function declaration map_ptr must come before
* map_key, so that it's verified and known before
* we have to check map_key here. Otherwise it means
* that kernel subsystem misconfigured verifier
*/
verbose(env, "invalid map_ptr to access map->key\n");
return -EACCES;
}
err = check_helper_mem_access(env, regno,
meta->map_ptr->key_size, false,
NULL);
} else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
if (type_may_be_null(arg_type) && register_is_null(reg))
return 0;
/* bpf_map_xxx(..., map_ptr, ..., value) call:
* check [value, value + map->value_size) validity
*/
if (!meta->map_ptr) {
/* kernel subsystem misconfigured verifier */
verbose(env, "invalid map_ptr to access map->value\n");
return -EACCES;
}
meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
err = check_helper_mem_access(env, regno,
meta->map_ptr->value_size, false,
meta);
} else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
if (!reg->btf_id) {
verbose(env, "Helper has invalid btf_id in R%d\n", regno);
return -EACCES;
}
meta->ret_btf = reg->btf;
meta->ret_btf_id = reg->btf_id;
} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
if (meta->func_id == BPF_FUNC_spin_lock) {
if (process_spin_lock(env, regno, true))
return -EACCES;
} else if (meta->func_id == BPF_FUNC_spin_unlock) {
if (process_spin_lock(env, regno, false))
return -EACCES;
} else {
verbose(env, "verifier internal error\n");
return -EFAULT;
}
} else if (arg_type == ARG_PTR_TO_TIMER) {
if (process_timer_func(env, regno, meta))
return -EACCES;
} else if (arg_type == ARG_PTR_TO_FUNC) {
meta->subprogno = reg->subprogno;
} else if (arg_type_is_mem_ptr(arg_type)) {
/* The access to this pointer is only checked when we hit the
* next is_mem_size argument below.
*/
meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
/* This is used to refine r0 return value bounds for helpers
* that enforce this value as an upper bound on return values.
* See do_refine_retval_range() for helpers that can refine
* the return value. C type of helper is u32 so we pull register
* bound from umax_value however, if negative verifier errors
* out. Only upper bounds can be learned because retval is an
* int type and negative retvals are allowed.
*/
meta->msize_max_value = reg->umax_value;
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
*/
if (!tnum_is_const(reg->var_off))
/* For unprivileged variable accesses, disable raw
* mode so that the program is required to
* initialize all the memory that the helper could
* just partially fill up.
*/
meta = NULL;
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
regno);
return -EACCES;
}
if (reg->umin_value == 0) {
err = check_helper_mem_access(env, regno - 1, 0,
zero_size_allowed,
meta);
if (err)
return err;
}
if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
regno);
return -EACCES;
}
err = check_helper_mem_access(env, regno - 1,
reg->umax_value,
zero_size_allowed, meta);
if (!err)
err = mark_chain_precision(env, regno);
} else if (arg_type_is_alloc_size(arg_type)) {
if (!tnum_is_const(reg->var_off)) {
verbose(env, "R%d is not a known constant'\n",
regno);
return -EACCES;
}
meta->mem_size = reg->var_off.value;
} else if (arg_type_is_int_ptr(arg_type)) {
int size = int_ptr_type_to_size(arg_type);
err = check_helper_mem_access(env, regno, size, false, meta);
if (err)
return err;
err = check_ptr_alignment(env, reg, 0, size, true);
} else if (arg_type == ARG_PTR_TO_CONST_STR) {
struct bpf_map *map = reg->map_ptr;
int map_off;
u64 map_addr;
char *str_ptr;
if (!bpf_map_is_rdonly(map)) {
verbose(env, "R%d does not point to a readonly map'\n", regno);
return -EACCES;
}
if (!tnum_is_const(reg->var_off)) {
verbose(env, "R%d is not a constant address'\n", regno);
return -EACCES;
}
if (!map->ops->map_direct_value_addr) {
verbose(env, "no direct value access support for this map type\n");
return -EACCES;
}
err = check_map_access(env, regno, reg->off,
map->value_size - reg->off, false);
if (err)
return err;
map_off = reg->off + reg->var_off.value;
err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
if (err) {
verbose(env, "direct value access on string failed\n");
return err;
}
str_ptr = (char *)(long)(map_addr);
if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
verbose(env, "string is not zero-terminated\n");
return -EINVAL;
}
}
return err;
}
| null | null | 196,322
|
43280363038976164306013313965209411511
| 273
|
bpf: Fix out of bounds access for ringbuf helpers
Both bpf_ringbuf_submit() and bpf_ringbuf_discard() have ARG_PTR_TO_ALLOC_MEM
in their bpf_func_proto definition as their first argument. They both expect
the result from a prior bpf_ringbuf_reserve() call which has a return type of
RET_PTR_TO_ALLOC_MEM_OR_NULL.
Meaning, after a NULL check in the code, the verifier will promote the register
type in the non-NULL branch to a PTR_TO_MEM and in the NULL branch to a known
zero scalar. Generally, pointer arithmetic on PTR_TO_MEM is allowed, so the
latter could have an offset.
The ARG_PTR_TO_ALLOC_MEM expects a PTR_TO_MEM register type. However, the non-
zero result from bpf_ringbuf_reserve() must be fed into either bpf_ringbuf_submit()
or bpf_ringbuf_discard() but with the original offset given it will then read
out the struct bpf_ringbuf_hdr mapping.
The verifier failed to enforce a zero offset, so an out-of-bounds access
can be triggered, which could be used to escalate privileges if unprivileged
BPF was enabled (disabled by default in kernel).
Fixes: 457f44363a88 ("bpf: Implement BPF ring buffer and verifier support for it")
Reported-by: <[email protected]> (SecCoder Security Lab)
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
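A hypothetical BPF-C program (not from the commit) showing the exact pattern the pre-fix verifier accepted; rb stands for some ringbuf map:
char *sample = bpf_ringbuf_reserve(&rb, 64, 0);
if (sample) {
    sample += 8;                   /* pointer arithmetic on PTR_TO_MEM */
    bpf_ringbuf_submit(sample, 0); /* pre-fix: nonzero offset accepted, so
                                    * the helper reads a bogus
                                    * bpf_ringbuf_hdr before the record */
}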
|
other
|
vim
|
409510c588b1eec1ae33511ae97a21eb8e110895
| 1
|
find_pattern_in_path(
char_u *ptr, // pointer to search pattern
int dir UNUSED, // direction of expansion
int len, // length of search pattern
int whole, // match whole words only
int skip_comments, // don't match inside comments
int type, // Type of search; are we looking for a type?
// a macro?
long count,
int action, // What to do when we find it
linenr_T start_lnum, // first line to start searching
linenr_T end_lnum) // last line for searching
{
SearchedFile *files; // Stack of included files
SearchedFile *bigger; // When we need more space
int max_path_depth = 50;
long match_count = 1;
char_u *pat;
char_u *new_fname;
char_u *curr_fname = curbuf->b_fname;
char_u *prev_fname = NULL;
linenr_T lnum;
int depth;
int depth_displayed; // For type==CHECK_PATH
int old_files;
int already_searched;
char_u *file_line;
char_u *line;
char_u *p;
char_u save_char;
int define_matched;
regmatch_T regmatch;
regmatch_T incl_regmatch;
regmatch_T def_regmatch;
int matched = FALSE;
int did_show = FALSE;
int found = FALSE;
int i;
char_u *already = NULL;
char_u *startp = NULL;
char_u *inc_opt = NULL;
#if defined(FEAT_QUICKFIX)
win_T *curwin_save = NULL;
#endif
regmatch.regprog = NULL;
incl_regmatch.regprog = NULL;
def_regmatch.regprog = NULL;
file_line = alloc(LSIZE);
if (file_line == NULL)
return;
if (type != CHECK_PATH && type != FIND_DEFINE
// when CONT_SOL is set compare "ptr" with the beginning of the
// line is faster than quote_meta/regcomp/regexec "ptr" -- Acevedo
&& !compl_status_sol())
{
pat = alloc(len + 5);
if (pat == NULL)
goto fpip_end;
sprintf((char *)pat, whole ? "\\<%.*s\\>" : "%.*s", len, ptr);
// ignore case according to p_ic, p_scs and pat
regmatch.rm_ic = ignorecase(pat);
regmatch.regprog = vim_regcomp(pat, magic_isset() ? RE_MAGIC : 0);
vim_free(pat);
if (regmatch.regprog == NULL)
goto fpip_end;
}
inc_opt = (*curbuf->b_p_inc == NUL) ? p_inc : curbuf->b_p_inc;
if (*inc_opt != NUL)
{
incl_regmatch.regprog = vim_regcomp(inc_opt,
magic_isset() ? RE_MAGIC : 0);
if (incl_regmatch.regprog == NULL)
goto fpip_end;
incl_regmatch.rm_ic = FALSE; // don't ignore case in incl. pat.
}
if (type == FIND_DEFINE && (*curbuf->b_p_def != NUL || *p_def != NUL))
{
def_regmatch.regprog = vim_regcomp(*curbuf->b_p_def == NUL
? p_def : curbuf->b_p_def,
magic_isset() ? RE_MAGIC : 0);
if (def_regmatch.regprog == NULL)
goto fpip_end;
def_regmatch.rm_ic = FALSE; // don't ignore case in define pat.
}
files = lalloc_clear(max_path_depth * sizeof(SearchedFile), TRUE);
if (files == NULL)
goto fpip_end;
old_files = max_path_depth;
depth = depth_displayed = -1;
lnum = start_lnum;
if (end_lnum > curbuf->b_ml.ml_line_count)
end_lnum = curbuf->b_ml.ml_line_count;
if (lnum > end_lnum) // do at least one line
lnum = end_lnum;
line = ml_get(lnum);
for (;;)
{
if (incl_regmatch.regprog != NULL
&& vim_regexec(&incl_regmatch, line, (colnr_T)0))
{
char_u *p_fname = (curr_fname == curbuf->b_fname)
? curbuf->b_ffname : curr_fname;
if (inc_opt != NULL && strstr((char *)inc_opt, "\\zs") != NULL)
// Use text from '\zs' to '\ze' (or end) of 'include'.
new_fname = find_file_name_in_path(incl_regmatch.startp[0],
(int)(incl_regmatch.endp[0] - incl_regmatch.startp[0]),
FNAME_EXP|FNAME_INCL|FNAME_REL, 1L, p_fname);
else
// Use text after match with 'include'.
new_fname = file_name_in_line(incl_regmatch.endp[0], 0,
FNAME_EXP|FNAME_INCL|FNAME_REL, 1L, p_fname, NULL);
already_searched = FALSE;
if (new_fname != NULL)
{
// Check whether we have already searched in this file
for (i = 0;; i++)
{
if (i == depth + 1)
i = old_files;
if (i == max_path_depth)
break;
if (fullpathcmp(new_fname, files[i].name, TRUE, TRUE)
& FPC_SAME)
{
if (type != CHECK_PATH
&& action == ACTION_SHOW_ALL
&& files[i].matched)
{
msg_putchar('\n'); // cursor below last one
if (!got_int) // don't display if 'q'
// typed at "--more--"
// message
{
msg_home_replace_hl(new_fname);
msg_puts(_(" (includes previously listed match)"));
prev_fname = NULL;
}
}
VIM_CLEAR(new_fname);
already_searched = TRUE;
break;
}
}
}
if (type == CHECK_PATH && (action == ACTION_SHOW_ALL
|| (new_fname == NULL && !already_searched)))
{
if (did_show)
msg_putchar('\n'); // cursor below last one
else
{
gotocmdline(TRUE); // cursor at status line
msg_puts_title(_("--- Included files "));
if (action != ACTION_SHOW_ALL)
msg_puts_title(_("not found "));
msg_puts_title(_("in path ---\n"));
}
did_show = TRUE;
while (depth_displayed < depth && !got_int)
{
++depth_displayed;
for (i = 0; i < depth_displayed; i++)
msg_puts(" ");
msg_home_replace(files[depth_displayed].name);
msg_puts(" -->\n");
}
if (!got_int) // don't display if 'q' typed
// for "--more--" message
{
for (i = 0; i <= depth_displayed; i++)
msg_puts(" ");
if (new_fname != NULL)
{
// using "new_fname" is more reliable, e.g., when
// 'includeexpr' is set.
msg_outtrans_attr(new_fname, HL_ATTR(HLF_D));
}
else
{
/*
* Isolate the file name.
* Include the surrounding "" or <> if present.
*/
if (inc_opt != NULL
&& strstr((char *)inc_opt, "\\zs") != NULL)
{
// pattern contains \zs, use the match
p = incl_regmatch.startp[0];
i = (int)(incl_regmatch.endp[0]
- incl_regmatch.startp[0]);
}
else
{
// find the file name after the end of the match
for (p = incl_regmatch.endp[0];
*p && !vim_isfilec(*p); p++)
;
for (i = 0; vim_isfilec(p[i]); i++)
;
}
if (i == 0)
{
// Nothing found, use the rest of the line.
p = incl_regmatch.endp[0];
i = (int)STRLEN(p);
}
// Avoid checking before the start of the line, can
// happen if \zs appears in the regexp.
else if (p > line)
{
if (p[-1] == '"' || p[-1] == '<')
{
--p;
++i;
}
if (p[i] == '"' || p[i] == '>')
++i;
}
save_char = p[i];
p[i] = NUL;
msg_outtrans_attr(p, HL_ATTR(HLF_D));
p[i] = save_char;
}
if (new_fname == NULL && action == ACTION_SHOW_ALL)
{
if (already_searched)
msg_puts(_(" (Already listed)"));
else
msg_puts(_(" NOT FOUND"));
}
}
out_flush(); // output each line directly
}
if (new_fname != NULL)
{
// Push the new file onto the file stack
if (depth + 1 == old_files)
{
bigger = ALLOC_MULT(SearchedFile, max_path_depth * 2);
if (bigger != NULL)
{
for (i = 0; i <= depth; i++)
bigger[i] = files[i];
for (i = depth + 1; i < old_files + max_path_depth; i++)
{
bigger[i].fp = NULL;
bigger[i].name = NULL;
bigger[i].lnum = 0;
bigger[i].matched = FALSE;
}
for (i = old_files; i < max_path_depth; i++)
bigger[i + max_path_depth] = files[i];
old_files += max_path_depth;
max_path_depth *= 2;
vim_free(files);
files = bigger;
}
}
if ((files[depth + 1].fp = mch_fopen((char *)new_fname, "r"))
== NULL)
vim_free(new_fname);
else
{
if (++depth == old_files)
{
/*
* lalloc() for 'bigger' must have failed above. We
* will forget one of our already visited files now.
*/
vim_free(files[old_files].name);
++old_files;
}
files[depth].name = curr_fname = new_fname;
files[depth].lnum = 0;
files[depth].matched = FALSE;
if (action == ACTION_EXPAND)
{
msg_hist_off = TRUE; // reset in msg_trunc_attr()
vim_snprintf((char*)IObuff, IOSIZE,
_("Scanning included file: %s"),
(char *)new_fname);
msg_trunc_attr((char *)IObuff, TRUE, HL_ATTR(HLF_R));
}
else if (p_verbose >= 5)
{
verbose_enter();
smsg(_("Searching included file %s"),
(char *)new_fname);
verbose_leave();
}
}
}
}
else
{
/*
* Check if the line is a define (type == FIND_DEFINE)
*/
p = line;
search_line:
define_matched = FALSE;
if (def_regmatch.regprog != NULL
&& vim_regexec(&def_regmatch, line, (colnr_T)0))
{
/*
* Pattern must be first identifier after 'define', so skip
* to that position before checking for match of pattern. Also
* don't let it match beyond the end of this identifier.
*/
p = def_regmatch.endp[0];
while (*p && !vim_iswordc(*p))
p++;
define_matched = TRUE;
}
/*
* Look for a match. Don't do this if we are looking for a
* define and this line didn't match define_prog above.
*/
if (def_regmatch.regprog == NULL || define_matched)
{
if (define_matched || compl_status_sol())
{
// compare the first "len" chars from "ptr"
startp = skipwhite(p);
if (p_ic)
matched = !MB_STRNICMP(startp, ptr, len);
else
matched = !STRNCMP(startp, ptr, len);
if (matched && define_matched && whole
&& vim_iswordc(startp[len]))
matched = FALSE;
}
else if (regmatch.regprog != NULL
		    && vim_regexec(&regmatch, line, (colnr_T)(p - line)))
{
matched = TRUE;
startp = regmatch.startp[0];
/*
* Check if the line is not a comment line (unless we are
* looking for a define). A line starting with "# define"
* is not considered to be a comment line.
*/
if (!define_matched && skip_comments)
{
if ((*line != '#' ||
STRNCMP(skipwhite(line + 1), "define", 6) != 0)
&& get_leader_len(line, NULL, FALSE, TRUE))
matched = FALSE;
/*
* Also check for a "/ *" or "/ /" before the match.
* Skips lines like "int backwards; / * normal index
* * /" when looking for "normal".
* Note: Doesn't skip "/ *" in comments.
*/
p = skipwhite(line);
if (matched
|| (p[0] == '/' && p[1] == '*') || p[0] == '*')
for (p = line; *p && p < startp; ++p)
{
if (matched
&& p[0] == '/'
&& (p[1] == '*' || p[1] == '/'))
{
matched = FALSE;
// After "//" all text is comment
if (p[1] == '/')
break;
++p;
}
else if (!matched && p[0] == '*' && p[1] == '/')
{
// Can find match after "* /".
matched = TRUE;
++p;
}
}
}
}
}
}
if (matched)
{
if (action == ACTION_EXPAND)
{
int cont_s_ipos = FALSE;
int add_r;
char_u *aux;
if (depth == -1 && lnum == curwin->w_cursor.lnum)
break;
found = TRUE;
aux = p = startp;
if (compl_status_adding())
{
p += ins_compl_len();
if (vim_iswordp(p))
goto exit_matched;
p = find_word_start(p);
}
p = find_word_end(p);
i = (int)(p - aux);
if (compl_status_adding() && i == ins_compl_len())
{
// IOSIZE > compl_length, so the STRNCPY works
STRNCPY(IObuff, aux, i);
// Get the next line: when "depth" < 0 from the current
// buffer, otherwise from the included file. Jump to
// exit_matched when past the last line.
if (depth < 0)
{
if (lnum >= end_lnum)
goto exit_matched;
line = ml_get(++lnum);
}
else if (vim_fgets(line = file_line,
LSIZE, files[depth].fp))
goto exit_matched;
// we read a line, set "already" to check this "line" later
// if depth >= 0 we'll increase files[depth].lnum far
// below -- Acevedo
already = aux = p = skipwhite(line);
p = find_word_start(p);
p = find_word_end(p);
if (p > aux)
{
if (*aux != ')' && IObuff[i-1] != TAB)
{
if (IObuff[i-1] != ' ')
IObuff[i++] = ' ';
// IObuf =~ "\(\k\|\i\).* ", thus i >= 2
if (p_js
&& (IObuff[i-2] == '.'
|| (vim_strchr(p_cpo, CPO_JOINSP) == NULL
&& (IObuff[i-2] == '?'
|| IObuff[i-2] == '!'))))
IObuff[i++] = ' ';
}
// copy as much as possible of the new word
if (p - aux >= IOSIZE - i)
p = aux + IOSIZE - i - 1;
STRNCPY(IObuff + i, aux, p - aux);
i += (int)(p - aux);
cont_s_ipos = TRUE;
}
IObuff[i] = NUL;
aux = IObuff;
if (i == ins_compl_len())
goto exit_matched;
}
add_r = ins_compl_add_infercase(aux, i, p_ic,
curr_fname == curbuf->b_fname ? NULL : curr_fname,
dir, cont_s_ipos);
if (add_r == OK)
// if dir was BACKWARD then honor it just once
dir = FORWARD;
else if (add_r == FAIL)
break;
}
else if (action == ACTION_SHOW_ALL)
{
found = TRUE;
if (!did_show)
gotocmdline(TRUE); // cursor at status line
if (curr_fname != prev_fname)
{
if (did_show)
msg_putchar('\n'); // cursor below last one
if (!got_int) // don't display if 'q' typed
// at "--more--" message
msg_home_replace_hl(curr_fname);
prev_fname = curr_fname;
}
did_show = TRUE;
if (!got_int)
show_pat_in_path(line, type, TRUE, action,
(depth == -1) ? NULL : files[depth].fp,
(depth == -1) ? &lnum : &files[depth].lnum,
match_count++);
// Set matched flag for this file and all the ones that
// include it
for (i = 0; i <= depth; ++i)
files[i].matched = TRUE;
}
else if (--count <= 0)
{
found = TRUE;
if (depth == -1 && lnum == curwin->w_cursor.lnum
#if defined(FEAT_QUICKFIX)
&& g_do_tagpreview == 0
#endif
)
emsg(_(e_match_is_on_current_line));
else if (action == ACTION_SHOW)
{
show_pat_in_path(line, type, did_show, action,
(depth == -1) ? NULL : files[depth].fp,
(depth == -1) ? &lnum : &files[depth].lnum, 1L);
did_show = TRUE;
}
else
{
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
#if defined(FEAT_QUICKFIX)
// ":psearch" uses the preview window
if (g_do_tagpreview != 0)
{
curwin_save = curwin;
prepare_tagpreview(TRUE, TRUE, FALSE);
}
#endif
if (action == ACTION_SPLIT)
{
if (win_split(0, 0) == FAIL)
break;
RESET_BINDING(curwin);
}
if (depth == -1)
{
// match in current file
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
if (!win_valid(curwin_save))
break;
if (!GETFILE_SUCCESS(getfile(
curwin_save->w_buffer->b_fnum, NULL,
NULL, TRUE, lnum, FALSE)))
break; // failed to jump to file
}
else
#endif
setpcmark();
curwin->w_cursor.lnum = lnum;
check_cursor();
}
else
{
if (!GETFILE_SUCCESS(getfile(
0, files[depth].name, NULL, TRUE,
files[depth].lnum, FALSE)))
break; // failed to jump to file
// autocommands may have changed the lnum, we don't
// want that here
curwin->w_cursor.lnum = files[depth].lnum;
}
}
if (action != ACTION_SHOW)
{
curwin->w_cursor.col = (colnr_T)(startp - line);
curwin->w_set_curswant = TRUE;
}
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0
&& curwin != curwin_save && win_valid(curwin_save))
{
// Return cursor to where we were
validate_cursor();
redraw_later(VALID);
win_enter(curwin_save, TRUE);
}
# ifdef FEAT_PROP_POPUP
else if (WIN_IS_POPUP(curwin))
// can't keep focus in popup window
win_enter(firstwin, TRUE);
# endif
#endif
break;
}
exit_matched:
matched = FALSE;
// look for other matches in the rest of the line if we
// are not at the end of it already
if (def_regmatch.regprog == NULL
&& action == ACTION_EXPAND
&& !compl_status_sol()
&& *startp != NUL
&& *(p = startp + mb_ptr2len(startp)) != NUL)
goto search_line;
}
line_breakcheck();
if (action == ACTION_EXPAND)
ins_compl_check_keys(30, FALSE);
if (got_int || ins_compl_interrupted())
break;
/*
* Read the next line. When reading an included file and encountering
* end-of-file, close the file and continue in the file that included
* it.
*/
while (depth >= 0 && !already
&& vim_fgets(line = file_line, LSIZE, files[depth].fp))
{
fclose(files[depth].fp);
--old_files;
files[old_files].name = files[depth].name;
files[old_files].matched = files[depth].matched;
--depth;
curr_fname = (depth == -1) ? curbuf->b_fname
: files[depth].name;
if (depth < depth_displayed)
depth_displayed = depth;
}
if (depth >= 0) // we could read the line
{
files[depth].lnum++;
// Remove any CR and LF from the line.
i = (int)STRLEN(line);
if (i > 0 && line[i - 1] == '\n')
line[--i] = NUL;
if (i > 0 && line[i - 1] == '\r')
line[--i] = NUL;
}
else if (!already)
{
if (++lnum > end_lnum)
break;
line = ml_get(lnum);
}
already = NULL;
}
// End of big for (;;) loop.
// Close any files that are still open.
for (i = 0; i <= depth; i++)
{
fclose(files[i].fp);
vim_free(files[i].name);
}
for (i = old_files; i < max_path_depth; i++)
vim_free(files[i].name);
vim_free(files);
if (type == CHECK_PATH)
{
if (!did_show)
{
if (action != ACTION_SHOW_ALL)
msg(_("All included files were found"));
else
msg(_("No included files"));
}
}
else if (!found && action != ACTION_EXPAND)
{
if (got_int || ins_compl_interrupted())
emsg(_(e_interrupted));
else if (type == FIND_DEFINE)
emsg(_(e_couldnt_find_definition));
else
emsg(_(e_couldnt_find_pattern));
}
if (action == ACTION_SHOW || action == ACTION_SHOW_ALL)
msg_end();
fpip_end:
vim_free(file_line);
vim_regfree(regmatch.regprog);
vim_regfree(incl_regmatch.regprog);
vim_regfree(def_regmatch.regprog);
}
| null | null | 196,328
|
38231164732816384836004234817431625979
| 684
|
patch 8.2.5050: using freed memory when searching for pattern in path
Problem: Using freed memory when searching for pattern in path.
Solution: Make a copy of the line.
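What "make a copy of the line" amounts to, sketched against the function above; vim_strsave() is vim's existing string-duplication helper, and the placement is illustrative:
/* ml_get() returns a pointer into buffer memory that later calls can
 * free or reallocate, so duplicate the line before searching in it. */
char_u *line_copy = vim_strsave(ml_get(lnum));
if (line_copy == NULL)
    goto fpip_end;
/* ... match against line_copy instead of the ml_get() pointer,
 * then vim_free(line_copy) ... */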
|
other
|
vim
|
44db8213d38c39877d2148eff6a72f4beccfb94e
| 1
|
yank_copy_line(struct block_def *bd, long y_idx, int exclude_trailing_space)
{
char_u *pnew;
if (exclude_trailing_space)
bd->endspaces = 0;
if ((pnew = alloc(bd->startspaces + bd->endspaces + bd->textlen + 1))
== NULL)
return FAIL;
y_current->y_array[y_idx] = pnew;
vim_memset(pnew, ' ', (size_t)bd->startspaces);
pnew += bd->startspaces;
mch_memmove(pnew, bd->textstart, (size_t)bd->textlen);
pnew += bd->textlen;
vim_memset(pnew, ' ', (size_t)bd->endspaces);
pnew += bd->endspaces;
if (exclude_trailing_space)
{
int s = bd->textlen + bd->endspaces;
while (VIM_ISWHITE(*(bd->textstart + s - 1)) && s > 0)
{
s = s - (*mb_head_off)(bd->textstart, bd->textstart + s - 1) - 1;
pnew--;
}
}
*pnew = NUL;
return OK;
}
| null | null | 196,578
|
246228854524410019724598079825181397279
| 29
|
patch 8.2.4219: reading before the start of the line
Problem: Reading before the start of the line.
Solution: Check boundary before trying to read the character.
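The defect sits in the trailing-whitespace loop of yank_copy_line() above: textstart[s - 1] is dereferenced before s > 0 is tested, so an all-whitespace block reads one byte before the line. Reordering the condition is the essence of the fix; a sketch:
while (s > 0 && VIM_ISWHITE(*(bd->textstart + s - 1)))
{
    s = s - (*mb_head_off)(bd->textstart, bd->textstart + s - 1) - 1;
    pnew--;
}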
|
other
|
tensorflow
|
4aacb30888638da75023e6601149415b39763d76
| 1
|
void DoCompute(OpKernelContext* c) {
core::RefCountPtr<Var> v;
OP_REQUIRES_OK(c, LookupResource(c, HandleFromInput(c, 0), &v));
Tensor* params = v->tensor();
const Tensor& indices = c->input(1);
const Tensor& updates = c->input(2);
// Check that rank(updates.shape) = rank(indices.shape + params.shape[1:])
OP_REQUIRES(c,
updates.dims() == 0 ||
updates.dims() == indices.dims() + params->dims() - 1,
errors::InvalidArgument(
"Must have updates.shape = indices.shape + "
"params.shape[1:] or updates.shape = [], got ",
"updates.shape ", updates.shape().DebugString(),
", indices.shape ", indices.shape().DebugString(),
", params.shape ", params->shape().DebugString()));
// Check that we have enough index space
const int64_t N_big = indices.NumElements();
OP_REQUIRES(
c, N_big <= std::numeric_limits<Index>::max(),
errors::InvalidArgument("indices has too many elements for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", N_big, " > ",
std::numeric_limits<Index>::max()));
const Index N = static_cast<Index>(N_big);
OP_REQUIRES(
c, params->dim_size(0) <= std::numeric_limits<Index>::max(),
errors::InvalidArgument("params.shape[0] too large for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", params->dim_size(0), " > ",
std::numeric_limits<Index>::max()));
if (N > 0) {
auto indices_flat = indices.flat<Index>();
auto params_flat = params->flat_outer_dims<T>();
if (TensorShapeUtils::IsScalar(updates.shape())) {
const auto update = updates.scalar<T>();
functor::ScatterScalarFunctor<Device, T, Index, op> functor;
const Index bad_i = functor(c, c->template eigen_device<Device>(),
params_flat, update, indices_flat);
OP_REQUIRES(c, bad_i < 0,
errors::InvalidArgument(
"indices", SliceDebugString(indices.shape(), bad_i),
" = ", indices_flat(bad_i), " is not in [0, ",
params->dim_size(0), ")"));
} else {
int64_t num_updates = updates.NumElements();
OP_REQUIRES(
c, TensorShapeUtils::StartsWith(updates.shape(), indices.shape()),
errors::InvalidArgument(
"The shape of indices (", indices.shape().DebugString(),
") must be a prefix of the shape of updates (",
updates.shape().DebugString(), ")"));
auto updates_flat = updates.shaped<T, 2>({N, num_updates / N});
functor::ScatterFunctor<Device, T, Index, op> functor;
const Index bad_i = functor(c, c->template eigen_device<Device>(),
params_flat, updates_flat, indices_flat);
OP_REQUIRES(c, bad_i < 0,
errors::InvalidArgument(
"indices", SliceDebugString(indices.shape(), bad_i),
" = ", indices_flat(bad_i), " is not in [0, ",
params->dim_size(0), ")"));
}
}
}
| null | null | 196,587
|
163644490547300222226168716636939380480
| 69
|
Disallow division by zero FPE in `tf.raw_ops.ResourceScatterDiv`
Had to update a test that was broken.
PiperOrigin-RevId: 388516976
Change-Id: Ic358e6bf0559e011539974d453fc7aa18b427e9c
|
other
|
ffjpeg
|
caade60a69633d74100bd3c2528bddee0b6a1291
| 1
|
int main(int argc, char *argv[])
{
void *jfif = NULL;
BMP bmp = {0};
if (argc < 3) {
printf(
"jfif test program\n"
"usage: ffjpeg -d filename decode jpg file to decode.bmp\n"
" ffjpeg -e filename encode bmp file to encode.jpg\n"
);
return 0;
}
if (strcmp(argv[1], "-d") == 0) {
jfif = jfif_load(argv[2]);
jfif_decode(jfif, &bmp);
jfif_free (jfif);
bmp_save(&bmp, "decode.bmp");
bmp_free(&bmp);
} else if (strcmp(argv[1], "-e") == 0) {
bmp_load(&bmp, argv[2]);
jfif = jfif_encode(&bmp);
bmp_free(&bmp);
jfif_save(jfif, "encode.jpg");
jfif_free(jfif);
}
return 0;
}
| null | null | 196,609
|
327590585403055995575159456174046768253
| 30
|
Exit the program when the BMP file is oversize.
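A hedged sketch of the guard this implies for the encode path; the BMP field names and the concrete limit are assumptions, not project code:
if (bmp_load(&bmp, argv[2]) != 0 ||
    bmp.width  <= 0 || bmp.width  > 16384 ||
    bmp.height <= 0 || bmp.height > 16384) {
    fprintf(stderr, "bmp file is invalid or oversize\n");
    return 1;
}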
|
other
|
booth
|
35bf0b7b048d715f671eb68974fb6b4af6528c67
| 1
|
static int setup_config(int type)
{
int rv;
rv = read_config(cl.configfile, type);
if (rv < 0)
goto out;
if (is_auth_req()) {
rv = read_authkey();
if (rv < 0)
goto out;
#if HAVE_LIBGCRYPT
if (!gcry_check_version(NULL)) {
log_error("gcry_check_version");
rv = -ENOENT;
goto out;
}
gcry_control(GCRYCTL_DISABLE_SECMEM, 0);
gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
#endif
}
/* Set "local" pointer, ignoring errors. */
if (cl.type == DAEMON && cl.site[0]) {
if (!find_site_by_name(cl.site, &local, 1)) {
log_error("Cannot find \"%s\" in the configuration.",
cl.site);
return -EINVAL;
}
local->local = 1;
} else
find_myself(NULL, type == CLIENT || type == GEOSTORE);
rv = check_config(type);
if (rv < 0)
goto out;
/* Per default the PID file name is derived from the
* configuration name. */
if (!cl.lockfile[0]) {
snprintf(cl.lockfile, sizeof(cl.lockfile)-1,
"%s/%s.pid", BOOTH_RUN_DIR, booth_conf->name);
}
out:
return rv;
}
| null | null | 196,611
|
55095981735811690584245498640396195697
| 50
|
Revert "Refactor: main: substitute is_auth_req macro"
This reverts commit da79b8ba28ad4837a0fee13e5f8fb6f89fe0e24c.
authfile != authkey
Signed-off-by: Jan Friesse <[email protected]>
|
other
|
tensorflow
|
20cb18724b0bf6c09071a3f53434c4eec53cc147
| 1
|
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const TensorShape& input_shape = input.shape();
const int32_t input_dims = input_shape.dims();
const Tensor& segment_id = context->input(1);
const TensorShape& segment_id_shape = segment_id.shape();
const int32_t segment_dims = segment_id_shape.dims();
const Tensor& num_segments_tensor = context->input(2);
OP_REQUIRES(context, num_segments_tensor.NumElements() != 0,
errors::InvalidArgument("Number of segments cannot be empty."));
auto num_segments = num_segments_tensor.scalar<NUM_SEGMENTS_TYPE>()();
OP_REQUIRES(context, num_segments > 0,
errors::InvalidArgument("Number of segments must be positive"));
OP_REQUIRES(context, segment_dims != 0,
errors::InvalidArgument("Segment_id cannot have rank 0"));
OP_REQUIRES(
context, segment_dims <= input_dims,
errors::OutOfRange("Invalid segment_id rank ", segment_dims,
" for input with ", input_dims, " dimension(s)"));
for (auto i = 0; i < segment_dims; i++) {
OP_REQUIRES(
context, segment_id_shape.dim_size(i) == input_shape.dim_size(i),
errors::InvalidArgument(
"Segment dimension is ", segment_id_shape.dim_size(i),
" while input dimension is ", input_dims, " in rank ", i));
}
// Making output tensor.
Tensor* output_tensor = nullptr;
TensorShape output_shape =
GetOutputShape(input_shape, segment_id_shape, num_segments);
OP_REQUIRES_OK(context, context->allocate_output("output", output_shape,
&output_tensor));
// Preparating flat tensors.
auto output_flat = output_tensor->flat<tstring>();
auto flat_segment_id = segment_id.flat<INDICES_TYPE>();
auto flat_input = input.flat<tstring>();
for (int i = 0; i < flat_segment_id.size(); i++) {
OP_REQUIRES(
context,
((flat_segment_id(i) < num_segments) && (flat_segment_id(i) >= 0)),
errors::InvalidArgument(
"segment_ids are not allowed to exceed num_segments or"
" to have negative values."));
}
int64_t big_stride;
int64_t small_stride;
std::tie(big_stride, small_stride) =
GetStrides<INDICES_TYPE>(input_shape, segment_id_shape);
auto relative_offset_set =
GetFlattenedRelativeOffsets<INDICES_TYPE>(small_stride, big_stride);
for (auto start_offset = 0; start_offset < big_stride; start_offset++) {
for (auto i = 0; i < relative_offset_set.size(); i++) {
auto output_index = start_offset + flat_segment_id(i) * big_stride;
auto offset = start_offset + relative_offset_set[i];
if (output_flat(output_index).length() != 0)
output_flat(output_index).append(separator_.c_str());
output_flat(output_index).append(flat_input(offset));
}
}
}
| null | null | 196,620
|
110207759568301742016620853453621387570
| 68
|
Allow 0 for number of segments in `unsorted_segment_join_op.cc`
Related to the fix for #55305
PiperOrigin-RevId: 443157549
|
other
|
mruby
|
b1d0296a937fe278239bdfac840a3fd0e93b3ee9
| 1
|
mrb_remove_method(mrb_state *mrb, struct RClass *c, mrb_sym mid)
{
mt_tbl *h;
MRB_CLASS_ORIGIN(c);
h = c->mt;
if (h && mt_del(mrb, h, mid)) return;
mrb_name_error(mrb, mid, "method '%n' not defined in %C", mid, c);
}
| null | null | 196,621
|
331456770623121739023197011651271269672
| 10
|
class.c: clear method cache after `remove_method`.
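Operationally, this means invalidating any cached (class, mid) lookup once mt_del() succeeds, otherwise a removed method can still dispatch through the cache. Sketch below; the helper name is an assumption about mruby internals, not a verified API:
if (h && mt_del(mrb, h, mid)) {
    mc_clear_by_class(mrb, c); /* assumed cache-invalidation hook */
    return;
}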
|
other
|
seatd
|
7cffe0797fdb17a9c08922339465b1b187394335
| 1
|
int main(int argc, char *argv[]) {
(void)argc;
const char *usage = "Usage: seatd-launch [options] [--] command\n"
"\n"
" -h Show this help message\n"
" -s <path> Where to create the seatd socket\n"
" -v Show the version number\n"
"\n";
int c;
char *sockpath = NULL;
while ((c = getopt(argc, argv, "vhs:")) != -1) {
switch (c) {
case 's':
sockpath = optarg;
break;
case 'v':
printf("seatd-launch version %s\n", SEATD_VERSION);
return 0;
case 'h':
printf("%s", usage);
return 0;
case '?':
fprintf(stderr, "Try '%s -h' for more information.\n", argv[0]);
return 1;
default:
abort();
}
}
if (optind >= argc) {
fprintf(stderr, "A command must be specified\n\n%s", usage);
return 1;
}
char **command = &argv[optind];
char sockbuf[256];
if (sockpath == NULL) {
sprintf(sockbuf, "/tmp/seatd.%d.sock", getpid());
sockpath = sockbuf;
}
unlink(sockpath);
int fds[2];
if (pipe(fds) == -1) {
perror("Could not create pipe");
goto error;
}
pid_t seatd_child = fork();
if (seatd_child == -1) {
perror("Could not fork seatd process");
goto error;
} else if (seatd_child == 0) {
close(fds[0]);
char pipebuf[16] = {0};
snprintf(pipebuf, sizeof pipebuf, "%d", fds[1]);
char *env[2] = {NULL, NULL};
char loglevelbuf[32] = {0};
char *cur_loglevel = getenv("SEATD_LOGLEVEL");
if (cur_loglevel != NULL) {
snprintf(loglevelbuf, sizeof loglevelbuf, "SEATD_LOGLEVEL=%s", cur_loglevel);
env[0] = loglevelbuf;
}
char *command[] = {"seatd", "-n", pipebuf, "-s", sockpath, NULL};
execve(SEATD_INSTALLPATH, command, env);
perror("Could not start seatd");
_exit(1);
}
close(fds[1]);
// Wait for seatd to be ready
char buf[1] = {0};
while (true) {
pid_t p = waitpid(seatd_child, NULL, WNOHANG);
if (p == seatd_child) {
fprintf(stderr, "seatd exited prematurely\n");
goto error_seatd;
} else if (p == -1 && (errno != EINTR && errno != ECHILD)) {
perror("Could not wait for seatd process");
goto error_seatd;
}
struct pollfd fd = {
.fd = fds[0],
.events = POLLIN,
};
// We poll with timeout to avoid a racing on a blocking read
if (poll(&fd, 1, 1000) == -1) {
if (errno == EAGAIN || errno == EINTR) {
continue;
} else {
perror("Could not poll notification fd");
goto error_seatd;
}
}
if (fd.revents & POLLIN) {
ssize_t n = read(fds[0], buf, 1);
if (n == -1 && errno != EINTR) {
perror("Could not read from pipe");
goto error_seatd;
} else if (n > 0) {
break;
}
}
}
close(fds[0]);
uid_t uid = getuid();
gid_t gid = getgid();
// Restrict access to the socket to just us
if (chown(sockpath, uid, gid) == -1) {
perror("Could not chown seatd socket");
goto error_seatd;
}
if (chmod(sockpath, 0700) == -1) {
perror("Could not chmod socket");
goto error_seatd;
}
// Drop privileges
if (setgid(gid) == -1) {
perror("Could not set gid to drop privileges");
goto error_seatd;
}
if (setuid(uid) == -1) {
perror("Could not set uid to drop privileges");
goto error_seatd;
}
pid_t child = fork();
if (child == -1) {
perror("Could not fork target process");
goto error_seatd;
} else if (child == 0) {
setenv("SEATD_SOCK", sockpath, 1);
execvp(command[0], command);
perror("Could not start target");
_exit(1);
}
int status = 0;
while (true) {
pid_t p = waitpid(child, &status, 0);
if (p == child) {
break;
} else if (p == -1 && errno != EINTR) {
perror("Could not wait for target process");
goto error_seatd;
}
}
if (unlink(sockpath) != 0) {
perror("Could not unlink socket");
}
if (kill(seatd_child, SIGTERM) != 0) {
perror("Could not kill seatd");
}
if (WIFEXITED(status)) {
return WEXITSTATUS(status);
} else if (WIFSIGNALED(status)) {
return 128 + WTERMSIG(status);
} else {
abort(); // unreachable
}
error_seatd:
unlink(sockpath);
kill(seatd_child, SIGTERM);
error:
return 1;
}
| null | null | 196,623
|
269031710364232709260337589812485265866
| 181
|
seatd-launch: Remove socket path command line arg
This should not need to be configured, so remove the argument. If
downstream prefers a different folder, the location can be made
compile-time configurable like for seatd itself.
|
other
|
tensorflow
|
579261dcd446385831fe4f7457d802a59685121d
| 1
|
void ComputeAsync(OpKernelContext* context, DoneCallback done) final {
const Tensor& input = context->input(0);
const Tensor& rhs = context->input(1);
const int ndims = input.dims();
const int64 n = input.dim_size(ndims - 1);
const int64 nrhs = rhs.dim_size(ndims - 1);
// Validate inputs.
OP_REQUIRES_ASYNC(
context, ndims >= 2,
errors::InvalidArgument("Input must have rank >= 2, got ", ndims),
done);
OP_REQUIRES_ASYNC(context, rhs.dims() == ndims,
errors::InvalidArgument(
"Input and right-hand side must have same rank, got ",
ndims, " != ", rhs.dims()),
done);
OP_REQUIRES_ASYNC(
context, input.dim_size(ndims - 2) == n,
errors::InvalidArgument("Input matrices must be squares, got",
input.dim_size(ndims - 2), " != ", n),
done);
OP_REQUIRES_ASYNC(context, rhs.dim_size(ndims - 2) == n,
errors::InvalidArgument(
"Input matrix and right-hand side must have the "
"same number of rows, got",
n, " != ", rhs.dim_size(ndims - 2)),
done);
// Allocate output.
Tensor* output;
OP_REQUIRES_OK_ASYNC(
context,
context->forward_input_or_allocate_output({1}, 0, rhs.shape(), &output),
done);
// To be consistent with the MatrixInverse op, we define the solution for
// an empty set of equations as the empty matrix.
if (input.NumElements() == 0 || rhs.NumElements() == 0) {
done();
return;
}
// TODO(rmlarsen): Convert to std::make_unique when available.
std::unique_ptr<CudaSolver> solver(new CudaSolver(context));
// Make a copy of the input for the factorization step, or, if adjoint_ is
// false, try to reuse the input buffer if this op owns it exclusively.
Tensor input_copy;
const GPUDevice& device = context->eigen_device<GPUDevice>();
if (adjoint_) {
// For the adjoint case, it is simpler to always make a transposed copy up
// front.
OP_REQUIRES_OK_ASYNC(
context,
solver->allocate_scoped_tensor(DataTypeToEnum<Scalar>::value,
input.shape(), &input_copy),
done);
OP_REQUIRES_OK_ASYNC(context,
DoMatrixTranspose(device, input, &input_copy), done);
} else {
OP_REQUIRES_OK_ASYNC(
context,
solver->forward_input_or_allocate_scoped_tensor(
{0}, DataTypeToEnum<Scalar>::value, input.shape(), &input_copy),
done);
if (!input.SharesBufferWith(input_copy)) {
device.memcpy(input_copy.flat<Scalar>().data(),
input.flat<Scalar>().data(),
input.NumElements() * sizeof(Scalar));
}
}
auto input_copy_reshaped = input_copy.template flat_inner_dims<Scalar, 3>();
const int64 batch_size = input_copy_reshaped.dimension(0);
// Allocate pivots on the device.
Tensor pivots;
OP_REQUIRES_OK_ASYNC(
context,
solver->allocate_scoped_tensor(DataTypeToEnum<int>::value,
TensorShape{batch_size, n}, &pivots),
done);
auto pivots_mat = pivots.template matrix<int>();
// 1. Compute the partially pivoted LU factorization(s) of the
// matrix/matrices.
std::vector<DeviceLapackInfo> dev_info;
auto input_copy_ptrs = solver->GetScratchSpace<uint8>(
sizeof(Scalar*) * batch_size, "input_copt_ptrs",
/* on_host */ true);
const int kMaxMatrixSizeToBatchSizeRatio = 128;
const bool use_batched_solver =
n <= kMaxMatrixSizeToBatchSizeRatio * batch_size;
if (use_batched_solver) {
// For small matrices or large batch sizes, we use the batched interface
// from cuBlas.
const Scalar** input_copy_ptrs_base =
reinterpret_cast<const Scalar**>(input_copy_ptrs.mutable_data());
for (int batch = 0; batch < batch_size; ++batch) {
input_copy_ptrs_base[batch] = &input_copy_reshaped(batch, 0, 0);
}
dev_info.push_back(
solver->GetDeviceLapackInfo(batch_size, "getrfBatched"));
OP_REQUIRES_OK_ASYNC(
context,
solver->GetrfBatched(n, input_copy_ptrs_base, n, pivots_mat.data(),
&dev_info.back(), batch_size),
done);
} else {
// For small batch sizes or large matrices, we use the non-batched
// interface from cuSolver, which is much faster for large matrices.
dev_info.push_back(solver->GetDeviceLapackInfo(batch_size, "getrf"));
for (int batch = 0; batch < batch_size; ++batch) {
OP_REQUIRES_OK_ASYNC(
context,
solver->Getrf(n, n, &input_copy_reshaped(batch, 0, 0), n,
&pivots_mat(batch, 0), &dev_info.back()(batch)),
done);
}
}
// 2. Make a transposed copy of the right-hand sides. This is necessary
// because cuBLAS assumes column-major storage while TensorFlow uses
// row-major.
TensorShape transposed_rhs_shape(rhs.shape());
transposed_rhs_shape.RemoveLastDims(2);
transposed_rhs_shape.AddDim(nrhs);
transposed_rhs_shape.AddDim(n);
Tensor transposed_rhs;
OP_REQUIRES_OK_ASYNC(
context,
solver->allocate_scoped_tensor(DataTypeToEnum<Scalar>::value,
transposed_rhs_shape, &transposed_rhs),
done);
if (nrhs > 1) {
OP_REQUIRES_OK_ASYNC(
context, DoMatrixTranspose(device, rhs, &transposed_rhs), done);
} else {
device.memcpy(transposed_rhs.flat<Scalar>().data(),
rhs.flat<Scalar>().data(),
rhs.NumElements() * sizeof(Scalar));
}
// 3. Solve op(A) X = B (in column major form).
// We use a trick here: If adjoint_ is true, we converted A to column major
// form above. If adjoint_ is false, we leave A in row-major form and use
// trans_a = CUBLAS_OP_T to effectively transform it to column-major on the
// fly. (This means that we actually use the LU-factorization of A^T in that
// case, but that is equally good for solving AX=B). This way we save an
// explicit transpose in the more common case of adjoint_ == false.
auto input_copy_ptr_array = solver->GetScratchSpace<uint8>(
sizeof(Scalar*) * batch_size, "input_copy_ptr_array",
/* on_host */ true);
auto transposed_rhs_ptr_array = solver->GetScratchSpace<uint8>(
sizeof(Scalar*) * batch_size, "transposed_rhs_ptr_array",
/* on_host */ true);
auto transposed_rhs_reshaped =
transposed_rhs.template flat_inner_dims<Scalar, 3>();
if (use_batched_solver) {
const Scalar** input_copy_ptrs_base =
reinterpret_cast<const Scalar**>(input_copy_ptr_array.mutable_data());
const Scalar** transposed_rhs_ptrs_base =
reinterpret_cast<const Scalar**>(
transposed_rhs_ptr_array.mutable_data());
for (int batch = 0; batch < batch_size; ++batch) {
input_copy_ptrs_base[batch] = &input_copy_reshaped(batch, 0, 0);
transposed_rhs_ptrs_base[batch] = &transposed_rhs_reshaped(batch, 0, 0);
}
int host_info = 0;
OP_REQUIRES_OK_ASYNC(
context,
solver->GetrsBatched(adjoint_ ? CUBLAS_OP_C : CUBLAS_OP_T, n, nrhs,
input_copy_ptrs_base, n, pivots_mat.data(),
transposed_rhs_ptrs_base, n, &host_info,
batch_size),
done);
OP_REQUIRES_ASYNC(
context, host_info == 0,
errors::InvalidArgument("The ", -host_info,
"'th argument to cublas*getrsBatched had "
"an illegal value."),
done);
} else {
dev_info.push_back(solver->GetDeviceLapackInfo(batch_size, "getrs"));
for (int batch = 0; batch < batch_size; ++batch) {
OP_REQUIRES_OK_ASYNC(
context,
solver->Getrs(adjoint_ ? CUBLAS_OP_C : CUBLAS_OP_T, n, nrhs,
&input_copy_reshaped(batch, 0, 0), n,
&pivots_mat(batch, 0),
&transposed_rhs_reshaped(batch, 0, 0), n,
&dev_info.back()(batch)),
done);
}
}
// 4. Transpose X to get the final result in row-major form.
if (nrhs > 1) {
OP_REQUIRES_OK_ASYNC(
context, DoMatrixTranspose(device, transposed_rhs, output), done);
} else {
device.memcpy(output->flat<Scalar>().data(),
transposed_rhs.flat<Scalar>().data(),
transposed_rhs.NumElements() * sizeof(Scalar));
}
// Callback for checking info after kernels finish. Also capture the
// temporary Tensors/ScratchSpace so they don't get deallocated before the
// kernels run. TODO(rmlarsen): Use move capture once C++14 becomes
// available.
auto info_checker = [context, done, dev_info](
const Status& status,
const std::vector<HostLapackInfo>& host_infos) {
if (!status.ok() && errors::IsInvalidArgument(status) &&
!host_infos.empty()) {
for (int i = 0; i < host_infos[0].size(); ++i) {
// Match the CPU error message for singular matrices. Otherwise
// just print the original error message from the status below.
OP_REQUIRES_ASYNC(context, host_infos[0].data()[i] <= 0,
errors::InvalidArgument(kErrMsg), done);
}
}
OP_REQUIRES_OK_ASYNC(context, status, done);
done();
};
CudaSolver::CheckLapackInfoAndDeleteSolverAsync(std::move(solver), dev_info,
std::move(info_checker));
}
| null | null | 196,629
|
2151236193610516009407418019752947503
| 227
|
Fix crash in MatrixSolve when inputs have different batch dimensions.
Before, the process would crash or certain elements would be silently ignored. Now an InvalidArgument is raised.
PiperOrigin-RevId: 384844020
Change-Id: Iba44417e383bdd0e1abc4012bfca83b2377dd335
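A hedged sketch of the batch-dimension validation this message describes, reusing the names from the kernel above (the real commit's exact check and error wording may differ):

// Sketch: all leading (batch) dimensions of the coefficient matrices and
// the right-hand sides must agree before any solver kernel is launched.
for (int dim = 0; dim < ndims - 2; ++dim) {
  OP_REQUIRES_ASYNC(
      context, input.dim_size(dim) == rhs.dim_size(dim),
      errors::InvalidArgument(
          "Input and right-hand side must have the same batch dimensions, "
          "but dimension ", dim, " differs: ", input.dim_size(dim),
          " vs. ", rhs.dim_size(dim)),
      done);
}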
|
other
|
minetest
|
da71e86633d0b27cd02d7aac9fdac625d141ca13
| 1
|
static inline int checkSettingSecurity(lua_State* L, const std::string &name)
{
if (ScriptApiSecurity::isSecure(L) && name.compare(0, 7, "secure.") == 0)
throw LuaError("Attempt to set secure setting.");
bool is_mainmenu = false;
#ifndef SERVER
is_mainmenu = ModApiBase::getGuiEngine(L) != nullptr;
#endif
if (!is_mainmenu && (name == "mg_name" || name == "mg_flags")) {
errorstream << "Tried to set global setting " << name << ", ignoring. "
"minetest.set_mapgen_setting() should be used instead." << std::endl;
infostream << script_get_backtrace(L) << std::endl;
return -1;
}
return 0;
}
| null | null | 196,670
|
14542184638408056621499358845757545617
| 18
|
Protect a few more settings from being set from mods
Of these settings, main_menu_script has a concrete security impact; the rest are added out of an abundance of caution.
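A sketch of how the guarded-settings check could be extended; main_menu_script is named by the message, while the helper itself and the remaining entries are illustrative assumptions:

// Sketch: settings that scripts must never be allowed to write.
static bool isProtectedSetting(const std::string &name)
{
	static const char *protected_names[] = {
		"main_menu_script",    // named in the commit message
		"mg_name", "mg_flags", // already guarded in the function above
	};
	for (const char *p : protected_names) {
		if (name == p)
			return true;
	}
	return false;
}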
|
other
|
bareos
|
abe462037388635193f3b5b71575f32596c3b69d
| 1
|
static int DoPamAuth(struct pam_handle* pamh,
const char* username,
std::string& authenticated_username)
{
int err = pam_set_item(pamh, PAM_RUSER, username);
if (err != PAM_SUCCESS) {
Dmsg1(debuglevel, "PAM set_item failed: %s\n", pam_strerror(pamh, err));
return err;
}
err = pam_authenticate(pamh, 0);
if (err != PAM_SUCCESS) {
Dmsg1(debuglevel, "PAM authentication failed: %s\n",
pam_strerror(pamh, err));
return err;
}
#if defined(__sun)
void* data;
#else
const void* data;
#endif
err = pam_get_item(pamh, PAM_USER, &data);
if (err != PAM_SUCCESS) {
Dmsg1(debuglevel, "PAM get_item failed: %s\n", pam_strerror(pamh, err));
return err;
} else {
if (data) { authenticated_username = static_cast<const char*>(data); }
}
return err;
}
| null | null | 196,682
|
303404528456224642824010820660072442794
| 31
|
dir: check account authorization during PAM login
Fixes CVE-2022-24755
Previously, when a user logged in via PAM, Bareos only checked for
authentication (i.e. the "auth" section in PAM). No authorization checks
were made (the "account" section in PAM). This patch now adds the proper
check.
This will break existing PAM configuration!
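The PAM "account" phase corresponds to pam_acct_mgmt(3); a minimal sketch of the added authorization step, placed after the successful pam_authenticate() call (the exact integration into DoPamAuth is an assumption):

  /* Sketch: authorization ("account") check after authentication. */
  err = pam_acct_mgmt(pamh, 0);
  if (err != PAM_SUCCESS) {
    Dmsg1(debuglevel, "PAM account check failed: %s\n",
          pam_strerror(pamh, err));
    return err;
  }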
|
other
|
shim
|
159151b6649008793d6204a34d7b9c41221fb4b0
| 1
|
EFI_STATUS verify_image(void *data, unsigned int datasize,
EFI_LOADED_IMAGE *li,
PE_COFF_LOADER_IMAGE_CONTEXT *context)
{
EFI_STATUS efi_status;
UINT8 sha1hash[SHA1_DIGEST_SIZE];
UINT8 sha256hash[SHA256_DIGEST_SIZE];
/*
* The binary header contains relevant context and section pointers
*/
efi_status = read_header(data, datasize, context);
if (EFI_ERROR(efi_status)) {
perror(L"Failed to read header: %r\n", efi_status);
return efi_status;
}
/*
* We only need to verify the binary if we're in secure mode
*/
efi_status = generate_hash(data, datasize, context, sha256hash,
sha1hash);
if (EFI_ERROR(efi_status))
return efi_status;
/* Measure the binary into the TPM */
#ifdef REQUIRE_TPM
efi_status =
#endif
tpm_log_pe((EFI_PHYSICAL_ADDRESS)(UINTN)data, datasize,
(EFI_PHYSICAL_ADDRESS)(UINTN)context->ImageAddress,
li->FilePath, sha1hash, 4);
#ifdef REQUIRE_TPM
if (efi_status != EFI_SUCCESS) {
return efi_status;
}
#endif
if (secure_mode()) {
efi_status = verify_buffer(data, datasize,
context, sha256hash, sha1hash);
if (EFI_ERROR(efi_status)) {
if (verbose)
console_print(L"Verification failed: %r\n", efi_status);
else
console_error(L"Verification failed", efi_status);
return efi_status;
} else if (verbose)
console_print(L"Verification succeeded\n");
}
return EFI_SUCCESS;
}
| null | null | 196,687
|
213147111462734701312298267958315465833
| 53
|
Also avoid CVE-2022-28737 in verify_image()
PR 446 ("Add verify_image") duplicates some of the code affected by
Chris Coulson's defense in depth patch against CVE-2022-28737 ("pe:
Perform image verification earlier when loading grub").
This patch makes the same change to the new function.
Signed-off-by: Peter Jones <[email protected]>
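A hedged sketch of the defense-in-depth ordering the message refers to: in secure mode, stop the moment verification fails, before anything is loaded or relocated (illustrative restructuring, not the literal patch):

	/* Sketch: authenticate the buffer before it is loaded anywhere. */
	if (secure_mode()) {
		efi_status = verify_buffer(data, datasize, context,
					   sha256hash, sha1hash);
		if (EFI_ERROR(efi_status))
			return efi_status; /* nothing loaded yet */
	}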
|
other
|
tensorflow
|
cebe3c45d76357d201c65bdbbf0dbe6e8a63bbdb
| 1
|
void Compute(OpKernelContext* ctx) override {
Buffer* buf = nullptr;
OP_REQUIRES_OK(ctx, GetBuffer(ctx, def(), &buf));
core::ScopedUnref scope(buf);
Buffer::Tuple tuple;
std::size_t index = ctx->input(0).scalar<int>()();
OP_REQUIRES_OK(ctx, buf->Peek(index, &tuple));
OP_REQUIRES(
ctx, tuple.size() == (size_t)ctx->num_outputs(),
errors::InvalidArgument("Mismatch stage/unstage: ", tuple.size(),
" vs. ", ctx->num_outputs()));
for (size_t i = 0; i < tuple.size(); ++i) {
ctx->set_output(i, tuple[i]);
}
}
| null | null | 196,689
|
139304201094829090604286879553789504495
| 19
|
Fix tf.raw_ops.StagePeek vulnerability with invalid `index`.
Check that input is actually a scalar before treating it as such.
PiperOrigin-RevId: 445524908
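A sketch of the scalar check described above, in the style of the surrounding kernel (the exact error wording is an assumption):

    // Sketch: validate the shape before calling .scalar<int>() on input 0.
    const Tensor& index_t = ctx->input(0);
    OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(index_t.shape()),
                errors::InvalidArgument("index must be a scalar, got shape: ",
                                        index_t.shape().DebugString()));
    std::size_t index = index_t.scalar<int>()();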
|
other
|
gpac
|
71460d72ec07df766dab0a4d52687529f3efcf0a
| 1
|
static GF_Err isoffin_process(GF_Filter *filter)
{
ISOMReader *read = gf_filter_get_udta(filter);
u32 i, count = gf_list_count(read->channels);
Bool is_active = GF_FALSE;
Bool in_is_eos = GF_FALSE;
Bool check_forced_end = GF_FALSE;
Bool has_new_data = GF_FALSE;
u64 min_offset_plus_one = 0;
u32 nb_forced_end=0;
if (read->in_error)
return read->in_error;
if (read->pid) {
Bool fetch_input = GF_TRUE;
//we failed to load the init segment during a DASH switch, retry
if (!read->is_partial_download && !read->mem_load_mode && (read->moov_not_loaded==2) ) {
isoffin_configure_pid(filter, read->pid, GF_FALSE);
if (read->moov_not_loaded) return GF_OK;
}
if (read->mem_load_mode==2) {
if (!read->force_fetch && read->mem_blob.size > read->mstore_size) {
fetch_input = GF_FALSE;
}
read->force_fetch = GF_FALSE;
}
while (fetch_input) {
GF_FilterPacket *pck = gf_filter_pid_get_packet(read->pid);
if (!pck) {
//we issued a seek, wait for the first packet to be received before fetching channels
//otherwise we could end up reading from the wrong cache
if (read->wait_for_source) {
//something went wrong during the seek request
if (gf_filter_pid_is_eos(read->pid))
return GF_EOS;
return GF_OK;
}
break;
}
read->wait_for_source = GF_FALSE;
if (read->mem_load_mode) {
u32 data_size;
const u8 *pck_data = gf_filter_pck_get_data(pck, &data_size);
isoffin_push_buffer(filter, read, pck_data, data_size);
}
//we just had a switch but init seg is not completely done: input packet is only a part of the init, drop it
else if (read->moov_not_loaded==2) {
gf_filter_pid_drop_packet(read->pid);
return GF_OK;
}
gf_filter_pid_drop_packet(read->pid);
has_new_data = GF_TRUE;
if (read->in_error)
return read->in_error;
}
if (gf_filter_pid_is_eos(read->pid)) {
read->input_loaded = GF_TRUE;
in_is_eos = GF_TRUE;
}
if (read->input_is_stop) {
read->input_loaded = GF_TRUE;
in_is_eos = GF_TRUE;
read->input_is_stop = GF_FALSE;
}
if (!read->frag_type && read->input_loaded) {
in_is_eos = GF_TRUE;
}
//segment is invalid, wait for eos on input and send eos on all channels
if (read->invalid_segment) {
if (!in_is_eos) return GF_OK;
read->invalid_segment = GF_FALSE;
for (i=0; i<count; i++) {
ISOMChannel *ch = gf_list_get(read->channels, i);
if (!ch->playing) {
continue;
}
if (!ch->eos_sent) {
ch->eos_sent = GF_TRUE;
gf_filter_pid_set_eos(ch->pid);
}
}
read->eos_signaled = GF_TRUE;
return GF_EOS;
}
} else if (read->extern_mov) {
in_is_eos = GF_TRUE;
read->input_loaded = GF_TRUE;
}
if (read->moov_not_loaded==1) {
if (read->mem_load_mode)
return GF_OK;
read->moov_not_loaded = GF_FALSE;
return isoffin_setup(filter, read);
}
if (read->refresh_fragmented) {
const GF_PropertyValue *prop;
if (in_is_eos) {
read->refresh_fragmented = GF_FALSE;
} else {
prop = gf_filter_pid_get_property(read->pid, GF_PROP_PID_FILE_CACHED);
if (prop && prop->value.boolean)
read->refresh_fragmented = GF_FALSE;
}
if (has_new_data) {
u64 bytesMissing=0;
GF_Err e;
const char *new_url = NULL;
prop = gf_filter_pid_get_property(read->pid, GF_PROP_PID_FILEPATH);
if (prop) new_url = prop->value.string;
e = gf_isom_refresh_fragmented(read->mov, &bytesMissing, new_url);
if (e && (e!= GF_ISOM_INCOMPLETE_FILE)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[IsoMedia] Failed to refresh current segment: %s\n", gf_error_to_string(e) ));
read->refresh_fragmented = GF_FALSE;
} else {
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[IsoMedia] Refreshing current segment at UTC "LLU" - "LLU" bytes still missing - input is EOS %d\n", gf_net_get_utc(), bytesMissing, in_is_eos));
}
if (!read->refresh_fragmented && (e==GF_ISOM_INCOMPLETE_FILE)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[IsoMedia] Incomplete Segment received - "LLU" bytes missing but EOF found\n", bytesMissing ));
}
#ifndef GPAC_DISABLE_LOG
if (gf_log_tool_level_on(GF_LOG_DASH, GF_LOG_DEBUG)) {
for (i=0; i<count; i++) {
ISOMChannel *ch = gf_list_get(read->channels, i);
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[IsoMedia] refresh track %d fragment - cur sample %d - new sample count %d\n", ch->track, ch->sample_num, gf_isom_get_sample_count(ch->owner->mov, ch->track) ));
}
}
#endif
isor_check_producer_ref_time(read);
if (!read->frag_type)
read->refresh_fragmented = GF_FALSE;
}
}
for (i=0; i<count; i++) {
u8 *data;
u32 nb_pck=50;
ISOMChannel *ch;
ch = gf_list_get(read->channels, i);
if (!ch->playing) {
nb_forced_end++;
continue;
}
//eos not sent on this channel, we are active
if (!ch->eos_sent)
is_active = GF_TRUE;
while (nb_pck) {
ch->sample_data_offset = 0;
if (!read->full_segment_flush && gf_filter_pid_would_block(ch->pid) )
break;
if (ch->item_id) {
isor_reader_get_sample_from_item(ch);
} else {
isor_reader_get_sample(ch);
}
if (read->stsd && (ch->last_sample_desc_index != read->stsd) && ch->sample) {
isor_reader_release_sample(ch);
continue;
}
if (ch->sample) {
u32 sample_dur;
u8 dep_flags;
u8 *subs_buf;
u32 subs_buf_size;
GF_FilterPacket *pck;
if (ch->needs_pid_reconfig) {
isor_update_channel_config(ch);
ch->needs_pid_reconfig = GF_FALSE;
}
//we have at least two samples, update GF_PROP_PID_HAS_SYNC if needed
if (ch->check_has_rap && (gf_isom_get_sample_count(ch->owner->mov, ch->track)>1) && (gf_isom_has_sync_points(ch->owner->mov, ch->track)==1)) {
ch->check_has_rap = GF_FALSE;
ch->has_rap = GF_TRUE;
gf_filter_pid_set_property(ch->pid, GF_PROP_PID_HAS_SYNC, &PROP_BOOL(ch->has_rap) );
}
//strip param sets from payload, trigger reconfig if needed
isor_reader_check_config(ch);
if (read->nodata) {
pck = gf_filter_pck_new_shared(ch->pid, NULL, ch->sample->dataLength, NULL);
if (!pck) return GF_OUT_OF_MEM;
} else {
pck = gf_filter_pck_new_alloc(ch->pid, ch->sample->dataLength, &data);
if (!pck) return GF_OUT_OF_MEM;
memcpy(data, ch->sample->data, ch->sample->dataLength);
}
gf_filter_pck_set_dts(pck, ch->dts);
gf_filter_pck_set_cts(pck, ch->cts);
if (ch->sample->IsRAP==-1) {
gf_filter_pck_set_sap(pck, GF_FILTER_SAP_1);
ch->redundant = 1;
} else {
gf_filter_pck_set_sap(pck, (GF_FilterSAPType) ch->sample->IsRAP);
}
if (ch->sap_3)
gf_filter_pck_set_sap(pck, GF_FILTER_SAP_3);
else if (ch->sap_4_type) {
gf_filter_pck_set_sap(pck, (ch->sap_4_type==GF_ISOM_SAMPLE_PREROLL) ? GF_FILTER_SAP_4_PROL : GF_FILTER_SAP_4);
gf_filter_pck_set_roll_info(pck, ch->roll);
}
sample_dur = ch->au_duration;
if (ch->sample->nb_pack)
sample_dur *= ch->sample->nb_pack;
gf_filter_pck_set_duration(pck, sample_dur);
gf_filter_pck_set_seek_flag(pck, ch->seek_flag);
//for now we only signal xPS mask for non-sap
if (ch->xps_mask && !gf_filter_pck_get_sap(pck) ) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_XPS_MASK, &PROP_UINT(ch->xps_mask) );
}
dep_flags = ch->isLeading;
dep_flags <<= 2;
dep_flags |= ch->dependsOn;
dep_flags <<= 2;
dep_flags |= ch->dependedOn;
dep_flags <<= 2;
dep_flags |= ch->redundant;
if (dep_flags)
gf_filter_pck_set_dependency_flags(pck, dep_flags);
gf_filter_pck_set_crypt_flags(pck, ch->pck_encrypted ? GF_FILTER_PCK_CRYPT : 0);
gf_filter_pck_set_seq_num(pck, ch->sample_num);
subs_buf = gf_isom_sample_get_subsamples_buffer(read->mov, ch->track, ch->sample_num, &subs_buf_size);
if (subs_buf) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_SUBS, &PROP_DATA_NO_COPY(subs_buf, subs_buf_size) );
}
if (ch->sai_buffer && ch->pck_encrypted) {
assert(ch->sai_buffer_size);
gf_filter_pck_set_property(pck, GF_PROP_PCK_CENC_SAI, &PROP_DATA(ch->sai_buffer, ch->sai_buffer_size) );
}
if (read->sigfrag) {
GF_ISOFragmentBoundaryInfo finfo;
if (gf_isom_sample_is_fragment_start(read->mov, ch->track, ch->sample_num, &finfo) ) {
u64 start=0;
u32 traf_start = finfo.seg_start_plus_one ? 2 : 1;
if (finfo.seg_start_plus_one)
gf_filter_pck_set_property(pck, GF_PROP_PCK_CUE_START, &PROP_BOOL(GF_TRUE));
gf_filter_pck_set_property(pck, GF_PROP_PCK_FRAG_START, &PROP_UINT(traf_start));
start = finfo.frag_start;
if (finfo.seg_start_plus_one) start = finfo.seg_start_plus_one-1;
gf_filter_pck_set_property(pck, GF_PROP_PCK_FRAG_RANGE, &PROP_FRAC64_INT(start, finfo.mdat_end));
if (finfo.moof_template) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_MOOF_TEMPLATE, &PROP_DATA((u8 *)finfo.moof_template, finfo.moof_template_size));
}
if (finfo.sidx_end) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_SIDX_RANGE, &PROP_FRAC64_INT(finfo.sidx_start , finfo.sidx_end));
}
if (read->seg_name_changed) {
const GF_PropertyValue *p = gf_filter_pid_get_property(read->pid, GF_PROP_PID_URL);
read->seg_name_changed = GF_FALSE;
if (p && p->value.string) {
gf_filter_pck_set_property(pck, GF_PROP_PID_URL, &PROP_STRING(p->value.string));
}
}
}
}
if (ch->sender_ntp) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_SENDER_NTP, &PROP_LONGUINT(ch->sender_ntp));
if (ch->ntp_at_server_ntp) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_RECEIVER_NTP, &PROP_LONGUINT(ch->ntp_at_server_ntp));
}
}
ch->eos_sent = GF_FALSE;
//this might not be the true end of stream
if ((ch->streamType==GF_STREAM_AUDIO) && (ch->sample_num == gf_isom_get_sample_count(read->mov, ch->track))) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_END_RANGE, &PROP_BOOL(GF_TRUE));
}
gf_filter_pck_send(pck);
isor_reader_release_sample(ch);
ch->last_valid_sample_data_offset = ch->sample_data_offset;
nb_pck--;
} else if (ch->last_state==GF_EOS) {
if (ch->playing == 2) {
if (in_is_eos) {
ch->playing = GF_FALSE;
} else {
nb_forced_end++;
check_forced_end = GF_TRUE;
}
}
if (in_is_eos && !ch->eos_sent) {
void *tfrf;
const void *gf_isom_get_tfrf(GF_ISOFile *movie, u32 trackNumber);
ch->eos_sent = GF_TRUE;
read->eos_signaled = GF_TRUE;
tfrf = (void *) gf_isom_get_tfrf(read->mov, ch->track);
if (tfrf) {
gf_filter_pid_set_info_str(ch->pid, "smooth_tfrf", &PROP_POINTER(tfrf) );
ch->last_has_tfrf = GF_TRUE;
} else if (ch->last_has_tfrf) {
gf_filter_pid_set_info_str(ch->pid, "smooth_tfrf", NULL);
ch->last_has_tfrf = GF_FALSE;
}
gf_filter_pid_set_eos(ch->pid);
}
break;
} else {
read->force_fetch = GF_TRUE;
break;
}
}
if (!min_offset_plus_one || (min_offset_plus_one - 1 > ch->last_valid_sample_data_offset))
min_offset_plus_one = 1 + ch->last_valid_sample_data_offset;
}
if (read->mem_load_mode && min_offset_plus_one) {
isoffin_purge_mem(read, min_offset_plus_one-1);
}
//we reached end of playback due to a play range request, we must send eos - however, for safety reasons with DASH, we first need to cancel the input
if (read->pid && check_forced_end && (nb_forced_end==count)) {
//abort input
GF_FilterEvent evt;
GF_FEVT_INIT(evt, GF_FEVT_STOP, read->pid);
gf_filter_pid_send_event(read->pid, &evt);
}
if (!is_active) {
return GF_EOS;
}
//if (in_is_eos)
// gf_filter_ask_rt_reschedule(filter, 1);
return GF_OK;
}
| null | null | 196,691
|
15984395158403865840168206053223917598
| 358
|
fixed #1876
|
other
|
wireshark
|
41bfc9112480c3d83331ed93470c7f675a9d5b1a
| 1
|
proto_register_sysdig_event(void)
{
/* XXX Match up with Sysdig's names. */
static hf_register_info hf[] = {
{ &hf_se_cpu_id,
{ "CPU ID", "sysdig.cpu_id",
FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }
},
{ &hf_se_thread_id,
{ "Thread ID", "sysdig.thread_id",
FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL }
},
{ &hf_se_event_length,
{ "Event length", "sysdig.event_len",
FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }
},
{ &hf_se_nparams,
{ "Number of parameters", "sysdig.nparams",
FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }
},
{ &hf_se_event_type,
{ "Event type", "sysdig.event_type",
FT_UINT16, BASE_DEC, VALS(event_type_vals), 0, NULL, HFILL }
},
{ &hf_se_param_lens,
{ "Parameter lengths", "sysdig.param.lens",
FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL }
},
{ &hf_se_param_len,
{ "Parameter length", "sysdig.param.len",
FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }
},
/* Header field registration. Automatically generated by tools/generate-sysdig-event.py */
{ &hf_param_ID_bytes, { "ID", "sysdig.param.syscall.ID", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_action_uint32, { "action", "sysdig.param.cpu_hotplug.action", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_addr_bytes, { "addr", "sysdig.param.ptrace.addr", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_addr_uint64, { "addr", "sysdig.param.page_fault.addr", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_args_string, { "Program arguments", "sysdig.param.execve.args", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_argument_uint64, { "I/O control: argument", "sysdig.param.ioctl.argument", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_backlog_uint32, { "backlog", "sysdig.param.listen.backlog", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cgroups_bytes, { "cgroups", "sysdig.param.execve.cgroups", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_clockid_uint8, { "clockid", "sysdig.param.timerfd_create.clockid", FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cmd_bytes, { "cmd", "sysdig.param.semctl.cmd", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_cmd_int64, { "cmd", "sysdig.param.bpf.cmd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_comm_string, { "Command", "sysdig.param.execve.comm", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_cpu_sys_uint64, { "cpu_sys", "sysdig.param.procinfo.cpu_sys", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cpu_uint32, { "cpu", "sysdig.param.cpu_hotplug.cpu", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cpu_usr_uint64, { "cpu_usr", "sysdig.param.procinfo.cpu_usr", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cur_int64, { "cur", "sysdig.param.setrlimit.cur", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cwd_string, { "Current working directory", "sysdig.param.execve.cwd", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_data_bytes, { "data", "sysdig.param.ptrace.data", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_desc_string, { "desc", "sysdig.param.notification.desc", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_description_string, { "description", "sysdig.param.infra.description", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dev_string, { "dev", "sysdig.param.mount.dev", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dev_uint32, { "dev", "sysdig.param.openat.dev", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_dir_string, { "dir", "sysdig.param.mount.dir", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dirfd_int64, { "dirfd", "sysdig.param.openat.dirfd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_domain_bytes, { "domain", "sysdig.param.socketpair.domain", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dpid_bytes, { "dpid", "sysdig.param.signaldeliver.dpid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_bhardlimit_uint64, { "dqb_bhardlimit", "sysdig.param.quotactl.dqb_bhardlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_bsoftlimit_uint64, { "dqb_bsoftlimit", "sysdig.param.quotactl.dqb_bsoftlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_btime_bytes, { "dqb_btime", "sysdig.param.quotactl.dqb_btime", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_curspace_uint64, { "dqb_curspace", "sysdig.param.quotactl.dqb_curspace", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_ihardlimit_uint64, { "dqb_ihardlimit", "sysdig.param.quotactl.dqb_ihardlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_isoftlimit_uint64, { "dqb_isoftlimit", "sysdig.param.quotactl.dqb_isoftlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_itime_bytes, { "dqb_itime", "sysdig.param.quotactl.dqb_itime", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqi_bgrace_bytes, { "dqi_bgrace", "sysdig.param.quotactl.dqi_bgrace", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqi_flags_bytes, { "dqi_flags", "sysdig.param.quotactl.dqi_flags", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqi_igrace_bytes, { "dqi_igrace", "sysdig.param.quotactl.dqi_igrace", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_egid_bytes, { "egid", "sysdig.param.getresgid.egid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_env_string, { "env", "sysdig.param.execve.env", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_error_bytes, { "error", "sysdig.param.page_fault.error", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_euid_bytes, { "euid", "sysdig.param.getresuid.euid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_event_data_uint64, { "event_data", "sysdig.param.sysdigevent.event_data", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_event_type_uint32, { "event_type", "sysdig.param.sysdigevent.event_type", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_exe_string, { "exe", "sysdig.param.execve.exe", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_fd1_int64, { "fd1", "sysdig.param.pipe.fd1", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fd2_int64, { "fd2", "sysdig.param.pipe.fd2", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fd_in_int64, { "fd_in", "sysdig.param.splice.fd_in", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fd_int64, { "fd", "sysdig.param.openat.fd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fd_out_int64, { "fd_out", "sysdig.param.splice.fd_out", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fdlimit_int64, { "fdlimit", "sysdig.param.vfork.fdlimit", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fdlimit_uint64, { "fdlimit", "sysdig.param.execve.fdlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fds_bytes, { "fds", "sysdig.param.ppoll.fds", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_filename_string, { "filename", "sysdig.param.execve.filename", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_flags_bytes, { "flags", "sysdig.param.linkat.flags", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_flags_uint32, { "flags", "sysdig.param.accept.flags", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_gid_bytes, { "gid", "sysdig.param.getgid.gid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_gid_uint32, { "gid", "sysdig.param.vfork.gid", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_how_bytes, { "how", "sysdig.param.shutdown.how", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_id_int64, { "id", "sysdig.param.tracer.id", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_id_string, { "id", "sysdig.param.notification.id", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_id_uint32, { "id", "sysdig.param.quotactl.id", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_image_string, { "image", "sysdig.param.container.image", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_in_fd_int64, { "in_fd", "sysdig.param.sendfile.in_fd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_initval_uint64, { "initval", "sysdig.param.eventfd.initval", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_ino_uint64, { "ino", "sysdig.param.pipe.ino", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_interval_bytes, { "interval", "sysdig.param.nanosleep.interval", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_ip_uint64, { "ip", "sysdig.param.page_fault.ip", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_json_string, { "json", "sysdig.param.container.json", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_key_int32, { "key", "sysdig.param.semget.key", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_length_uint64, { "length", "sysdig.param.munmap.length", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_level_bytes, { "level", "sysdig.param.getsockopt.level", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_linkdirfd_int64, { "linkdirfd", "sysdig.param.symlinkat.linkdirfd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_linkpath_string, { "linkpath", "sysdig.param.symlinkat.linkpath", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_loginuid_int32, { "loginuid", "sysdig.param.execve.loginuid", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_mask_uint32, { "mask", "sysdig.param.signalfd.mask", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_max_int64, { "max", "sysdig.param.setrlimit.max", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_maxevents_bytes, { "maxevents", "sysdig.param.epoll_wait.maxevents", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_mode_bytes, { "mode", "sysdig.param.access.mode", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_mode_uint32, { "mode", "sysdig.param.openat.mode", FT_UINT32, BASE_OCT, NULL, 0, NULL, HFILL } },
{ &hf_param_name_string, { "name", "sysdig.param.openat.name", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_nativeID_uint16, { "nativeID", "sysdig.param.syscall.nativeID", FT_UINT16, BASE_DEC, VALS(nativeID_uint16_vals), 0, NULL, HFILL } },
{ &hf_param_newcur_int64, { "newcur", "sysdig.param.prlimit.newcur", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_newdir_int64, { "newdir", "sysdig.param.linkat.newdir", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_newdirfd_int64, { "newdirfd", "sysdig.param.renameat.newdirfd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_newmax_int64, { "newmax", "sysdig.param.prlimit.newmax", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_newpath_string, { "newpath", "sysdig.param.linkat.newpath", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_next_bytes, { "next", "sysdig.param.switch.next", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_nsems_int32, { "nsems", "sysdig.param.semget.nsems", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_nsops_uint32, { "nsops", "sysdig.param.semop.nsops", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_nstype_bytes, { "nstype", "sysdig.param.setns.nstype", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_offset_uint64, { "offset", "sysdig.param.sendfile.offset", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_oldcur_int64, { "oldcur", "sysdig.param.prlimit.oldcur", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_olddir_int64, { "olddir", "sysdig.param.linkat.olddir", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_olddirfd_int64, { "olddirfd", "sysdig.param.renameat.olddirfd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_oldmax_int64, { "oldmax", "sysdig.param.prlimit.oldmax", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_oldpath_string, { "oldpath", "sysdig.param.linkat.oldpath", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_op_bytes, { "op", "sysdig.param.futex.op", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_op_uint64, { "op", "sysdig.param.seccomp.op", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_operation_bytes, { "operation", "sysdig.param.flock.operation", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_optlen_uint32, { "optlen", "sysdig.param.getsockopt.optlen", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_optname_bytes, { "optname", "sysdig.param.getsockopt.optname", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_out_fd_int64, { "out_fd", "sysdig.param.sendfile.out_fd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_path_string, { "path", "sysdig.param.mkdirat.path", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_peer_uint64, { "peer", "sysdig.param.socketpair.peer", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_pgft_maj_uint64, { "pgft_maj", "sysdig.param.execve.pgft_maj", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_pgft_min_uint64, { "pgft_min", "sysdig.param.execve.pgft_min", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_pgid_bytes, { "pgid", "sysdig.param.setpgid.pgid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_pgoffset_uint64, { "pgoffset", "sysdig.param.mmap2.pgoffset", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_pid_bytes, { "pid", "sysdig.param.setpgid.pid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_pos_uint64, { "pos", "sysdig.param.pwritev.pos", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_prot_bytes, { "prot", "sysdig.param.mmap2.prot", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_proto_uint32, { "proto", "sysdig.param.socketpair.proto", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_ptid_bytes, { "ptid", "sysdig.param.execve.ptid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_queuelen_uint32, { "queuelen", "sysdig.param.accept.queuelen", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_queuemax_uint32, { "queuemax", "sysdig.param.accept.queuemax", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_queuepct_uint8, { "Accept queue per connection", "sysdig.param.accept.queuepct", FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_quota_fmt_bytes, { "quota_fmt", "sysdig.param.quotactl.quota_fmt", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_quota_fmt_out_bytes, { "quota_fmt_out", "sysdig.param.quotactl.quota_fmt_out", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_quotafilepath_string, { "quotafilepath", "sysdig.param.quotactl.quotafilepath", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_ratio_uint32, { "ratio", "sysdig.param.drop.ratio", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_request_bytes, { "request", "sysdig.param.ptrace.request", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_request_uint64, { "I/O control: request", "sysdig.param.ioctl.request", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_res_bytes, { "res", "sysdig.param.linkat.res", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_res_int64, { "res", "sysdig.param.fcntl.res", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_res_or_fd_bytes, { "res_or_fd", "sysdig.param.bpf.res_or_fd", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_res_uint64, { "res", "sysdig.param.mmap2.res", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_resource_bytes, { "resource", "sysdig.param.prlimit.resource", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_rgid_bytes, { "rgid", "sysdig.param.getresgid.rgid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_ruid_bytes, { "ruid", "sysdig.param.getresuid.ruid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_scope_string, { "scope", "sysdig.param.infra.scope", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_flg_0_bytes, { "sem_flg_0", "sysdig.param.semop.sem_flg_0", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_flg_1_bytes, { "sem_flg_1", "sysdig.param.semop.sem_flg_1", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_num_0_uint16, { "sem_num_0", "sysdig.param.semop.sem_num_0", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_num_1_uint16, { "sem_num_1", "sysdig.param.semop.sem_num_1", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_op_0_int16, { "sem_op_0", "sysdig.param.semop.sem_op_0", FT_INT16, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_op_1_int16, { "sem_op_1", "sysdig.param.semop.sem_op_1", FT_INT16, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_semflg_bytes, { "semflg", "sysdig.param.semget.semflg", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_semid_int32, { "semid", "sysdig.param.semctl.semid", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_semnum_int32, { "semnum", "sysdig.param.semctl.semnum", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_sgid_bytes, { "sgid", "sysdig.param.getresgid.sgid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sig_bytes, { "sig", "sysdig.param.signaldeliver.sig", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sigmask_bytes, { "sigmask", "sysdig.param.ppoll.sigmask", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_size_uint32, { "size", "sysdig.param.pwritev.size", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_size_uint64, { "size", "sysdig.param.sendfile.size", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_source_string, { "source", "sysdig.param.infra.source", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_source_uint64, { "source", "sysdig.param.socketpair.source", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_special_string, { "special", "sysdig.param.quotactl.special", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_spid_bytes, { "spid", "sysdig.param.signaldeliver.spid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_status_bytes, { "status", "sysdig.param.procexit.status", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_suid_bytes, { "suid", "sysdig.param.getresuid.suid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_tags_bytes, { "tags", "sysdig.param.tracer.tags", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_target_string, { "target", "sysdig.param.symlinkat.target", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_tid_bytes, { "tid", "sysdig.param.execve.tid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_timeout_bytes, { "timeout", "sysdig.param.ppoll.timeout", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_timeout_int64, { "timeout", "sysdig.param.poll.timeout", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_tty_int32, { "tty", "sysdig.param.execve.tty", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_tuple_bytes, { "tuple", "sysdig.param.accept.tuple", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_type_bytes, { "type", "sysdig.param.quotactl.type", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_type_string, { "type", "sysdig.param.mount.type", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_type_uint32, { "type", "sysdig.param.container.type", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_uid_bytes, { "uid", "sysdig.param.getuid.uid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_uid_uint32, { "uid", "sysdig.param.vfork.uid", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_val_bytes, { "val", "sysdig.param.getsockopt.val", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_val_int32, { "val", "sysdig.param.semctl.val", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_val_uint64, { "val", "sysdig.param.futex.val", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_vm_rss_uint32, { "vm_rss", "sysdig.param.execve.vm_rss", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_vm_size_uint32, { "vm_size", "sysdig.param.execve.vm_size", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_vm_swap_uint32, { "vm_swap", "sysdig.param.execve.vm_swap", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_vpid_bytes, { "vpid", "sysdig.param.vfork.vpid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_vtid_bytes, { "vtid", "sysdig.param.vfork.vtid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_whence_bytes, { "whence", "sysdig.param.llseek.whence", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_ret_bytes, { "ret", "sysdig.param.procexit.ret", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_core_uint8, { "core", "sysdig.param.procexit.core", FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL } },
};
/* Setup protocol subtree array */
static gint *ett[] = {
&ett_sysdig_event,
&ett_sysdig_parm_lens,
&ett_sysdig_syscall
};
/* Register the protocol name and description */
proto_sysdig_event = proto_register_protocol("Sysdig System Call",
"Sysdig Event", "sysdig");
/* Required function calls to register the header fields and subtrees */
proto_register_field_array(proto_sysdig_event, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
register_dissector("sysdig", dissect_sysdig_event, proto_sysdig_event);
}
| null | null | 196,693
|
177882138436273397991326132064548612432
| 225
|
dissectors: re-generated sysdig events using provided tool.
Signed-off-by: Federico Di Pierro <[email protected]>
|
other
|
tensorflow
|
67bfd9feeecfb3c61d80f0e46d89c170fbee682b
| 1
|
void SparseFillEmptyRowsOpImpl(OpKernelContext* context,
AsyncOpKernel::DoneCallback done = nullptr) {
// Note that setting this empty lambda as the default parameter value directly
// can cause strange compiler/linker errors, so we do it like this instead.
if (!done) {
done = [] {};
}
const int kIndicesInput = 0;
const int kValuesInput = 1;
const int kDenseShapeInput = 2;
const int kDefaultValueInput = 3;
const Tensor& indices_t = context->input(kIndicesInput);
const Tensor& values_t = context->input(kValuesInput);
const Tensor& dense_shape_t = context->input(kDenseShapeInput);
const Tensor& default_value_t = context->input(kDefaultValueInput);
OP_REQUIRES_ASYNC(
context, TensorShapeUtils::IsVector(dense_shape_t.shape()),
errors::InvalidArgument("dense_shape must be a vector, saw: ",
dense_shape_t.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()),
errors::InvalidArgument("indices must be a matrix, saw: ",
indices_t.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()),
errors::InvalidArgument("values must be a vector, saw: ",
values_t.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(
context, TensorShapeUtils::IsScalar(default_value_t.shape()),
errors::InvalidArgument("default_value must be a scalar, saw: ",
default_value_t.shape().DebugString()),
done);
// TODO(ebrevdo): add shape checks between values, indices,
// Also add check that dense rank > 0.
OP_REQUIRES_ASYNC(context, dense_shape_t.NumElements() != 0,
errors::InvalidArgument("Dense shape cannot be empty."),
done);
using FunctorType = functor::SparseFillEmptyRows<Device, T, Tindex>;
OP_REQUIRES_OK_ASYNC(context,
FunctorType()(context, default_value_t, indices_t,
values_t, dense_shape_t, done),
done);
}
| null | null | 196,698
|
110814885553575581074060288061926287313
| 48
|
Make SparseFillEmptyRows validate that the length of `values` must be equal to the number of index tuples.
PiperOrigin-RevId: 399969549
Change-Id: I3c2f2ca1c1d2cc88bb5951c6958b38c16e9436c8
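A hedged sketch of the validation the message describes, reusing the tensors from the function above (exact message text assumed):

  // Sketch: require exactly one value per index tuple before invoking the
  // SparseFillEmptyRows functor.
  OP_REQUIRES_ASYNC(
      context, values_t.dim_size(0) == indices_t.dim_size(0),
      errors::InvalidArgument(
          "The length of `values` (", values_t.dim_size(0),
          ") must match the first dimension of `indices` (",
          indices_t.dim_size(0), ")."),
      done);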
|
other
|
tensorflow
|
11ced8467eccad9c7cb94867708be8fa5c66c730
| 1
|
Status ValidateInputs(const Tensor *a_indices, const Tensor *a_values,
const Tensor *a_shape, const Tensor *b) {
if (!TensorShapeUtils::IsMatrix(a_indices->shape())) {
return errors::InvalidArgument(
"Input a_indices should be a matrix but received shape: ",
a_indices->shape().DebugString());
}
if (!TensorShapeUtils::IsVector(a_values->shape()) ||
!TensorShapeUtils::IsVector(a_shape->shape())) {
return errors::InvalidArgument(
"Inputs a_values and a_shape should be vectors "
"but received shapes: ",
a_values->shape().DebugString(), " and ",
a_shape->shape().DebugString());
}
if (a_shape->NumElements() != b->dims()) {
return errors::InvalidArgument(
"Two operands have different ranks; received: ", a_shape->NumElements(),
" and ", b->dims());
}
const auto a_shape_flat = a_shape->flat<Index>();
for (int i = 0; i < b->dims(); ++i) {
if (a_shape_flat(i) != b->dim_size(i)) {
return errors::InvalidArgument(
"Dimension ", i,
" does not equal (no broadcasting is supported): sparse side ",
a_shape_flat(i), " vs dense side ", b->dim_size(i));
}
}
return Status::OK();
}
| null | null | 196,705
|
313787551478648270695677588993470558322
| 31
|
Fix UB in SparseTensorDenseAdd
Added more input validation to avoid nullptr dereferences and out-of-bounds
array indexing.
PiperOrigin-RevId: 446192704
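A sketch of additional checks of the kind the message describes, in the style of ValidateInputs above (the real commit's exact set of checks is an assumption):

  // Sketch: tie the index matrix to the values vector and to the rank.
  if (a_indices->dim_size(0) != a_values->dim_size(0)) {
    return errors::InvalidArgument(
        "Dimensions ", a_indices->dim_size(0), " and ",
        a_values->dim_size(0), " are not compatible");
  }
  if (a_indices->dim_size(1) != a_shape->NumElements()) {
    return errors::InvalidArgument(
        "Second dimension of a_indices (", a_indices->dim_size(1),
        ") must match the rank implied by a_shape (",
        a_shape->NumElements(), ")");
  }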
|
other
|
mbedtls
|
e5af9fabf7d68e3807b6ea78792794b8352dbba2
| 1
|
static int ssl_check_dtls_clihlo_cookie(
mbedtls_ssl_context *ssl,
const unsigned char *cli_id, size_t cli_id_len,
const unsigned char *in, size_t in_len,
unsigned char *obuf, size_t buf_len, size_t *olen )
{
size_t sid_len, cookie_len;
unsigned char *p;
/*
* Structure of ClientHello with record and handshake headers,
* and expected values. We don't need to check a lot, more checks will be
* done when actually parsing the ClientHello - skipping those checks
* avoids code duplication and does not make cookie forging any easier.
*
* 0-0 ContentType type; copied, must be handshake
* 1-2 ProtocolVersion version; copied
* 3-4 uint16 epoch; copied, must be 0
* 5-10 uint48 sequence_number; copied
* 11-12 uint16 length; (ignored)
*
* 13-13 HandshakeType msg_type; (ignored)
* 14-16 uint24 length; (ignored)
* 17-18 uint16 message_seq; copied
* 19-21 uint24 fragment_offset; copied, must be 0
* 22-24 uint24 fragment_length; (ignored)
*
* 25-26 ProtocolVersion client_version; (ignored)
* 27-58 Random random; (ignored)
* 59-xx SessionID session_id; 1 byte len + sid_len content
* 60+ opaque cookie<0..2^8-1>; 1 byte len + content
* ...
*
* Minimum length is 61 bytes.
*/
MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: in_len=%u",
(unsigned) in_len ) );
MBEDTLS_SSL_DEBUG_BUF( 4, "cli_id", cli_id, cli_id_len );
if( in_len < 61 )
{
MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: record too short" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( in[0] != MBEDTLS_SSL_MSG_HANDSHAKE ||
in[3] != 0 || in[4] != 0 ||
in[19] != 0 || in[20] != 0 || in[21] != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: not a good ClientHello" ) );
MBEDTLS_SSL_DEBUG_MSG( 4, ( " type=%u epoch=%u fragment_offset=%u",
in[0],
(unsigned) in[3] << 8 | in[4],
(unsigned) in[19] << 16 | in[20] << 8 | in[21] ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
sid_len = in[59];
if( sid_len > in_len - 61 )
{
MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: sid_len=%u > %u",
(unsigned) sid_len,
(unsigned) in_len - 61 ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 4, "sid received from network",
in + 60, sid_len );
cookie_len = in[60 + sid_len];
if( cookie_len > in_len - 60 ) {
MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: cookie_len=%u > %u",
(unsigned) cookie_len,
(unsigned) in_len - 60 ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 4, "cookie received from network",
in + sid_len + 61, cookie_len );
if( ssl->conf->f_cookie_check( ssl->conf->p_cookie,
in + sid_len + 61, cookie_len,
cli_id, cli_id_len ) == 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: valid" ) );
return( 0 );
}
/*
* If we get here, we've got an invalid cookie, let's prepare HVR.
*
* 0-0 ContentType type; copied
* 1-2 ProtocolVersion version; copied
* 3-4 uint16 epoch; copied
* 5-10 uint48 sequence_number; copied
* 11-12 uint16 length; olen - 13
*
* 13-13 HandshakeType msg_type; hello_verify_request
* 14-16 uint24 length; olen - 25
* 17-18 uint16 message_seq; copied
* 19-21 uint24 fragment_offset; copied
* 22-24 uint24 fragment_length; olen - 25
*
* 25-26 ProtocolVersion server_version; 0xfe 0xff
* 27-27 opaque cookie<0..2^8-1>; cookie_len = olen - 27, cookie
*
* Minimum length is 28.
*/
if( buf_len < 28 )
return( MBEDTLS_ERR_SSL_BUFFER_TOO_SMALL );
/* Copy most fields and adapt others */
memcpy( obuf, in, 25 );
obuf[13] = MBEDTLS_SSL_HS_HELLO_VERIFY_REQUEST;
obuf[25] = 0xfe;
obuf[26] = 0xff;
/* Generate and write actual cookie */
p = obuf + 28;
if( ssl->conf->f_cookie_write( ssl->conf->p_cookie,
&p, obuf + buf_len,
cli_id, cli_id_len ) != 0 )
{
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
*olen = p - obuf;
/* Go back and fill length fields */
obuf[27] = (unsigned char)( *olen - 28 );
obuf[14] = obuf[22] = MBEDTLS_BYTE_2( *olen - 25 );
obuf[15] = obuf[23] = MBEDTLS_BYTE_1( *olen - 25 );
obuf[16] = obuf[24] = MBEDTLS_BYTE_0( *olen - 25 );
MBEDTLS_PUT_UINT16_BE( *olen - 13, obuf, 11 );
return( MBEDTLS_ERR_SSL_HELLO_VERIFY_REQUIRED );
}
| null | null | 196,706
|
96472964360820481519603345267108102743
| 135
|
Add missing sid_len in calculations of cookie sizes
This could lead to a potential buffer overread with small
MBEDTLS_SSL_IN_CONTENT_LEN.
Change the bound calculations so that it is apparent
what lengths and sizes are used.
Signed-off-by: Andrzej Kurek <[email protected]>
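With sid_len included, the cookie bound becomes the following; a sketch of the corrected check (the real patch also reworks the surrounding bounds):

    /* Sketch: cookie content starts at offset 61 + sid_len, so it must fit
     * in what remains of the input after the header and the session id.
     * No underflow: in_len >= 61 and sid_len <= in_len - 61 were checked. */
    cookie_len = in[60 + sid_len];
    if( cookie_len > in_len - 61 - sid_len )
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );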
|
other
|
njs
|
8b39afdad9a0761e0a5d4af1a762bd9a6daef572
| 1
|
njs_array_prototype_sort(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
njs_index_t unused)
{
int64_t i, und, len, nlen, length;
njs_int_t ret, fast_path;
njs_array_t *array;
njs_value_t *this, *comparefn, *start, *strings;
njs_array_sort_ctx_t ctx;
njs_array_sort_slot_t *p, *end, *slots, *nslots;
comparefn = njs_arg(args, nargs, 1);
if (njs_is_defined(comparefn)) {
if (njs_slow_path(!njs_is_function(comparefn))) {
njs_type_error(vm, "comparefn must be callable or undefined");
return NJS_ERROR;
}
ctx.function = njs_function(comparefn);
} else {
ctx.function = NULL;
}
this = njs_argument(args, 0);
ret = njs_value_to_object(vm, this);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = njs_value_length(vm, this, &length);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
if (njs_slow_path(length < 2)) {
vm->retval = *this;
return NJS_OK;
}
slots = NULL;
ctx.vm = vm;
ctx.strings.separate = 0;
ctx.strings.pointer = 0;
ctx.exception = 0;
fast_path = njs_is_fast_array(this);
if (njs_fast_path(fast_path)) {
array = njs_array(this);
start = array->start;
slots = njs_mp_alloc(vm->mem_pool,
sizeof(njs_array_sort_slot_t) * length);
if (njs_slow_path(slots == NULL)) {
return NJS_ERROR;
}
und = 0;
p = slots;
for (i = 0; i < length; i++) {
if (njs_slow_path(!njs_is_valid(&start[i]))) {
fast_path = 0;
njs_mp_free(vm->mem_pool, slots);
slots = NULL;
goto slow_path;
}
if (njs_slow_path(njs_is_undefined(&start[i]))) {
und++;
continue;
}
p->value = start[i];
p->pos = i;
p->str = NULL;
p++;
}
len = p - slots;
} else {
slow_path:
und = 0;
p = NULL;
end = NULL;
for (i = 0; i < length; i++) {
if (p >= end) {
nlen = njs_min(njs_max((p - slots) * 2, 8), length);
nslots = njs_mp_alloc(vm->mem_pool,
sizeof(njs_array_sort_slot_t) * nlen);
if (njs_slow_path(nslots == NULL)) {
njs_memory_error(vm);
return NJS_ERROR;
}
if (slots != NULL) {
p = (void *) njs_cpymem(nslots, slots,
sizeof(njs_array_sort_slot_t) * (p - slots));
njs_mp_free(vm->mem_pool, slots);
} else {
p = nslots;
}
slots = nslots;
end = slots + nlen;
}
ret = njs_value_property_i64(vm, this, i, &p->value);
if (njs_slow_path(ret == NJS_ERROR)) {
ret = NJS_ERROR;
goto exception;
}
if (ret == NJS_DECLINED) {
continue;
}
if (njs_is_undefined(&p->value)) {
und++;
continue;
}
p->pos = i;
p->str = NULL;
p++;
}
len = p - slots;
}
strings = njs_arr_init(vm->mem_pool, &ctx.strings, NULL, len + 1,
sizeof(njs_value_t));
if (njs_slow_path(strings == NULL)) {
ret = NJS_ERROR;
goto exception;
}
njs_qsort(slots, len, sizeof(njs_array_sort_slot_t), njs_array_compare,
&ctx);
if (ctx.exception) {
ret = NJS_ERROR;
goto exception;
}
if (njs_fast_path(fast_path)) {
array = njs_array(this);
start = array->start;
for (i = 0; i < len; i++) {
start[i] = slots[i].value;
}
for (i = len; und-- > 0; i++) {
start[i] = njs_value_undefined;
}
} else {
for (i = 0; i < len; i++) {
if (slots[i].pos != i) {
ret = njs_value_property_i64_set(vm, this, i, &slots[i].value);
if (njs_slow_path(ret == NJS_ERROR)) {
goto exception;
}
}
}
for (i = len; und-- > 0; i++) {
ret = njs_value_property_i64_set(vm, this, i,
njs_value_arg(&njs_value_undefined));
if (njs_slow_path(ret == NJS_ERROR)) {
goto exception;
}
}
for (; i < length; i++) {
ret = njs_value_property_i64_delete(vm, this, i, NULL);
if (njs_slow_path(ret == NJS_ERROR)) {
goto exception;
}
}
}
vm->retval = *this;
ret = NJS_OK;
exception:
if (slots != NULL) {
njs_mp_free(vm->mem_pool, slots);
}
njs_arr_destroy(&ctx.strings);
return ret;
}
| null | null | 196,726
|
13165140673111245230691626642940550743
| 204
|
Fixed Array.prototype.sort() when the array size is changed in a comparator.
This fixes issue #468 on GitHub.
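A hedged sketch of the guard the fix implies: the fast-path write-back must not trust the pre-sort length, because a comparator can shrink the array underneath it (names reuse the function above; the actual patch may differ):

    /* Sketch: revalidate the array before copying sorted slots back. */
    if (fast_path && njs_is_fast_array(this)
        && njs_array(this)->length >= (uint32_t) length)
    {
        start = njs_array(this)->start;
        /* safe to write slots[0..len-1] and the trailing undefineds */

    } else {
        /* fall back to per-property writes via
         * njs_value_property_i64_set() */
    }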
|
other
|
gpac
|
77510778516803b7f7402d7423c6d6bef50254c3
| 1
|
GF_Err xtra_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_XtraBox *ptr = (GF_XtraBox *)s;
while (ptr->size) {
GF_XtraTag *tag;
u32 prop_type = 0;
char *data=NULL, *data2=NULL;
ISOM_DECREASE_SIZE_NO_ERR(ptr, 8)
s32 tag_size = gf_bs_read_u32(bs);
u32 name_size = gf_bs_read_u32(bs);
if (tag_size < 8) return GF_ISOM_INVALID_FILE;
tag_size -= 8;
if ((tag_size>ptr->size) || (name_size>ptr->size)) {
return GF_ISOM_INVALID_FILE;
}
ISOM_DECREASE_SIZE_NO_ERR(ptr, 10)
ISOM_DECREASE_SIZE_NO_ERR(ptr, name_size)
data = gf_malloc(sizeof(char) * (name_size+1));
gf_bs_read_data(bs, data, name_size);
data[name_size] = 0;
tag_size-=name_size;
u32 flags = gf_bs_read_u32(bs);
u32 prop_size = gf_bs_read_u32(bs);
tag_size-=8;
if (prop_size>4) {
tag_size-=2;
prop_type = gf_bs_read_u16(bs);
prop_size -= 6;
ISOM_DECREASE_SIZE_NO_ERR(ptr, prop_size)
//add 2 extra bytes so the dumped string is NUL-terminated in the UTF-16 case
data2 = gf_malloc(sizeof(char) * (prop_size+2));
gf_bs_read_data(bs, data2, prop_size);
data2[prop_size] = 0;
data2[prop_size+1] = 0;
tag_size-=prop_size;
} else {
prop_size = 0;
}
GF_SAFEALLOC(tag, GF_XtraTag)
tag->flags = flags;
tag->name = data;
tag->prop_size = prop_size;
tag->prop_value = data2;
tag->prop_type = prop_type;
gf_list_add(ptr->tags, tag);
if (tag_size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[isom] invalid tag size in Xtra !\n"));
}
}
return GF_OK;
}
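
The parser above interleaves raw reads with ad-hoc size arithmetic. Below is a sketch of the checked-consumption pattern that length-prefixed tag parsing of this kind needs, using a hypothetical remaining-bytes counter rather than GPAC's ISOM_DECREASE_SIZE_NO_ERR machinery:

#include <stdint.h>

/* Deduct `want` bytes from the budget of the enclosing box; refuse
 * the read if it would run past the end. Illustrative helper only. */
static int consume(uint64_t *remaining, uint64_t want)
{
    if (want > *remaining) {
        return -1;   /* truncated or lying tag header */
    }
    *remaining -= want;
    return 0;
}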
| null | null | 196,764
|
58735372241291791237218686629226360010
| 57
|
fixed #2255
|
other
|
tensorflow
|
a4e138660270e7599793fa438cd7b2fc2ce215a6
| 1
|
Status Examples::Initialize(OpKernelContext* const context,
                            const ModelWeights& weights,
                            const int num_sparse_features,
                            const int num_sparse_features_with_values,
                            const int num_dense_features) {
  num_features_ = num_sparse_features + num_dense_features;

  OpInputList sparse_example_indices_inputs;
  TF_RETURN_IF_ERROR(context->input_list("sparse_example_indices",
                                         &sparse_example_indices_inputs));
  if (sparse_example_indices_inputs.size() != num_sparse_features)
    return errors::InvalidArgument(
        "Expected ", num_sparse_features,
        " tensors in sparse_example_indices but got ",
        sparse_example_indices_inputs.size());

  OpInputList sparse_feature_indices_inputs;
  TF_RETURN_IF_ERROR(context->input_list("sparse_feature_indices",
                                         &sparse_feature_indices_inputs));
  if (sparse_feature_indices_inputs.size() != num_sparse_features)
    return errors::InvalidArgument(
        "Expected ", num_sparse_features,
        " tensors in sparse_feature_indices but got ",
        sparse_feature_indices_inputs.size());

  OpInputList sparse_feature_values_inputs;
  if (num_sparse_features_with_values > 0) {
    TF_RETURN_IF_ERROR(context->input_list("sparse_feature_values",
                                           &sparse_feature_values_inputs));
    if (sparse_feature_values_inputs.size() != num_sparse_features_with_values)
      return errors::InvalidArgument(
          "Expected ", num_sparse_features_with_values,
          " tensors in sparse_feature_values but got ",
          sparse_feature_values_inputs.size());
  }

  const Tensor* example_weights_t;
  TF_RETURN_IF_ERROR(context->input("example_weights", &example_weights_t));
  auto example_weights = example_weights_t->flat<float>();

  if (example_weights.size() >= std::numeric_limits<int>::max()) {
    return errors::InvalidArgument(strings::Printf(
        "Too many examples in a mini-batch: %zu > %d", example_weights.size(),
        std::numeric_limits<int>::max()));
  }

  // The static_cast here is safe since num_examples can be at max an int.
  const int num_examples = static_cast<int>(example_weights.size());

  const Tensor* example_labels_t;
  TF_RETURN_IF_ERROR(context->input("example_labels", &example_labels_t));
  auto example_labels = example_labels_t->flat<float>();

  OpInputList dense_features_inputs;
  TF_RETURN_IF_ERROR(
      context->input_list("dense_features", &dense_features_inputs));

  examples_.clear();
  examples_.resize(num_examples);
  probabilities_.resize(num_examples);
  sampled_index_.resize(num_examples);
  sampled_count_.resize(num_examples);
  for (int example_id = 0; example_id < num_examples; ++example_id) {
    Example* const example = &examples_[example_id];
    example->sparse_features_.resize(num_sparse_features);
    example->dense_vectors_.resize(num_dense_features);
    example->example_weight_ = example_weights(example_id);
    example->example_label_ = example_labels(example_id);
  }
  const DeviceBase::CpuWorkerThreads& worker_threads =
      *context->device()->tensorflow_cpu_worker_threads();
  TF_RETURN_IF_ERROR(CreateSparseFeatureRepresentation(
      worker_threads, num_examples, num_sparse_features, weights,
      sparse_example_indices_inputs, sparse_feature_indices_inputs,
      sparse_feature_values_inputs, &examples_));
  TF_RETURN_IF_ERROR(CreateDenseFeatureRepresentation(
      worker_threads, num_examples, num_dense_features, weights,
      dense_features_inputs, &examples_));
  TF_RETURN_IF_ERROR(ComputeSquaredNormPerExample(
      worker_threads, num_examples, num_sparse_features, num_dense_features,
      &examples_));
  return Status::OK();
}
| null | null | 196,790
|
310715085860763790063727145741184772931
| 80
|
Add remaining validation to `sdca_internal.cc`
PiperOrigin-RevId: 387738010
Change-Id: I28eedcfd87a53aaf34deb075acea1f8c95470808
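
The pattern the commit adds, reduced to a C-style sketch (the check_count helper is hypothetical, not TensorFlow's API): every input list length is compared against its attribute-derived expectation before any element is dereferenced.

#include <stdio.h>

static int check_count(long got, long want, const char *name)
{
    if (got != want) {
        fprintf(stderr, "Expected %ld tensors in %s but got %ld\n",
                want, name, got);
        return -1;   /* maps to errors::InvalidArgument above */
    }
    return 0;
}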
|
other
|
percona-xtrabackup
|
7742f875bb289a874246fb4653b7cd9f14b588fe
| 1
|
char *make_argv(char *buf, size_t len, int argc, char **argv)
{
  size_t left = len;
  const char *arg;

  buf[0] = 0;
  ++argv; --argc;
  while (argc > 0 && left > 0)
  {
    arg = *argv;
    if (strncmp(*argv, "--password", strlen("--password")) == 0) {
      arg = "--password=...";
    }
    if (strncmp(*argv, "--encrypt-key",
                strlen("--encrypt-key")) == 0) {
      arg = "--encrypt-key=...";
    }
    if (strncmp(*argv, "--encrypt_key",
                strlen("--encrypt_key")) == 0) {
      arg = "--encrypt_key=...";
    }
    if (strncmp(*argv, "--transition-key",
                strlen("--transition-key")) == 0) {
      arg = "--transition-key=...";
    }
    if (strncmp(*argv, "--transition_key",
                strlen("--transition_key")) == 0) {
      arg = "--transition_key=...";
    }
    left -= ut_snprintf(buf + len - left, left,
                        "%s%c", arg, argc > 1 ? ' ' : 0);
    ++argv; --argc;
  }
  return buf;
}
| null | null | 196,792
|
132710794129423427640045943674520634467
| 36
|
PXB-2722: password is written into xtrabackup_info
https://jira.percona.com/browse/PXB-2722
Analysis:
the password passed with the -p option is written into the backup tool_command in xtrabackup_info
Fix:
mask the password before writing it into xtrabackup_info
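
A compact sketch of the same masking idea (the is_secret helper is hypothetical; the real fix is the prefix chain in make_argv above):

#include <string.h>

static int is_secret(const char *arg)
{
    static const char *const prefixes[] = {
        "--password", "--encrypt-key", "--encrypt_key",
        "--transition-key", "--transition_key"
    };
    size_t i;

    for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
        if (strncmp(arg, prefixes[i], strlen(prefixes[i])) == 0) {
            return 1;   /* log "<prefix>=..." instead of the real value */
        }
    }
    return 0;
}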
|
other
|
gpac
|
f5a038e6893019ee471b6a57490cf7a495673816
| 1
|
GF_Err gf_hinter_finalize(GF_ISOFile *file, GF_SDP_IODProfile IOD_Profile, u32 bandwidth)
{
u32 i, sceneT, odT, descIndex, size, size64;
GF_InitialObjectDescriptor *iod;
GF_SLConfig slc;
GF_ISOSample *samp;
Bool remove_ocr;
u8 *buffer;
char buf64[5000], sdpLine[5100];
gf_isom_sdp_clean(file);
if (bandwidth) {
sprintf(buf64, "b=AS:%d", bandwidth);
gf_isom_sdp_add_line(file, buf64);
}
//xtended attribute for copyright
if (gf_sys_is_test_mode()) {
sprintf(buf64, "a=x-copyright: %s", "MP4/3GP File hinted with GPAC - (c) Telecom ParisTech (http://gpac.io)");
} else {
sprintf(buf64, "a=x-copyright: MP4/3GP File hinted with GPAC %s - %s", gf_gpac_version(), gf_gpac_copyright() );
}
gf_isom_sdp_add_line(file, buf64);
if (IOD_Profile == GF_SDP_IOD_NONE) return GF_OK;
odT = sceneT = 0;
for (i=0; i<gf_isom_get_track_count(file); i++) {
if (!gf_isom_is_track_in_root_od(file, i+1)) continue;
switch (gf_isom_get_media_type(file,i+1)) {
case GF_ISOM_MEDIA_OD:
odT = i+1;
break;
case GF_ISOM_MEDIA_SCENE:
sceneT = i+1;
break;
}
}
remove_ocr = 0;
if (IOD_Profile == GF_SDP_IOD_ISMA_STRICT) {
IOD_Profile = GF_SDP_IOD_ISMA;
remove_ocr = 1;
}
/*if we want ISMA like iods, we need at least BIFS */
if ( (IOD_Profile == GF_SDP_IOD_ISMA) && !sceneT ) return GF_BAD_PARAM;
/*do NOT change PLs, we assume they are correct*/
iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file);
if (!iod) return GF_NOT_SUPPORTED;
/*rewrite an IOD with good SL config - embbed data if possible*/
if (IOD_Profile == GF_SDP_IOD_ISMA) {
GF_ESD *esd;
Bool is_ok = 1;
while (gf_list_count(iod->ESDescriptors)) {
esd = (GF_ESD*)gf_list_get(iod->ESDescriptors, 0);
gf_odf_desc_del((GF_Descriptor *) esd);
gf_list_rem(iod->ESDescriptors, 0);
}
/*get OD esd, and embbed stream data if possible*/
if (odT) {
esd = gf_isom_get_esd(file, odT, 1);
if (gf_isom_get_sample_count(file, odT)==1) {
samp = gf_isom_get_sample(file, odT, 1, &descIndex);
if (samp && gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_OD)) {
InitSL_NULL(&slc);
slc.predefined = 0;
slc.hasRandomAccessUnitsOnlyFlag = 1;
slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, odT);
slc.OCRResolution = 1000;
slc.startCTS = samp->DTS+samp->CTS_Offset;
slc.startDTS = samp->DTS;
//set the SL for future extraction
gf_isom_set_extraction_slc(file, odT, 1, &slc);
size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000);
buf64[size64] = 0;
sprintf(sdpLine, "data:application/mpeg4-od-au;base64,%s", buf64);
if (esd->decoderConfig) {
esd->decoderConfig->avgBitrate = 0;
esd->decoderConfig->bufferSizeDB = samp->dataLength;
esd->decoderConfig->maxBitrate = 0;
}
size64 = (u32) strlen(sdpLine)+1;
esd->URLString = (char*)gf_malloc(sizeof(char) * size64);
strcpy(esd->URLString, sdpLine);
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[rtp hinter] OD sample too large to be embedded in IOD - ISMA disabled\n"));
is_ok = 0;
}
gf_isom_sample_del(&samp);
}
if (remove_ocr) esd->OCRESID = 0;
else if (esd->OCRESID == esd->ESID) esd->OCRESID = 0;
//OK, add this to our IOD
gf_list_add(iod->ESDescriptors, esd);
}
esd = gf_isom_get_esd(file, sceneT, 1);
if (gf_isom_get_sample_count(file, sceneT)==1) {
samp = gf_isom_get_sample(file, sceneT, 1, &descIndex);
if (samp && gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_SCENE)) {
slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, sceneT);
slc.OCRResolution = 1000;
slc.startCTS = samp->DTS+samp->CTS_Offset;
slc.startDTS = samp->DTS;
//set the SL for future extraction
gf_isom_set_extraction_slc(file, sceneT, 1, &slc);
//encode in Base64 the sample
size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000);
buf64[size64] = 0;
sprintf(sdpLine, "data:application/mpeg4-bifs-au;base64,%s", buf64);
if (esd->decoderConfig) {
esd->decoderConfig->avgBitrate = 0;
esd->decoderConfig->bufferSizeDB = samp->dataLength;
esd->decoderConfig->maxBitrate = 0;
}
esd->URLString = (char*)gf_malloc(sizeof(char) * (strlen(sdpLine)+1));
strcpy(esd->URLString, sdpLine);
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Scene description sample too large to be embedded in IOD - ISMA disabled\n"));
is_ok = 0;
}
gf_isom_sample_del(&samp);
}
if (remove_ocr) esd->OCRESID = 0;
else if (esd->OCRESID == esd->ESID) esd->OCRESID = 0;
gf_list_add(iod->ESDescriptors, esd);
if (is_ok) {
u32 has_a, has_v, has_i_a, has_i_v;
has_a = has_v = has_i_a = has_i_v = 0;
for (i=0; i<gf_isom_get_track_count(file); i++) {
esd = gf_isom_get_esd(file, i+1, 1);
if (!esd) continue;
if (esd->decoderConfig) {
if (esd->decoderConfig->streamType==GF_STREAM_VISUAL) {
if (esd->decoderConfig->objectTypeIndication==GF_CODECID_MPEG4_PART2) has_i_v ++;
else has_v++;
} else if (esd->decoderConfig->streamType==GF_STREAM_AUDIO) {
if (esd->decoderConfig->objectTypeIndication==GF_CODECID_AAC_MPEG4) has_i_a ++;
else has_a++;
}
}
gf_odf_desc_del((GF_Descriptor *)esd);
}
/*only 1 MPEG-4 visual max and 1 MPEG-4 audio max for ISMA compliancy*/
if (!has_v && !has_a && (has_i_v<=1) && (has_i_a<=1)) {
sprintf(sdpLine, "a=isma-compliance:1,1.0,1");
gf_isom_sdp_add_line(file, sdpLine);
}
}
}
//encode the IOD
buffer = NULL;
size = 0;
gf_odf_desc_write((GF_Descriptor *) iod, &buffer, &size);
gf_odf_desc_del((GF_Descriptor *)iod);
//encode in Base64 the iod
size64 = gf_base64_encode(buffer, size, buf64, 2000);
buf64[size64] = 0;
gf_free(buffer);
sprintf(sdpLine, "a=mpeg4-iod:\"data:application/mpeg4-iod;base64,%s\"", buf64);
gf_isom_sdp_add_line(file, sdpLine);
return GF_OK;
}
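
The function above builds SDP lines with sprintf into fixed buffers (buf64[5000], sdpLine[5100]). Below is a hedged sketch of the bounded-write discipline such code calls for; the sdp_line helper is an assumption for illustration, not GPAC's actual fix:

#include <stdio.h>

/* Write one SDP attribute line, rejecting truncation instead of
 * silently overflowing. Illustrative only. */
static int sdp_line(char *dst, size_t cap, const char *key, const char *val)
{
    int n = snprintf(dst, cap, "a=%s:%s", key, val);
    return (n < 0 || (size_t)n >= cap) ? -1 : 0;
}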
| null | null | 196,801
|
87434855642795708862552350553523342121
| 179
|
fixed #1885
|
other
|
mruby
|
aaa28a508903041dd7399d4159a8ace9766b022f
| 1
|
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
{
/* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
const mrb_irep *irep = proc->body.irep;
const mrb_pool_value *pool = irep->pool;
const mrb_sym *syms = irep->syms;
mrb_code insn;
int ai = mrb_gc_arena_save(mrb);
struct mrb_jmpbuf *prev_jmp = mrb->jmp;
struct mrb_jmpbuf c_jmp;
uint32_t a;
uint16_t b;
uint16_t c;
mrb_sym mid;
const struct mrb_irep_catch_handler *ch;
#ifdef DIRECT_THREADED
static const void * const optable[] = {
#define OPCODE(x,_) &&L_OP_ ## x,
#include "mruby/ops.h"
#undef OPCODE
};
#endif
mrb_bool exc_catched = FALSE;
RETRY_TRY_BLOCK:
MRB_TRY(&c_jmp) {
if (exc_catched) {
exc_catched = FALSE;
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK)
goto L_BREAK;
goto L_RAISE;
}
mrb->jmp = &c_jmp;
mrb_vm_ci_proc_set(mrb->c->ci, proc);
#define regs (mrb->c->ci->stack)
INIT_DISPATCH {
CASE(OP_NOP, Z) {
/* do nothing */
NEXT;
}
CASE(OP_MOVE, BB) {
regs[a] = regs[b];
NEXT;
}
CASE(OP_LOADL, BB) {
switch (pool[b].tt) { /* number */
case IREP_TT_INT32:
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
break;
case IREP_TT_INT64:
#if defined(MRB_INT64)
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
#else
#if defined(MRB_64BIT)
if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
}
#endif
goto L_INT_OVERFLOW;
#endif
case IREP_TT_BIGINT:
goto L_INT_OVERFLOW;
#ifndef MRB_NO_FLOAT
case IREP_TT_FLOAT:
regs[a] = mrb_float_value(mrb, pool[b].u.f);
break;
#endif
default:
/* should not happen (tt:string) */
regs[a] = mrb_nil_value();
break;
}
NEXT;
}
CASE(OP_LOADI, BB) {
SET_FIXNUM_VALUE(regs[a], b);
NEXT;
}
CASE(OP_LOADINEG, BB) {
SET_FIXNUM_VALUE(regs[a], -b);
NEXT;
}
CASE(OP_LOADI__1,B) goto L_LOADI;
CASE(OP_LOADI_0,B) goto L_LOADI;
CASE(OP_LOADI_1,B) goto L_LOADI;
CASE(OP_LOADI_2,B) goto L_LOADI;
CASE(OP_LOADI_3,B) goto L_LOADI;
CASE(OP_LOADI_4,B) goto L_LOADI;
CASE(OP_LOADI_5,B) goto L_LOADI;
CASE(OP_LOADI_6,B) goto L_LOADI;
CASE(OP_LOADI_7, B) {
L_LOADI:
SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);
NEXT;
}
CASE(OP_LOADI16, BS) {
SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);
NEXT;
}
CASE(OP_LOADI32, BSS) {
SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));
NEXT;
}
CASE(OP_LOADSYM, BB) {
SET_SYM_VALUE(regs[a], syms[b]);
NEXT;
}
CASE(OP_LOADNIL, B) {
SET_NIL_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADSELF, B) {
regs[a] = regs[0];
NEXT;
}
CASE(OP_LOADT, B) {
SET_TRUE_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADF, B) {
SET_FALSE_VALUE(regs[a]);
NEXT;
}
CASE(OP_GETGV, BB) {
mrb_value val = mrb_gv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETGV, BB) {
mrb_gv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETSV, BB) {
mrb_value val = mrb_vm_special_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETSV, BB) {
mrb_vm_special_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIV, BB) {
regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);
NEXT;
}
CASE(OP_SETIV, BB) {
mrb_iv_set(mrb, regs[0], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETCV, BB) {
mrb_value val;
val = mrb_vm_cv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETCV, BB) {
mrb_vm_cv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIDX, B) {
mrb_value va = regs[a], vb = regs[a+1];
switch (mrb_type(va)) {
case MRB_TT_ARRAY:
if (!mrb_integer_p(vb)) goto getidx_fallback;
regs[a] = mrb_ary_entry(va, mrb_integer(vb));
break;
case MRB_TT_HASH:
regs[a] = mrb_hash_get(mrb, va, vb);
break;
case MRB_TT_STRING:
switch (mrb_type(vb)) {
case MRB_TT_INTEGER:
case MRB_TT_STRING:
case MRB_TT_RANGE:
regs[a] = mrb_str_aref(mrb, va, vb, mrb_undef_value());
break;
default:
goto getidx_fallback;
}
break;
default:
getidx_fallback:
mid = MRB_OPSYM(aref);
goto L_SEND_SYM;
}
NEXT;
}
CASE(OP_SETIDX, B) {
c = 2;
mid = MRB_OPSYM(aset);
SET_NIL_VALUE(regs[a+3]);
goto L_SENDB_SYM;
}
CASE(OP_GETCONST, BB) {
regs[a] = mrb_vm_const_get(mrb, syms[b]);
NEXT;
}
CASE(OP_SETCONST, BB) {
mrb_vm_const_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETMCNST, BB) {
regs[a] = mrb_const_get(mrb, regs[a], syms[b]);
NEXT;
}
CASE(OP_SETMCNST, BB) {
mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETUPVAR, BBB) {
mrb_value *regs_a = regs + a;
struct REnv *e = uvenv(mrb, c);
if (e && b < MRB_ENV_LEN(e)) {
*regs_a = e->stack[b];
}
else {
*regs_a = mrb_nil_value();
}
NEXT;
}
CASE(OP_SETUPVAR, BBB) {
struct REnv *e = uvenv(mrb, c);
if (e) {
mrb_value *regs_a = regs + a;
if (b < MRB_ENV_LEN(e)) {
e->stack[b] = *regs_a;
mrb_write_barrier(mrb, (struct RBasic*)e);
}
}
NEXT;
}
CASE(OP_JMP, S) {
pc += (int16_t)a;
JUMP;
}
CASE(OP_JMPIF, BS) {
if (mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNOT, BS) {
if (!mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNIL, BS) {
if (mrb_nil_p(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPUW, S) {
a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
struct RBreak *brk = (struct RBreak*)mrb->exc;
mrb_value target = mrb_break_value_get(brk);
mrb_assert(mrb_integer_p(target));
a = (uint32_t)mrb_integer(target);
mrb_assert(a >= 0 && a < irep->ilen);
}
CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE);
if (ch) {
/* avoiding a jump from a catch handler into the same handler */
if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a));
}
}
}
CHECKPOINT_END(RBREAK_TAG_JUMP);
mrb->exc = NULL; /* clear break object */
pc = irep->iseq + a;
JUMP;
}
CASE(OP_EXCEPT, B) {
mrb_value exc;
if (mrb->exc == NULL) {
exc = mrb_nil_value();
}
else {
switch (mrb->exc->tt) {
case MRB_TT_BREAK:
case MRB_TT_EXCEPTION:
exc = mrb_obj_value(mrb->exc);
break;
default:
mrb_assert(!"bad mrb_type");
exc = mrb_nil_value();
break;
}
mrb->exc = NULL;
}
regs[a] = exc;
NEXT;
}
CASE(OP_RESCUE, BB) {
mrb_value exc = regs[a]; /* exc on stack */
mrb_value e = regs[b];
struct RClass *ec;
switch (mrb_type(e)) {
case MRB_TT_CLASS:
case MRB_TT_MODULE:
break;
default:
{
mrb_value exc;
exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"class or module required for rescue clause");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
}
ec = mrb_class_ptr(e);
regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));
NEXT;
}
CASE(OP_RAISEIF, B) {
mrb_value exc = regs[a];
if (mrb_break_p(exc)) {
mrb->exc = mrb_obj_ptr(exc);
goto L_BREAK;
}
mrb_exc_set(mrb, exc);
if (mrb->exc) {
goto L_RAISE;
}
NEXT;
}
CASE(OP_SSEND, BBB) {
regs[a] = regs[0];
insn = OP_SEND;
}
goto L_SENDB;
CASE(OP_SSENDB, BBB) {
regs[a] = regs[0];
}
goto L_SENDB;
CASE(OP_SEND, BBB)
goto L_SENDB;
L_SEND_SYM:
c = 1;
/* push nil after arguments */
SET_NIL_VALUE(regs[a+2]);
goto L_SENDB_SYM;
CASE(OP_SENDB, BBB)
L_SENDB:
mid = syms[b];
L_SENDB_SYM:
{
mrb_callinfo *ci = mrb->c->ci;
mrb_method_t m;
struct RClass *cls;
mrb_value recv, blk;
ARGUMENT_NORMALIZE(a, &c, insn);
recv = regs[a];
cls = mrb_class(mrb, recv);
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, c);
if (MRB_METHOD_CFUNC_P(m)) {
if (MRB_METHOD_PROC_P(m)) {
struct RProc *p = MRB_METHOD_PROC(m);
mrb_vm_ci_proc_set(ci, p);
recv = p->body.func(mrb, recv);
}
else {
if (MRB_METHOD_NOARG_P(m)) {
check_method_noarg(mrb, ci);
}
recv = MRB_METHOD_FUNC(m)(mrb, recv);
}
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return recv;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
ci->stack[0] = recv;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
}
JUMP;
CASE(OP_CALL, Z) {
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv = ci->stack[0];
struct RProc *m = mrb_proc_ptr(recv);
/* replace callinfo */
ci->u.target_class = MRB_PROC_TARGET_CLASS(m);
mrb_vm_ci_proc_set(ci, m);
if (MRB_PROC_ENV_P(m)) {
ci->mid = MRB_PROC_ENV(m)->mid;
}
/* prepare stack */
if (MRB_PROC_CFUNC_P(m)) {
recv = MRB_PROC_CFUNC(m)(mrb, recv);
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
ci[1].stack[0] = recv;
irep = mrb->c->ci->proc->body.irep;
}
else {
/* setup environment for calling method */
proc = m;
irep = m->body.irep;
if (!irep) {
mrb->c->ci->stack[0] = mrb_nil_value();
a = 0;
c = OP_R_NORMAL;
goto L_OP_RETURN_BODY;
}
mrb_int nargs = mrb_ci_bidx(ci)+1;
if (nargs < irep->nregs) {
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+nargs, irep->nregs-nargs);
}
if (MRB_PROC_ENV_P(m)) {
regs[0] = MRB_PROC_ENV(m)->stack[0];
}
pc = irep->iseq;
}
pool = irep->pool;
syms = irep->syms;
JUMP;
}
CASE(OP_SUPER, BB) {
mrb_method_t m;
struct RClass *cls;
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv, blk;
const struct RProc *p = ci->proc;
mrb_sym mid = ci->mid;
struct RClass* target_class = MRB_PROC_TARGET_CLASS(p);
if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */
mid = p->e.env->mid; /* restore old mid */
}
if (mid == 0 || !target_class) {
mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (target_class->flags & MRB_FL_CLASS_IS_PREPENDED) {
target_class = mrb_vm_ci_target_class(ci);
}
else if (target_class->tt == MRB_TT_MODULE) {
target_class = mrb_vm_ci_target_class(ci);
if (target_class->tt != MRB_TT_ICLASS) {
goto super_typeerror;
}
}
recv = regs[0];
if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
super_typeerror: ;
mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"self has wrong type to call super in this context");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
ARGUMENT_NORMALIZE(a, &b, OP_SUPER);
cls = target_class->super;
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &b, blk, 1);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, b);
/* prepare stack */
ci->stack[0] = recv;
if (MRB_METHOD_CFUNC_P(m)) {
mrb_value v;
if (MRB_METHOD_PROC_P(m)) {
mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m));
}
v = MRB_METHOD_CFUNC(m)(mrb, recv);
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
mrb_assert(!mrb_break_p(v));
if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return v;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
mrb->c->ci->stack[0] = v;
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
JUMP;
}
CASE(OP_ARGARY, BS) {
mrb_int m1 = (b>>11)&0x3f;
mrb_int r = (b>>10)&0x1;
mrb_int m2 = (b>>5)&0x1f;
mrb_int kd = (b>>4)&0x1;
mrb_int lv = (b>>0)&0xf;
mrb_value *stack;
if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) {
mrb_value exc;
L_NOSUPER:
exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e) goto L_NOSUPER;
if (MRB_ENV_LEN(e) <= m1+r+m2+1)
goto L_NOSUPER;
stack = e->stack + 1;
}
if (r == 0) {
regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
}
else {
mrb_value *pp = NULL;
struct RArray *rest;
mrb_int len = 0;
if (mrb_array_p(stack[m1])) {
struct RArray *ary = mrb_ary_ptr(stack[m1]);
pp = ARY_PTR(ary);
len = ARY_LEN(ary);
}
regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
rest = mrb_ary_ptr(regs[a]);
if (m1 > 0) {
stack_copy(ARY_PTR(rest), stack, m1);
}
if (len > 0) {
stack_copy(ARY_PTR(rest)+m1, pp, len);
}
if (m2 > 0) {
stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
}
ARY_SET_LEN(rest, m1+len+m2);
}
if (kd) {
regs[a+1] = stack[m1+r+m2];
regs[a+2] = stack[m1+r+m2+1];
}
else {
regs[a+1] = stack[m1+r+m2];
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ENTER, W) {
mrb_int m1 = MRB_ASPEC_REQ(a);
mrb_int o = MRB_ASPEC_OPT(a);
mrb_int r = MRB_ASPEC_REST(a);
mrb_int m2 = MRB_ASPEC_POST(a);
mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
/* unused
int b = MRB_ASPEC_BLOCK(a);
*/
mrb_int const len = m1 + o + r + m2;
mrb_callinfo *ci = mrb->c->ci;
mrb_int argc = ci->n;
mrb_value *argv = regs+1;
mrb_value * const argv0 = argv;
mrb_int const kw_pos = len + kd; /* where kwhash should be */
mrb_int const blk_pos = kw_pos + 1; /* where block should be */
mrb_value blk = regs[mrb_ci_bidx(ci)];
mrb_value kdict = mrb_nil_value();
/* keyword arguments */
if (ci->nk > 0) {
mrb_int kidx = mrb_ci_kidx(ci);
kdict = regs[kidx];
if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) {
kdict = mrb_nil_value();
ci->nk = 0;
}
}
if (!kd && !mrb_nil_p(kdict)) {
if (argc < 14) {
ci->n++;
argc++; /* include kdict in normal arguments */
}
else if (argc == 14) {
/* pack arguments and kdict */
regs[1] = mrb_ary_new_from_values(mrb, argc+1, ®s[1]);
argc = ci->n = 15;
}
else {/* argc == 15 */
/* push kdict to packed arguments */
mrb_ary_push(mrb, regs[1], regs[2]);
}
ci->nk = 0;
}
if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) {
kdict = mrb_hash_dup(mrb, kdict);
}
/* arguments is passed with Array */
if (argc == 15) {
struct RArray *ary = mrb_ary_ptr(regs[1]);
argv = ARY_PTR(ary);
argc = (int)ARY_LEN(ary);
mrb_gc_protect(mrb, regs[1]);
}
/* strict argument check */
if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
if (argc < m1 + m2 || (r == 0 && argc > len)) {
argnum_error(mrb, m1+m2);
goto L_RAISE;
}
}
/* extract first argument array to arguments */
else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
mrb_gc_protect(mrb, argv[0]);
argc = (int)RARRAY_LEN(argv[0]);
argv = RARRAY_PTR(argv[0]);
}
/* rest arguments */
mrb_value rest = mrb_nil_value();
if (argc < len) {
mrb_int mlen = m2;
if (argc < m1+m2) {
mlen = m1 < argc ? argc - m1 : 0;
}
/* copy mandatory and optional arguments */
if (argv0 != argv && argv) {
value_move(®s[1], argv, argc-mlen); /* m1 + o */
}
if (argc < m1) {
stack_clear(®s[argc+1], m1-argc);
}
/* copy post mandatory arguments */
if (mlen) {
value_move(®s[len-m2+1], &argv[argc-mlen], mlen);
}
if (mlen < m2) {
stack_clear(®s[len-m2+mlen+1], m2-mlen);
}
/* initialize rest arguments with empty Array */
if (r) {
rest = mrb_ary_new_capa(mrb, 0);
regs[m1+o+1] = rest;
}
/* skip initializer of passed arguments */
if (o > 0 && argc > m1+m2)
pc += (argc - m1 - m2)*3;
}
else {
mrb_int rnum = 0;
if (argv0 != argv) {
value_move(®s[1], argv, m1+o);
}
if (r) {
rnum = argc-m1-o-m2;
rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
regs[m1+o+1] = rest;
}
if (m2 > 0 && argc-m2 > m1) {
value_move(®s[m1+o+r+1], &argv[m1+o+rnum], m2);
}
pc += o*3;
}
/* need to be update blk first to protect blk from GC */
regs[blk_pos] = blk; /* move block */
if (kd) {
if (mrb_nil_p(kdict))
kdict = mrb_hash_new_capa(mrb, 0);
regs[kw_pos] = kdict; /* set kwhash */
}
/* format arguments for generated code */
mrb->c->ci->n = len;
/* clear local (but non-argument) variables */
if (irep->nlocals-blk_pos-1 > 0) {
stack_clear(®s[blk_pos+1], irep->nlocals-blk_pos-1);
}
JUMP;
}
CASE(OP_KARG, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
mrb_value str = mrb_format(mrb, "missing keyword: %v", k);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
regs[a] = mrb_hash_get(mrb, kdict, k);
mrb_hash_delete_key(mrb, kdict, k);
NEXT;
}
CASE(OP_KEY_P, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
mrb_bool key_p = FALSE;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
key_p = mrb_hash_key_p(mrb, kdict, k);
}
regs[a] = mrb_bool_value(key_p);
NEXT;
}
CASE(OP_KEYEND, Z) {
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
mrb_value keys = mrb_hash_keys(mrb, kdict);
mrb_value key1 = RARRAY_PTR(keys)[0];
mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
NEXT;
}
CASE(OP_BREAK, B) {
c = OP_R_BREAK;
goto L_RETURN;
}
CASE(OP_RETURN_BLK, B) {
c = OP_R_RETURN;
goto L_RETURN;
}
CASE(OP_RETURN, B)
c = OP_R_NORMAL;
L_RETURN:
{
mrb_callinfo *ci;
ci = mrb->c->ci;
if (ci->mid) {
mrb_value blk = regs[mrb_ci_bidx(ci)];
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (!MRB_PROC_STRICT_P(p) &&
ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
}
if (mrb->exc) {
L_RAISE:
ci = mrb->c->ci;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) goto L_FTOP;
goto L_CATCH;
}
while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) {
ci = cipop(mrb);
if (ci[1].cci == CINFO_SKIP && prev_jmp) {
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
pc = ci[0].pc;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) {
L_FTOP: /* fiber top */
if (mrb->c == mrb->root_c) {
mrb->c->ci->stack = mrb->c->stbase;
goto L_STOP;
}
else {
struct mrb_context *c = mrb->c;
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
c->prev = NULL;
goto L_RAISE;
}
}
break;
}
}
L_CATCH:
if (ch == NULL) goto L_STOP;
if (FALSE) {
L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
ci = mrb->c->ci;
}
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
}
else {
mrb_int acc;
mrb_value v;
ci = mrb->c->ci;
v = regs[a];
mrb_gc_protect(mrb, v);
switch (c) {
case OP_R_RETURN:
/* Fall through to OP_R_NORMAL otherwise */
if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) {
const struct RProc *dst;
mrb_callinfo *cibase;
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
if (MRB_PROC_ENV_P(dst)) {
struct REnv *e = MRB_PROC_ENV(dst);
if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) {
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
}
/* check jump destination */
while (cibase <= ci && ci->proc != dst) {
if (ci->cci > CINFO_NONE) { /* jump cross C boundary */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci--;
}
if (ci <= cibase) { /* no jump destination */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci = mrb->c->ci;
while (cibase <= ci && ci->proc != dst) {
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) {
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK);
ci = cipop(mrb);
pc = ci->pc;
}
proc = ci->proc;
mrb->exc = NULL; /* clear break object */
break;
}
/* fallthrough */
case OP_R_NORMAL:
NORMAL_RETURN:
if (ci == mrb->c->cibase) {
struct mrb_context *c;
c = mrb->c;
if (!c->prev) { /* toplevel return */
regs[irep->nlocals] = v;
goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP);
}
if (!c->vmexec && c->prev->ci == c->prev->cibase) {
mrb_value exc = mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) {
c = mrb->c;
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL);
/* automatic yield at the end */
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
mrb->c->status = MRB_FIBER_RUNNING;
c->prev = NULL;
if (c->vmexec) {
mrb_gc_arena_restore(mrb, ai);
c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
ci = mrb->c->ci;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN);
mrb->exc = NULL; /* clear break object */
break;
case OP_R_BREAK:
if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
if (MRB_PROC_ORPHAN_P(proc)) {
mrb_value exc;
L_BREAK_ERROR:
exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR,
"break from proc-closure");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) {
goto L_BREAK_ERROR;
}
else {
struct REnv *e = MRB_PROC_ENV(proc);
if (e->cxt != mrb->c) {
goto L_BREAK_ERROR;
}
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK);
/* break from fiber block */
if (ci == mrb->c->cibase && ci->pc) {
struct mrb_context *c = mrb->c;
mrb->c = c->prev;
c->prev = NULL;
ci = mrb->c->ci;
}
if (ci->cci > CINFO_NONE) {
ci = cipop(mrb);
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v);
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
if (FALSE) {
struct RBreak *brk;
L_BREAK:
brk = (struct RBreak*)mrb->exc;
proc = mrb_break_proc_get(brk);
v = mrb_break_value_get(brk);
ci = mrb->c->ci;
switch (mrb_break_tag_get(brk)) {
#define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
#undef DISPATCH_CHECKPOINTS
default:
mrb_assert(!"wrong break tag");
}
}
while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) {
if (ci[-1].cci == CINFO_SKIP) {
goto L_BREAK_ERROR;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER);
ci = cipop(mrb);
pc = ci->pc;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET);
if (ci == mrb->c->cibase) {
goto L_BREAK_ERROR;
}
mrb->exc = NULL; /* clear break object */
break;
default:
/* cannot happen */
break;
}
mrb_assert(ci == mrb->c->ci);
mrb_assert(mrb->exc == NULL);
if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) {
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
acc = ci->cci;
ci = cipop(mrb);
if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
mrb_gc_arena_restore(mrb, ai);
mrb->jmp = prev_jmp;
return v;
}
pc = ci->pc;
DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
ci[1].stack[0] = v;
mrb_gc_arena_restore(mrb, ai);
}
JUMP;
}
CASE(OP_BLKPUSH, BS) {
int m1 = (b>>11)&0x3f;
int r = (b>>10)&0x1;
int m2 = (b>>5)&0x1f;
int kd = (b>>4)&0x1;
int lv = (b>>0)&0xf;
mrb_value *stack;
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
MRB_ENV_LEN(e) <= m1+r+m2+1) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
stack = e->stack + 1;
}
if (mrb_nil_p(stack[m1+r+m2+kd])) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
regs[a] = stack[m1+r+m2+kd];
NEXT;
}
L_INT_OVERFLOW:
{
mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow");
mrb_exc_set(mrb, exc);
}
goto L_RAISE;
#define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
#define OP_MATH(op_name) \
/* need to check if op is overridden */ \
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
OP_MATH_CASE_INTEGER(op_name); \
OP_MATH_CASE_FLOAT(op_name, integer, float); \
OP_MATH_CASE_FLOAT(op_name, float, integer); \
OP_MATH_CASE_FLOAT(op_name, float, float); \
OP_MATH_CASE_STRING_##op_name(); \
default: \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATH_CASE_INTEGER(op_name) \
case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
{ \
mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
#else
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
{ \
mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
#define OP_MATH_OVERFLOW_INT() goto L_INT_OVERFLOW
#define OP_MATH_CASE_STRING_add() \
case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
mrb_gc_arena_restore(mrb, ai); \
break
#define OP_MATH_CASE_STRING_sub() (void)0
#define OP_MATH_CASE_STRING_mul() (void)0
#define OP_MATH_OP_add +
#define OP_MATH_OP_sub -
#define OP_MATH_OP_mul *
#define OP_MATH_TT_integer MRB_TT_INTEGER
#define OP_MATH_TT_float MRB_TT_FLOAT
CASE(OP_ADD, B) {
OP_MATH(add);
}
CASE(OP_SUB, B) {
OP_MATH(sub);
}
CASE(OP_MUL, B) {
OP_MATH(mul);
}
CASE(OP_DIV, B) {
#ifndef MRB_NO_FLOAT
mrb_float x, y, f;
#endif
/* need to check if op is overridden */
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
{
mrb_int x = mrb_integer(regs[a]);
mrb_int y = mrb_integer(regs[a+1]);
mrb_int div = mrb_div_int(mrb, x, y);
SET_INT_VALUE(mrb, regs[a], div);
}
NEXT;
#ifndef MRB_NO_FLOAT
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
x = (mrb_float)mrb_integer(regs[a]);
y = mrb_float(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
x = mrb_float(regs[a]);
y = (mrb_float)mrb_integer(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
x = mrb_float(regs[a]);
y = mrb_float(regs[a+1]);
break;
#endif
default:
mid = MRB_OPSYM(div);
goto L_SEND_SYM;
}
#ifndef MRB_NO_FLOAT
f = mrb_div_float(x, y);
SET_FLOAT_VALUE(mrb, regs[a], f);
#endif
NEXT;
}
#define OP_MATHI(op_name) \
/* need to check if op is overridden */ \
switch (mrb_type(regs[a])) { \
OP_MATHI_CASE_INTEGER(op_name); \
OP_MATHI_CASE_FLOAT(op_name); \
default: \
SET_INT_VALUE(mrb,regs[a+1], b); \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATHI_CASE_INTEGER(op_name) \
case MRB_TT_INTEGER: \
{ \
mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATHI_CASE_FLOAT(op_name) (void)0
#else
#define OP_MATHI_CASE_FLOAT(op_name) \
case MRB_TT_FLOAT: \
{ \
mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
CASE(OP_ADDI, BB) {
OP_MATHI(add);
}
CASE(OP_SUBI, BB) {
OP_MATHI(sub);
}
#define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
#ifdef MRB_NO_FLOAT
#define OP_CMP(op,sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#else
#define OP_CMP(op, sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_float,mrb_float);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#endif
CASE(OP_EQ, B) {
if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
SET_TRUE_VALUE(regs[a]);
}
else {
OP_CMP(==,eq);
}
NEXT;
}
CASE(OP_LT, B) {
OP_CMP(<,lt);
NEXT;
}
CASE(OP_LE, B) {
OP_CMP(<=,le);
NEXT;
}
CASE(OP_GT, B) {
OP_CMP(>,gt);
NEXT;
}
CASE(OP_GE, B) {
OP_CMP(>=,ge);
NEXT;
}
CASE(OP_ARRAY, BB) {
regs[a] = mrb_ary_new_from_values(mrb, b, ®s[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARRAY2, BBB) {
regs[a] = mrb_ary_new_from_values(mrb, c, ®s[b]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYCAT, B) {
mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
if (mrb_nil_p(regs[a])) {
regs[a] = splat;
}
else {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_concat(mrb, regs[a], splat);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYPUSH, BB) {
mrb_assert(mrb_array_p(regs[a]));
for (mrb_int i=0; i<b; i++) {
mrb_ary_push(mrb, regs[a], regs[a+i+1]);
}
NEXT;
}
CASE(OP_ARYDUP, B) {
mrb_value ary = regs[a];
if (mrb_array_p(ary)) {
ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary));
}
else {
ary = mrb_ary_new_from_values(mrb, 1, &ary);
}
regs[a] = ary;
NEXT;
}
CASE(OP_AREF, BBB) {
mrb_value v = regs[b];
if (!mrb_array_p(v)) {
if (c == 0) {
regs[a] = v;
}
else {
SET_NIL_VALUE(regs[a]);
}
}
else {
v = mrb_ary_ref(mrb, v, c);
regs[a] = v;
}
NEXT;
}
CASE(OP_ASET, BBB) {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_set(mrb, regs[b], c, regs[a]);
NEXT;
}
CASE(OP_APOST, BBB) {
mrb_value v = regs[a];
int pre = b;
int post = c;
struct RArray *ary;
int len, idx;
if (!mrb_array_p(v)) {
v = mrb_ary_new_from_values(mrb, 1, ®s[a]);
}
ary = mrb_ary_ptr(v);
len = (int)ARY_LEN(ary);
if (len > pre + post) {
v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
regs[a++] = v;
while (post--) {
regs[a++] = ARY_PTR(ary)[len-post-1];
}
}
else {
v = mrb_ary_new_capa(mrb, 0);
regs[a++] = v;
for (idx=0; idx+pre<len; idx++) {
regs[a+idx] = ARY_PTR(ary)[pre+idx];
}
while (idx < post) {
SET_NIL_VALUE(regs[a+idx]);
idx++;
}
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_INTERN, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_sym sym = mrb_intern_str(mrb, regs[a]);
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_SYMBOL, BB) {
size_t len;
mrb_sym sym;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
sym = mrb_intern_static(mrb, pool[b].u.str, len);
}
else {
sym = mrb_intern(mrb, pool[b].u.str, len);
}
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_STRING, BB) {
mrb_int len;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
}
else {
regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_STRCAT, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_str_concat(mrb, regs[a], regs[a+1]);
NEXT;
}
CASE(OP_HASH, BB) {
mrb_value hash = mrb_hash_new_capa(mrb, b);
int i;
int lim = a+b*2;
for (i=a; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
regs[a] = hash;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHADD, BB) {
mrb_value hash;
int i;
int lim = a+b*2+1;
hash = regs[a];
mrb_ensure_hash_type(mrb, hash);
for (i=a+1; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHCAT, B) {
mrb_value hash = regs[a];
mrb_assert(mrb_hash_p(hash));
mrb_hash_merge(mrb, hash, regs[a+1]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_LAMBDA, BB)
c = OP_L_LAMBDA;
L_MAKE_LAMBDA:
{
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
if (c & OP_L_CAPTURE) {
p = mrb_closure_new(mrb, nirep);
}
else {
p = mrb_proc_new(mrb, nirep);
p->flags |= MRB_PROC_SCOPE;
}
if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
regs[a] = mrb_obj_value(p);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_BLOCK, BB) {
c = OP_L_BLOCK;
goto L_MAKE_LAMBDA;
}
CASE(OP_METHOD, BB) {
c = OP_L_METHOD;
goto L_MAKE_LAMBDA;
}
CASE(OP_RANGE_INC, B) {
regs[a] = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_RANGE_EXC, B) {
regs[a] = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_OCLASS, B) {
regs[a] = mrb_obj_value(mrb->object_class);
NEXT;
}
CASE(OP_CLASS, BB) {
struct RClass *c = 0, *baseclass;
mrb_value base, super;
mrb_sym id = syms[b];
base = regs[a];
super = regs[a+1];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
c = mrb_vm_define_class(mrb, base, super, id);
regs[a] = mrb_obj_value(c);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_MODULE, BB) {
struct RClass *cls = 0, *baseclass;
mrb_value base;
mrb_sym id = syms[b];
base = regs[a];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
cls = mrb_vm_define_module(mrb, base, id);
regs[a] = mrb_obj_value(cls);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_EXEC, BB)
{
mrb_value recv = regs[a];
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
/* prepare closure */
p = mrb_proc_new(mrb, nirep);
p->c = NULL;
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
p->flags |= MRB_PROC_SCOPE;
/* prepare call stack */
cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0);
irep = p->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+1, irep->nregs-1);
pc = irep->iseq;
JUMP;
}
CASE(OP_DEF, BB) {
struct RClass *target = mrb_class_ptr(regs[a]);
struct RProc *p = mrb_proc_ptr(regs[a+1]);
mrb_method_t m;
mrb_sym mid = syms[b];
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, target, mid, m);
mrb_method_added(mrb, target, mid);
mrb_gc_arena_restore(mrb, ai);
regs[a] = mrb_symbol_value(mid);
NEXT;
}
CASE(OP_SCLASS, B) {
regs[a] = mrb_singleton_class(mrb, regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_TCLASS, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
regs[a] = mrb_obj_value(target);
NEXT;
}
CASE(OP_ALIAS, BB) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_alias_method(mrb, target, syms[a], syms[b]);
mrb_method_added(mrb, target, syms[a]);
NEXT;
}
CASE(OP_UNDEF, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_undef_method_id(mrb, target, syms[a]);
NEXT;
}
CASE(OP_DEBUG, Z) {
FETCH_BBB();
#ifdef MRB_USE_DEBUG_HOOK
mrb->debug_op_hook(mrb, irep, pc, regs);
#else
#ifndef MRB_NO_STDIO
printf("OP_DEBUG %d %d %d\n", a, b, c);
#else
abort();
#endif
#endif
NEXT;
}
CASE(OP_ERR, B) {
size_t len = pool[a].tt >> 2;
mrb_value exc;
mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CASE(OP_EXT1, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT2, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT3, Z) {
uint8_t insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_STOP, Z) {
/* stop VM */
CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value());
}
CHECKPOINT_END(RBREAK_TAG_STOP);
L_STOP:
mrb->jmp = prev_jmp;
if (mrb->exc) {
mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION);
return mrb_obj_value(mrb->exc);
}
return regs[irep->nlocals];
}
}
END_DISPATCH;
#undef regs
}
MRB_CATCH(&c_jmp) {
mrb_callinfo *ci = mrb->c->ci;
while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
ci = cipop(mrb);
}
exc_catched = TRUE;
pc = ci->pc;
goto RETRY_TRY_BLOCK;
}
MRB_END_EXC(&c_jmp);
}
| null | null | 196,805
|
44647007425815231324711190586303250954
| 1,821
|
vm.c: the stack may be reallocated during function calls.
Probably due to recursive VM calls via `mrb_funcall()`.
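
A minimal sketch of the hazard the message describes (the vm_t type is illustrative, not mruby's structures): any call that can extend the VM stack may move it with realloc, so register pointers must be re-derived from the context after the call instead of being cached across it.

#include <stdlib.h>

typedef struct {
    double *stack;
    size_t  size;
} vm_t;

/* Stand-in for mrb_stack_extend()/a nested mrb_funcall(): may move
 * the whole stack. */
static int may_extend(vm_t *vm, size_t need)
{
    if (need <= vm->size) return 0;
    double *p = realloc(vm->stack, need * sizeof(double));
    if (p == NULL) return -1;
    vm->stack = p;
    vm->size = need;
    return 0;
}

static int op_move(vm_t *vm, size_t a, size_t b)
{
    /* BAD:  double *regs = vm->stack; ... may_extend() ... regs[a] = regs[b]; */
    if (may_extend(vm, (a > b ? a : b) + 1) != 0) return -1;
    vm->stack[a] = vm->stack[b];   /* GOOD: fetch through vm after the call */
    return 0;
}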
|
other
|
njs
|
81af26364c21c196dd21fb5e14c7fa9ce7debd17
| 1
|
njs_array_convert_to_slow_array(njs_vm_t *vm, njs_array_t *array)
{
    uint32_t           i, length;
    njs_value_t        index, value;
    njs_object_prop_t  *prop;

    njs_set_array(&value, array);
    array->object.fast_array = 0;

    length = array->length;

    for (i = 0; i < length; i++) {
        if (njs_is_valid(&array->start[i])) {
            njs_uint32_to_string(&index, i);
            prop = njs_object_property_add(vm, &value, &index, 0);
            if (njs_slow_path(prop == NULL)) {
                return NJS_ERROR;
            }

            prop->value = array->start[i];
        }
    }

    /* GC: release value. */
    njs_mp_free(vm->mem_pool, array->start);
    array->start = NULL;

    return NJS_OK;
}
| null | null | 196,817
|
15778839275107817850632511934182345441
| 30
|
Fixed Object.defineProperty() when a recursive descriptor is provided.
This closes issue #481 on GitHub.
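
A sketch of the re-validation idea behind such a fix (all names hypothetical): evaluating a field of the descriptor can run user code that mutates the target, so the fast-array assumption must be re-checked after every such call rather than trusted across it.

typedef struct {
    int      fast_array;
    unsigned length;
} obj_t;

/* Stand-in for evaluating a descriptor field: a getter can convert
 * or resize the target as a side effect. */
static int read_descriptor_field(obj_t *target)
{
    target->fast_array = 0;   /* simulate a mutating getter */
    return 0;
}

static int define_property(obj_t *target)
{
    int was_fast = target->fast_array;

    if (read_descriptor_field(target) != 0) {
        return -1;
    }

    if (was_fast && !target->fast_array) {
        return 1;   /* fell off the fast path; redo via the slow path */
    }
    return 0;
}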
|
other
|
tinyproxy
|
3764b8551463b900b5b4e3ec0cd9bb9182191cb7
| 1
|
static struct request_s *process_request (struct conn_s *connptr,
orderedmap hashofheaders)
{
char *url;
struct request_s *request;
int ret, skip_trans;
size_t request_len;
skip_trans = 0;
/* NULL out all the fields so frees don't cause segfaults. */
request =
(struct request_s *) safecalloc (1, sizeof (struct request_s));
if (!request)
return NULL;
request_len = strlen (connptr->request_line) + 1;
request->method = (char *) safemalloc (request_len);
url = (char *) safemalloc (request_len);
request->protocol = (char *) safemalloc (request_len);
if (!request->method || !url || !request->protocol) {
goto fail;
}
ret = sscanf (connptr->request_line, "%[^ ] %[^ ] %[^ ]",
request->method, url, request->protocol);
if (ret == 2 && !strcasecmp (request->method, "GET")) {
request->protocol[0] = 0;
/* Indicate that this is a HTTP/0.9 GET request */
connptr->protocol.major = 0;
connptr->protocol.minor = 9;
} else if (ret == 3 && !strncasecmp (request->protocol, "HTTP/", 5)) {
/*
* Break apart the protocol and update the connection
* structure.
*/
ret = sscanf (request->protocol + 5, "%u.%u",
&connptr->protocol.major,
&connptr->protocol.minor);
/*
* If the conversion doesn't succeed, drop down below and
* send the error to the user.
*/
if (ret != 2)
goto BAD_REQUEST_ERROR;
} else {
BAD_REQUEST_ERROR:
log_message (LOG_ERR,
"process_request: Bad Request on file descriptor %d",
connptr->client_fd);
indicate_http_error (connptr, 400, "Bad Request",
"detail", "Request has an invalid format",
"url", url, NULL);
goto fail;
}
#ifdef REVERSE_SUPPORT
if (config->reversepath_list != NULL) {
/*
* Rewrite the URL based on the reverse path. After calling
* reverse_rewrite_url "url" can be freed since we either
* have the newly rewritten URL, or something failed and
* we'll be closing anyway.
*/
char *reverse_url;
int reverse_status;
reverse_url = reverse_rewrite_url (connptr, hashofheaders, url, &reverse_status);
if (reverse_url != NULL) {
if (reverse_status == 301) {
char buf[PATH_MAX];
snprintf (buf, sizeof buf, "Location: %s\r\n", reverse_url);
send_http_headers (connptr, 301, "Moved Permanently", buf);
goto fail;
}
safefree (url);
url = reverse_url;
skip_trans = 1;
} else if (config->reverseonly) {
log_message (LOG_ERR,
"Bad request, no mapping for '%s' found",
url);
indicate_http_error (connptr, 400, "Bad Request",
"detail", "No mapping found for "
"requested url", "url", url, NULL);
goto fail;
}
}
#endif
if (strncasecmp (url, "http://", 7) == 0
|| (UPSTREAM_CONFIGURED () && strncasecmp (url, "ftp://", 6) == 0))
{
char *skipped_type = strstr (url, "//") + 2;
if (extract_url (skipped_type, HTTP_PORT, request) < 0) {
indicate_http_error (connptr, 400, "Bad Request",
"detail", "Could not parse URL",
"url", url, NULL);
goto fail;
}
} else if (strcmp (request->method, "CONNECT") == 0) {
if (extract_url (url, HTTP_PORT_SSL, request) < 0) {
indicate_http_error (connptr, 400, "Bad Request",
"detail", "Could not parse URL",
"url", url, NULL);
goto fail;
}
/* Verify that the port in the CONNECT method is allowed */
if (!check_allowed_connect_ports (request->port,
config->connect_ports))
{
indicate_http_error (connptr, 403, "Access violation",
"detail",
"The CONNECT method not allowed "
"with the port you tried to use.",
"url", url, NULL);
log_message (LOG_INFO,
"Refused CONNECT method on port %d",
request->port);
goto fail;
}
connptr->connect_method = TRUE;
} else {
#ifdef TRANSPARENT_PROXY
if (!skip_trans) {
if (!do_transparent_proxy
(connptr, hashofheaders, request, config, &url))
goto fail;
} else
#endif
{
indicate_http_error (connptr, 501, "Not Implemented",
"detail",
"Unknown method or unsupported protocol.",
"url", url, NULL);
log_message (LOG_INFO, "Unknown method (%s) or protocol (%s)",
request->method, url);
goto fail;
}
}
#ifdef FILTER_ENABLE
/*
* Filter restricted domains/urls
*/
if (config->filter) {
int fu = config->filter_opts & FILTER_OPT_URL;
ret = filter_run (fu ? url : request->host);
if (ret) {
update_stats (STAT_DENIED);
log_message (LOG_NOTICE,
"Proxying refused on filtered %s \"%s\"",
fu ? "url" : "domain",
fu ? url : request->host);
indicate_http_error (connptr, 403, "Filtered",
"detail",
"The request you made has been filtered",
"url", url, NULL);
goto fail;
}
}
#endif
/*
* Check to see if they're requesting the stat host
*/
if (config->stathost && strcmp (config->stathost, request->host) == 0) {
log_message (LOG_NOTICE, "Request for the stathost.");
connptr->show_stats = TRUE;
goto fail;
}
safefree (url);
return request;
fail:
safefree (url);
free_request_struct (request);
return NULL;
}
| null | null | 196,819
|
125292646856234711705287388719971040122
| 193
|
Prevent junk from showing up in the error page for invalid requests.
Fixes #457.
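
The idea behind the fix, as a sketch (the sanitize helper is assumed, not tinyproxy's code): client-supplied text must be reduced to printable characters before it is reflected into an HTML error page.

#include <ctype.h>

static void sanitize(char *s)
{
    for (; *s != '\0'; s++) {
        if (!isprint((unsigned char)*s)) {
            *s = '_';   /* never echo raw control bytes back */
        }
    }
}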
|
other
|
tensorflow
|
9a133d73ae4b4664d22bd1aa6d654fec13c52ee1
| 1
|
void Compute(OpKernelContext* ctx) override {
  const Tensor& val = ctx->input(0);
  int64 id = ctx->session_state()->GetNewId();
  TensorStore::TensorAndKey tk{val, id, requested_device()};
  OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk));

  Tensor* handle = nullptr;
  OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle));
  if (ctx->expected_output_dtype(0) == DT_RESOURCE) {
    ResourceHandle resource_handle = MakeResourceHandle<Tensor>(
        ctx, SessionState::kTensorHandleResourceTypeName,
        tk.GetHandle(name()));
    resource_handle.set_maybe_type_name(
        SessionState::kTensorHandleResourceTypeName);
    handle->scalar<ResourceHandle>()() = resource_handle;
  } else {
    // Legacy behavior in V1.
    handle->flat<tstring>().setConstant(tk.GetHandle(name()));
  }
}
| null | null | 196,829
|
253301187505940915484989198031334916980
| 20
|
Prevent segfault in `GetSessionHandle{,V2}`.
In eager mode, session state is null.
PiperOrigin-RevId: 332548597
Change-Id: If094812c2e094044220b9ba28f7d7601be042f38
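
The guard the message implies, as a C-style sketch (hypothetical shape; the actual fix checks ctx->session_state() with OP_REQUIRES): fail with an error instead of dereferencing a null session state in eager mode.

#include <stddef.h>

struct session_state;   /* opaque here */

static int new_handle_id(struct session_state *ss, long *out_id)
{
    if (ss == NULL) {
        return -1;   /* eager mode: no session state; report an error */
    }
    *out_id = 1;     /* placeholder for the real GetNewId() */
    return 0;
}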
|
other
|
tensorflow
|
701cfaca222a82afbeeb17496bd718baa65a67d2
| 1
|
Status SparseCountSparseOutputShapeFn(InferenceContext *c) {
  auto rank = c->Dim(c->input(0), 1);
  auto nvals = c->UnknownDim();
  c->set_output(0, c->Matrix(nvals, rank));  // out.indices
  c->set_output(1, c->Vector(nvals));        // out.values
  c->set_output(2, c->Vector(rank));         // out.dense_shape
  return Status::OK();
}
| null | null | 196,834
|
157165965848088310301354415155097213287
| 8
|
Fix a heap out-of-bounds error in tf.raw_ops.SparseCountSparseOutput shape inference when it is called with invalid inputs, and add a test for it.
PiperOrigin-RevId: 405766415
Change-Id: I77d244ef35f351ef7b6f821efd959cac2c66db24
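
A sketch of the missing precondition (hypothetical helper): before taking Dim(input, 1), the shape function must verify that the indices input actually has rank 2; otherwise the dimension access walks out of bounds.

static int check_indices_is_matrix(int input_rank)
{
    if (input_rank != 2) {
        return -1;   /* invalid input; the real code returns InvalidArgument */
    }
    return 0;
}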
|
other
|
furnace
|
0eb02422d5161767e9983bdaa5c429762d3477ce
| 1
|
inline void FurnaceGUI::patternRow(int i, bool isPlaying, float lineHeight, int chans, int ord, const DivPattern** patCache) {
static char id[32];
bool selectedRow=(i>=sel1.y && i<=sel2.y);
ImGui::TableNextRow(0,lineHeight);
ImGui::TableNextColumn();
float cursorPosY=ImGui::GetCursorPos().y-ImGui::GetScrollY();
// check if the row is visible
if (cursorPosY<-lineHeight || cursorPosY>ImGui::GetWindowSize().y) {
return;
}
// check if we are in range
if (ord<0 || ord>=e->song.ordersLen) {
return;
}
if (i<0 || i>=e->song.patLen) {
return;
}
bool isPushing=false;
ImVec4 activeColor=uiColors[GUI_COLOR_PATTERN_ACTIVE];
ImVec4 inactiveColor=uiColors[GUI_COLOR_PATTERN_INACTIVE];
ImVec4 rowIndexColor=uiColors[GUI_COLOR_PATTERN_ROW_INDEX];
if (e->song.hilightB>0 && !(i%e->song.hilightB)) {
activeColor=uiColors[GUI_COLOR_PATTERN_ACTIVE_HI2];
inactiveColor=uiColors[GUI_COLOR_PATTERN_INACTIVE_HI2];
rowIndexColor=uiColors[GUI_COLOR_PATTERN_ROW_INDEX_HI2];
} else if (e->song.hilightA>0 && !(i%e->song.hilightA)) {
activeColor=uiColors[GUI_COLOR_PATTERN_ACTIVE_HI1];
inactiveColor=uiColors[GUI_COLOR_PATTERN_INACTIVE_HI1];
rowIndexColor=uiColors[GUI_COLOR_PATTERN_ROW_INDEX_HI1];
}
// check overflow highlight
if (settings.overflowHighlight) {
if (edit && cursor.y==i) {
ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_EDITING]));
} else if (isPlaying && oldRow==i) {
ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_PLAY_HEAD]));
} else if (e->song.hilightB>0 && !(i%e->song.hilightB)) {
ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_2]));
} else if (e->song.hilightA>0 && !(i%e->song.hilightA)) {
ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_1]));
}
} else {
isPushing=true;
if (edit && cursor.y==i) {
ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_EDITING]));
} else if (isPlaying && oldRow==i) {
ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_PLAY_HEAD]));
} else if (e->song.hilightB>0 && !(i%e->song.hilightB)) {
ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_2]));
} else if (e->song.hilightA>0 && !(i%e->song.hilightA)) {
ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_1]));
} else {
isPushing=false;
}
}
// row number
if (settings.patRowsBase==1) {
ImGui::TextColored(rowIndexColor," %.2X ",i);
} else {
ImGui::TextColored(rowIndexColor,"%3d ",i);
}
// for each column
for (int j=0; j<chans; j++) {
// check if channel is not hidden
if (!e->song.chanShow[j]) {
patChanX[j]=ImGui::GetCursorPosX();
continue;
}
int chanVolMax=e->getMaxVolumeChan(j);
if (chanVolMax<1) chanVolMax=1;
const DivPattern* pat=patCache[j];
ImGui::TableNextColumn();
patChanX[j]=ImGui::GetCursorPosX();
// selection highlight flags
int sel1XSum=sel1.xCoarse*32+sel1.xFine;
int sel2XSum=sel2.xCoarse*32+sel2.xFine;
int j32=j*32;
bool selectedNote=selectedRow && (j32>=sel1XSum && j32<=sel2XSum);
bool selectedIns=selectedRow && (j32+1>=sel1XSum && j32+1<=sel2XSum);
bool selectedVol=selectedRow && (j32+2>=sel1XSum && j32+2<=sel2XSum);
bool cursorNote=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==0);
bool cursorIns=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==1);
bool cursorVol=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==2);
// note
sprintf(id,"%s##PN_%d_%d",noteName(pat->data[i][0],pat->data[i][1]),i,j);
if (pat->data[i][0]==0 && pat->data[i][1]==0) {
ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor);
} else {
ImGui::PushStyleColor(ImGuiCol_Text,activeColor);
}
if (cursorNote) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,threeChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedNote) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedNote,ImGuiSelectableFlags_NoPadWithHalfSpacing,threeChars);
if (selectedNote) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,0,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,0,i);
}
ImGui::PopStyleColor();
// the following is only visible when the channel is not collapsed
if (!e->song.chanCollapse[j]) {
// instrument
if (pat->data[i][2]==-1) {
ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor);
sprintf(id,"..##PI_%d_%d",i,j);
} else {
if (pat->data[i][2]<0 || pat->data[i][2]>=e->song.insLen) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_INS_ERROR]);
} else {
DivInstrumentType t=e->song.ins[pat->data[i][2]]->type;
if (t!=DIV_INS_AMIGA && t!=e->getPreferInsType(j)) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_INS_WARN]);
} else {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_INS]);
}
}
sprintf(id,"%.2X##PI_%d_%d",pat->data[i][2],i,j);
}
ImGui::SameLine(0.0f,0.0f);
if (cursorIns) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedIns) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedIns,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
if (selectedIns) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,1,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,1,i);
}
ImGui::PopStyleColor();
// volume
if (pat->data[i][3]==-1) {
sprintf(id,"..##PV_%d_%d",i,j);
ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor);
} else {
int volColor=(pat->data[i][3]*127)/chanVolMax;
if (volColor>127) volColor=127;
if (volColor<0) volColor=0;
sprintf(id,"%.2X##PV_%d_%d",pat->data[i][3],i,j);
ImGui::PushStyleColor(ImGuiCol_Text,volColors[volColor]);
}
ImGui::SameLine(0.0f,0.0f);
if (cursorVol) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedVol) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedVol,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
if (selectedVol) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,2,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,2,i);
}
ImGui::PopStyleColor();
// effects
for (int k=0; k<e->song.pat[j].effectRows; k++) {
int index=4+(k<<1);
bool selectedEffect=selectedRow && (j32+index-1>=sel1XSum && j32+index-1<=sel2XSum);
bool selectedEffectVal=selectedRow && (j32+index>=sel1XSum && j32+index<=sel2XSum);
bool cursorEffect=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==index-1);
bool cursorEffectVal=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==index);
// effect
if (pat->data[i][index]==-1) {
sprintf(id,"..##PE%d_%d_%d",k,i,j);
ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor);
} else {
sprintf(id,"%.2X##PE%d_%d_%d",pat->data[i][index],k,i,j);
if (pat->data[i][index]<0x10) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[fxColors[pat->data[i][index]]]);
} else if (pat->data[i][index]<0x20) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SYS_PRIMARY]);
} else if (pat->data[i][index]<0x30) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SYS_SECONDARY]);
} else if (pat->data[i][index]<0x48) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SYS_PRIMARY]);
} else if (pat->data[i][index]<0x90) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_INVALID]);
} else if (pat->data[i][index]<0xa0) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_MISC]);
} else if (pat->data[i][index]<0xc0) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_INVALID]);
} else if (pat->data[i][index]<0xd0) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SPEED]);
} else if (pat->data[i][index]<0xe0) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_INVALID]);
} else {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[extFxColors[pat->data[i][index]-0xe0]]);
}
}
ImGui::SameLine(0.0f,0.0f);
if (cursorEffect) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedEffect) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedEffect,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
if (selectedEffect) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,index-1,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,index-1,i);
}
// effect value
if (pat->data[i][index+1]==-1) {
sprintf(id,"..##PF%d_%d_%d",k,i,j);
} else {
sprintf(id,"%.2X##PF%d_%d_%d",pat->data[i][index+1],k,i,j);
}
ImGui::SameLine(0.0f,0.0f);
if (cursorEffectVal) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedEffectVal) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedEffectVal,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
if (selectedEffectVal) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,index,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,index,i);
}
ImGui::PopStyleColor();
}
}
}
if (isPushing) {
ImGui::PopStyleColor();
}
ImGui::TableNextColumn();
patChanX[chans]=ImGui::GetCursorPosX();
}
| null | null | 196,841
|
136686899264564982027774933242322169613
| 275
|
fix possible pattern crash
issue #325
|
other
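The commit message is terse, but the guards near the top of the function (ord against ordersLen, i against patLen, both checked before patCache is ever indexed) are the shape such a crash fix usually takes: validate every index derived from mutable playback state before dereferencing pattern data. A generic sketch of the idiom, with hypothetical names:

// sketch: reject indices that went stale after a song edit or resize
if (ord < 0 || ord >= ordersLen) return;   // stale order
if (row < 0 || row >= patLen) return;      // stale row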
|
radare2
|
b4ca66f5d4363d68a6379e5706353b3bde5104a4
| 1
|
static bool consume_encoded_name_new(RBuffer *b, ut64 bound, ut32 *len_out, char **str_out) {
ut32 len;
char *orig = NULL;
if (!consume_str_new (b, bound, &len, &orig)) {
return false;
}
// room for even every character getting encoded
size_t maxsize = (len * 4) + 2;
char *sout = malloc (maxsize);
if (!sout) {
free (orig);
return false;
}
size_t i, oi = 0;
for (i = 0; i < len && oi + 4 < maxsize; i++) {
if (WASM_IS_OK (orig, i, len)) {
sout[oi++] = orig[i];
} else {
oi += snprintf (sout + oi, maxsize - oi, "_%02x_", orig[i]);
}
}
sout[oi++] = '\0';
free (orig);
	char *tmp = realloc (sout, oi);
	if (!tmp) {
		// tmp is NULL on realloc failure; only sout needs freeing
		free (sout);
		return false;
	}
*str_out = tmp;
if (len_out) {
*len_out = len;
}
return true;
}
| null | null | 196,844
|
294432143564110724105418779321318827106
| 38
|
Fix #20336 - wasm bin parser ##crash
|
other
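One subtle point in the encoder above: orig[i] is a plain char, so on platforms where char is signed a byte such as 0x8a is sign-extended before reaching the variadic snprintf, and "%02x" then prints eight hex digits instead of two. The maxsize - oi bound keeps this from overflowing, but it breaks the four-bytes-per-character budget behind maxsize = (len * 4) + 2 and can silently truncate names. A one-line hardening, assuming radare2's ut8 typedef:

oi += snprintf (sout + oi, maxsize - oi, "_%02x_", (ut8)orig[i]);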
|
tensorflow
|
1e206baedf8bef0334cca3eb92bab134ef525a28
| 1
|
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8) {
TF_LITE_ENSURE_OK(
context, EvalQuantized<kernel_type>(context, node, params, data, input1,
input2, output));
} else {
context->ReportError(
context,
"Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
| null | null | 196,846
|
29201391895766660247072243701398557871
| 30
|
Prevent a division by 0 in division ops.
PiperOrigin-RevId: 385223169
Change-Id: Ia4228960b5d2aa44480385f74bdd70d21a3613c3
|
other
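EvalDiv is reached for kTfLiteInt32 without any check that the divisor contains no zeros, which is the division the message refers to (float division by zero is well defined; integer division by zero is undefined behavior). A sketch of a pre-dispatch scan using the standard TFLite helpers GetTensorData and NumElements; the exact placement and macro used upstream are assumptions:

// sketch: for integer division, any zero element in the divisor is an error
if (output->type == kTfLiteInt32) {
  const int32_t* denom = GetTensorData<int32_t>(input2);
  const int64_t n = NumElements(input2);
  for (int64_t i = 0; i < n; ++i) {
    TF_LITE_ENSURE(context, denom[i] != 0);
  }
}

The quantized uint8 path would want the equivalent treatment, since its kernel also divides element-wise.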
|
gpac
|
b43f9d1a4b4e33d08edaef6d313e6ce4bdf554d3
| 1
|
static void naludmx_set_dolby_vision(GF_NALUDmxCtx *ctx)
{
u8 dv_cfg[24];
switch (ctx->dv_mode) {
case DVMODE_NONE:
case DVMODE_CLEAN:
return;
//auto mode, wait until we have RPU or EL to signal profile
case DVMODE_AUTO:
if (!ctx->nb_dv_rpu && !ctx->nb_dv_el) return;
break;
}
u32 dv_level = gf_dolby_vision_level(ctx->width, ctx->height, ctx->cur_fps.num, ctx->cur_fps.den, ctx->codecid);
if (ctx->dv_profile==8) {
if (!ctx->dv_compatid) {
GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("[%s] DV profile 8 used but dv_compatid not set, defaulting to bt709 (=2)\n", ctx->log_name));
ctx->dv_compatid = 2;
}
}
memset(dv_cfg, 0, sizeof(u8)*24);
GF_BitStream *bs = gf_bs_new(dv_cfg, 24, GF_BITSTREAM_WRITE);
gf_bs_write_u8(bs, 1); //version major
gf_bs_write_u8(bs, 0); //version minor
gf_bs_write_int(bs, ctx->dv_profile, 7);
gf_bs_write_int(bs, dv_level, 6);
gf_bs_write_int(bs, ctx->nb_dv_rpu ? 1 : 0, 1); //rpu present
gf_bs_write_int(bs, ctx->nb_dv_el ? 1 : 0, 1); //el present
gf_bs_write_int(bs, 1, 1); //bl_present_flag always true, we don't split streams
gf_bs_write_int(bs, ctx->dv_compatid, 4);
//the rest is zero-reserved
gf_bs_write_int(bs, 0, 28);
gf_bs_write_u32(bs, 0);
gf_bs_write_u32(bs, 0);
gf_bs_write_u32(bs, 0);
gf_bs_write_u32(bs, 0);
gf_bs_del(bs);
gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DOLBY_VISION, &PROP_DATA(dv_cfg, 24));
}
| null | null | 196,859
|
254137378903265944648888015086766583040
| 42
|
fixed #2223
|
other
|
gpac
|
a51f951b878c2b73c1d8e2f1518c7cdc5fb82c3f
| 1
|
GF_Err afra_box_read(GF_Box *s, GF_BitStream *bs)
{
unsigned int i;
GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s;
ISOM_DECREASE_SIZE(ptr, 9)
ptr->long_ids = gf_bs_read_int(bs, 1);
ptr->long_offsets = gf_bs_read_int(bs, 1);
ptr->global_entries = gf_bs_read_int(bs, 1);
ptr->reserved = gf_bs_read_int(bs, 5);
ptr->time_scale = gf_bs_read_u32(bs);
ptr->entry_count = gf_bs_read_u32(bs);
if (ptr->size / ( (ptr->long_offsets ? 16 : 12) ) < ptr->entry_count)
return GF_ISOM_INVALID_FILE;
for (i=0; i<ptr->entry_count; i++) {
GF_AfraEntry *ae = gf_malloc(sizeof(GF_AfraEntry));
if (!ae) return GF_OUT_OF_MEM;
ISOM_DECREASE_SIZE(ptr, 8)
ae->time = gf_bs_read_u64(bs);
if (ptr->long_offsets) {
ISOM_DECREASE_SIZE(ptr, 8)
ae->offset = gf_bs_read_u64(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
ae->offset = gf_bs_read_u32(bs);
}
gf_list_insert(ptr->local_access_entries, ae, i);
}
if (ptr->global_entries) {
ISOM_DECREASE_SIZE(ptr, 4)
ptr->global_entry_count = gf_bs_read_u32(bs);
for (i=0; i<ptr->global_entry_count; i++) {
GF_GlobalAfraEntry *ae = gf_malloc(sizeof(GF_GlobalAfraEntry));
if (!ae) return GF_OUT_OF_MEM;
ISOM_DECREASE_SIZE(ptr, 8)
ae->time = gf_bs_read_u64(bs);
if (ptr->long_ids) {
ISOM_DECREASE_SIZE(ptr, 8)
ae->segment = gf_bs_read_u32(bs);
ae->fragment = gf_bs_read_u32(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
ae->segment = gf_bs_read_u16(bs);
ae->fragment = gf_bs_read_u16(bs);
}
if (ptr->long_offsets) {
ISOM_DECREASE_SIZE(ptr, 16)
ae->afra_offset = gf_bs_read_u64(bs);
ae->offset_from_afra = gf_bs_read_u64(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 8)
ae->afra_offset = gf_bs_read_u32(bs);
ae->offset_from_afra = gf_bs_read_u32(bs);
}
gf_list_insert(ptr->global_access_entries, ae, i);
}
}
return GF_OK;
}
| null | null | 196,860
|
316197592189545869425403322683843314472
| 66
|
fixed #1782 (fuzz)
|
other
|
gpac
|
44fdc3d972c31c56efe73e1a3b63438d46087652
| 1
|
GF_Filter *gf_fs_load_filter(GF_FilterSession *fsess, const char *name, GF_Err *err_code)
{
const char *args=NULL;
const char *sep, *file_ext;
u32 i, len, count = gf_list_count(fsess->registry);
Bool quiet = (err_code && (*err_code == GF_EOS)) ? GF_TRUE : GF_FALSE;
assert(fsess);
assert(name);
if (err_code) *err_code = GF_OK;
sep = gf_fs_path_escape_colon(fsess, name);
if (sep) {
args = sep+1;
len = (u32) (sep - name);
} else len = (u32) strlen(name);
if (!len) {
if (!quiet) {
GF_LOG(GF_LOG_ERROR, GF_LOG_FILTER, ("Missing filter name in %s\n", name));
}
return NULL;
}
if (!strncmp(name, "enc", len)) {
return gf_fs_load_encoder(fsess, args);
}
/*regular filter loading*/
for (i=0;i<count;i++) {
const GF_FilterRegister *f_reg = gf_list_get(fsess->registry, i);
if ((strlen(f_reg->name)==len) && !strncmp(f_reg->name, name, len)) {
GF_Filter *filter;
GF_FilterArgType argtype = GF_FILTER_ARG_EXPLICIT;
if ((f_reg->flags & GF_FS_REG_REQUIRES_RESOLVER) && !fsess->max_resolve_chain_len) {
GF_LOG(GF_LOG_ERROR, GF_LOG_FILTER, ("Filter %s requires graph resolver but it is disabled\n", name));
if (err_code) *err_code = GF_BAD_PARAM;
return NULL;
}
if (f_reg->flags & GF_FS_REG_ACT_AS_SOURCE) argtype = GF_FILTER_ARG_EXPLICIT_SOURCE;
filter = gf_filter_new(fsess, f_reg, args, NULL, argtype, err_code, NULL, GF_FALSE);
if (!filter) return NULL;
if (!filter->num_output_pids) {
const char *src_url = strstr(name, "src");
if (src_url && (src_url[3]==fsess->sep_name))
gf_filter_post_process_task(filter);
}
return filter;
}
}
/*check JS file*/
file_ext = gf_file_ext_start(name);
if (file_ext && (file_ext > sep) )
file_ext = NULL;
if (!file_ext || strstr(name, ".js") || strstr(name, ".jsf") || strstr(name, ".mjs") ) {
Bool file_exists = GF_FALSE;
char szName[10+GF_MAX_PATH];
char szPath[10+GF_MAX_PATH];
if (len>GF_MAX_PATH)
return NULL;
strncpy(szPath, name, len);
szPath[len]=0;
GF_LOG(GF_LOG_DEBUG, GF_LOG_FILTER, ("Trying JS filter %s\n", szPath));
if (gf_file_exists(szPath)) {
file_exists = GF_TRUE;
} else {
strcpy(szName, szPath);
file_exists = gf_fs_solve_js_script(szPath, szName, file_ext);
if (!file_exists && !file_ext) {
strcat(szName, ".js");
if (gf_file_exists(szName)) {
strncpy(szPath, name, len);
szPath[len]=0;
strcat(szPath, ".js");
file_exists = GF_TRUE;
}
}
}
if (file_exists) {
sprintf(szName, "jsf%cjs%c", fsess->sep_args, fsess->sep_name);
strcat(szName, szPath);
if (name[len])
strcat(szName, name+len);
return gf_fs_load_filter(fsess, szName, err_code);
}
}
if (!quiet) {
GF_LOG(GF_LOG_ERROR, GF_LOG_FILTER, ("Failed to load filter %s: no such filter registry\n", name));
}
if (err_code) *err_code = GF_FILTER_NOT_FOUND;
return NULL;
}
| null | null | 196,882
|
220232041198906961636892375047340335646
| 97
|
fixed #1906
|
other
|
tensorflow
|
9e62869465573cb2d9b5053f1fa02a81fce21d69
| 1
|
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(kInputTensorIndex);
const Tensor& input_min = ctx->input(kInputMinIndex);
const Tensor& input_max = ctx->input(kInputMaxIndex);
const size_t depth = input_max.NumElements();
OP_REQUIRES(
ctx, input_min.dim_size(0) == depth,
errors::InvalidArgument("input_min has incorrect size, expected ",
depth, " was ", input_min.dim_size(0)));
OP_REQUIRES(
ctx, input_max.dim_size(0) == depth,
errors::InvalidArgument("input_max has incorrect size, expected ",
depth, " was ", input_max.dim_size(0)));
const float* input_min_data = input_min.flat<float>().data();
const float* input_max_data = input_max.flat<float>().data();
std::vector<float> ranges(depth);
bool is_non_negative = true;
Eigen::array<int, 2> shuffling({1, 0});
auto input_matrix = input.flat_inner_dims<qint32>();
// TODO: verify performance of not transposing and finding the min max
// directly from input_matrix vs the one presented below of transposing and
// using the transposed matrix as the transposing operation in itself might
// be more costly.
// Note that this operation is a calibration step for quantization and will
// cease to exist in the final inference graph(will exist as a const node).
auto transposed_input = input_matrix.shuffle(shuffling);
// Find the ranges of each channel in parallel.
float out_min_max = std::numeric_limits<float>::min();
#ifdef ENABLE_ONEDNN_OPENMP
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for reduction(max : out_min_max)
#endif
#endif // ENABLE_ONEDNN_OPENMP
// TODO: Add eigen parallel_for
for (int64_t i = 0; i < depth; ++i) {
Eigen::Tensor<qint32, 0, Eigen::RowMajor> min =
transposed_input.chip<0>(i).minimum();
Eigen::Tensor<qint32, 0, Eigen::RowMajor> max =
transposed_input.chip<0>(i).maximum();
const int32_t min_per_channel = min();
const int32_t max_per_channel = max();
const int32_t abs_max =
std::max(std::abs(min_per_channel), std::abs(max_per_channel));
float scale =
std::max(std::abs(input_min_data[i]), std::abs(input_max_data[i]));
ranges[i] =
scale * static_cast<float>(abs_max) / static_cast<float>(1L << 31);
if (min_per_channel < 0) is_non_negative = false;
// Thread-local out_min_max.
out_min_max = std::max(out_min_max, ranges[i]);
}
// All local out_min_max gets max-reduced into one global out_min_max at
// the end of the loop by specifying reduction(max:out_min_max) along with
// omp parallel for.
// Fixing max to clip_value_max_ (example 6.0 to support relu6)
if (out_min_max > clip_value_max_) out_min_max = clip_value_max_;
Tensor* output_min = nullptr;
Tensor* output_max = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMinIndex, {}, &output_min));
OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMaxIndex, {}, &output_max));
output_min->flat<float>()(0) = is_non_negative ? 0.0f : -out_min_max;
output_max->flat<float>()(0) = out_min_max;
}
| null | null | 196,885
|
178185501940213289525267608244583233975
| 74
|
Add more validation to `RequantizationRangePerChannel`.
PiperOrigin-RevId: 387693946
Change-Id: Ife8dcbdb021bec4787eef6a4361dd08f17c14bd6
|
other
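depth is taken from input_max, yet the per-channel loop chips depth rows out of the transposed input, so nothing above stops a caller whose input's innermost dimension disagrees with input_max, or whose input is a scalar. A plausible shape guard in the spirit of the message; the upstream patch may check different conditions:

OP_REQUIRES(ctx, input.dims() > 0,
            errors::InvalidArgument("input must be at least 1D"));
OP_REQUIRES(ctx,
            static_cast<size_t>(input.dim_size(input.dims() - 1)) == depth,
            errors::InvalidArgument(
                "input's last dimension must match input_max's size"));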
|
rpm
|
bd36c5dc9fb6d90c46fbfed8c2d67516fc571ec8
| 1
|
int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
pgpDigParams * ret)
{
const uint8_t *p = pkts;
const uint8_t *pend = pkts + pktlen;
pgpDigParams digp = NULL;
struct pgpPkt pkt;
int rc = -1; /* assume failure */
while (p < pend) {
if (decodePkt(p, (pend - p), &pkt))
break;
if (digp == NULL) {
if (pkttype && pkt.tag != pkttype) {
break;
} else {
digp = pgpDigParamsNew(pkt.tag);
}
}
if (pgpPrtPkt(&pkt, digp))
break;
p += (pkt.body - pkt.head) + pkt.blen;
if (pkttype == PGPTAG_SIGNATURE)
break;
}
rc = (digp && (p == pend)) ? 0 : -1;
if (ret && rc == 0) {
*ret = digp;
} else {
pgpDigParamsFree(digp);
}
return rc;
}
| null | null | 196,889
|
143803285922112676036425788687927183928
| 38
|
Validate and require subkey binding signatures on PGP public keys
All subkeys must be followed by a binding signature by the primary key
as per the OpenPGP RFC, enforce the presence and validity in the parser.
The implementation is as kludgey as they come to work around our
simple-minded parser structure without touching API, to maximise
backportability. Store all the raw packets internally as we decode them
to be able to access previous elements at will, needed to validate ordering
and access the actual data. Add testcases for manipulated keys whose
import previously would succeed.
Depends on the two previous commits:
7b399fcb8f52566e6f3b4327197a85facd08db91 and
236b802a4aa48711823a191d1b7f753c82a89ec5
Fixes CVE-2021-3521.
|
other
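The loop above accepts any packet sequence that decodes cleanly, which is why an attacker-supplied subkey with no binding signature slips through. The message's approach, stated directly: keep the raw packets as they are decoded, then require every subkey packet to be immediately bound by a signature from the primary key. A heavily simplified sketch; PGPTAG_PUBLIC_SUBKEY and PGPTAG_SIGNATURE are real rpm tags, while all, count, primary and verifySubkeyBinding are hypothetical stand-ins for the stored packet array and the verification helper:

/* sketch: every subkey must be followed by a valid binding signature */
for (int i = 0; i < count; i++) {
    if (all[i].tag == PGPTAG_PUBLIC_SUBKEY) {
	if (i + 1 >= count || all[i + 1].tag != PGPTAG_SIGNATURE)
	    return -1;	/* unbound subkey: reject the whole key */
	if (verifySubkeyBinding(&all[i], &all[i + 1], primary) != 0)
	    return -1;	/* binding signature does not verify */
    }
}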
|
envoy
|
e9f936d85dc1edc34fabd0a1725ec180f2316353
| 1
|
void DefaultCertValidator::updateDigestForSessionId(bssl::ScopedEVP_MD_CTX& md,
uint8_t hash_buffer[EVP_MAX_MD_SIZE],
unsigned hash_length) {
int rc;
// Hash all the settings that affect whether the server will allow/accept
// the client connection. This ensures that the client is always validated against
// the correct settings, even if session resumption across different listeners
// is enabled.
if (ca_cert_ != nullptr) {
rc = X509_digest(ca_cert_.get(), EVP_sha256(), hash_buffer, &hash_length);
RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
RELEASE_ASSERT(hash_length == SHA256_DIGEST_LENGTH,
fmt::format("invalid SHA256 hash length {}", hash_length));
rc = EVP_DigestUpdate(md.get(), hash_buffer, hash_length);
RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
}
for (const auto& hash : verify_certificate_hash_list_) {
rc = EVP_DigestUpdate(md.get(), hash.data(),
hash.size() *
sizeof(std::remove_reference<decltype(hash)>::type::value_type));
RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
}
for (const auto& hash : verify_certificate_spki_list_) {
rc = EVP_DigestUpdate(md.get(), hash.data(),
hash.size() *
sizeof(std::remove_reference<decltype(hash)>::type::value_type));
RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
}
}
| null | null | 196,893
|
67659419293076123709338539213514573993
| 33
|
CVE-2022-21654
tls allows re-use when some cert validation settings have changed
Signed-off-by: Yan Avlasov <[email protected]>
|
other
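The digest above covers the CA certificate and the pinned hash/SPKI lists, but any validation setting left out of it lets a session resumed on one listener bypass a stricter setting on another, which is the reuse the CVE describes. The fix direction is simply to fold every outcome-affecting setting into the hash. A sketch for one such setting; treating verify_subject_alt_name_list_ as a member of this validator mirrors the lists already hashed above but is an assumption:

// sketch: any setting that changes the validation outcome must feed the digest
for (const std::string& san : verify_subject_alt_name_list_) {
  rc = EVP_DigestUpdate(md.get(), san.data(), san.size());
  RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
}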
|
cryptopp
|
9425e16437439e68c7d96abef922167d68fafaff
| 1
|
Integer InvertibleRWFunction::CalculateInverse(RandomNumberGenerator &rng, const Integer &x) const
{
DoQuickSanityCheck();
ModularArithmetic modn(m_n);
Integer r, rInv;
do { // do this in a loop for people using small numbers for testing
r.Randomize(rng, Integer::One(), m_n - Integer::One());
rInv = modn.MultiplicativeInverse(r);
} while (rInv.IsZero());
Integer re = modn.Square(r);
re = modn.Multiply(re, x); // blind
Integer cp=re%m_p, cq=re%m_q;
if (Jacobi(cp, m_p) * Jacobi(cq, m_q) != 1)
{
cp = cp.IsOdd() ? (cp+m_p) >> 1 : cp >> 1;
cq = cq.IsOdd() ? (cq+m_q) >> 1 : cq >> 1;
}
#pragma omp parallel
#pragma omp sections
{
#pragma omp section
cp = ModularSquareRoot(cp, m_p);
#pragma omp section
cq = ModularSquareRoot(cq, m_q);
}
Integer y = CRT(cq, m_q, cp, m_p, m_u);
y = modn.Multiply(y, rInv); // unblind
y = STDMIN(y, m_n-y);
if (ApplyFunction(y) != x) // check
throw Exception(Exception::OTHER_ERROR, "InvertibleRWFunction: computational error during private key operation");
return y;
}
| null | null | 196,894
|
56333125280648792964665629207196928688
| 35
|
Fix for CVE-2015-2141. Thanks to Evgeny Sidorov for reporting. Squaring to satisfy Jacobi requirements suggested by JPM.
|
other
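Why squaring the blinding factor works, roughly: the function being inverted is (up to the Rabin-Williams tweaks) squaring mod n, so with c = x·r² (mod n) the square-root machinery returns y' = r·sqrt(x), and the modn.Multiply(y, rInv) step unblinds it back to sqrt(x). Blinding with r² rather than r also leaves the Jacobi symbols (c/p) and (c/q) equal to those of x, since (r²/p) = 1, so the tweak branch above behaves identically blinded and unblinded; that is the "Jacobi requirement" the message credits to JPM.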
|
toybox
|
6d4847934fc0fe47a3254ce6c0396d197a780cf4
| 1
|
static int isunder(char *file, char *dir)
{
char *s1 = xabspath(dir, ABS_FILE), *s2 = xabspath(file, 0), *ss = s2;
  int rc = strstart(&ss, s1) && (!*ss || *ss=='/' || ss[-1]=='/');
free(s2);
free(s1);
return rc;
}
| null | null | 196,895
|
97427692132315706086206005036245647311
| 10
|
Don't segfault if xabspath() returns null (spotted by Taolaw).
|
other
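xabspath() returns NULL when the path cannot be resolved, and both strstart() (through ss = s2) and the ss[-1] peek dereference it unconditionally. A sketch of the null-safe shape the message implies; the short-circuiting s1 && s2 && is the whole fix, and free(NULL) is already harmless:

static int isunder(char *file, char *dir)
{
  char *s1 = xabspath(dir, ABS_FILE), *s2 = xabspath(file, 0), *ss = s2;
  // xabspath() can fail: test both results before strstart() dereferences them
  int rc = s1 && s2 && strstart(&ss, s1) && (!*ss || *ss=='/' || ss[-1]=='/');

  free(s2);
  free(s1);

  return rc;
}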
|
libconfuse
|
d73777c2c3566fb2647727bb56d9a2295b81669b
| 1
|
DLLIMPORT char *cfg_tilde_expand(const char *filename)
{
char *expanded = NULL;
#ifndef _WIN32
/* Do tilde expansion */
if (filename[0] == '~') {
struct passwd *passwd = NULL;
const char *file = NULL;
if (filename[1] == '/' || filename[1] == 0) {
/* ~ or ~/path */
passwd = getpwuid(geteuid());
file = filename + 1;
} else {
/* ~user or ~user/path */
char *user;
file = strchr(filename, '/');
if (file == NULL)
file = filename + strlen(filename);
user = malloc(file - filename);
if (!user)
return NULL;
strncpy(user, filename + 1, file - filename - 1);
passwd = getpwnam(user);
free(user);
}
if (passwd) {
expanded = malloc(strlen(passwd->pw_dir) + strlen(file) + 1);
if (!expanded)
return NULL;
strcpy(expanded, passwd->pw_dir);
strcat(expanded, file);
}
}
#endif
if (!expanded)
expanded = strdup(filename);
return expanded;
}
| null | null | 196,920
|
274166286719183774870337789066832153738
| 46
|
Fix #163: unterminated username used with getpwnam()
Signed-off-by: Joachim Wiberg <[email protected]>
|
other
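The bug is in the ~user branch: strncpy copies exactly file - filename - 1 bytes and, because the source region contains no NUL in that span, never terminates the buffer, so getpwnam() reads past the username. A minimal sketch of the termination fix; upstream may have switched to calloc or another helper instead:

user = malloc(file - filename);
if (!user)
	return NULL;
strncpy(user, filename + 1, file - filename - 1);
user[file - filename - 1] = '\0';	/* strncpy does not terminate here */
passwd = getpwnam(user);
free(user);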
|
curl
|
7e92d12b4e6911f424678a133b19de670e183a59
| 1
|
static bool bad_domain(const char *domain)
{
return !strchr(domain, '.') && !strcasecompare(domain, "localhost");
}
| null | null | 196,938
|
279360140156101526766245007952632687468
| 4
|
cookies: make bad_domain() not consider a trailing dot fine
The check for a dot in the domain must not consider a single trailing
dot to be fine, as then TLD + trailing dot is fine and curl will accept
setting cookies for it.
CVE-2022-27779
Reported-by: Axel Chong
Bug: https://curl.se/docs/CVE-2022-27779.html
Closes #8820
|
other
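As the message explains, requiring only that a dot exists means "com." (TLD plus trailing dot) passes the check. A sketch of the hardened predicate; the upstream patch may structure it differently:

static bool bad_domain(const char *domain)
{
  size_t len = strlen(domain);
  /* a trailing dot would otherwise make "tld." count as having a dot */
  if(len && domain[len - 1] == '.')
    return TRUE;
  return !strchr(domain, '.') && !strcasecompare(domain, "localhost");
}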
|
libjxl
|
7dfa400ded53919d986c5d3d23446a09e0cf481b
| 1
|
Status DecodeImageAPNG(Span<const uint8_t> bytes, ThreadPool* pool,
CodecInOut* io) {
Reader r;
unsigned int id, i, j, w, h, w0, h0, x0, y0;
unsigned int delay_num, delay_den, dop, bop, rowbytes, imagesize;
unsigned char sig[8];
png_structp png_ptr;
png_infop info_ptr;
CHUNK chunk;
CHUNK chunkIHDR;
std::vector<CHUNK> chunksInfo;
bool isAnimated = false;
bool skipFirst = false;
bool hasInfo = false;
bool all_dispose_bg = true;
APNGFrame frameRaw = {};
r = {bytes.data(), bytes.data() + bytes.size()};
// Not an aPNG => not an error
unsigned char png_signature[8] = {137, 80, 78, 71, 13, 10, 26, 10};
if (r.Read(sig, 8) || memcmp(sig, png_signature, 8) != 0) {
return false;
}
id = read_chunk(&r, &chunkIHDR);
io->frames.clear();
io->dec_pixels = 0;
io->metadata.m.SetUintSamples(8);
io->metadata.m.SetAlphaBits(8);
io->metadata.m.color_encoding =
ColorEncoding::SRGB(); // todo: get data from png metadata
(void)io->dec_hints.Foreach(
[](const std::string& key, const std::string& /*value*/) {
JXL_WARNING("APNG decoder ignoring %s hint", key.c_str());
return true;
});
bool errorstate = true;
if (id == kId_IHDR && chunkIHDR.size == 25) {
w0 = w = png_get_uint_32(chunkIHDR.p + 8);
h0 = h = png_get_uint_32(chunkIHDR.p + 12);
if (w > cMaxPNGSize || h > cMaxPNGSize) {
return false;
}
x0 = 0;
y0 = 0;
delay_num = 1;
delay_den = 10;
dop = 0;
bop = 0;
rowbytes = w * 4;
imagesize = h * rowbytes;
frameRaw.p = new unsigned char[imagesize];
frameRaw.rows = new png_bytep[h * sizeof(png_bytep)];
for (j = 0; j < h; j++) frameRaw.rows[j] = frameRaw.p + j * rowbytes;
if (!processing_start(png_ptr, info_ptr, (void*)&frameRaw, hasInfo,
chunkIHDR, chunksInfo)) {
bool last_base_was_none = true;
while (!r.Eof()) {
id = read_chunk(&r, &chunk);
if (!id) break;
JXL_ASSERT(chunk.p != nullptr);
if (id == kId_acTL && !hasInfo && !isAnimated) {
isAnimated = true;
skipFirst = true;
io->metadata.m.have_animation = true;
io->metadata.m.animation.tps_numerator = 1000;
} else if (id == kId_IEND ||
(id == kId_fcTL && (!hasInfo || isAnimated))) {
if (hasInfo) {
if (!processing_finish(png_ptr, info_ptr)) {
ImageBundle bundle(&io->metadata.m);
bundle.duration = delay_num * 1000 / delay_den;
bundle.origin.x0 = x0;
bundle.origin.y0 = y0;
// TODO(veluca): this could in principle be implemented.
if (last_base_was_none && !all_dispose_bg &&
(x0 != 0 || y0 != 0 || w0 != w || h0 != h || bop != 0)) {
return JXL_FAILURE(
"APNG with dispose-to-0 is not supported for non-full or "
"blended frames");
}
switch (dop) {
case 0:
bundle.use_for_next_frame = true;
last_base_was_none = false;
all_dispose_bg = false;
break;
case 2:
bundle.use_for_next_frame = false;
all_dispose_bg = false;
break;
default:
bundle.use_for_next_frame = false;
last_base_was_none = true;
}
bundle.blend = bop != 0;
io->dec_pixels += w0 * h0;
Image3F sub_frame(w0, h0);
ImageF sub_frame_alpha(w0, h0);
for (size_t y = 0; y < h0; ++y) {
float* const JXL_RESTRICT row_r = sub_frame.PlaneRow(0, y);
float* const JXL_RESTRICT row_g = sub_frame.PlaneRow(1, y);
float* const JXL_RESTRICT row_b = sub_frame.PlaneRow(2, y);
float* const JXL_RESTRICT row_alpha = sub_frame_alpha.Row(y);
uint8_t* const f = frameRaw.rows[y];
for (size_t x = 0; x < w0; ++x) {
if (f[4 * x + 3] == 0) {
row_alpha[x] = 0;
row_r[x] = 0;
row_g[x] = 0;
row_b[x] = 0;
continue;
}
row_r[x] = f[4 * x + 0] * (1.f / 255);
row_g[x] = f[4 * x + 1] * (1.f / 255);
row_b[x] = f[4 * x + 2] * (1.f / 255);
row_alpha[x] = f[4 * x + 3] * (1.f / 255);
}
}
bundle.SetFromImage(std::move(sub_frame), ColorEncoding::SRGB());
bundle.SetAlpha(std::move(sub_frame_alpha),
/*alpha_is_premultiplied=*/false);
io->frames.push_back(std::move(bundle));
} else {
delete[] chunk.p;
break;
}
}
if (id == kId_IEND) {
errorstate = false;
break;
}
// At this point the old frame is done. Let's start a new one.
w0 = png_get_uint_32(chunk.p + 12);
h0 = png_get_uint_32(chunk.p + 16);
x0 = png_get_uint_32(chunk.p + 20);
y0 = png_get_uint_32(chunk.p + 24);
delay_num = png_get_uint_16(chunk.p + 28);
delay_den = png_get_uint_16(chunk.p + 30);
dop = chunk.p[32];
bop = chunk.p[33];
if (w0 > cMaxPNGSize || h0 > cMaxPNGSize || x0 > cMaxPNGSize ||
y0 > cMaxPNGSize || x0 + w0 > w || y0 + h0 > h || dop > 2 ||
bop > 1) {
delete[] chunk.p;
break;
}
if (hasInfo) {
memcpy(chunkIHDR.p + 8, chunk.p + 12, 8);
if (processing_start(png_ptr, info_ptr, (void*)&frameRaw, hasInfo,
chunkIHDR, chunksInfo)) {
delete[] chunk.p;
break;
}
} else
skipFirst = false;
if (io->frames.size() == (skipFirst ? 1 : 0)) {
bop = 0;
if (dop == 2) dop = 1;
}
} else if (id == kId_IDAT) {
hasInfo = true;
if (processing_data(png_ptr, info_ptr, chunk.p, chunk.size)) {
delete[] chunk.p;
break;
}
} else if (id == kId_fdAT && isAnimated) {
png_save_uint_32(chunk.p + 4, chunk.size - 16);
memcpy(chunk.p + 8, "IDAT", 4);
if (processing_data(png_ptr, info_ptr, chunk.p + 4, chunk.size - 4)) {
delete[] chunk.p;
break;
}
} else if (!isAbc(chunk.p[4]) || !isAbc(chunk.p[5]) ||
!isAbc(chunk.p[6]) || !isAbc(chunk.p[7])) {
delete[] chunk.p;
break;
} else if (!hasInfo) {
if (processing_data(png_ptr, info_ptr, chunk.p, chunk.size)) {
delete[] chunk.p;
break;
}
chunksInfo.push_back(chunk);
continue;
}
delete[] chunk.p;
}
}
delete[] frameRaw.rows;
delete[] frameRaw.p;
}
for (i = 0; i < chunksInfo.size(); i++) delete[] chunksInfo[i].p;
chunksInfo.clear();
delete[] chunkIHDR.p;
if (errorstate) return false;
SetIntensityTarget(io);
return true;
}
| null | null | 196,993
|
123816616739143632951179511270786230002
| 212
|
Fix handling of APNG with 0 delay_den (#313)
|
other
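The crash site is bundle.duration = delay_num * 1000 / delay_den: an fcTL chunk may carry delay_den == 0, and the APNG specification says a zero denominator is to be read as 100 (delays in 1/100 s units). A minimal sketch of the guard at that point:

// APNG: a delay_den of 0 means the denominator defaults to 100 (1/100 s)
if (delay_den == 0) delay_den = 100;
bundle.duration = delay_num * 1000 / delay_den;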
|
glibc
|
ee8d5e33adb284601c00c94687bc907e10aec9bb
| 1
|
realpath_stk (const char *name, char *resolved,
struct scratch_buffer *rname_buf)
{
char *dest;
char const *start;
char const *end;
int num_links = 0;
if (name == NULL)
{
/* As per Single Unix Specification V2 we must return an error if
either parameter is a null pointer. We extend this to allow
the RESOLVED parameter to be NULL in case the we are expected to
allocate the room for the return value. */
__set_errno (EINVAL);
return NULL;
}
if (name[0] == '\0')
{
/* As per Single Unix Specification V2 we must return an error if
the name argument points to an empty string. */
__set_errno (ENOENT);
return NULL;
}
struct scratch_buffer extra_buffer, link_buffer;
scratch_buffer_init (&extra_buffer);
scratch_buffer_init (&link_buffer);
scratch_buffer_init (rname_buf);
char *rname_on_stack = rname_buf->data;
char *rname = rname_on_stack;
bool end_in_extra_buffer = false;
bool failed = true;
/* This is always zero for Posix hosts, but can be 2 for MS-Windows
and MS-DOS X:/foo/bar file names. */
idx_t prefix_len = FILE_SYSTEM_PREFIX_LEN (name);
if (!IS_ABSOLUTE_FILE_NAME (name))
{
while (!__getcwd (rname, rname_buf->length))
{
if (errno != ERANGE)
{
dest = rname;
goto error;
}
if (!scratch_buffer_grow (rname_buf))
goto error_nomem;
rname = rname_buf->data;
}
dest = __rawmemchr (rname, '\0');
start = name;
prefix_len = FILE_SYSTEM_PREFIX_LEN (rname);
}
else
{
dest = __mempcpy (rname, name, prefix_len);
*dest++ = '/';
if (DOUBLE_SLASH_IS_DISTINCT_ROOT)
{
if (prefix_len == 0 /* implies ISSLASH (name[0]) */
&& ISSLASH (name[1]) && !ISSLASH (name[2]))
*dest++ = '/';
*dest = '\0';
}
start = name + prefix_len;
}
for ( ; *start; start = end)
{
/* Skip sequence of multiple file name separators. */
while (ISSLASH (*start))
++start;
/* Find end of component. */
for (end = start; *end && !ISSLASH (*end); ++end)
/* Nothing. */;
/* Length of this file name component; it can be zero if a file
name ends in '/'. */
idx_t startlen = end - start;
if (startlen == 0)
break;
else if (startlen == 1 && start[0] == '.')
/* nothing */;
else if (startlen == 2 && start[0] == '.' && start[1] == '.')
{
/* Back up to previous component, ignore if at root already. */
if (dest > rname + prefix_len + 1)
for (--dest; dest > rname && !ISSLASH (dest[-1]); --dest)
continue;
if (DOUBLE_SLASH_IS_DISTINCT_ROOT
&& dest == rname + 1 && !prefix_len
&& ISSLASH (*dest) && !ISSLASH (dest[1]))
dest++;
}
else
{
if (!ISSLASH (dest[-1]))
*dest++ = '/';
while (rname + rname_buf->length - dest
< startlen + sizeof dir_suffix)
{
idx_t dest_offset = dest - rname;
if (!scratch_buffer_grow_preserve (rname_buf))
goto error_nomem;
rname = rname_buf->data;
dest = rname + dest_offset;
}
dest = __mempcpy (dest, start, startlen);
*dest = '\0';
char *buf;
ssize_t n;
while (true)
{
buf = link_buffer.data;
idx_t bufsize = link_buffer.length;
n = __readlink (rname, buf, bufsize - 1);
if (n < bufsize - 1)
break;
if (!scratch_buffer_grow (&link_buffer))
goto error_nomem;
}
if (0 <= n)
{
if (++num_links > __eloop_threshold ())
{
__set_errno (ELOOP);
goto error;
}
buf[n] = '\0';
char *extra_buf = extra_buffer.data;
idx_t end_idx IF_LINT (= 0);
if (end_in_extra_buffer)
end_idx = end - extra_buf;
size_t len = strlen (end);
if (INT_ADD_OVERFLOW (len, n))
{
__set_errno (ENOMEM);
goto error_nomem;
}
while (extra_buffer.length <= len + n)
{
if (!scratch_buffer_grow_preserve (&extra_buffer))
goto error_nomem;
extra_buf = extra_buffer.data;
}
if (end_in_extra_buffer)
end = extra_buf + end_idx;
/* Careful here, end may be a pointer into extra_buf... */
memmove (&extra_buf[n], end, len + 1);
name = end = memcpy (extra_buf, buf, n);
end_in_extra_buffer = true;
if (IS_ABSOLUTE_FILE_NAME (buf))
{
idx_t pfxlen = FILE_SYSTEM_PREFIX_LEN (buf);
dest = __mempcpy (rname, buf, pfxlen);
*dest++ = '/'; /* It's an absolute symlink */
if (DOUBLE_SLASH_IS_DISTINCT_ROOT)
{
if (ISSLASH (buf[1]) && !ISSLASH (buf[2]) && !pfxlen)
*dest++ = '/';
*dest = '\0';
}
/* Install the new prefix to be in effect hereafter. */
prefix_len = pfxlen;
}
else
{
/* Back up to previous component, ignore if at root
already: */
if (dest > rname + prefix_len + 1)
for (--dest; dest > rname && !ISSLASH (dest[-1]); --dest)
continue;
if (DOUBLE_SLASH_IS_DISTINCT_ROOT && dest == rname + 1
&& ISSLASH (*dest) && !ISSLASH (dest[1]) && !prefix_len)
dest++;
}
}
else if (! (suffix_requires_dir_check (end)
? dir_check (rname, dest)
: errno == EINVAL))
goto error;
}
}
if (dest > rname + prefix_len + 1 && ISSLASH (dest[-1]))
--dest;
if (DOUBLE_SLASH_IS_DISTINCT_ROOT && dest == rname + 1 && !prefix_len
&& ISSLASH (*dest) && !ISSLASH (dest[1]))
dest++;
failed = false;
error:
*dest++ = '\0';
if (resolved != NULL && dest - rname <= get_path_max ())
rname = strcpy (resolved, rname);
error_nomem:
scratch_buffer_free (&extra_buffer);
scratch_buffer_free (&link_buffer);
if (failed || rname == resolved)
{
scratch_buffer_free (rname_buf);
return failed ? NULL : resolved;
}
return scratch_buffer_dupfree (rname_buf, dest - rname);
}
| null | null | 197,010
|
137106727751924431395811492905376031581
| 220
|
realpath: Set errno to ENAMETOOLONG for result larger than PATH_MAX [BZ #28770]
realpath returns an allocated string when the result exceeds PATH_MAX,
which is unexpected when its second argument is not NULL. This results
in the second argument (resolved) being uninitialized and also results
in a memory leak since the caller expects resolved to be the same as the
returned value.
Return NULL and set errno to ENAMETOOLONG if the result exceeds
PATH_MAX. This fixes [BZ #28770], which is CVE-2021-3998.
Reviewed-by: Adhemerval Zanella <[email protected]>
Signed-off-by: Siddhesh Poyarekar <[email protected]>
|
other
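The message translates into a small change at the error: label: copy into resolved only when the result fits, and otherwise fail with ENAMETOOLONG instead of handing back an allocated buffer the caller never passed in. A sketch close to the described behavior:

error:
  *dest++ = '\0';
  if (resolved != NULL)
    {
      if (dest - rname <= get_path_max ())
	rname = strcpy (resolved, rname);
      else
	{
	  failed = true;
	  __set_errno (ENAMETOOLONG);
	}
    }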
|
gpac
|
dae9900580a8888969481cd72035408091edb11b
| 1
|
GF_Err SetupWriters(MovieWriter *mw, GF_List *writers, u8 interleaving)
{
u32 i, trackCount;
TrackWriter *writer;
GF_TrackBox *trak;
GF_ISOFile *movie = mw->movie;
mw->total_samples = mw->nb_done = 0;
if (!movie->moov) return GF_OK;
trackCount = gf_list_count(movie->moov->trackList);
for (i = 0; i < trackCount; i++) {
trak = gf_isom_get_track(movie->moov, i+1);
GF_SAFEALLOC(writer, TrackWriter);
if (!writer) goto exit;
writer->sampleNumber = 1;
writer->mdia = trak->Media;
writer->stbl = trak->Media->information->sampleTable;
writer->timeScale = trak->Media->mediaHeader->timeScale;
writer->all_dref_mode = Media_SelfContainedType(writer->mdia);
if (trak->sample_encryption)
writer->prevent_dispatch = GF_TRUE;
writer->isDone = 0;
writer->DTSprev = 0;
writer->chunkDur = 0;
writer->chunkSize = 0;
writer->constant_size = writer->constant_dur = 0;
if (writer->stbl->SampleSize->sampleSize)
writer->constant_size = writer->stbl->SampleSize->sampleSize;
if (writer->stbl->TimeToSample->nb_entries==1) {
writer->constant_dur = writer->stbl->TimeToSample->entries[0].sampleDelta;
if (writer->constant_dur>1) writer->constant_dur = 0;
}
if (!writer->constant_dur || !writer->constant_size || (writer->constant_size>=10))
writer->constant_size = writer->constant_dur = 0;
writer->stsc = (GF_SampleToChunkBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_STSC);
if (!writer->stsc) return GF_OUT_OF_MEM;
if (writer->stbl->ChunkOffset->type == GF_ISOM_BOX_TYPE_STCO) {
writer->stco = gf_isom_box_new(GF_ISOM_BOX_TYPE_STCO);
} else {
writer->stco = gf_isom_box_new(GF_ISOM_BOX_TYPE_CO64);
}
if (!writer->stco) return GF_OUT_OF_MEM;
/*stops from chunk escape*/
if (interleaving) writer->stbl->MaxSamplePerChunk = 0;
/*for progress, assume only one descIndex*/
if (Media_IsSelfContained(writer->mdia, 1))
mw->total_samples += writer->stbl->SampleSize->sampleCount;
/*optimization for interleaving: put audio last (this can be overridden by priorities)*/
if (movie->storageMode != GF_ISOM_STORE_INTERLEAVED) {
gf_list_add(writers, writer);
} else {
if (writer->mdia->information->InfoHeader && writer->mdia->information->InfoHeader->type == GF_ISOM_BOX_TYPE_SMHD) {
gf_list_add(writers, writer);
} else {
gf_list_insert(writers, writer, 0);
}
}
if (movie->sample_groups_in_traf && trak->Media->information->sampleTable) {
gf_isom_box_array_del_parent(&trak->Media->information->sampleTable->child_boxes, trak->Media->information->sampleTable->sampleGroupsDescription);
trak->Media->information->sampleTable->sampleGroupsDescription = NULL;
}
}
return GF_OK;
exit:
CleanWriters(writers);
return GF_OUT_OF_MEM;
}
| null | null | 197,015
|
100209179535470776698861527430241074796
| 73
|
fixed #1659
|
other
|
tensorflow
|
93f428fd1768df147171ed674fee1fc5ab8309ec
| 1
|
void Compute(OpKernelContext* ctx) override {
const Tensor& in0 = ctx->input(0);
const Tensor& in1 = ctx->input(1);
auto in0_flat = in0.flat<Tin>();
auto in1_flat = in1.flat<Tin>();
const Device& eigen_device = ctx->eigen_device<Device>();
Tensor* out = nullptr;
if (std::is_same<Tin, Tout>::value) {
OP_REQUIRES_OK(ctx, ctx->forward_input_or_allocate_output(
{0, 1}, 0, in0.shape(), &out));
} else {
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, in0.shape(), &out));
}
auto out_flat = out->flat<Tout>();
functor::SimpleBinaryFunctor<Device, Functor>()(eigen_device, out_flat,
in0_flat, in1_flat);
}
| null | null | 197,024
|
92857430627306028918441575167793149967
| 18
|
Fix nullptr deref and heap OOB access in binary cwise ops.
PiperOrigin-RevId: 387936777
Change-Id: I608b8074cec36a982cca622b7144cb2c43e6e19f
|
other
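SimpleBinaryFunctor walks both flats as if they cover the same element count, but nothing above checks the two shapes against each other, and only input 0's shape sizes the output; a smaller input 1 yields the heap OOB read the message names. A plausible guard before the functor call; the upstream wording may differ:

OP_REQUIRES(ctx, in0.NumElements() == in1.NumElements(),
            errors::InvalidArgument(
                "The two arguments to a cwise op must have "
                "same number of elements, got ",
                in0.NumElements(), " and ", in1.NumElements()));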
|
drogon
|
3c785326c63a34aa1799a639ae185bc9453cb447
| 1
|
int HttpFileImpl::save(const std::string &path) const
{
assert(!path.empty());
if (fileName_.empty())
return -1;
filesystem::path fsPath(utils::toNativePath(path));
if (!fsPath.is_absolute() &&
(!fsPath.has_parent_path() ||
(fsPath.begin()->string() != "." && fsPath.begin()->string() != "..")))
{
filesystem::path fsUploadPath(utils::toNativePath(
HttpAppFrameworkImpl::instance().getUploadPath()));
fsPath = fsUploadPath / fsPath;
}
filesystem::path fsFileName(utils::toNativePath(fileName_));
if (!filesystem::exists(fsPath))
{
LOG_TRACE << "create path:" << fsPath;
drogon::error_code err;
filesystem::create_directories(fsPath, err);
if (err)
{
LOG_SYSERR;
return -1;
}
}
return saveTo(fsPath / fsFileName);
}
| null | null | 197,057
|
300389952786254526295960796960160809202
| 28
|
Prevent malformed upload path causing arbitrary write (#1174)
|
other
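The prefix test above only inspects the first component of path, and the client-controlled fileName_ is joined onto the target directory unchecked, so a name containing separators or ".." escapes it. A sketch under the assumption that upload file names are meant to be bare components, reusing the filesystem alias from the surrounding code:

// sketch: the client-supplied file name must stay a single path component
filesystem::path fsFileName(utils::toNativePath(fileName_));
if (fsFileName.has_parent_path() || fsFileName == ".." || fsFileName == ".")
    return -1;   // reject traversal attempts such as "../../etc/x"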
|
ImageMagick
|
8043433ba9ce0c550e09f2b3b6a3f5f62d802e6d
| 1
|
static Image *ReadDCMImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define ThrowDCMException(exception,message) \
{ \
RelinquishDCMMemory(&info,&map,stream_info,stack,data); \
if (info_copy != (DCMInfo *) NULL) \
info_copy=(DCMInfo *) RelinquishDCMInfo(info_copy); \
ThrowReaderException((exception),(message)); \
}
char
explicit_vr[MagickPathExtent],
implicit_vr[MagickPathExtent],
magick[MagickPathExtent],
photometric[MagickPathExtent];
DCMInfo
info,
*info_copy = (DCMInfo *) NULL;
DCMMap
map;
DCMStreamInfo
*stream_info;
Image
*image;
int
datum;
LinkedListInfo
*stack;
MagickBooleanType
explicit_file,
explicit_retry,
use_explicit;
MagickOffsetType
blob_size,
offset;
unsigned char
*p;
ssize_t
i;
size_t
colors,
length,
number_scenes,
quantum,
status;
ssize_t
count,
scene,
sequence_depth;
unsigned char
*data;
unsigned short
group,
element;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image->depth=8UL;
image->endian=LSBEndian;
/*
Read DCM preamble.
*/
(void) memset(&info,0,sizeof(info));
(void) memset(&map,0,sizeof(map));
data=(unsigned char *) NULL;
stream_info=(DCMStreamInfo *) AcquireMagickMemory(sizeof(*stream_info));
sequence_depth=0;
stack=NewLinkedList(256);
if (stream_info == (DCMStreamInfo *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
(void) memset(stream_info,0,sizeof(*stream_info));
count=ReadBlob(image,128,(unsigned char *) magick);
if (count != 128)
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
count=ReadBlob(image,4,(unsigned char *) magick);
if ((count != 4) || (LocaleNCompare(magick,"DICM",4) != 0))
{
offset=SeekBlob(image,0L,SEEK_SET);
if (offset < 0)
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
}
/*
Read DCM Medical image.
*/
(void) CopyMagickString(photometric,"MONOCHROME1 ",MagickPathExtent);
info.bits_allocated=8;
info.bytes_per_pixel=1;
info.depth=8;
info.mask=0xffff;
info.max_value=255UL;
info.samples_per_pixel=1;
info.signed_data=(~0UL);
info.rescale_slope=1.0;
element=0;
explicit_vr[2]='\0';
explicit_file=MagickFalse;
colors=0;
number_scenes=1;
use_explicit=MagickFalse;
explicit_retry=MagickFalse;
blob_size=(MagickOffsetType) GetBlobSize(image);
while (TellBlob(image) < blob_size)
{
for (group=0; (group != 0x7FE0) || (element != 0x0010) ; )
{
/*
Read a group.
*/
image->offset=(ssize_t) TellBlob(image);
group=ReadBlobLSBShort(image);
element=ReadBlobLSBShort(image);
if ((group == 0xfffc) && (element == 0xfffc))
break;
if ((group != 0x0002) && (image->endian == MSBEndian))
{
group=(unsigned short) ((group << 8) | ((group >> 8) & 0xFF));
element=(unsigned short) ((element << 8) | ((element >> 8) & 0xFF));
}
quantum=0;
/*
Find corresponding VR for this group and element.
*/
for (i=0; dicom_info[i].group < 0xffff; i++)
if ((group == dicom_info[i].group) &&
(element == dicom_info[i].element))
break;
(void) CopyMagickString(implicit_vr,dicom_info[i].vr,MagickPathExtent);
count=ReadBlob(image,2,(unsigned char *) explicit_vr);
if (count != 2)
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
/*
Check for "explicitness", but meta-file headers always explicit.
*/
if ((explicit_file == MagickFalse) && (group != 0x0002))
explicit_file=(isupper((int) ((unsigned char) *explicit_vr)) != 0) &&
(isupper((int) ((unsigned char) *(explicit_vr+1))) != 0) ?
MagickTrue : MagickFalse;
use_explicit=((group == 0x0002) && (explicit_retry == MagickFalse)) ||
(explicit_file != MagickFalse) ? MagickTrue : MagickFalse;
if ((use_explicit != MagickFalse) && (strncmp(implicit_vr,"xs",2) == 0))
(void) CopyMagickString(implicit_vr,explicit_vr,MagickPathExtent);
if ((use_explicit == MagickFalse) || (strncmp(implicit_vr,"!!",2) == 0))
{
offset=SeekBlob(image,(MagickOffsetType) -2,SEEK_CUR);
if (offset < 0)
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
quantum=4;
}
else
{
/*
Assume explicit type.
*/
quantum=2;
if ((strcmp(explicit_vr,"OB") == 0) ||
(strcmp(explicit_vr,"OW") == 0) ||
(strcmp(explicit_vr,"OF") == 0) ||
(strcmp(explicit_vr,"SQ") == 0) ||
(strcmp(explicit_vr,"UN") == 0) ||
(strcmp(explicit_vr,"UT") == 0))
{
(void) ReadBlobLSBShort(image);
quantum=4;
}
}
if ((group == 0xFFFE) && (element == 0xE0DD))
{
/*
If we're exiting a sequence, restore the previous image parameters,
effectively undoing any parameter changes that happened inside the
sequence.
*/
sequence_depth--;
info_copy=(DCMInfo *) RemoveLastElementFromLinkedList(stack);
if (info_copy == (DCMInfo *)NULL)
{
/*
The sequence's entry and exit points don't line up (tried to
exit one more sequence than we entered).
*/
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
}
if (info.scale != (Quantum *) NULL)
info.scale=(Quantum *) RelinquishMagickMemory(info.scale);
(void) memcpy(&info,info_copy,sizeof(info));
info_copy=(DCMInfo *) RelinquishMagickMemory(info_copy);
}
if (strcmp(explicit_vr,"SQ") == 0)
{
/*
If we're entering a sequence, push the current image parameters
onto the stack, so we can restore them at the end of the sequence.
*/
info_copy=(DCMInfo *) AcquireMagickMemory(sizeof(info));
if (info_copy == (DCMInfo *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
(void) memcpy(info_copy,&info,sizeof(info));
info_copy->scale=(Quantum *) AcquireQuantumMemory(
info_copy->scale_size+1,sizeof(*info_copy->scale));
if (info_copy->scale == (Quantum *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
(void) memcpy(info_copy->scale,info.scale,info_copy->scale_size*
sizeof(*info_copy->scale));
AppendValueToLinkedList(stack,info_copy);
sequence_depth++;
}
datum=0;
if (quantum == 4)
{
if (group == 0x0002)
datum=ReadBlobLSBSignedLong(image);
else
datum=ReadBlobSignedLong(image);
}
else
if (quantum == 2)
{
if (group == 0x0002)
datum=ReadBlobLSBSignedShort(image);
else
datum=ReadBlobSignedShort(image);
}
quantum=0;
length=1;
if (datum != 0)
{
if ((strncmp(implicit_vr,"OW",2) == 0) ||
(strncmp(implicit_vr,"SS",2) == 0) ||
(strncmp(implicit_vr,"US",2) == 0))
quantum=2;
else
if ((strncmp(implicit_vr,"FL",2) == 0) ||
(strncmp(implicit_vr,"OF",2) == 0) ||
(strncmp(implicit_vr,"SL",2) == 0) ||
(strncmp(implicit_vr,"UL",2) == 0))
quantum=4;
else
if (strncmp(implicit_vr,"FD",2) == 0)
quantum=8;
else
quantum=1;
if (datum != ~0)
length=(size_t) datum/quantum;
else
{
/*
Sequence and item of undefined length.
*/
quantum=0;
length=0;
}
}
if (image_info->verbose != MagickFalse)
{
/*
Display Dicom info.
*/
if (use_explicit == MagickFalse)
explicit_vr[0]='\0';
for (i=0; dicom_info[i].description != (char *) NULL; i++)
if ((group == dicom_info[i].group) &&
(element == dicom_info[i].element))
break;
(void) FormatLocaleFile(stdout,
"0x%04lX %4ld S%ld %s-%s (0x%04lx,0x%04lx)",
(unsigned long) image->offset,(long) length,(long) sequence_depth,
implicit_vr,explicit_vr,(unsigned long) group,
(unsigned long) element);
if (dicom_info[i].description != (char *) NULL)
(void) FormatLocaleFile(stdout," %s",dicom_info[i].description);
(void) FormatLocaleFile(stdout,": ");
}
if ((group == 0x7FE0) && (element == 0x0010))
{
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,"\n");
break;
}
/*
Allocate space and read an array.
*/
data=(unsigned char *) NULL;
if ((length == 1) && (quantum == 1))
datum=ReadBlobByte(image);
else
if ((length == 1) && (quantum == 2))
{
if (group == 0x0002)
datum=ReadBlobLSBSignedShort(image);
else
datum=ReadBlobSignedShort(image);
}
else
if ((length == 1) && (quantum == 4))
{
if (group == 0x0002)
datum=ReadBlobLSBSignedLong(image);
else
datum=ReadBlobSignedLong(image);
}
else
if ((quantum != 0) && (length != 0))
{
if (length > (size_t) GetBlobSize(image))
ThrowDCMException(CorruptImageError,
"InsufficientImageDataInFile")
if (~length >= 1)
data=(unsigned char *) AcquireQuantumMemory(length+1,quantum*
sizeof(*data));
if (data == (unsigned char *) NULL)
ThrowDCMException(ResourceLimitError,
"MemoryAllocationFailed")
count=ReadBlob(image,(size_t) quantum*length,data);
if (count != (ssize_t) (quantum*length))
{
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,"count=%d quantum=%d "
"length=%d group=%d\n",(int) count,(int) quantum,(int)
length,(int) group);
ThrowDCMException(CorruptImageError,
"InsufficientImageDataInFile")
}
data[length*quantum]='\0';
}
if ((((unsigned int) group << 16) | element) == 0xFFFEE0DD)
{
if (data != (unsigned char *) NULL)
data=(unsigned char *) RelinquishMagickMemory(data);
continue;
}
switch (group)
{
case 0x0002:
{
switch (element)
{
case 0x0010:
{
char
transfer_syntax[MagickPathExtent];
/*
Transfer Syntax.
*/
if ((datum == 0) && (explicit_retry == MagickFalse))
{
explicit_retry=MagickTrue;
(void) SeekBlob(image,(MagickOffsetType) 0,SEEK_SET);
group=0;
element=0;
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,
"Corrupted image - trying explicit format\n");
break;
}
*transfer_syntax='\0';
if (data != (unsigned char *) NULL)
(void) CopyMagickString(transfer_syntax,(char *) data,
MagickPathExtent);
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,"transfer_syntax=%s\n",
(const char *) transfer_syntax);
if (strncmp(transfer_syntax,"1.2.840.10008.1.2",17) == 0)
{
int
subtype,
type;
type=1;
subtype=0;
if (strlen(transfer_syntax) > 17)
{
count=(ssize_t) sscanf(transfer_syntax+17,".%d.%d",&type,
&subtype);
if (count < 1)
ThrowDCMException(CorruptImageError,
"ImproperImageHeader")
}
switch (type)
{
case 1:
{
image->endian=LSBEndian;
break;
}
case 2:
{
image->endian=MSBEndian;
break;
}
case 4:
{
if ((subtype >= 80) && (subtype <= 81))
image->compression=JPEGCompression;
else
if ((subtype >= 90) && (subtype <= 93))
image->compression=JPEG2000Compression;
else
image->compression=JPEGCompression;
break;
}
case 5:
{
image->compression=RLECompression;
break;
}
}
}
break;
}
default:
break;
}
break;
}
case 0x0028:
{
switch (element)
{
case 0x0002:
{
/*
Samples per pixel.
*/
info.samples_per_pixel=(size_t) datum;
if ((info.samples_per_pixel == 0) || (info.samples_per_pixel > 4))
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
break;
}
case 0x0004:
{
/*
Photometric interpretation.
*/
if (data == (unsigned char *) NULL)
break;
for (i=0; i < (ssize_t) MagickMin(length,MagickPathExtent-1); i++)
photometric[i]=(char) data[i];
photometric[i]='\0';
info.polarity=LocaleCompare(photometric,"MONOCHROME1 ") == 0 ?
MagickTrue : MagickFalse;
break;
}
case 0x0006:
{
/*
Planar configuration.
*/
if (datum == 1)
image->interlace=PlaneInterlace;
break;
}
case 0x0008:
{
/*
Number of frames.
*/
if (data == (unsigned char *) NULL)
break;
number_scenes=StringToUnsignedLong((char *) data);
break;
}
case 0x0010:
{
/*
Image rows.
*/
info.height=(size_t) datum;
break;
}
case 0x0011:
{
/*
Image columns.
*/
info.width=(size_t) datum;
break;
}
case 0x0100:
{
/*
Bits allocated.
*/
info.bits_allocated=(size_t) datum;
info.bytes_per_pixel=1;
if (datum > 8)
info.bytes_per_pixel=2;
info.depth=info.bits_allocated;
if ((info.depth == 0) || (info.depth > 32))
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
info.max_value=(1UL << info.bits_allocated)-1;
image->depth=info.depth;
break;
}
case 0x0101:
{
/*
Bits stored.
*/
info.significant_bits=(size_t) datum;
info.bytes_per_pixel=1;
if (info.significant_bits > 8)
info.bytes_per_pixel=2;
info.depth=info.significant_bits;
if ((info.depth == 0) || (info.depth > 16))
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
info.max_value=(1UL << info.significant_bits)-1;
info.mask=(size_t) GetQuantumRange(info.significant_bits);
image->depth=info.depth;
break;
}
case 0x0102:
{
/*
High bit.
*/
break;
}
case 0x0103:
{
/*
Pixel representation.
*/
info.signed_data=(size_t) datum;
break;
}
case 0x1050:
{
/*
Visible pixel range: center.
*/
if (data != (unsigned char *) NULL)
info.window_center=StringToDouble((char *) data,(char **) NULL);
break;
}
case 0x1051:
{
/*
Visible pixel range: width.
*/
if (data != (unsigned char *) NULL)
info.window_width=StringToDouble((char *) data,(char **) NULL);
break;
}
case 0x1052:
{
/*
Rescale intercept
*/
if (data != (unsigned char *) NULL)
info.rescale_intercept=StringToDouble((char *) data,
(char **) NULL);
break;
}
case 0x1053:
{
/*
Rescale slope
*/
if (data != (unsigned char *) NULL)
info.rescale_slope=StringToDouble((char *) data,(char **) NULL);
break;
}
case 0x1200:
case 0x3006:
{
/*
Populate graymap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/info.bytes_per_pixel);
datum=(int) colors;
if (map.gray != (int *) NULL)
map.gray=(int *) RelinquishMagickMemory(map.gray);
map.gray=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
sizeof(*map.gray));
if (map.gray == (int *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
(void) memset(map.gray,0,MagickMax(colors,65536)*
sizeof(*map.gray));
for (i=0; i < (ssize_t) colors; i++)
if (info.bytes_per_pixel == 1)
map.gray[i]=(int) data[i];
else
map.gray[i]=(int) ((short *) data)[i];
break;
}
case 0x1201:
{
unsigned short
index;
/*
Populate redmap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/info.bytes_per_pixel);
datum=(int) colors;
if (map.red != (int *) NULL)
map.red=(int *) RelinquishMagickMemory(map.red);
map.red=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
sizeof(*map.red));
if (map.red == (int *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
(void) memset(map.red,0,MagickMax(colors,65536)*
sizeof(*map.red));
p=data;
for (i=0; i < (ssize_t) colors; i++)
{
if (image->endian == MSBEndian)
index=(unsigned short) ((*p << 8) | *(p+1));
else
index=(unsigned short) (*p | (*(p+1) << 8));
map.red[i]=(int) index;
p+=2;
}
break;
}
case 0x1202:
{
unsigned short
index;
/*
Populate greenmap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/info.bytes_per_pixel);
datum=(int) colors;
if (map.green != (int *) NULL)
map.green=(int *) RelinquishMagickMemory(map.green);
map.green=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
sizeof(*map.green));
if (map.green == (int *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
(void) memset(map.green,0,MagickMax(colors,65536)*
sizeof(*map.green));
p=data;
for (i=0; i < (ssize_t) colors; i++)
{
if (image->endian == MSBEndian)
index=(unsigned short) ((*p << 8) | *(p+1));
else
index=(unsigned short) (*p | (*(p+1) << 8));
map.green[i]=(int) index;
p+=2;
}
break;
}
case 0x1203:
{
unsigned short
index;
/*
Populate bluemap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/info.bytes_per_pixel);
datum=(int) colors;
if (map.blue != (int *) NULL)
map.blue=(int *) RelinquishMagickMemory(map.blue);
map.blue=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
sizeof(*map.blue));
if (map.blue == (int *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
(void) memset(map.blue,0,MagickMax(colors,65536)*
sizeof(*map.blue));
p=data;
for (i=0; i < (ssize_t) colors; i++)
{
if (image->endian == MSBEndian)
index=(unsigned short) ((*p << 8) | *(p+1));
else
index=(unsigned short) (*p | (*(p+1) << 8));
map.blue[i]=(int) index;
p+=2;
}
break;
}
default:
break;
}
break;
}
case 0x2050:
{
switch (element)
{
case 0x0020:
{
if ((data != (unsigned char *) NULL) &&
(strncmp((char *) data,"INVERSE",7) == 0))
info.polarity=MagickTrue;
break;
}
default:
break;
}
break;
}
default:
break;
}
if (data != (unsigned char *) NULL)
{
char
*attribute;
for (i=0; dicom_info[i].description != (char *) NULL; i++)
if ((group == dicom_info[i].group) &&
(element == dicom_info[i].element))
break;
if (dicom_info[i].description != (char *) NULL)
{
attribute=AcquireString("dcm:");
(void) ConcatenateString(&attribute,dicom_info[i].description);
for (i=0; i < (ssize_t) MagickMax(length,4); i++)
if (isprint((int) data[i]) == 0)
break;
if ((i == (ssize_t) length) || (length > 4))
{
(void) SubstituteString(&attribute," ","");
(void) SetImageProperty(image,attribute,(char *) data,
exception);
}
attribute=DestroyString(attribute);
}
}
if (image_info->verbose != MagickFalse)
{
if (data == (unsigned char *) NULL)
(void) FormatLocaleFile(stdout,"%d\n",datum);
else
{
/*
Display group data.
*/
for (i=0; i < (ssize_t) MagickMax(length,4); i++)
if (isprint((int) data[i]) == 0)
break;
if ((i != (ssize_t) length) && (length <= 4))
{
ssize_t
j;
datum=0;
for (j=(ssize_t) length-1; j >= 0; j--)
datum=(256*datum+data[j]);
(void) FormatLocaleFile(stdout,"%d",datum);
}
else
for (i=0; i < (ssize_t) length; i++)
if (isprint((int) data[i]) != 0)
(void) FormatLocaleFile(stdout,"%c",data[i]);
else
(void) FormatLocaleFile(stdout,"%c",'.');
(void) FormatLocaleFile(stdout,"\n");
}
}
if (data != (unsigned char *) NULL)
data=(unsigned char *) RelinquishMagickMemory(data);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
group=0xfffc;
break;
}
}
if ((group == 0xfffc) && (element == 0xfffc))
{
Image
*last;
last=RemoveLastImageFromList(&image);
if (last != (Image *) NULL)
last=DestroyImage(last);
break;
}
if ((info.width == 0) || (info.height == 0))
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
image->columns=info.width;
image->rows=info.height;
if (info.signed_data == 0xffff)
info.signed_data=(size_t) (info.significant_bits == 16 ? 1 : 0);
if ((image->compression == JPEGCompression) ||
(image->compression == JPEG2000Compression))
{
Image
*images;
ImageInfo
*read_info;
int
c;
/*
Read offset table.
*/
for (i=0; i < (ssize_t) stream_info->remaining; i++)
if (ReadBlobByte(image) == EOF)
break;
(void) (((ssize_t) ReadBlobLSBShort(image) << 16) |
ReadBlobLSBShort(image));
length=(size_t) ReadBlobLSBLong(image);
if (length > (size_t) GetBlobSize(image))
ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile")
stream_info->offset_count=length >> 2;
if (stream_info->offset_count != 0)
{
if (stream_info->offsets != (ssize_t *) NULL)
stream_info->offsets=(ssize_t *) RelinquishMagickMemory(
stream_info->offsets);
stream_info->offsets=(ssize_t *) AcquireQuantumMemory(
stream_info->offset_count,sizeof(*stream_info->offsets));
if (stream_info->offsets == (ssize_t *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]=(ssize_t) ReadBlobLSBSignedLong(image);
offset=TellBlob(image);
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]+=offset;
}
/*
Handle non-native image formats.
*/
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
images=NewImageList();
for (scene=0; scene < (ssize_t) number_scenes; scene++)
{
char
filename[MagickPathExtent];
const char
*property;
FILE
*file;
Image
*jpeg_image;
int
unique_file;
unsigned int
tag;
tag=((unsigned int) ReadBlobLSBShort(image) << 16) |
ReadBlobLSBShort(image);
length=(size_t) ReadBlobLSBLong(image);
if (length > (size_t) GetBlobSize(image))
{
images=DestroyImageList(images);
read_info=DestroyImageInfo(read_info);
ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile")
}
if (EOFBlob(image) != MagickFalse)
{
status=MagickFalse;
break;
}
if (tag == 0xFFFEE0DD)
break; /* sequence delimiter tag */
if (tag != 0xFFFEE000)
{
status=MagickFalse;
break;
}
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if (file == (FILE *) NULL)
{
(void) RelinquishUniqueFileResource(filename);
ThrowFileException(exception,FileOpenError,
"UnableToCreateTemporaryFile",filename);
break;
}
for (c=EOF; length != 0; length--)
{
c=ReadBlobByte(image);
if (c == EOF)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
if (fputc(c,file) != c)
break;
}
(void) fclose(file);
if (c == EOF)
break;
(void) FormatLocaleString(read_info->filename,MagickPathExtent,
"jpeg:%s",filename);
if (image->compression == JPEG2000Compression)
(void) FormatLocaleString(read_info->filename,MagickPathExtent,
"j2k:%s",filename);
jpeg_image=ReadImage(read_info,exception);
if (jpeg_image != (Image *) NULL)
{
ResetImagePropertyIterator(image);
property=GetNextImageProperty(image);
while (property != (const char *) NULL)
{
(void) SetImageProperty(jpeg_image,property,
GetImageProperty(image,property,exception),exception);
property=GetNextImageProperty(image);
}
AppendImageToList(&images,jpeg_image);
}
(void) RelinquishUniqueFileResource(filename);
}
read_info=DestroyImageInfo(read_info);
image=DestroyImageList(image);
if ((status == MagickFalse) && (exception->severity < ErrorException))
{
images=DestroyImageList(images);
ThrowDCMException(CorruptImageError,"CorruptImageError")
}
else
RelinquishDCMMemory(&info,&map,stream_info,stack,data);
return(GetFirstImageInList(images));
}
if (info.depth != (1UL*MAGICKCORE_QUANTUM_DEPTH))
{
QuantumAny
range;
/*
Compute pixel scaling table.
*/
length=(size_t) (GetQuantumRange(info.depth)+1);
if (length > (size_t) GetBlobSize(image))
ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile")
if (info.scale != (Quantum *) NULL)
info.scale=(Quantum *) RelinquishMagickMemory(info.scale);
info.scale_size=MagickMax(length,MaxMap)+1;
info.scale=(Quantum *) AcquireQuantumMemory(info.scale_size,
sizeof(*info.scale));
if (info.scale == (Quantum *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
(void) memset(info.scale,0,(MagickMax(length,MaxMap)+1)*
sizeof(*info.scale));
range=GetQuantumRange(info.depth);
for (i=0; i <= (ssize_t) GetQuantumRange(info.depth); i++)
info.scale[i]=ScaleAnyToQuantum((size_t) i,range);
}
if (image->compression == RLECompression)
{
unsigned int
tag;
/*
Read RLE offset table.
*/
for (i=0; i < (ssize_t) stream_info->remaining; i++)
{
int
c;
c=ReadBlobByte(image);
if (c == EOF)
break;
}
tag=((unsigned int) ReadBlobLSBShort(image) << 16) |
ReadBlobLSBShort(image);
(void) tag;
length=(size_t) ReadBlobLSBLong(image);
if (length > (size_t) GetBlobSize(image))
ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile")
stream_info->offset_count=length >> 2;
if (stream_info->offset_count != 0)
{
if (stream_info->offsets != (ssize_t *) NULL)
stream_info->offsets=(ssize_t *)
RelinquishMagickMemory(stream_info->offsets);
stream_info->offsets=(ssize_t *) AcquireQuantumMemory(
stream_info->offset_count,sizeof(*stream_info->offsets));
if (stream_info->offsets == (ssize_t *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
{
offset=(MagickOffsetType) ReadBlobLSBSignedLong(image);
if (offset > (MagickOffsetType) GetBlobSize(image))
ThrowDCMException(CorruptImageError,
"InsufficientImageDataInFile")
stream_info->offsets[i]=(ssize_t) offset;
if (EOFBlob(image) != MagickFalse)
break;
}
offset=TellBlob(image)+8;
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]+=offset;
}
}
for (scene=0; scene < (ssize_t) number_scenes; scene++)
{
image->columns=info.width;
image->rows=info.height;
image->depth=info.depth;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
break;
image->colorspace=RGBColorspace;
(void) SetImageBackgroundColor(image,exception);
if ((image->colormap == (PixelInfo *) NULL) &&
(info.samples_per_pixel == 1))
{
int
index;
size_t
one;
one=1;
if (colors == 0)
colors=one << info.depth;
if (AcquireImageColormap(image,colors,exception) == MagickFalse)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed")
if (map.red != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=map.red[i];
if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
(index <= (int) info.max_value))
index=(int) info.scale[index];
image->colormap[i].red=(MagickRealType) index;
}
if (map.green != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=map.green[i];
if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
(index <= (int) info.max_value))
index=(int) info.scale[index];
image->colormap[i].green=(MagickRealType) index;
}
if (map.blue != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=map.blue[i];
if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
(index <= (int) info.max_value))
index=(int) info.scale[index];
image->colormap[i].blue=(MagickRealType) index;
}
if (map.gray != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=map.gray[i];
if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
(index <= (int) info.max_value))
index=(int) info.scale[index];
image->colormap[i].red=(MagickRealType) index;
image->colormap[i].green=(MagickRealType) index;
image->colormap[i].blue=(MagickRealType) index;
}
}
if (image->compression == RLECompression)
{
unsigned int
tag;
/*
Read RLE segment table.
*/
for (i=0; i < (ssize_t) stream_info->remaining; i++)
{
int
c;
c=ReadBlobByte(image);
if (c == EOF)
break;
}
tag=((unsigned int) ReadBlobLSBShort(image) << 16) |
ReadBlobLSBShort(image);
stream_info->remaining=(size_t) ReadBlobLSBLong(image);
if ((tag != 0xFFFEE000) || (stream_info->remaining <= 64) ||
(EOFBlob(image) != MagickFalse))
{
if (stream_info->offsets != (ssize_t *) NULL)
stream_info->offsets=(ssize_t *)
RelinquishMagickMemory(stream_info->offsets);
ThrowDCMException(CorruptImageError,"ImproperImageHeader")
}
stream_info->count=0;
stream_info->segment_count=ReadBlobLSBLong(image);
for (i=0; i < 15; i++)
stream_info->segments[i]=(ssize_t) ReadBlobLSBSignedLong(image);
stream_info->remaining-=64;
if (stream_info->segment_count > 1)
{
info.bytes_per_pixel=1;
info.depth=8;
if (stream_info->offset_count > 0)
(void) SeekBlob(image,(MagickOffsetType)
stream_info->offsets[0]+stream_info->segments[0],SEEK_SET);
}
}
if ((info.samples_per_pixel > 1) && (image->interlace == PlaneInterlace))
{
Quantum
*q;
ssize_t
x,
y;
/*
Convert Planar RGB DCM Medical image to pixel packets.
*/
for (i=0; i < (ssize_t) info.samples_per_pixel; i++)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
switch ((int) i)
{
case 0:
{
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
case 1:
{
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
case 2:
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
case 3:
{
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
default:
break;
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
}
}
else
{
const char
*option;
/*
Convert DCM Medical image to pixel packets.
*/
option=GetImageOption(image_info,"dcm:display-range");
if (option != (const char *) NULL)
{
if (LocaleCompare(option,"reset") == 0)
info.window_width=0;
}
option=GetImageOption(image_info,"dcm:window");
if (option != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(option,&geometry_info);
if (flags & RhoValue)
info.window_center=geometry_info.rho;
if (flags & SigmaValue)
info.window_width=geometry_info.sigma;
info.rescale=MagickTrue;
}
option=GetImageOption(image_info,"dcm:rescale");
if (option != (char *) NULL)
info.rescale=IsStringTrue(option);
if ((info.window_center != 0) && (info.window_width == 0))
info.window_width=info.window_center;
status=ReadDCMPixels(image,&info,stream_info,MagickTrue,exception);
if ((status != MagickFalse) && (stream_info->segment_count > 1))
{
if (stream_info->offset_count > 0)
(void) SeekBlob(image,(MagickOffsetType)
stream_info->offsets[0]+stream_info->segments[1],SEEK_SET);
(void) ReadDCMPixels(image,&info,stream_info,MagickFalse,
exception);
}
}
if (IdentifyImageCoderGray(image,exception) != MagickFalse)
(void) SetImageColorspace(image,GRAYColorspace,exception);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (scene < (ssize_t) (number_scenes-1))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
}
if (TellBlob(image) < (MagickOffsetType) GetBlobSize(image))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
}
/*
Free resources.
*/
RelinquishDCMMemory(&info,&map,stream_info,stack,data);
if (image == (Image *) NULL)
return(image);
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
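The frame parameters gathered above (rescale slope/intercept from elements 0x1052/0x1053, window center/width from 0x1050/0x1051) parameterize the pixel mapping applied later in ReadDCMPixels. A minimal standalone sketch of the kind of mapping those values drive, using a simplified linear window (the DICOM standard's exact formula offsets the center by 0.5 and uses width-1; all names here are illustrative, not ImageMagick API):

#include <algorithm>
#include <cstdio>

// Map a stored pixel value through the modality rescale and a window/level.
double windowed(double stored, double slope, double intercept,
                double center, double width) {
  double v = stored * slope + intercept;   // rescale: stored -> real units
  double lo = center - width / 2.0;        // simplified window bounds
  double hi = center + width / 2.0;
  v = std::clamp(v, lo, hi);
  return (v - lo) / (hi - lo);             // normalized display intensity
}

int main() {
  // e.g. a CT soft-tissue window: slope 1, intercept -1024, center 40, width 400
  std::printf("%.3f\n", windowed(1064.0, 1.0, -1024.0, 40.0, 400.0));  // 0.500
  return 0;
}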
| null | null | 197,068 | 48091781169139670330064371852708553305 | 1,309 | Coders: https://github.com/ImageMagick/ImageMagick/issues/4947 | other |
| tensorflow | 15691e456c7dc9bd6be203b09765b063bf4a380c | 1 |
inline void BinaryBroadcastFiveFold(const ArithmeticParams& unswitched_params,
const RuntimeShape& unswitched_input1_shape,
const T* unswitched_input1_data,
const RuntimeShape& unswitched_input2_shape,
const T* unswitched_input2_data,
const RuntimeShape& output_shape,
T* output_data, ElementwiseF elementwise_f,
ScalarBroadcastF scalar_broadcast_f) {
ArithmeticParams switched_params = unswitched_params;
switched_params.input1_offset = unswitched_params.input2_offset;
switched_params.input1_multiplier = unswitched_params.input2_multiplier;
switched_params.input1_shift = unswitched_params.input2_shift;
switched_params.input2_offset = unswitched_params.input1_offset;
switched_params.input2_multiplier = unswitched_params.input1_multiplier;
switched_params.input2_shift = unswitched_params.input1_shift;
const bool use_unswitched =
unswitched_params.broadcast_category ==
tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
const ArithmeticParams& params =
use_unswitched ? unswitched_params : switched_params;
const T* input1_data =
use_unswitched ? unswitched_input1_data : unswitched_input2_data;
const T* input2_data =
use_unswitched ? unswitched_input2_data : unswitched_input1_data;
// Fivefold nested loops. The second input resets its position for each
// iteration of the second loop. The first input resets its position at the
// beginning of the fourth loop. The innermost loop is an elementwise add of
// sections of the arrays.
T* output_data_ptr = output_data;
const T* input1_data_ptr = input1_data;
const T* input2_data_reset = input2_data;
// In the fivefold pattern, y0, y2 and y4 are not broadcast, and so shared
// between input shapes. y3 for input 1 is always broadcast, and so the
// dimension there is 1, whereas optionally y1 might be broadcast for
// input 2. Put another way, input1.shape.FlatSize = y0 * y1 * y2 * y4,
// input2.shape.FlatSize = y0 * y2 * y3 * y4.
int y0 = params.broadcast_shape[0];
int y1 = params.broadcast_shape[1];
int y2 = params.broadcast_shape[2];
int y3 = params.broadcast_shape[3];
int y4 = params.broadcast_shape[4];
if (y4 > 1) {
// General fivefold pattern, with y4 > 1 so there is a non-broadcast inner
// dimension.
for (int i0 = 0; i0 < y0; ++i0) {
const T* input2_data_ptr = nullptr;
for (int i1 = 0; i1 < y1; ++i1) {
input2_data_ptr = input2_data_reset;
for (int i2 = 0; i2 < y2; ++i2) {
for (int i3 = 0; i3 < y3; ++i3) {
elementwise_f(y4, params, input1_data_ptr, input2_data_ptr,
output_data_ptr);
input2_data_ptr += y4;
output_data_ptr += y4;
}
// We have broadcast y4 of input1 data y3 times, and now move on.
input1_data_ptr += y4;
}
}
// We have broadcast y2*y3*y4 of input2 data y1 times, and now move on.
input2_data_reset = input2_data_ptr;
}
} else {
// Special case of y4 == 1, in which the innermost loop is a single
// element and can be combined with the next (y3) as an inner broadcast.
//
// Note that this handles the case of pure scalar broadcast when
// y0 == y1 == y2 == 1. With low overhead it handles cases such as scalar
// broadcast with batch (as y2 > 1).
//
// NOTE The process is the same as the above general case except
// simplified for y4 == 1 and the loop over y3 is contained within the
// AddScalarBroadcast function.
for (int i0 = 0; i0 < y0; ++i0) {
const T* input2_data_ptr = nullptr;
for (int i1 = 0; i1 < y1; ++i1) {
input2_data_ptr = input2_data_reset;
for (int i2 = 0; i2 < y2; ++i2) {
scalar_broadcast_f(y3, params, *input1_data_ptr, input2_data_ptr,
output_data_ptr);
input2_data_ptr += y3;
output_data_ptr += y3;
input1_data_ptr += 1;
}
}
input2_data_reset = input2_data_ptr;
}
}
}
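A minimal standalone sketch of the fivefold walk above with concrete dimensions, to make the reset points visible: input1 holds y0*y1*y2*y4 elements (y3 is broadcast for it), input2 holds y0*y2*y3*y4 (y1 broadcast), and the output holds the full y0*y1*y2*y3*y4. Names and shapes here are illustrative, not the TFLite API:

#include <cstdio>
#include <vector>

int main() {
  const int y0 = 2, y1 = 3, y2 = 2, y3 = 4, y4 = 5;
  std::vector<float> in1(y0 * y1 * y2 * y4);        // y3 broadcast for input 1
  std::vector<float> in2(y0 * y2 * y3 * y4);        // y1 broadcast for input 2
  std::vector<float> out(y0 * y1 * y2 * y3 * y4);
  for (size_t i = 0; i < in1.size(); ++i) in1[i] = float(i);
  for (size_t i = 0; i < in2.size(); ++i) in2[i] = 0.5f * float(i);

  const float* p1 = in1.data();
  const float* in2_reset = in2.data();
  float* po = out.data();
  for (int i0 = 0; i0 < y0; ++i0) {
    const float* p2 = nullptr;
    for (int i1 = 0; i1 < y1; ++i1) {
      p2 = in2_reset;                 // input 2 rewinds for every i1
      for (int i2 = 0; i2 < y2; ++i2) {
        for (int i3 = 0; i3 < y3; ++i3) {
          for (int k = 0; k < y4; ++k) po[k] = p1[k] + p2[k];  // elementwise_f
          p2 += y4;
          po += y4;
        }
        p1 += y4;                     // input 1 advances once per y3 block
      }
    }
    in2_reset = p2;                   // input 2 resumes at the next outer slice
  }
  std::printf("out[0]=%g out[last]=%g\n", out.front(), out.back());
  return 0;
}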
| null | null | 197,095 | 339735487769984087035979800063883830183 | 92 | Prevent dereferencing of null pointers in TFLite's `add.cc`. PiperOrigin-RevId: 387244946 Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9 | other |
| tensorflow | bc9c546ce7015c57c2f15c168b3d9201de679a1d | 1 |
void Compute(OpKernelContext* c) override {
core::RefCountPtr<Var> v;
OP_REQUIRES_OK(c, LookupResource(c, HandleFromInput(c, 0), &v));
OP_REQUIRES_OK(c, EnsureSparseVariableAccess<Device, T>(c, v.get()));
// NOTE: We hold the lock for the whole gather operation instead
// of increasing the reference count of v->tensor() to avoid a
// situation where a write to the same variable will see a
// reference count greater than one and make a copy of the
// (potentially very large) tensor buffer.
tf_shared_lock ml(*v->mu());
const Tensor& params = *v->tensor();
const Tensor& indices = c->input(1);
OP_REQUIRES(
c, TensorShapeUtils::IsVectorOrHigher(params.shape()),
errors::InvalidArgument("params must be at least 1 dimensional"));
// Check that we have enough index space
const int64_t N = indices.NumElements();
OP_REQUIRES(
c, params.dim_size(0) <= std::numeric_limits<Index>::max(),
errors::InvalidArgument("params.shape[0] too large for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", params.dim_size(0), " > ",
std::numeric_limits<Index>::max()));
// The result shape is params.shape[:batch_dims] +
// indices.shape[batch_dims:] + params.shape[batch_dims+1:].
TensorShape result_shape;
for (int i = 0; i < batch_dims_; ++i) {
result_shape.AddDim(params.dim_size(i));
}
for (int i = batch_dims_; i < indices.dims(); ++i) {
result_shape.AddDim(indices.dim_size(i));
}
for (int i = batch_dims_ + 1; i < params.dims(); ++i) {
result_shape.AddDim(params.dim_size(i));
}
Tensor* out = nullptr;
Tensor tmp;
if (params.dtype() == DT_VARIANT) {
tmp = Tensor(DT_VARIANT, result_shape);
c->set_output(0, tmp);
out = &tmp;
} else {
OP_REQUIRES_OK(c, c->allocate_output(0, result_shape, &out));
}
if (N > 0) {
Tensor tmp_indices;
// Points to the original or updated (if batch_dims is set) indices.
const Tensor* op_indices = &indices;
if (batch_dims_ > 0) {
OP_REQUIRES_OK(c, c->allocate_temp(indices.dtype(), indices.shape(),
&tmp_indices));
functor::DenseUpdate<Device, Index, ASSIGN> copy_functor;
copy_functor(c->eigen_device<Device>(), tmp_indices.flat<Index>(),
indices.flat<Index>());
AddBatchOffsets(&tmp_indices, params);
op_indices = &tmp_indices;
}
int64_t gather_dim_size = 1;
for (int idx = 0; idx <= batch_dims_; ++idx) {
gather_dim_size *= params.dim_size(idx);
}
int64_t inner_size = 1;
for (int i = batch_dims_ + 1; i < params.dims(); ++i) {
inner_size *= params.dim_size(i);
}
auto params_flat = params.shaped<T, 3>({1, gather_dim_size, inner_size});
const auto indices_flat = op_indices->flat<Index>();
auto out_flat = out->shaped<T, 3>({1, N, out->NumElements() / N});
functor::GatherFunctor<Device, T, Index> functor;
int64_t bad_i = functor(c, params_flat, indices_flat, out_flat);
OP_REQUIRES(
c, bad_i < 0,
errors::InvalidArgument(
"indices", SliceDebugString(indices.shape(), bad_i), " = ",
indices_flat(bad_i), " is not in [0, ", params.dim_size(0), ")"));
}
}
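A minimal standalone sketch of the result-shape rule quoted in the comment above, params.shape[:batch_dims] + indices.shape[batch_dims:] + params.shape[batch_dims+1:], evaluated for concrete dimensions (illustrative, not the TensorFlow API):

#include <cstdio>
#include <vector>

int main() {
  std::vector<long> params = {4, 5, 6};   // e.g. a [4,5,6] variable
  std::vector<long> indices = {4, 3};     // a batch of 4, with 3 lookups each
  int batch_dims = 1;

  std::vector<long> result;
  for (int i = 0; i < batch_dims; ++i) result.push_back(params[i]);
  for (size_t i = batch_dims; i < indices.size(); ++i)
    result.push_back(indices[i]);
  for (size_t i = batch_dims + 1; i < params.size(); ++i)
    result.push_back(params[i]);

  for (long d : result) std::printf("%ld ", d);  // prints: 4 3 6
  std::printf("\n");
  return 0;
}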
| null | null | 197,110 | 294361236653337986849576392701719119932 | 86 | Prevent heap oob access in `resource_variable_ops.cc`. PiperOrigin-RevId: 387936433 Change-Id: I9e71ddaa8dbd51ec6afbf163a6b3b591f193b4f6 | other |
| tinyexr | a685e3332f61cd4e59324bf3f669d36973d64270 | 1 |
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
if ((data_width < 0) || (data_height < 0)) {
if (err) {
std::stringstream ss;
ss << "Invalid data width or data height: " << data_width << ", "
<< data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if ((data_width > threshold) || (data_height > threshold)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
exr_header->tile_size_x, exr_header->tile_size_y);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
if (err) {
(*err) += "Insufficient data size.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
if (tile_coordinates[3] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
if (err) {
(*err) += "Insufficient data length.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, tile_coordinates[0],
tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list);
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
exr_image->num_tiles = static_cast<int>(num_tiles);
}
} else { // scanline format
// Don't allow too large image(256GB * pixel_data_size or more). Workaround
// for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown = sizeof(void*) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown ) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example `data_len
// < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window[1];
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
} // omp parallel
}
if (invalid_data) {
if (err) {
std::stringstream ss;
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
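A minimal standalone sketch of the scanline-chunk header parse above: 4 bytes of line number, 4 bytes of payload length, then the payload, with the same bounds tests applied before anything is trusted (little-endian host assumed; names are illustrative, not the tinyexr API):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

bool parse_scanline_chunk(const unsigned char *head, size_t size,
                          size_t offset, int *line_no, int *data_len) {
  if (offset + sizeof(int) * 2 > size) return false;  // header out of range
  std::memcpy(line_no, head + offset, sizeof(int));
  std::memcpy(data_len, head + offset + 4, sizeof(int));
  size_t remaining = size - (offset + sizeof(int) * 2);
  if (*data_len <= 0 || size_t(*data_len) > remaining)
    return false;                                     // payload out of range
  return true;  // payload begins at head + offset + 8
}

int main() {
  unsigned char buf[16] = {0};
  int l = 7, n = 8;
  std::memcpy(buf, &l, 4);
  std::memcpy(buf + 4, &n, 4);
  int line_no, data_len;
  std::printf("%d\n",
              parse_scanline_chunk(buf, sizeof buf, 0, &line_no, &data_len));
  return 0;
}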
| null | null | 197,111 | 270881477601119047421793150845336761803 | 277 | Make line_no with too large value(2**20) invalid. Fixes #124 | other |
| mruby | f72315575f78a9a773adbce0ee7d3ec33434cb76 | 1 |
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val)
{
int idx;
int type = nint(tree->car);
switch (type) {
case NODE_GVAR:
case NODE_ARG:
case NODE_LVAR:
case NODE_IVAR:
case NODE_CVAR:
case NODE_CONST:
case NODE_NIL:
case NODE_MASGN:
if (rhs) {
codegen(s, rhs, VAL);
pop();
sp = cursp();
}
break;
case NODE_COLON2:
case NODE_CALL:
case NODE_SCALL:
/* keep evaluation order */
break;
case NODE_NVAR:
codegen_error(s, "Can't assign to numbered parameter");
break;
default:
codegen_error(s, "unknown lhs");
break;
}
tree = tree->cdr;
switch (type) {
case NODE_GVAR:
gen_setxv(s, OP_SETGV, sp, nsym(tree), val);
break;
case NODE_ARG:
case NODE_LVAR:
idx = lv_idx(s, nsym(tree));
if (idx > 0) {
if (idx != sp) {
gen_move(s, idx, sp, val);
}
break;
}
else { /* upvar */
gen_setupvar(s, sp, nsym(tree));
}
break;
case NODE_IVAR:
gen_setxv(s, OP_SETIV, sp, nsym(tree), val);
break;
case NODE_CVAR:
gen_setxv(s, OP_SETCV, sp, nsym(tree), val);
break;
case NODE_CONST:
gen_setxv(s, OP_SETCONST, sp, nsym(tree), val);
break;
case NODE_COLON2:
if (sp) {
gen_move(s, cursp(), sp, 0);
}
sp = cursp();
push();
codegen(s, tree->car, VAL);
if (rhs) {
codegen(s, rhs, VAL); pop();
gen_move(s, sp, cursp(), 0);
}
pop_n(2);
idx = new_sym(s, nsym(tree->cdr));
genop_2(s, OP_SETMCNST, sp, idx);
break;
case NODE_CALL:
case NODE_SCALL:
{
int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0;
mrb_sym mid = nsym(tree->cdr->car);
top = cursp();
if (val || sp == cursp()) {
push(); /* room for retval */
}
call = cursp();
if (!tree->car) {
noself = 1;
push();
}
else {
codegen(s, tree->car, VAL); /* receiver */
}
if (safe) {
int recv = cursp()-1;
gen_move(s, cursp(), recv, 1);
skip = genjmp2_0(s, OP_JMPNIL, cursp(), val);
}
tree = tree->cdr->cdr->car;
if (tree) {
if (tree->car) { /* positional arguments */
n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14);
if (n < 0) { /* variable length */
n = 15;
push();
}
}
if (tree->cdr->car) { /* keyword arguments */
if (n == 14) {
pop_n(n);
genop_2(s, OP_ARRAY, cursp(), n);
push();
n = 15;
}
gen_hash(s, tree->cdr->car->cdr, VAL, 0);
if (n < 14) {
n++;
}
else {
pop_n(2);
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
push();
}
}
if (rhs) {
codegen(s, rhs, VAL);
pop();
}
else {
gen_move(s, cursp(), sp, 0);
}
if (val) {
gen_move(s, top, cursp(), 1);
}
if (n < 14) {
n++;
}
else {
pop();
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
s->sp = call;
if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) {
genop_1(s, OP_SETIDX, cursp());
}
else {
genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n);
}
if (safe) {
dispatch(s, skip);
}
s->sp = top;
}
break;
case NODE_MASGN:
gen_vmassignment(s, tree->car, sp, val);
break;
/* splat without assignment */
case NODE_NIL:
break;
default:
codegen_error(s, "unknown lhs");
break;
}
if (val) push();
}
| null | null | 197,128 | 258543986172995812660215951144295239857 | 174 | codegen.c: fix an argument generation bug in array assignment. | other |
| linux | d6f5e358452479fa8a773b5c6ccc9e4ec5a20880 | 1 |
smb2_ioctl_query_info(const unsigned int xid,
struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
__le16 *path, int is_dir,
unsigned long p)
{
struct iqi_vars *vars;
struct smb_rqst *rqst;
struct kvec *rsp_iov;
struct cifs_ses *ses = tcon->ses;
struct TCP_Server_Info *server = cifs_pick_channel(ses);
char __user *arg = (char __user *)p;
struct smb_query_info qi;
struct smb_query_info __user *pqi;
int rc = 0;
int flags = CIFS_CP_CREATE_CLOSE_OP;
struct smb2_query_info_rsp *qi_rsp = NULL;
struct smb2_ioctl_rsp *io_rsp = NULL;
void *buffer = NULL;
int resp_buftype[3];
struct cifs_open_parms oparms;
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_fid fid;
unsigned int size[2];
void *data[2];
int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
if (vars == NULL)
return -ENOMEM;
rqst = &vars->rqst[0];
rsp_iov = &vars->rsp_iov[0];
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
goto e_fault;
if (qi.output_buffer_length > 1024) {
kfree(vars);
return -EINVAL;
}
if (!ses || !server) {
kfree(vars);
return -EIO;
}
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
if (qi.output_buffer_length) {
buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
if (IS_ERR(buffer)) {
kfree(vars);
return PTR_ERR(buffer);
}
}
/* Open */
rqst[0].rq_iov = &vars->open_iov[0];
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
memset(&oparms, 0, sizeof(oparms));
oparms.tcon = tcon;
oparms.disposition = FILE_OPEN;
oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.fid = &fid;
oparms.reconnect = false;
if (qi.flags & PASSTHRU_FSCTL) {
switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
break;
case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
oparms.desired_access = GENERIC_ALL;
break;
case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
oparms.desired_access = GENERIC_READ;
break;
case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
oparms.desired_access = GENERIC_WRITE;
break;
}
} else if (qi.flags & PASSTHRU_SET_INFO) {
oparms.desired_access = GENERIC_WRITE;
} else {
oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
}
rc = SMB2_open_init(tcon, server,
&rqst[0], &oplock, &oparms, path);
if (rc)
goto iqinf_exit;
smb2_set_next_command(tcon, &rqst[0]);
/* Query */
if (qi.flags & PASSTHRU_FSCTL) {
/* Can eventually relax perm check since server enforces too */
if (!capable(CAP_SYS_ADMIN))
rc = -EPERM;
else {
rqst[1].rq_iov = &vars->io_iov[0];
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
rc = SMB2_ioctl_init(tcon, server,
&rqst[1],
COMPOUND_FID, COMPOUND_FID,
qi.info_type, true, buffer,
qi.output_buffer_length,
CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
}
} else if (qi.flags == PASSTHRU_SET_INFO) {
/* Can eventually relax perm check since server enforces too */
if (!capable(CAP_SYS_ADMIN))
rc = -EPERM;
else if (qi.output_buffer_length < 8)
rc = -EINVAL;
else {
rqst[1].rq_iov = &vars->si_iov[0];
rqst[1].rq_nvec = 1;
/* MS-FSCC 2.4.13 FileEndOfFileInformation */
size[0] = 8;
data[0] = buffer;
rc = SMB2_set_info_init(tcon, server,
&rqst[1],
COMPOUND_FID, COMPOUND_FID,
current->tgid,
FILE_END_OF_FILE_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
}
} else if (qi.flags == PASSTHRU_QUERY_INFO) {
rqst[1].rq_iov = &vars->qi_iov[0];
rqst[1].rq_nvec = 1;
rc = SMB2_query_info_init(tcon, server,
&rqst[1], COMPOUND_FID,
COMPOUND_FID, qi.file_info_class,
qi.info_type, qi.additional_information,
qi.input_buffer_length,
qi.output_buffer_length, buffer);
} else { /* unknown flags */
cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
qi.flags);
rc = -EINVAL;
}
if (rc)
goto iqinf_exit;
smb2_set_next_command(tcon, &rqst[1]);
smb2_set_related(&rqst[1]);
/* Close */
rqst[2].rq_iov = &vars->close_iov[0];
rqst[2].rq_nvec = 1;
rc = SMB2_close_init(tcon, server,
&rqst[2], COMPOUND_FID, COMPOUND_FID, false);
if (rc)
goto iqinf_exit;
smb2_set_related(&rqst[2]);
rc = compound_send_recv(xid, ses, server,
flags, 3, rqst,
resp_buftype, rsp_iov);
if (rc)
goto iqinf_exit;
/* No need to bump num_remote_opens since handle immediately closed */
if (qi.flags & PASSTHRU_FSCTL) {
pqi = (struct smb_query_info __user *)arg;
io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
if (qi.input_buffer_length > 0 &&
le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
> rsp_iov[1].iov_len)
goto e_fault;
if (copy_to_user(&pqi->input_buffer_length,
&qi.input_buffer_length,
sizeof(qi.input_buffer_length)))
goto e_fault;
if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
(const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
qi.input_buffer_length))
goto e_fault;
} else {
pqi = (struct smb_query_info __user *)arg;
qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
if (copy_to_user(&pqi->input_buffer_length,
&qi.input_buffer_length,
sizeof(qi.input_buffer_length)))
goto e_fault;
if (copy_to_user(pqi + 1, qi_rsp->Buffer,
qi.input_buffer_length))
goto e_fault;
}
iqinf_exit:
cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
kfree(vars);
kfree(buffer);
return rc;
e_fault:
rc = -EFAULT;
goto iqinf_exit;
}
| null | null | 197,129 | 137651590128181455340751167140279772289 | 223 |
cifs: fix NULL ptr dereference in smb2_ioctl_query_info()
When calling smb2_ioctl_query_info() with invalid
smb_query_info::flags, a NULL ptr dereference is triggered when trying
to kfree() uninitialised rqst[n].rq_iov array.
This also fixes leaked paths that are created in SMB2_open_init()
which required SMB2_open_free() to properly free them.
Here is a small C reproducer that triggers it
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#define die(s) perror(s), exit(1)
#define QUERY_INFO 0xc018cf07
int main(int argc, char *argv[])
{
        int fd;

        if (argc < 2)
                exit(1);

        fd = open(argv[1], O_RDONLY);
        if (fd == -1)
                die("open");

        if (ioctl(fd, QUERY_INFO, (uint32_t[]) { 0, 0, 0, 4, 0, 0}) == -1)
                die("ioctl");

        close(fd);
        return 0;
}
mount.cifs //srv/share /mnt -o ...
gcc repro.c && ./a.out /mnt/f0
[ 1832.124468] CIFS: VFS: \\w22-dc.zelda.test\test Invalid passthru query flags: 0x4
[ 1832.125043] general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN NOPTI
[ 1832.125764] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
[ 1832.126241] CPU: 3 PID: 1133 Comm: a.out Not tainted 5.17.0-rc8 #2
[ 1832.126630] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-0-g2dd4b9b-rebuilt.opensuse.org 04/01/2014
[ 1832.127322] RIP: 0010:smb2_ioctl_query_info+0x7a3/0xe30 [cifs]
[ 1832.127749] Code: 00 00 00 fc ff df 48 c1 ea 03 80 3c 02 00 0f 85 6c 05 00 00 48 b8 00 00 00 00 00 fc ff df 4d 8b 74 24 28 4c 89 f2 48 c1 ea 03 <80> 3c 02 00 0f 85 cb 04 00 00 49 8b 3e e8 bb fc fa ff 48 89 da 48
[ 1832.128911] RSP: 0018:ffffc90000957b08 EFLAGS: 00010256
[ 1832.129243] RAX: dffffc0000000000 RBX: ffff888117e9b850 RCX: ffffffffa020580d
[ 1832.129691] RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffffffffa043a2c0
[ 1832.130137] RBP: ffff888117e9b878 R08: 0000000000000001 R09: 0000000000000003
[ 1832.130585] R10: fffffbfff4087458 R11: 0000000000000001 R12: ffff888117e9b800
[ 1832.131037] R13: 00000000ffffffea R14: 0000000000000000 R15: ffff888117e9b8a8
[ 1832.131485] FS: 00007fcee9900740(0000) GS:ffff888151a00000(0000) knlGS:0000000000000000
[ 1832.131993] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 1832.132354] CR2: 00007fcee9a1ef5e CR3: 0000000114cd2000 CR4: 0000000000350ee0
[ 1832.132801] Call Trace:
[ 1832.132962] <TASK>
[ 1832.133104] ? smb2_query_reparse_tag+0x890/0x890 [cifs]
[ 1832.133489] ? cifs_mapchar+0x460/0x460 [cifs]
[ 1832.133822] ? rcu_read_lock_sched_held+0x3f/0x70
[ 1832.134125] ? cifs_strndup_to_utf16+0x15b/0x250 [cifs]
[ 1832.134502] ? lock_downgrade+0x6f0/0x6f0
[ 1832.134760] ? cifs_convert_path_to_utf16+0x198/0x220 [cifs]
[ 1832.135170] ? smb2_check_message+0x1080/0x1080 [cifs]
[ 1832.135545] cifs_ioctl+0x1577/0x3320 [cifs]
[ 1832.135864] ? lock_downgrade+0x6f0/0x6f0
[ 1832.136125] ? cifs_readdir+0x2e60/0x2e60 [cifs]
[ 1832.136468] ? rcu_read_lock_sched_held+0x3f/0x70
[ 1832.136769] ? __rseq_handle_notify_resume+0x80b/0xbe0
[ 1832.137096] ? __up_read+0x192/0x710
[ 1832.137327] ? __ia32_sys_rseq+0xf0/0xf0
[ 1832.137578] ? __x64_sys_openat+0x11f/0x1d0
[ 1832.137850] __x64_sys_ioctl+0x127/0x190
[ 1832.138103] do_syscall_64+0x3b/0x90
[ 1832.138378] entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 1832.138702] RIP: 0033:0x7fcee9a253df
[ 1832.138937] Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <41> 89 c0 3d 00 f0 ff ff 77 1f 48 8b 44 24 18 64 48 2b 04 25 28 00
[ 1832.140107] RSP: 002b:00007ffeba94a8a0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[ 1832.140606] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fcee9a253df
[ 1832.141058] RDX: 00007ffeba94a910 RSI: 00000000c018cf07 RDI: 0000000000000003
[ 1832.141503] RBP: 00007ffeba94a930 R08: 00007fcee9b24db0 R09: 00007fcee9b45c4e
[ 1832.141948] R10: 00007fcee9918d40 R11: 0000000000000246 R12: 00007ffeba94aa48
[ 1832.142396] R13: 0000000000401176 R14: 0000000000403df8 R15: 00007fcee9b78000
[ 1832.142851] </TASK>
[ 1832.142994] Modules linked in: cifs cifs_arc4 cifs_md4 bpf_preload [last unloaded: cifs]
Cc: [email protected]
Signed-off-by: Paulo Alcantara (SUSE) <[email protected]>
Signed-off-by: Steve French <[email protected]>
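The reproducer above shows how to trigger the crash; the underlying pattern is a zero-allocated request array whose rq_iov is dereferenced on an error path before it was ever assigned. A minimal standalone sketch with hypothetical stand-in structs (not the kernel API, and not necessarily the shape of the actual patch):

#include <cstddef>
#include <cstdlib>

struct kvec { void *iov_base; size_t iov_len; };
struct smb_rqst { kvec *rq_iov; int rq_nvec; };

static void buf_release(void *p) { std::free(p); }  // stand-in for cifs helper

int main() {
  smb_rqst rqst[3] = {};   // mirrors kzalloc: every rq_iov starts out NULL
  // An invalid-flags bailout runs cleanup before rqst[1]/rqst[2] were set
  // up, so rqst[1].rq_iov[0].iov_base would dereference NULL. One defensive
  // shape is to guard each release:
  for (int i = 0; i < 3; i++)
    if (rqst[i].rq_iov != nullptr)
      buf_release(rqst[i].rq_iov[0].iov_base);
  return 0;
}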
| other |
| linux | 505d9dcb0f7ddf9d075e729523a33d38642ae680 | 1 |
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_aes_engine *aes = &cmd->u.aes;
struct ccp_dm_workarea key, ctx, final_wa, tag;
struct ccp_data src, dst;
struct ccp_data aad;
struct ccp_op op;
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
unsigned int ilen;
bool in_place = true; /* Default value */
__be64 *final;
int ret;
struct scatterlist *p_inp, sg_inp[2];
struct scatterlist *p_tag, sg_tag[2];
struct scatterlist *p_outp, sg_outp[2];
struct scatterlist *p_aad;
if (!aes->iv)
return -EINVAL;
if (!((aes->key_len == AES_KEYSIZE_128) ||
(aes->key_len == AES_KEYSIZE_192) ||
(aes->key_len == AES_KEYSIZE_256)))
return -EINVAL;
if (!aes->key) /* Gotta have a key SGL */
return -EINVAL;
/* Zero defaults to 16 bytes, the maximum size */
authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
switch (authsize) {
case 16:
case 15:
case 14:
case 13:
case 12:
case 8:
case 4:
break;
default:
return -EINVAL;
}
/* First, decompose the source buffer into AAD & PT,
* and the destination buffer into AAD, CT & tag, or
* the input into CT & tag.
* It is expected that the input and output SGs will
* be valid, even if the AAD and input lengths are 0.
*/
p_aad = aes->src;
p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
ilen = aes->src_len;
p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
} else {
/* Input length for decryption includes tag */
ilen = aes->src_len - authsize;
p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
}
jobid = CCP_NEW_JOBID(cmd_q->ccp);
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
op.jobid = jobid;
op.sb_key = cmd_q->sb_key; /* Pre-allocated */
op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
op.init = 1;
op.u.aes.type = aes->type;
/* Copy the key to the LSB */
ret = ccp_init_dm_workarea(&key, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
if (ret)
goto e_key;
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_key;
}
/* Copy the context (IV) to the LSB.
* There is an assumption here that the IV is 96 bits in length, plus
* a nonce of 32 bits. If no IV is present, use a zeroed buffer.
*/
ret = ccp_init_dm_workarea(&ctx, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
}
op.init = 1;
if (aes->aad_len > 0) {
/* Step 1: Run a GHASH over the Additional Authenticated Data */
ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
AES_BLOCK_SIZE,
DMA_TO_DEVICE);
if (ret)
goto e_ctx;
op.u.aes.mode = CCP_AES_MODE_GHASH;
op.u.aes.action = CCP_AES_GHASHAAD;
while (aad.sg_wa.bytes_left) {
ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_aad;
}
ccp_process_data(&aad, NULL, &op);
op.init = 0;
}
}
op.u.aes.mode = CCP_AES_MODE_GCTR;
op.u.aes.action = aes->action;
if (ilen > 0) {
/* Step 2: Run a GCTR over the plaintext */
in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
AES_BLOCK_SIZE,
in_place ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
if (ret)
goto e_ctx;
if (in_place) {
dst = src;
} else {
ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
AES_BLOCK_SIZE, DMA_FROM_DEVICE);
if (ret)
goto e_src;
}
op.soc = 0;
op.eom = 0;
op.init = 1;
while (src.sg_wa.bytes_left) {
ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
if (!src.sg_wa.bytes_left) {
unsigned int nbytes = ilen % AES_BLOCK_SIZE;
if (nbytes) {
op.eom = 1;
op.u.aes.size = (nbytes * 8) - 1;
}
}
ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
ccp_process_data(&src, &dst, &op);
op.init = 0;
}
}
/* Step 3: Update the IV portion of the context with the original IV */
ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_dst;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
/* Step 4: Concatenate the lengths of the AAD and source, and
* hash that 16 byte buffer.
*/
ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (ret)
goto e_dst;
final = (__be64 *)final_wa.address;
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
op.jobid = jobid;
op.sb_key = cmd_q->sb_key; /* Pre-allocated */
op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
op.init = 1;
op.u.aes.type = aes->type;
op.u.aes.mode = CCP_AES_MODE_GHASH;
op.u.aes.action = CCP_AES_GHASHFINAL;
op.src.type = CCP_MEMTYPE_SYSTEM;
op.src.u.dma.address = final_wa.dma.address;
op.src.u.dma.length = AES_BLOCK_SIZE;
op.dst.type = CCP_MEMTYPE_SYSTEM;
op.dst.u.dma.address = final_wa.dma.address;
op.dst.u.dma.length = AES_BLOCK_SIZE;
op.eom = 1;
op.u.aes.size = 0;
ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret)
goto e_dst;
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
} else {
/* Does this ciphered tag match the input? */
ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
goto e_tag;
ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
if (ret)
goto e_tag;
ret = crypto_memneq(tag.address, final_wa.address,
authsize) ? -EBADMSG : 0;
ccp_dm_free(&tag);
}
e_tag:
ccp_dm_free(&final_wa);
e_dst:
if (ilen > 0 && !in_place)
ccp_free_data(&dst, cmd_q);
e_src:
if (ilen > 0)
ccp_free_data(&src, cmd_q);
e_aad:
if (aes->aad_len)
ccp_free_data(&aad, cmd_q);
e_ctx:
ccp_dm_free(&ctx);
e_key:
ccp_dm_free(&key);
return ret;
}
| null | null | 197,135 | 106903489194098505880029645783744122226 | 280 |
crypto: ccp - fix resource leaks in ccp_run_aes_gcm_cmd()
There are three bugs in this code:
1) If we ccp_init_data() fails for &src then we need to free aad.
Use goto e_aad instead of goto e_ctx.
2) The label to free the &final_wa was named incorrectly as "e_tag" but
it should have been "e_final_wa". One error path leaked &final_wa.
3) The &tag was leaked on one error path. In that case, I added a free
before the goto because the resource was local to that block.
Fixes: 36cf515b9bbe ("crypto: ccp - Enable support for AES GCM on v5 CCPs")
Reported-by: "minihanshen(沈明航)" <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: John Allen <[email protected]>
Tested-by: John Allen <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
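All three bugs are instances of one pattern: a goto-unwind cleanup ladder in which jumping to the wrong label skips a live resource. A minimal standalone sketch with malloc stand-ins for the DM work areas (illustrative only):

#include <cstdio>
#include <cstdlib>

int run(void) {
  void *key = nullptr, *ctx = nullptr, *aad = nullptr;
  int ret = -1;

  key = std::malloc(32);
  if (!key) return -1;
  ctx = std::malloc(32);
  if (!ctx) goto e_key;
  aad = std::malloc(32);
  if (!aad) goto e_ctx;

  // Work that can fail after aad exists must unwind through a label that
  // frees aad; jumping straight to e_ctx would leak it (bug 1 above).
  ret = 0;

  std::free(aad);
e_ctx:
  std::free(ctx);
e_key:
  std::free(key);
  return ret;
}

int main() { std::printf("%d\n", run()); return 0; }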
| other |
| tensorflow | 6da6620efad397c85493b8f8667b821403516708 | 1 |
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
const Tensor& input_min_range = ctx->input(1);
const Tensor& input_max_range = ctx->input(2);
int num_slices = 1;
if (axis_ > -1) {
num_slices = input.dim_size(axis_);
}
const TensorShape& minmax_shape = ctx->input(1).shape();
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
Tensor* output_min_tensor = nullptr;
Tensor* output_max_tensor = nullptr;
if (num_slices == 1) {
OP_REQUIRES_OK(ctx, ctx->allocate_output(1, {}, &output_min_tensor));
OP_REQUIRES_OK(ctx, ctx->allocate_output(2, {}, &output_max_tensor));
const float min_range = input_min_range.template flat<float>()(0);
const float max_range = input_max_range.template flat<float>()(0);
QuantizeTensor(ctx, input, min_range, max_range, output,
output_min_tensor, output_max_tensor);
return;
}
OP_REQUIRES(ctx, mode_ != QUANTIZE_MODE_MIN_FIRST,
errors::Unimplemented("MIN_FIRST mode is not implemented for "
"Quantize with axis != -1."));
OP_REQUIRES_OK(ctx,
ctx->allocate_output(1, minmax_shape, &output_min_tensor));
OP_REQUIRES_OK(ctx,
ctx->allocate_output(2, minmax_shape, &output_max_tensor));
auto input_tensor =
input.template flat_inner_outer_dims<float, 3>(axis_ - 1);
int64_t pre_dim = 1, post_dim = 1;
for (int i = 0; i < axis_; ++i) {
pre_dim *= output->dim_size(i);
}
for (int i = axis_ + 1; i < output->dims(); ++i) {
post_dim *= output->dim_size(i);
}
auto output_tensor = output->template bit_casted_shaped<T, 3>(
{pre_dim, num_slices, post_dim});
auto min_ranges = input_min_range.template vec<float>();
auto max_ranges = input_max_range.template vec<float>();
for (int i = 0; i < num_slices; ++i) {
QuantizeSlice(ctx->eigen_device<Device>(), ctx,
input_tensor.template chip<1>(i), min_ranges(i),
max_ranges(i), output_tensor.template chip<1>(i),
&output_min_tensor->flat<float>()(i),
&output_max_tensor->flat<float>()(i));
}
}
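A minimal standalone sketch of the pre_dim/num_slices/post_dim factorization above, which views an N-D tensor as [pre, slices, post] around the quantization axis (illustrative, not the TensorFlow API):

#include <cstdio>
#include <vector>

int main() {
  std::vector<long> dims = {2, 3, 4, 5};  // e.g. a [2,3,4,5] tensor
  int axis = 2;

  long pre = 1, post = 1;
  for (int i = 0; i < axis; ++i) pre *= dims[i];
  for (size_t i = axis + 1; i < dims.size(); ++i) post *= dims[i];
  long slices = dims[axis];

  std::printf("pre=%ld slices=%ld post=%ld\n", pre, slices, post);  // 6 4 5
  return 0;
}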
| null | null | 197,142 | 302073335954432238978657804240958936922 | 56 | Secure tf.raw_ops.QuantizeV2. Validate size and shape of min_range and max_range. Ensure axis is within input dims limits. PiperOrigin-RevId: 387232799 Change-Id: I36975281f7b5758e9e31a8dcc73fe610ef456318 | other |
| v4l2loopback | 64a216af4c09c9ba9326057d7e78994271827eff | 1 |
static void vidioc_fill_name(char *buf, int len, int nr)
{
if (card_label[nr] != NULL) {
snprintf(buf, len, card_label[nr]);
} else {
snprintf(buf, len, "Dummy video device (0x%04X)", nr);
}
}
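Passing card_label[nr] directly as the snprintf() format argument is the CWE-134 pattern named in the commit message: user-controlled text gets parsed for conversion specifiers. A minimal standalone sketch of the hazard and of the explicit "%s" form the fix adopts:

#include <cstdio>

int main() {
  char buf[64];
  const char *label = "cam %s %n";              // attacker-chosen card label
  // std::snprintf(buf, sizeof buf, label);     // BAD: label parsed as format
  std::snprintf(buf, sizeof buf, "%s", label);  // GOOD: label is plain data
  std::printf("%s\n", buf);
  return 0;
}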
| null | null | 197,144 | 226841089703403806195698755509426251551 | 8 | add explicit format specifier to printf() invocations. CWE-134 | other |
| Crow | 79eec91f00b5fb7df890d4e84b2f048b0841e044 | 1 |
inline int qs_parse(char* qs, char* qs_kv[], int qs_kv_size, bool parse_url = true)
{
int i, j;
char * substr_ptr;
for(i=0; i<qs_kv_size; i++) qs_kv[i] = NULL;
// find the beginning of the k/v substrings or the fragment
substr_ptr = parse_url ? qs + strcspn(qs, "?#") : qs;
if (parse_url)
{
if (substr_ptr[0] != '\0')
substr_ptr++;
else
return 0; // no query or fragment
}
i=0;
while(i<qs_kv_size)
{
qs_kv[i] = substr_ptr;
j = strcspn(substr_ptr, "&");
if ( substr_ptr[j] == '\0' ) { break; }
substr_ptr += j + 1;
i++;
}
i++; // x &'s -> means x iterations of this loop -> means *x+1* k/v pairs
// we only decode the values in place, the keys could have '='s in them
// which will hose our ability to distinguish keys from values later
for(j=0; j<i; j++)
{
substr_ptr = qs_kv[j] + strcspn(qs_kv[j], "=&#");
if ( substr_ptr[0] == '&' || substr_ptr[0] == '\0') // blank value: skip decoding
substr_ptr[0] = '\0';
else
qs_decode(++substr_ptr);
}
#ifdef _qsSORTING
// TODO: qsort qs_kv, using qs_strncmp() for the comparison
#endif
return i;
}
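A minimal standalone sketch of the splitting loop above on a concrete query string, showing what the trailing increment converts: x '&' separators seen means x+1 k/v chunks, which is only safe when the loop ended via the break rather than by exhausting qs_kv_size (names are illustrative, not the Crow API):

#include <cstdio>
#include <cstring>

int main() {
  char qs[] = "a=1&b=22&c=333";
  char *kv[8] = {};
  int i = 0;
  char *p = qs;
  while (i < 8) {
    kv[i] = p;
    size_t j = std::strcspn(p, "&");
    if (p[j] == '\0') break;   // last pair: stop before incrementing
    p += j + 1;
    i++;
  }
  i++;  // x '&'s -> x+1 pairs (safe here because the break above fired)
  for (int k = 0; k < i; ++k) {
    size_t j = std::strcspn(kv[k], "&");
    std::printf("pair %d: %.*s\n", k, int(j), kv[k]);  // a=1, b=22, c=333
  }
  return 0;
}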
| null | null | 197,149 | 118915588844303374042925937713372011380 | 45 | Fixed issue where an index in qs_parse is incorrectly incremented beyond the maximum possible value | other |
| njs | 86c2c8270240d0a7a1bc4757fd2010fd989e8037 | 1 |
njs_typed_array_alloc(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
njs_bool_t zeroing, njs_object_type_t type)
{
double num;
int64_t i, length;
uint32_t element_size;
uint64_t size, offset;
njs_int_t ret;
njs_value_t *value, prop;
njs_array_t *src_array;
njs_typed_array_t *array, *src_tarray;
njs_array_buffer_t *buffer;
size = 0;
length = 0;
offset = 0;
buffer = NULL;
src_array = NULL;
src_tarray = NULL;
element_size = njs_typed_array_element_size(type);
value = njs_arg(args, nargs, 0);
if (njs_is_array_buffer(value)) {
buffer = njs_array_buffer(value);
ret = njs_value_to_index(vm, njs_arg(args, nargs, 1), &offset);
if (njs_slow_path(ret != NJS_OK)) {
return NULL;
}
if (njs_slow_path((offset % element_size) != 0)) {
njs_range_error(vm, "start offset must be multiple of %uD",
element_size);
return NULL;
}
if (njs_is_defined(njs_arg(args, nargs, 2))) {
ret = njs_value_to_index(vm, njs_argument(args, 2), &size);
if (njs_slow_path(ret != NJS_OK)) {
return NULL;
}
}
if (njs_slow_path(njs_is_detached_buffer(buffer))) {
njs_type_error(vm, "detached buffer");
return NULL;
}
if (njs_is_defined(njs_arg(args, nargs, 2))) {
ret = njs_value_to_index(vm, njs_argument(args, 2), &size);
if (njs_slow_path(ret != NJS_OK)) {
return NULL;
}
size *= element_size;
if (njs_slow_path((offset + size) > buffer->size)) {
njs_range_error(vm, "Invalid typed array length: %uL", size);
return NULL;
}
} else {
if (njs_slow_path((buffer->size % element_size) != 0)) {
njs_range_error(vm, "byteLength of buffer must be "
"multiple of %uD", element_size);
return NULL;
}
if (offset > buffer->size) {
njs_range_error(vm, "byteOffset %uL is outside the bound of "
"the buffer", offset);
return NULL;
}
size = buffer->size - offset;
}
} else if (njs_is_typed_array(value)) {
src_tarray = njs_typed_array(value);
if (njs_slow_path(njs_is_detached_buffer(src_tarray->buffer))) {
njs_type_error(vm, "detached buffer");
return NULL;
}
size = (uint64_t) njs_typed_array_length(src_tarray) * element_size;
} else if (njs_is_object(value)) {
if (njs_is_fast_array(value)) {
src_array = njs_array(value);
length = src_array->length;
} else {
ret = njs_object_length(vm, value, &length);
if (njs_slow_path(ret == NJS_ERROR)) {
return NULL;
}
}
size = length * element_size;
} else {
ret = njs_value_to_index(vm, value, &size);
if (njs_slow_path(ret != NJS_OK)) {
return NULL;
}
size *= element_size;
}
if (buffer == NULL) {
buffer = njs_array_buffer_alloc(vm, size, zeroing);
if (njs_slow_path(buffer == NULL)) {
return NULL;
}
}
array = njs_mp_zalloc(vm->mem_pool, sizeof(njs_typed_array_t));
if (njs_slow_path(array == NULL)) {
goto memory_error;
}
array->buffer = buffer;
array->offset = offset / element_size;
array->byte_length = size;
array->type = type;
if (src_tarray != NULL) {
if (type != src_tarray->type) {
length = njs_typed_array_length(src_tarray);
for (i = 0; i < length; i++) {
njs_typed_array_prop_set(vm, array, i,
njs_typed_array_prop(src_tarray, i));
}
} else {
memcpy(&buffer->u.u8[0], &src_tarray->buffer->u.u8[0], size);
}
} else if (src_array != NULL) {
for (i = 0; i < length; i++) {
ret = njs_value_to_number(vm, &src_array->start[i], &num);
if (njs_slow_path(ret == NJS_ERROR)) {
return NULL;
}
if (ret == NJS_OK) {
njs_typed_array_prop_set(vm, array, i, num);
}
}
} else if (!njs_is_array_buffer(value) && njs_is_object(value)) {
for (i = 0; i < length; i++) {
ret = njs_value_property_i64(vm, value, i, &prop);
if (njs_slow_path(ret == NJS_ERROR)) {
return NULL;
}
num = NAN;
if (ret == NJS_OK) {
ret = njs_value_to_number(vm, &prop, &num);
if (njs_slow_path(ret == NJS_ERROR)) {
return NULL;
}
}
njs_typed_array_prop_set(vm, array, i, num);
}
}
njs_lvlhsh_init(&array->object.hash);
njs_lvlhsh_init(&array->object.shared_hash);
array->object.__proto__ = &vm->prototypes[type].object;
array->object.type = NJS_TYPED_ARRAY;
array->object.extensible = 1;
array->object.fast_array = 1;
return array;
memory_error:
njs_memory_error(vm);
return NULL;
}
| null | null | 197,173
|
106598782395441949366641061237863909526
| 188
|
Fixed typed-array ctor when source array is changed while iterating.
Previously, the function used an optimization for ordinary arrays with no
gaps (so-called fast arrays). For a fast array, the code took elements
directly from the internal flat C array. That direct pointer may become
invalid as a side effect of a custom valueOf() method on an element.
The fix is to eliminate the micro-optimization that uses direct
pointers.
The problem is similar to the 9578cc729205 (0.7.2) commit.
This closes #523 issue on Github.
|
other
|
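The hazard the commit describes is generic: a pointer cached into a container's backing store can dangle once a per-element callback mutates the container. A self-contained C++ analogue (not njs code) of the safe pattern the fix adopts, re-indexing on every iteration instead of caching a raw pointer:

#include <functional>
#include <vector>

double sum_with_side_effects(std::vector<double> &src,
                             const std::function<void(std::vector<double>&)> &value_of)
{
    double sum = 0;
    for (size_t i = 0; i < src.size(); i++) {
        value_of(src);   // may reallocate src, like a custom valueOf()
        sum += src[i];   // safe: fresh lookup, no cached src.data() pointer
    }
    return sum;
}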
mongo
|
07b8851825836911265e909d6842d4586832f9bb
| 1
|
DocumentSource::GetNextResult DocumentSourceGroup::initialize() {
const size_t numAccumulators = _accumulatedFields.size();
// Barring any pausing, this loop exhausts 'pSource' and populates '_groups'.
GetNextResult input = pSource->getNext();
for (; input.isAdvanced(); input = pSource->getNext()) {
if (_memoryTracker.shouldSpillWithAttemptToSaveMemory([this]() { return freeMemory(); })) {
_sortedFiles.push_back(spill());
}
// We release the result document here so that it does not outlive the end of this loop
// iteration. Not releasing could lead to an array copy when this group follows an unwind.
auto rootDocument = input.releaseDocument();
Value id = computeId(rootDocument);
// Look for the _id value in the map. If it's not there, add a new entry with a blank
// accumulator. This is done in a somewhat odd way in order to avoid hashing 'id' and
// looking it up in '_groups' multiple times.
const size_t oldSize = _groups->size();
vector<intrusive_ptr<AccumulatorState>>& group = (*_groups)[id];
const bool inserted = _groups->size() != oldSize;
if (inserted) {
_memoryTracker.memoryUsageBytes += id.getApproximateSize();
// Initialize and add the accumulators
Value expandedId = expandId(id);
Document idDoc =
expandedId.getType() == BSONType::Object ? expandedId.getDocument() : Document();
group.reserve(numAccumulators);
for (auto&& accumulatedField : _accumulatedFields) {
auto accum = accumulatedField.makeAccumulator();
Value initializerValue =
accumulatedField.expr.initializer->evaluate(idDoc, &pExpCtx->variables);
accum->startNewGroup(initializerValue);
group.push_back(accum);
}
} else {
for (auto&& groupObj : group) {
// subtract old mem usage. New usage added back after processing.
_memoryTracker.memoryUsageBytes -= groupObj->memUsageForSorter();
}
}
/* tickle all the accumulators for the group we found */
dassert(numAccumulators == group.size());
for (size_t i = 0; i < numAccumulators; i++) {
group[i]->process(
_accumulatedFields[i].expr.argument->evaluate(rootDocument, &pExpCtx->variables),
_doingMerge);
_memoryTracker.memoryUsageBytes += group[i]->memUsageForSorter();
}
if (kDebugBuild && !storageGlobalParams.readOnly) {
// In debug mode, spill every time we have a duplicate id to stress merge logic.
if (!inserted && // is a dup
!pExpCtx->inMongos && // can't spill to disk in mongos
!_memoryTracker.allowDiskUse && // don't change behavior when testing external sort
_sortedFiles.size() < 20) { // don't open too many FDs
_sortedFiles.push_back(spill());
}
}
}
switch (input.getStatus()) {
case DocumentSource::GetNextResult::ReturnStatus::kAdvanced: {
MONGO_UNREACHABLE; // We consumed all advances above.
}
case DocumentSource::GetNextResult::ReturnStatus::kPauseExecution: {
return input; // Propagate pause.
}
case DocumentSource::GetNextResult::ReturnStatus::kEOF: {
// Do any final steps necessary to prepare to output results.
if (!_sortedFiles.empty()) {
_spilled = true;
if (!_groups->empty()) {
_sortedFiles.push_back(spill());
}
// We won't be using groups again so free its memory.
_groups = pExpCtx->getValueComparator().makeUnorderedValueMap<Accumulators>();
_sorterIterator.reset(Sorter<Value, Value>::Iterator::merge(
_sortedFiles, SortOptions(), SorterComparator(pExpCtx->getValueComparator())));
// prepare current to accumulate data
_currentAccumulators.reserve(numAccumulators);
for (auto&& accumulatedField : _accumulatedFields) {
_currentAccumulators.push_back(accumulatedField.makeAccumulator());
}
verify(_sorterIterator->more()); // we put data in, we should get something out.
_firstPartOfNextGroup = _sorterIterator->next();
} else {
// start the group iterator
groupsIterator = _groups->begin();
}
// This must happen last so that, unless control gets here, we will re-enter
// initialization after getting a GetNextResult::ResultState::kPauseExecution.
_initialized = true;
return input;
}
}
MONGO_UNREACHABLE;
}
| null | null | 197,179
|
299639630867010953869050856327872645652
| 110
|
SERVER-60218-44: SERVER-60218 add initialize helper function for document_source_group (cherry picked from commit 867f52afbb79bc00e35c70f8e0681b7d602f97b2)
|
other
|
FFmpeg
|
9ffa49496d1aae4cbbb387aac28a9e061a6ab0a6
| 1
|
static int adts_decode_extradata(AVFormatContext *s, ADTSContext *adts, const uint8_t *buf, int size)
{
GetBitContext gb;
PutBitContext pb;
MPEG4AudioConfig m4ac;
int off;
init_get_bits(&gb, buf, size * 8);
off = avpriv_mpeg4audio_get_config2(&m4ac, buf, size, 1, s);
if (off < 0)
return off;
skip_bits_long(&gb, off);
adts->objecttype = m4ac.object_type - 1;
adts->sample_rate_index = m4ac.sampling_index;
adts->channel_conf = m4ac.chan_config;
if (adts->objecttype > 3U) {
av_log(s, AV_LOG_ERROR, "MPEG-4 AOT %d is not allowed in ADTS\n", adts->objecttype+1);
return AVERROR_INVALIDDATA;
}
if (adts->sample_rate_index == 15) {
av_log(s, AV_LOG_ERROR, "Escape sample rate index illegal in ADTS\n");
return AVERROR_INVALIDDATA;
}
if (get_bits(&gb, 1)) {
av_log(s, AV_LOG_ERROR, "960/120 MDCT window is not allowed in ADTS\n");
return AVERROR_INVALIDDATA;
}
if (get_bits(&gb, 1)) {
av_log(s, AV_LOG_ERROR, "Scalable configurations are not allowed in ADTS\n");
return AVERROR_INVALIDDATA;
}
if (get_bits(&gb, 1)) {
av_log(s, AV_LOG_ERROR, "Extension flag is not allowed in ADTS\n");
return AVERROR_INVALIDDATA;
}
if (!adts->channel_conf) {
init_put_bits(&pb, adts->pce_data, MAX_PCE_SIZE);
put_bits(&pb, 3, 5); //ID_PCE
adts->pce_size = (ff_copy_pce_data(&pb, &gb) + 3) / 8;
flush_put_bits(&pb);
}
adts->write_adts = 1;
return 0;
}
| null | null | 197,185
|
27134847231468977506511316145171824130
| 48
|
avformat/adtsenc: return value check for init_get_bits in adts_decode_extradata
Because the second argument to init_get_bits (buf) can be crafted, a return-value check for this function call is necessary.
'buf' is part of 'AVPacket pkt'.
replace init_get_bits with init_get_bits8.
Signed-off-by: Michael Niedermayer <[email protected]>
|
other
|
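A hedged fragment of the change the message describes, assuming FFmpeg's GetBitContext API; init_get_bits8() takes a byte count, validates it, and returns a negative AVERROR on failure:

int ret;

ret = init_get_bits8(&gb, buf, size);  /* byte-sized, checked initializer */
if (ret < 0)
    return ret;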
ompl
|
abb4fadcb4e4fe4c9cf41e5e7706143a66948eb7
| 1
|
ompl::geometric::VFRRT::Motion *ompl::geometric::VFRRT::extendTree(Motion *m, base::State *rstate,
const Eigen::VectorXd &v)
{
base::State *newState = si_->allocState();
si_->copyState(newState, m->state);
double d = si_->distance(m->state, rstate);
if (d > maxDistance_)
d = maxDistance_;
const base::StateSpacePtr &space = si_->getStateSpace();
for (unsigned int i = 0; i < vfdim_; i++)
*space->getValueAddressAtIndex(newState, i) += d * v[i];
if (!v.hasNaN() && si_->checkMotion(m->state, newState))
{
auto *motion = new Motion(si_);
motion->state = newState;
motion->parent = m;
updateExplorationEfficiency(motion);
nn_->add(motion);
return motion;
}
else
{
si_->freeState(newState);
inefficientCount_++;
return nullptr;
}
}
| null | null | 197,192
|
5968465012659553519179307063249642027
| 29
|
fix memory leak in VFRRT. closes #839
|
other
|
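The leak pattern (allocate a state, then return early without freeing it) can also be ruled out structurally. An illustrative, self-contained C++ sketch using RAII; this is an analogue, not OMPL's actual patch, which simply frees newState on the reject path:

#include <memory>

double *extend(bool motion_valid)
{
    // allocState() analogue: owned by the unique_ptr until released.
    std::unique_ptr<double[]> state(new double[3]);
    if (!motion_valid)
        return nullptr;          // state freed automatically: no leak
    return state.release();      // ownership transferred to the caller
}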
systemd-stable
|
b00674347337b7531c92fdb65590ab253bb57538
| 1
|
int unit_name_path_escape(const char *f, char **ret) {
char *p, *s;
assert(f);
assert(ret);
p = strdupa(f);
if (!p)
return -ENOMEM;
path_simplify(p, false);
if (empty_or_root(p))
s = strdup("-");
else {
if (!path_is_normalized(p))
return -EINVAL;
/* Truncate trailing slashes */
delete_trailing_chars(p, "/");
/* Truncate leading slashes */
p = skip_leading_chars(p, "/");
s = unit_name_escape(p);
}
if (!s)
return -ENOMEM;
*ret = s;
return 0;
}
| null | null | 197,197
|
185422117112056266441612678792600336702
| 32
|
basic/unit-name: do not use strdupa() on a path
The path may have unbounded length, for example through a fuse mount.
CVE-2021-33910: attacked controlled alloca() leads to crash in systemd and
ultimately a kernel panic. Systemd parses the content of /proc/self/mountinfo
and each mountpoint is passed to mount_setup_unit(), which calls
unit_name_path_escape() underneath. A local attacker who is able to mount a
filesystem with a very long path can crash systemd and the whole system.
https://bugzilla.redhat.com/show_bug.cgi?id=1970887
The resulting string length is bounded by UNIT_NAME_MAX, which is 256. But we
can't easily check the length after simplification before doing the
simplification, which in turns uses a copy of the string we can write to.
So we can't reject paths that are too long before doing the duplication.
Hence the most obvious solution is to switch back to strdup(), as before
7410616cd9dbbec97cf98d75324da5cda2b2f7a2.
(cherry picked from commit 441e0115646d54f080e5c3bb0ba477c892861ab9)
(cherry picked from commit 764b74113e36ac5219a4b82a05f311b5a92136ce)
(cherry picked from commit 4a1c5f34bd3e1daed4490e9d97918e504d19733b)
|
other
|
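The fix swaps stack duplication for heap duplication, since alloca()-backed strdupa() has no failure mode for unbounded input. A self-contained sketch of the pattern (escape_path is a hypothetical stand-in for unit_name_path_escape):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

int escape_path(const char *f, char **ret)
{
    char *p = strdup(f);      /* heap: an overlong path fails cleanly ... */
    if (!p)
        return -ENOMEM;       /* ... instead of overflowing the stack */
    /* ... simplify and escape p as in the original function ... */
    *ret = p;
    return 0;
}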
njs
|
ab1702c7af9959366a5ddc4a75b4357d4e9ebdc1
| 1
|
njs_module_path(njs_vm_t *vm, const njs_str_t *dir, njs_module_info_t *info)
{
char *p;
size_t length;
njs_bool_t trail;
char src[NJS_MAX_PATH + 1];
trail = 0;
length = info->name.length;
if (dir != NULL) {
length = dir->length;
if (length == 0) {
return NJS_DECLINED;
}
trail = (dir->start[dir->length - 1] != '/');
if (trail) {
length++;
}
}
if (njs_slow_path(length > NJS_MAX_PATH)) {
return NJS_ERROR;
}
p = &src[0];
if (dir != NULL) {
p = (char *) njs_cpymem(p, dir->start, dir->length);
if (trail) {
*p++ = '/';
}
}
p = (char *) njs_cpymem(p, info->name.start, info->name.length);
*p = '\0';
p = realpath(&src[0], &info->path[0]);
if (p == NULL) {
return NJS_DECLINED;
}
info->fd = open(&info->path[0], O_RDONLY);
if (info->fd < 0) {
return NJS_DECLINED;
}
info->file.start = (u_char *) &info->path[0];
info->file.length = njs_strlen(info->file.start);
return NJS_OK;
}
| null | null | 197,223
|
61727745120585183166403187671025402809
| 57
|
Fixed typo while calculating module path length.
The issue was introduced in 77c398f26d7e (not released yet).
|
other
|
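The typo: with a directory prefix present, `length = dir->length` overwrites the name length, so the NJS_MAX_PATH bound check misses the bytes that njs_cpymem() later appends. A hedged sketch of the corrected accounting, reusing the names from the function above:

length = info->name.length;

if (dir != NULL) {
    if (dir->length == 0) {
        return NJS_DECLINED;
    }

    trail = (dir->start[dir->length - 1] != '/');

    /* add the prefix instead of overwriting the name length */
    length += dir->length + trail;
}

if (njs_slow_path(length > NJS_MAX_PATH)) {
    return NJS_ERROR;    /* now bounds dir + optional '/' + name */
}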
subversion
|
e1b615840932fb46aefe1cd90d2115720af4600e
| 1
|
synchronized_authz_initialize(void *baton, apr_pool_t *pool)
{
#if APR_HAS_THREADS
svn_boolean_t multi_threaded = TRUE;
#else
svn_boolean_t multi_threaded = FALSE;
#endif
SVN_ERR(svn_object_pool__create(&authz_pool, multi_threaded, pool));
SVN_ERR(svn_object_pool__create(&filtered_pool, multi_threaded, pool));
return SVN_NO_ERROR;
}
| null | null | 197,238
|
179608994110544319539454664346224659273
| 13
|
Fix issue #4880 "Use-after-free of object-pools when used as httpd module"
Ensure that we initialize authz again if the pool which our authz
caches depend on is cleared. Apache HTTPD may run pre/post config
hooks multiple times and clear its global configuration pool which
our authz caching pools depend on.
Reported-by: Thomas Weißschuh (thomas {at} t-8ch dot de)
Thomas has also confirmed that this patch fixes the problem.
* subversion/libsvn_repos/authz.c
(deinit_authz): New pool cleanup handler which resets authz initialization
in case the parent pool of our authz caches is cleared.
(synchronized_authz_initialize): Register new pool cleanup handler.
git-svn-id: https://svn.apache.org/repos/asf/subversion/trunk@1894734 13f79535-47bb-0310-9956-ffa450edef68
|
other
|
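A hedged sketch of the cleanup handler the log message describes, assuming APR's pool-cleanup API (apr_pool_cleanup_register); the actual patch also resets the svn_atomic initialization state:

/* Reset the file-local caches when the parent pool is cleared, so a later
 * pre/post-config run reinitializes authz instead of using freed pools. */
static apr_status_t
deinit_authz(void *data)
{
  authz_pool = NULL;
  filtered_pool = NULL;
  return APR_SUCCESS;
}

/* ... registered inside synchronized_authz_initialize(): */
apr_pool_cleanup_register(pool, NULL, deinit_authz, apr_pool_cleanup_null);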
tensorflow
|
203214568f5bc237603dbab6e1fd389f1572f5c9
| 1
|
void Compute(OpKernelContext* ctx) override {
try {
const Tensor& input = ctx->input(kInputTensorIndex);
const Tensor& input_min_vec = ctx->input(kInputMinVecIndex);
float* input_min_vec_data = (float*)const_cast<void*>(
static_cast<const void*>(input_min_vec.flat<float>().data()));
const Tensor& input_max_vec = ctx->input(kInputMaxVecIndex);
float* input_max_vec_data = (float*)const_cast<void*>(
static_cast<const void*>(input_max_vec.flat<float>().data()));
const Tensor& input_requested_min = ctx->input(this->kRequestMinIndex);
const float input_requested_min_float =
input_requested_min.flat<float>()(0);
const Tensor& input_requested_max = ctx->input(this->kRequestMaxIndex);
const float input_requested_max_float =
input_requested_max.flat<float>()(0);
size_t depth = input_min_vec.NumElements();
OP_REQUIRES(
ctx, input.dims() == 4,
errors::InvalidArgument("Current RequantizePerChannel operator"
"supports 4D tensors only."));
OP_REQUIRES(
ctx, input_min_vec.dim_size(0) == depth,
errors::InvalidArgument("input_min has incorrect size, expected ",
depth, " was ", input_min_vec.dim_size(0)));
OP_REQUIRES(
ctx, input_max_vec.dim_size(0) == depth,
errors::InvalidArgument("input_max has incorrect size, expected ",
depth, " was ", input_max_vec.dim_size(0)));
if (out_type_ == DT_QINT8) DCHECK(input_requested_min_float < 0.0f);
const float factor = (out_type_ == DT_QINT8) ? 127.0f : 255.0f;
const float requested_min_max =
std::max(std::abs(input_requested_min_float),
std::abs(input_requested_max_float));
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputTensorIndex,
input.shape(), &output));
std::vector<float> scales(depth);
for (int i = 0; i < depth; ++i) {
float min_max_from_vec = std::max(std::abs(input_min_vec_data[i]),
std::abs(input_max_vec_data[i]));
scales[i] = factor * (min_max_from_vec / requested_min_max /
static_cast<float>(1L << 31));
}
mkldnn::primitive_attr reorder_attr;
reorder_attr.set_output_scales(2, scales);
memory::dims dims_mkl_order =
TFShapeToMklDnnDimsInNCHW(input.shape(), FORMAT_NHWC);
memory::desc input_md = memory::desc(dims_mkl_order, MklDnnType<qint32>(),
memory::format_tag::nhwc);
memory::desc output_md =
(out_type_ == DT_QINT8)
? memory::desc(dims_mkl_order, MklDnnType<qint8>(),
memory::format_tag::nhwc)
: memory::desc(dims_mkl_order, MklDnnType<quint8>(),
memory::format_tag::nhwc);
void* input_buf =
static_cast<void*>(const_cast<qint32*>(input.flat<qint32>().data()));
void* output_buf;
if (out_type_ == DT_QINT8) {
output_buf = static_cast<void*>(
const_cast<qint8*>(output->flat<qint8>().data()));
} else {
output_buf = static_cast<void*>(
const_cast<quint8*>(output->flat<quint8>().data()));
}
std::unique_ptr<memory> input_mem_prim(
new memory(input_md, cpu_engine_, input_buf));
std::unique_ptr<memory> output_mem_prim(
new memory(output_md, cpu_engine_, output_buf));
mkldnn::reorder::primitive_desc reorder_pd =
ReorderPd(cpu_engine_, input_mem_prim->get_desc(), cpu_engine_,
output_mem_prim->get_desc(), reorder_attr);
std::shared_ptr<stream> reorder_stream;
MklDnnThreadPool eigen_tp(ctx);
reorder_stream.reset(CreateStream(&eigen_tp, cpu_engine_));
std::unordered_map<int, mkldnn::memory> reorder_args = {
{MKLDNN_ARG_FROM, *input_mem_prim},
{MKLDNN_ARG_TO, *output_mem_prim}};
std::unique_ptr<mkldnn::primitive> reorder_prim(
new mkldnn::reorder(reorder_pd));
reorder_prim->execute(*reorder_stream, reorder_args);
Tensor* output_min = nullptr;
Tensor* output_max = nullptr;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(kOutputMinIndex, {}, &output_min));
OP_REQUIRES_OK(ctx,
ctx->allocate_output(kOutputMaxIndex, {}, &output_max));
output_min->flat<float>()(0) = input_requested_min_float;
output_max->flat<float>()(0) = input_requested_max_float;
} catch (mkldnn::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + std::string(e.message) + ", in file " +
std::string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(
ctx, errors::Aborted("Operation received an exception:", error_msg));
}
}
| null | null | 197,239
|
103677223950190323637234469418983500216
| 109
|
Reorganize and add more validation to MKL requantization
PiperOrigin-RevId: 387901341
Change-Id: I2515b9034c64e113db0bcec8337d30643ab0a0f1
|
other
|
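A hedged sketch of the kind of validation the message refers to, reusing the kernel's local names; the exact checks in the commit may differ:

OP_REQUIRES(ctx, TensorShapeUtils::IsVector(input_min_vec.shape()),
            errors::InvalidArgument("input_min must be a vector"));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(input_max_vec.shape()),
            errors::InvalidArgument("input_max must be a vector"));
OP_REQUIRES(ctx, input_requested_min.NumElements() == 1,
            errors::InvalidArgument("requested_output_min must be a scalar"));
OP_REQUIRES(ctx, input_requested_max.NumElements() == 1,
            errors::InvalidArgument("requested_output_max must be a scalar"));
OP_REQUIRES(ctx, depth == input.dim_size(3),
            errors::InvalidArgument("input_min size must match the channel "
                                    "dimension of the 4D input"));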
tensorflow
|
537bc7c723439b9194a358f64d871dd326c18887
| 1
|
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
int scratch_tensor_index = op_data->scratch_tensor_index;
// Check we have all the inputs and outputs we need.
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TF_LITE_ENSURE_EQ(context, node->inputs->size, 5);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* weights_feature;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kWeightsFeatureTensor,
&weights_feature));
const TfLiteTensor* weights_time;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTimeTensor, &weights_time));
TF_LITE_ENSURE(context,
input->type == kTfLiteFloat32 || input->type == kTfLiteInt8);
// Check all the parameters of tensor match within themselves and match the
// input configuration.
const int rank = params->rank;
const int batch_size = input->dims->data[0];
const int num_filters = weights_feature->dims->data[0];
TF_LITE_ENSURE(context, rank != 0);
TF_LITE_ENSURE_EQ(context, num_filters % rank, 0);
const int num_units = num_filters / rank;
const int memory_size = weights_time->dims->data[1];
TF_LITE_ENSURE_EQ(context, input->dims->data[1],
weights_feature->dims->data[1]);
TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters);
const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
if (bias) {
TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units);
}
const TfLiteTensor* state;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStateTensor, &state));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
// Check the shape of input state tensors.
TF_LITE_ENSURE_EQ(context, NumDimensions(state), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(state, 0), batch_size);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(state, 1),
memory_size * num_filters);
// Resize output.
TfLiteIntArray* output_size_array = TfLiteIntArrayCreate(2);
output_size_array->data[0] = batch_size;
output_size_array->data[1] = num_units;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size_array));
// The weights are of consistent type, so it suffices to check one.
const bool is_hybrid_op = IsHybridOp(input, weights_feature);
const bool is_full_integer = input->type == kTfLiteInt8;
// Resize scratch.
TfLiteIntArrayFree(node->temporaries);
if (is_hybrid_op) {
node->temporaries = TfLiteIntArrayCreate(6);
} else if (is_full_integer) {
node->temporaries = TfLiteIntArrayCreate(2);
} else {
node->temporaries = TfLiteIntArrayCreate(1);
}
node->temporaries->data[0] = scratch_tensor_index;
TfLiteIntArray* scratch_size_array = TfLiteIntArrayCreate(2);
scratch_size_array->data[0] = batch_size;
scratch_size_array->data[1] = num_filters;
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/0, &scratch_tensor));
// The scratch buffer is of type int32 for full integer svdf and it's of type
// float32 for hybrid and float case.
if (is_full_integer) {
scratch_tensor->type = kTfLiteInt32;
} else {
scratch_tensor->type = kTfLiteFloat32;
}
scratch_tensor->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor,
scratch_size_array));
if (is_hybrid_op) {
op_data->compute_row_sums = true;
// Tell interpreter to allocate temporary tensors to store quantized values
// of input tensors.
node->temporaries->data[1] = scratch_tensor_index + 1;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/1,
&input_quantized));
input_quantized->type = weights_feature->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
// Tell interpreter to allocate temporary tensors to store scaling factors.
node->temporaries->data[2] = scratch_tensor_index + 2;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/2,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
// Used to store dequantized weights_time matrix for hybrid computation of
// matmul(state, weights_time), which occurs in floating point.
node->temporaries->data[3] = scratch_tensor_index + 3;
TfLiteTensor* float_weights_time;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/3,
&float_weights_time));
float_weights_time->type = kTfLiteFloat32;
// Persistent so that we can compute the dequantized weights only once.
float_weights_time->allocation_type = kTfLiteArenaRwPersistent;
if (!TfLiteIntArrayEqual(float_weights_time->dims, weights_time->dims)) {
TfLiteIntArray* float_weights_time_size =
TfLiteIntArrayCopy(weights_time->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, float_weights_time,
float_weights_time_size));
}
node->temporaries->data[4] = scratch_tensor_index + 4;
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/4, &zero_points));
zero_points->type = kTfLiteFloat32;
zero_points->allocation_type = kTfLiteArenaRw;
int zero_points_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) {
TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1);
zero_points_size->data[0] = zero_points_dims[0];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points,
zero_points_size));
}
node->temporaries->data[5] = scratch_tensor_index + 5;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/5, &row_sums));
row_sums->type = kTfLiteFloat32;
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[1] = {num_filters};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
row_sums_size->data[0] = row_sums_dims[0];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
if (is_full_integer) {
// Allocated one extra tensor.
TfLiteIntArray* output_temp_size_array = TfLiteIntArrayCreate(2);
output_temp_size_array->data[0] = num_units;
output_temp_size_array->data[1] = batch_size;
node->temporaries->data[1] = scratch_tensor_index + 1;
TfLiteTensor* output_temp;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/1, &output_temp));
output_temp->type = kTfLiteInt32;
output_temp->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_temp,
output_temp_size_array));
// Calculate effective scales.
auto* input_params =
reinterpret_cast<TfLiteAffineQuantization*>(input->quantization.params);
auto* weights_feature_params = reinterpret_cast<TfLiteAffineQuantization*>(
weights_feature->quantization.params);
auto* state_params =
reinterpret_cast<TfLiteAffineQuantization*>(state->quantization.params);
auto* weight_time_params = reinterpret_cast<TfLiteAffineQuantization*>(
weights_time->quantization.params);
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
output->quantization.params);
const double effective_scale_1 = input_params->scale->data[0] *
weights_feature_params->scale->data[0] /
state_params->scale->data[0];
const double effective_scale_2 = state_params->scale->data[0] *
weight_time_params->scale->data[0] /
output_params->scale->data[0];
QuantizeMultiplier(effective_scale_1, &op_data->effective_scale_1_a,
&op_data->effective_scale_1_b);
QuantizeMultiplier(effective_scale_2, &op_data->effective_scale_2_a,
&op_data->effective_scale_2_b);
}
return kTfLiteOk;
}
| null | null | 197,242
|
261286652604987048068530973039778075570
| 206
|
Fix a null pointer exception caused by branching on uninitialized data.
This is due to not checking that the params for the quantization exists. If there is no quantization, we should not access the `.params` field.
PiperOrigin-RevId: 385163909
Change-Id: I2beb8d50649b6542db224c163033fbcbaa49314f
|
other
|
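A hedged sketch of the missing check: the full-integer path must verify that affine quantization params actually exist before the reinterpret_casts, using TfLite's TF_LITE_ENSURE (the commit may phrase the conditions differently):

TF_LITE_ENSURE(context,
               input->quantization.type == kTfLiteAffineQuantization);
TF_LITE_ENSURE(context,
               weights_feature->quantization.type == kTfLiteAffineQuantization);
TF_LITE_ENSURE(context,
               state->quantization.type == kTfLiteAffineQuantization);
TF_LITE_ENSURE(context,
               weights_time->quantization.type == kTfLiteAffineQuantization);
TF_LITE_ENSURE(context,
               output->quantization.type == kTfLiteAffineQuantization);
auto* input_params =
    reinterpret_cast<TfLiteAffineQuantization*>(input->quantization.params);
TF_LITE_ENSURE(context, input_params != nullptr);
TF_LITE_ENSURE(context, input_params->scale != nullptr);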
tensorflow
|
ee119d4a498979525046fba1c3dd3f13a039fbb1
| 1
|
Status ShapeRefiner::InferShapesForFunctionSubNode(
const Node* node, InferenceContext* outer_context) {
TF_RETURN_IF_ERROR(AddNodeInternal(node, outer_context));
InferenceContext* node_context = CHECK_NOTNULL(GetContext(node));
if (StringPiece(node->type_string()) == kArgOp) {
// Handle special node: function input.
// Shapes for these nodes are provided in the outer inference
// context.
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index));
if (index < 0 || outer_context->num_inputs() <= index) {
return errors::Internal(
"Function instantiation included invalid input index: ", index,
" not in [0, ", outer_context->num_inputs(), ").");
}
// TODO(b/134547156): TEMPORARY WORKAROUND. If input shape handle is not set
// in outer context, set _Arg node output shape to unknown.
if (outer_context->input(index).SameHandle(ShapeHandle())) {
VLOG(1) << "Function instantiation has undefined input shape at "
<< "index: " << index << " in the outer inference context.";
node_context->set_output(0, node_context->UnknownShape());
} else {
node_context->set_output(0, outer_context->input(index));
}
auto* resource = outer_context->input_handle_shapes_and_types(index);
if (resource) {
node_context->set_output_handle_shapes_and_types(0, *resource);
}
} else if (StringPiece(node->type_string()) == kRetvalOp) {
// Handle special node: function output.
// Shapes inferred for these nodes go into the outer inference
// context.
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index));
if (index < 0 || outer_context->num_outputs() <= index) {
return errors::Internal(
"Function instantiation included invalid output index: ", index,
" not in [0, ", outer_context->num_outputs(), ").");
}
// outer_context outlives node_context, therefore we need to create
// a new shape handle owned by outer_context instead.
ShapeHandle handle;
TensorShapeProto proto;
node_context->ShapeHandleToProto(node_context->input(0), &proto);
TF_RETURN_IF_ERROR(outer_context->MakeShapeFromShapeProto(proto, &handle));
outer_context->set_output(index, handle);
auto* resource = node_context->input_handle_shapes_and_types(0);
if (resource) {
outer_context->set_output_handle_shapes_and_types(index, *resource);
}
}
return Status::OK();
}
| null | null | 197,247
|
293529933962281304259093229829962668360
| 63
|
Fix segmentation fault in shape inference logic.
When running shape functions, some functions (such as `MutableHashTableShape`)
produce extra output information in the form of a `ShapeAndType` struct. The
shapes embedded in this struct are owned by an inference context that is
cleaned up almost immediately; if the upstream code attempts to access this
shape information, it can trigger a segfault.
`ShapeRefiner` is mitigating this for normal output shapes by cloning them
(and thus putting the newly created shape under ownership of an inference
context that will not die), but we were not doing the same for shapes and
types. This commit fixes that by doing similar logic on output shapes and
types.
PiperOrigin-RevId: 384761124
Change-Id: I07c0c42d29dfbb55bfa13ec1f09ef825fb0a1a1d
|
other
|
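A hedged sketch of the fix the message describes: clone each resource handle shape into the outer context, mirroring the proto round-trip already used for the plain output shape (the commit may also carry a specialized-type field):

auto* resource = node_context->input_handle_shapes_and_types(0);
if (resource) {
  std::vector<ShapeAndType> copied_shapes_and_types;
  for (const auto& shape_and_type : *resource) {
    ShapeHandle handle;
    TensorShapeProto proto;
    node_context->ShapeHandleToProto(shape_and_type.shape, &proto);
    TF_RETURN_IF_ERROR(outer_context->MakeShapeFromShapeProto(proto, &handle));
    copied_shapes_and_types.push_back(ShapeAndType(handle, shape_and_type.dtype));
  }
  outer_context->set_output_handle_shapes_and_types(index,
                                                    copied_shapes_and_types);
}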
tensorflow
|
e6cf28c72ba2eb949ca950d834dd6d66bb01cfae
| 1
|
void Compute(OpKernelContext* ctx) override {
const Tensor& a = ctx->input(0);
const Tensor& b = ctx->input(1);
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a.shape()),
errors::InvalidArgument("a is not a matrix"));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b.shape()),
errors::InvalidArgument("b is not a matrix"));
const int m = transpose_a_ ? a.dim_size(1) : a.dim_size(0);
const int k = transpose_a_ ? a.dim_size(0) : a.dim_size(1);
const int n = transpose_b_ ? b.dim_size(0) : b.dim_size(1);
const int k2 = transpose_b_ ? b.dim_size(1) : b.dim_size(0);
OP_REQUIRES(ctx, k == k2,
errors::InvalidArgument(
"Matrix size incompatible: a: ", a.shape().DebugString(),
", b: ", b.shape().DebugString()));
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({m, n}), &output));
if (k == 0) {
// If the inner dimension k in the matrix multiplication is zero, we fill
// the output with zeros.
functor::SetZeroFunctor<CPUDevice, float> f;
f(ctx->eigen_device<CPUDevice>(), output->flat<float>());
return;
}
auto out = output->matrix<float>();
std::unique_ptr<Tensor> a_float;
std::unique_ptr<Tensor> b_float;
if (!a_is_sparse_ && !b_is_sparse_) {
auto left = &a;
auto right = &b;
// TODO(agarwal): multi-thread the conversions from bfloat16 to float.
if (std::is_same<TL, bfloat16>::value) {
a_float.reset(new Tensor(DT_FLOAT, a.shape()));
BFloat16ToFloat(a.flat<bfloat16>().data(),
a_float->flat<float>().data(), a.NumElements());
left = a_float.get();
}
if (std::is_same<TR, bfloat16>::value) {
b_float.reset(new Tensor(DT_FLOAT, b.shape()));
BFloat16ToFloat(b.flat<bfloat16>().data(),
b_float->flat<float>().data(), b.NumElements());
right = b_float.get();
}
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
dim_pair[0].first = transpose_a_ ? 0 : 1;
dim_pair[0].second = transpose_b_ ? 1 : 0;
out.device(ctx->template eigen_device<CPUDevice>()) =
left->matrix<float>().contract(right->matrix<float>(), dim_pair);
return;
}
auto left = &a;
auto right = &b;
bool transpose_output = false;
bool transpose_a = transpose_a_;
bool transpose_b = transpose_b_;
if (!a_is_sparse_) {
// Swap the order of multiplications using the identity:
// A * B = (B' * A')'.
std::swap(left, right);
std::swap(transpose_a, transpose_b);
transpose_a = !transpose_a;
transpose_b = !transpose_b;
transpose_output = !transpose_output;
}
std::unique_ptr<Tensor> right_tr;
if (transpose_b) {
// TODO(agarwal): avoid transposing the matrix here and directly handle
// transpose in CreateDenseSlices.
OP_REQUIRES(ctx, right->dim_size(0) != 0,
errors::InvalidArgument("b has an entry 0 in it's shape."));
OP_REQUIRES(ctx, right->dim_size(1) != 0,
errors::InvalidArgument("b has an entry 0 in it's shape."));
right_tr.reset(
new Tensor(right->dtype(),
TensorShape({right->dim_size(1), right->dim_size(0)})));
const auto perm = dsizes_10();
if (transpose_output) {
right_tr->matrix<TL>().device(ctx->template eigen_device<CPUDevice>()) =
right->matrix<TL>().shuffle(perm);
} else {
right_tr->matrix<TR>().device(ctx->template eigen_device<CPUDevice>()) =
right->matrix<TR>().shuffle(perm);
}
right = right_tr.get();
}
if (transpose_output) {
DoMatMul<TR, TL>::Compute(&this->cache_tr_, left->matrix<TR>(),
right->matrix<TL>(), transpose_a,
ctx->device()->tensorflow_cpu_worker_threads(),
transpose_output, &out);
} else {
DoMatMul<TL, TR>::Compute(&this->cache_nt_, left->matrix<TL>(),
right->matrix<TR>(), transpose_a,
ctx->device()->tensorflow_cpu_worker_threads(),
transpose_output, &out);
}
}
| null | null | 197,262
|
55113062603670285543450185774452114619
| 107
|
Validate that matrix dimension sizes in SparseMatMul are positive.
PiperOrigin-RevId: 401149683
Change-Id: Ib33eafc561a39c8741ece80b2edce6d4aae9a57d
|
other
|
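A hedged sketch of the added validation, placed before the dimension sizes are used to slice the matrices; the exact bounds in the commit may differ:

OP_REQUIRES(ctx, m > 0 && n > 0 && k > 0,
            errors::InvalidArgument(
                "Matrix dimension sizes must be positive: a: ",
                a.shape().DebugString(), ", b: ", b.shape().DebugString()));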
linux
|
cc8f7fe1f5eab010191aa4570f27641876fa1267
| 1
|
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
unsigned int len, gfp_t gfp_mask, int reading)
{
unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = kaddr >> PAGE_SHIFT;
struct bio *bio;
void *p = data;
int nr_pages = 0;
/*
* Overflow, abort
*/
if (end < start)
return ERR_PTR(-EINVAL);
nr_pages = end - start;
bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)
return ERR_PTR(-ENOMEM);
while (len) {
struct page *page;
unsigned int bytes = PAGE_SIZE;
if (bytes > len)
bytes = len;
page = alloc_page(GFP_NOIO | gfp_mask);
if (!page)
goto cleanup;
if (!reading)
memcpy(page_address(page), p, bytes);
if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
break;
len -= bytes;
p += bytes;
}
if (reading) {
bio->bi_end_io = bio_copy_kern_endio_read;
bio->bi_private = data;
} else {
bio->bi_end_io = bio_copy_kern_endio;
}
return bio;
cleanup:
bio_free_pages(bio);
bio_put(bio);
return ERR_PTR(-ENOMEM);
}
| null | null | 197,302
|
175211225765149879157074417577663475920
| 56
|
block-map: add __GFP_ZERO flag for alloc_page in function bio_copy_kern
Add __GFP_ZERO flag for alloc_page in function bio_copy_kern to initialize
the buffer of a bio.
Signed-off-by: Haimin Zhang <[email protected]>
Reviewed-by: Chaitanya Kulkarni <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
|
other
|
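The fix is a one-flag change in the allocation above: zero the page so that a partial copy-in or the read path can never hand stale kernel memory back to the caller. A fragment matching the message:

/* __GFP_ZERO: the page starts out zeroed, so uninitialized kernel data
 * cannot leak if less than a full page is filled. */
page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);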
pjproject
|
11559e49e65bdf00922ad5ae28913ec6a198d508
| 1
|
PJ_DEF(pj_status_t) pjmedia_rtcp_fb_parse_rpsi(
const void *buf,
pj_size_t length,
pjmedia_rtcp_fb_rpsi *rpsi)
{
pjmedia_rtcp_common *hdr = (pjmedia_rtcp_common*) buf;
pj_uint8_t *p;
pj_uint8_t padlen;
pj_size_t rpsi_len;
PJ_ASSERT_RETURN(buf && rpsi, PJ_EINVAL);
PJ_ASSERT_RETURN(length >= sizeof(pjmedia_rtcp_common), PJ_ETOOSMALL);
/* RPSI uses pt==RTCP_PSFB and FMT==3 */
if (hdr->pt != RTCP_PSFB || hdr->count != 3)
return PJ_ENOTFOUND;
rpsi_len = (pj_ntohs((pj_uint16_t)hdr->length)-2) * 4;
if (length < rpsi_len + 12)
return PJ_ETOOSMALL;
p = (pj_uint8_t*)hdr + sizeof(*hdr);
padlen = *p++;
rpsi->pt = (*p++ & 0x7F);
rpsi->rpsi_bit_len = rpsi_len*8 - 16 - padlen;
pj_strset(&rpsi->rpsi, (char*)p, (rpsi->rpsi_bit_len + 7)/8);
return PJ_SUCCESS;
}
| null | null | 197,305
|
149489462462919088082154669395179087984
| 29
|
Merge pull request from GHSA-vhxv-phmx-g52q
* Prevent OOB read/write when parsing RTCP FB RPSI
* Add log information
* Modification based on comments.
|
other
|
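A hedged sketch of the bounds checks implied by the advisory, reusing the parser's names; the exact conditions in the fix may differ:

/* hdr->length < 3 would make rpsi_len underflow below */
if (pj_ntohs((pj_uint16_t)hdr->length) < 3)
    return PJ_ETOOSMALL;

/* ... and after reading padlen: keep rpsi_bit_len from underflowing */
if (rpsi_len * 8 < (pj_size_t)padlen + 16)
    return PJ_ETOOSMALL;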
janet
|
7fda7709ff15ab4b4cdea57619365eb2798f15c4
| 1
|
JANET_CORE_FN(cfun_array_new_filled,
"(array/new-filled count &opt value)",
"Creates a new array of `count` elements, all set to `value`, which defaults to nil. Returns the new array.") {
janet_arity(argc, 1, 2);
int32_t count = janet_getinteger(argv, 0);
Janet x = (argc == 2) ? argv[1] : janet_wrap_nil();
JanetArray *array = janet_array(count);
for (int32_t i = 0; i < count; i++) {
array->data[i] = x;
}
array->count = count;
return janet_wrap_array(array);
}
| null | null | 197,313
|
114433653419239968575237651226501775026
| 13
|
fix negative count passed to cfun_array_new_filled
|
other
|
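A minimal sketch of the fix: reject a negative count before it reaches the allocator and the fill loop. (Janet also ships janet_getnat() for reading non-negative integers; the explicit check here keeps the sketch self-explanatory.)

int32_t count = janet_getinteger(argv, 0);
/* A negative count would flow into janet_array(count) unchecked. */
if (count < 0)
    janet_panicf("expected non-negative count, got %d", count);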
tensorflow
|
cff267650c6a1b266e4b4500f69fbc49cdd773c5
| 1
|
void Compute(OpKernelContext* ctx) override {
const Tensor& handle = ctx->input(0);
const string& name = handle.scalar<tstring>()();
auto session_state = ctx->session_state();
OP_REQUIRES(ctx, session_state != nullptr,
errors::FailedPrecondition(
"DeleteSessionTensor called on null session state"));
OP_REQUIRES_OK(ctx, session_state->DeleteTensor(name));
}
| null | null | 197,318
|
241300971074573611654671024037362704586
| 9
|
Fix tf.raw_ops.DeleteSessionTensor vulnerability with invalid `handle`.
Check that `handle` input is actually a scalar before treating it as such.
PiperOrigin-RevId: 445228994
|
other
|
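The fix per the message: verify the handle is actually a scalar before calling .scalar<tstring>(). A hedged fragment:

OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(handle.shape()),
            errors::InvalidArgument("`handle` must be scalar, got shape ",
                                    handle.shape().DebugString()));
const string& name = handle.scalar<tstring>()();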
tensorflow
|
f09caa532b6e1ac8d2aa61b7832c78c5b79300c6
| 1
|
static Status ParseEquation(const string& equation,
OperandLabels* input_labels,
Labels* output_labels,
std::vector<DimensionType>* label_types,
OperandLabelCounts* input_label_counts,
LabelCounts* output_label_counts,
gtl::InlinedVector<bool, 2>* input_has_ellipsis,
bool* output_has_ellipsis) {
gtl::InlinedVector<string, 2> input_str;
string output_str;
TF_RETURN_IF_ERROR(ParseEinsumEquation(equation, &input_str, &output_str));
// Temporary map from single character labels to (consecutive) integer
// labels.
absl::flat_hash_map<char, int> label_mapping;
int num_inputs = input_str.size();
input_labels->resize(num_inputs);
// Map from single characters to integer labels.
for (int i = 0; i < num_inputs; ++i) {
MapToLabels(input_str[i], &input_labels->at(i), &label_mapping);
}
MapToLabels(output_str, output_labels, &label_mapping);
// Compute counts for input and output labels.
int num_labels = label_mapping.size();
input_label_counts->resize(num_inputs);
input_has_ellipsis->resize(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
input_label_counts->at(i).resize(num_labels);
for (const int label : input_labels->at(i)) {
if (label != kEllipsisLabel)
input_label_counts->at(i)[label] += 1;
else
input_has_ellipsis->at(i) = true;
}
}
output_label_counts->resize(num_labels);
for (const int label : *output_labels) {
if (label != kEllipsisLabel)
output_label_counts->at(label) += 1;
else
*output_has_ellipsis = true;
}
// Map each label to a unique DimensionType.
label_types->resize(num_labels);
for (int label = 0; label < num_labels; ++label) {
if (label == kEllipsisLabel) continue;
bool removed = (*output_label_counts)[label] == 0;
bool unique = num_inputs == 1 || (*input_label_counts)[0][label] == 0 ||
(*input_label_counts)[1][label] == 0;
(*label_types)[label] = GetDimensionType(removed, unique);
}
return Status::OK();
}
| null | null | 197,326
|
89619662301574140486589919659052622416
| 56
|
Fix EinsumHelper::ParseEquation to avoid uninitialized accesses.
EinsumHelper::ParseEquation is supposed to return true or false in
input_has_ellipsis and output_has_ellipsis to indicate whether there is an
ellipsis in the inputs and output. Previously, when there was no ellipsis in the
inputs or output, the routine did not assign false to the variables. This
change initializes the two variables to false to fix the problem.
PiperOrigin-RevId: 391772004
Change-Id: I17b6c88aadef4131470378e48cced054bf252e86
|
other
|
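A hedged sketch of the initialization the message describes, inserted where the vectors are sized:

input_has_ellipsis->resize(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
  input_has_ellipsis->at(i) = false;   // default to "no ellipsis"
}
*output_has_ellipsis = false;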