| project (string) | commit_id (string) | target (int64) | func (string) | cwe (string) | big_vul_idx (string) | idx (int64) | hash (string) | size (float64) | message (string) | dataset (string) |
|---|---|---|---|---|---|---|---|---|---|---|
| Chrome | 2bfb2b8299e2fb6a432390a93a99a85fed1d29c9 | 0 |
void WebProcessProxy::didBecomeResponsive(ResponsivenessTimer*)
{
Vector<RefPtr<WebPageProxy> > pages;
copyValuesToVector(m_pageMap, pages);
for (size_t i = 0, size = pages.size(); i < size; ++i)
pages[i]->processDidBecomeResponsive();
}
| 113377 | 177,169 | 305780027283176982755471671366831526154 | null | null | null |
| Chrome | 2bfb2b8299e2fb6a432390a93a99a85fed1d29c9 | 0 |
void WebProcessProxy::didPerformClientRedirect(uint64_t pageID, const String& sourceURLString, const String& destinationURLString, uint64_t frameID)
{
WebPageProxy* page = webPage(pageID);
if (!page)
return;
if (sourceURLString.isEmpty() || destinationURLString.isEmpty())
return;
WebFrameProxy* frame = webFrame(frameID);
MESSAGE_CHECK(frame);
MESSAGE_CHECK(frame->page() == page);
MESSAGE_CHECK_URL(sourceURLString);
MESSAGE_CHECK_URL(destinationURLString);
m_context->historyClient().didPerformClientRedirect(m_context.get(), page, sourceURLString, destinationURLString, frame);
}
| 113385 | 177,170 | 142702692842184752535500144276331009047 | null | null | null |
| Chrome | 2bfb2b8299e2fb6a432390a93a99a85fed1d29c9 | 0 |
void WebProcessProxy::didReceiveSyncMessage(CoreIPC::Connection* connection, CoreIPC::MessageID messageID, CoreIPC::MessageDecoder& decoder, OwnPtr<CoreIPC::MessageEncoder>& replyEncoder)
{
if (m_messageReceiverMap.dispatchSyncMessage(connection, messageID, decoder, replyEncoder))
return;
if (m_context->dispatchSyncMessage(connection, messageID, decoder, replyEncoder))
return;
if (decoder.messageReceiverName() == Messages::WebProcessProxy::messageReceiverName()) {
didReceiveSyncWebProcessProxyMessage(connection, messageID, decoder, replyEncoder);
return;
}
uint64_t pageID = decoder.destinationID();
if (!pageID)
return;
WebPageProxy* pageProxy = webPage(pageID);
if (!pageProxy)
return;
pageProxy->didReceiveSyncMessage(connection, messageID, decoder, replyEncoder);
}
| 113390 | 177,171 | 235702755550283162946333980419988241429 | null | null | null |
| Chrome | 2bfb2b8299e2fb6a432390a93a99a85fed1d29c9 | 0 |
void WebProcessProxy::disconnect()
{
clearConnection();
if (m_webConnection) {
m_webConnection->invalidate();
m_webConnection = nullptr;
}
m_responsivenessTimer.stop();
Vector<RefPtr<WebFrameProxy> > frames;
copyValuesToVector(m_frameMap, frames);
for (size_t i = 0, size = frames.size(); i < size; ++i)
frames[i]->disconnect();
m_frameMap.clear();
if (m_downloadProxyMap)
m_downloadProxyMap->processDidClose();
m_context->disconnectProcess(this);
}
| 113392 | 177,172 | 94661919577932071721465793451301514748 | null | null | null |
| Chrome | 2bfb2b8299e2fb6a432390a93a99a85fed1d29c9 | 0 |
Vector<WebPageProxy*> WebProcessProxy::pages() const
{
Vector<WebPageProxy*> result;
copyValuesToVector(m_pageMap, result);
return result;
}
| 113409 | 177,173 | 38082877908503068527589053296721164577 | null | null | null |
| Chrome | 2bfb2b8299e2fb6a432390a93a99a85fed1d29c9 | 0 |
WebBackForwardListItem* WebProcessProxy::webBackForwardItem(uint64_t itemID) const
{
return m_backForwardListItemMap.get(itemID).get();
}
| 113418 | 177,174 | 226118711618182513803439833364509045492 | null | null | null |
| Chrome | 2bfb2b8299e2fb6a432390a93a99a85fed1d29c9 | 0 |
WebFrameProxy* WebProcessProxy::webFrame(uint64_t frameID) const
{
return isGoodKey<WebFrameProxyMap>(frameID) ? m_frameMap.get(frameID).get() : 0;
}
| 113419 | 177,175 | 225975636160752313306010496363367888990 | null | null | null |
| Chrome | 6d067124e87295721c62a77f0610e4b37f6098ad | 0 |
v8::Handle<v8::Value> V8ThrowException::createGeneralError(v8::Isolate* isolate, const String& message)
{
return v8::Exception::Error(v8String(isolate, message.isNull() ? "Error" : message));
}
| 140864 | 180,232 | 44848457228119146434050153665842697577 | null | null | null |
| Chrome | 6d067124e87295721c62a77f0610e4b37f6098ad | 0 |
v8::Handle<v8::Value> V8ThrowException::createReferenceError(v8::Isolate* isolate, const String& message)
{
return v8::Exception::ReferenceError(v8String(isolate, message.isNull() ? "Reference error" : message));
}
| 140866 | 180,233 | 52290048754550229978658600205251663944 | null | null | null |
| Chrome | 6d067124e87295721c62a77f0610e4b37f6098ad | 0 |
v8::Handle<v8::Value> V8ThrowException::throwException(v8::Handle<v8::Value> exception, v8::Isolate* isolate)
{
if (!v8::V8::IsExecutionTerminating())
isolate->ThrowException(exception);
return v8::Undefined(isolate);
}
| 140872 | 180,234 | 224900571143525634117459138790512973631 | null | null | null |
| Chrome | 6d067124e87295721c62a77f0610e4b37f6098ad | 0 |
v8::Handle<v8::Value> V8ThrowException::throwGeneralError(v8::Isolate* isolate, const String& message)
{
v8::Handle<v8::Value> exception = V8ThrowException::createGeneralError(isolate, message);
return V8ThrowException::throwException(exception, isolate);
}
| 140873 | 180,235 | 204267025340554794699645860412179276249 | null | null | null |
| Chrome | 6d067124e87295721c62a77f0610e4b37f6098ad | 0 |
v8::Handle<v8::Value> V8ThrowException::throwRangeError(v8::Isolate* isolate, const String& message)
{
v8::Handle<v8::Value> exception = V8ThrowException::createRangeError(isolate, message);
return V8ThrowException::throwException(exception, isolate);
}
| 140874 | 180,236 | 132836214328273199556486423657508962834 | null | null | null |
| Chrome | fb83de09f2c986ee91741f3a2776feea0e18e3f6 | 0 |
void OverlayWindowViews::ButtonPressed(views::Button* sender,
const ui::Event& event) {
if (sender == close_controls_view_.get())
controller_->Close(true /* should_pause_video */,
true /* should_reset_pip_player */);
if (sender == play_pause_controls_view_.get())
TogglePlayPause();
if (sender == first_custom_controls_view_.get())
controller_->CustomControlPressed(first_custom_controls_view_->id());
if (sender == second_custom_controls_view_.get())
controller_->CustomControlPressed(second_custom_controls_view_->id());
}
| 156009 | 181,940 | 335498214510436229672470102293461790668 | null | null | null |
| Chrome | fb83de09f2c986ee91741f3a2776feea0e18e3f6 | 0 |
ui::Layer* OverlayWindowViews::GetCloseControlsLayer() {
return close_controls_view_->layer();
}
| 156017 | 181,941 | 21196728246339535552625390398810798276 | null | null | null |
| Chrome | fb83de09f2c986ee91741f3a2776feea0e18e3f6 | 0 |
gfx::Size OverlayWindowViews::GetMinimumSize() const {
return min_size_;
}
| 156024 | 181,942 | 14470143283206508923724494390125041576 | null | null | null |
| Chrome | fb83de09f2c986ee91741f3a2776feea0e18e3f6 | 0 |
OverlayWindowViews::OverlayWindowViews(
content::PictureInPictureWindowController* controller)
: controller_(controller),
window_background_view_(new views::View()),
video_view_(new views::View()),
controls_scrim_view_(new views::View()),
controls_parent_view_(new views::View()),
close_controls_view_(new views::CloseImageButton(this)),
#if defined(OS_CHROMEOS)
resize_handle_view_(new views::ResizeHandleButton(this)),
#endif
play_pause_controls_view_(new views::ToggleImageButton(this)),
hide_controls_timer_(
FROM_HERE,
base::TimeDelta::FromMilliseconds(2500 /* 2.5 seconds */),
base::BindRepeating(&OverlayWindowViews::UpdateControlsVisibility,
base::Unretained(this),
false /* is_visible */)) {
views::Widget::InitParams params(views::Widget::InitParams::TYPE_WINDOW);
params.ownership = views::Widget::InitParams::WIDGET_OWNS_NATIVE_WIDGET;
params.bounds = CalculateAndUpdateWindowBounds();
params.keep_on_top = true;
params.visible_on_all_workspaces = true;
params.remove_standard_frame = true;
params.name = "PictureInPictureWindow";
params.delegate = new OverlayWindowWidgetDelegate(this);
Init(params);
SetUpViews();
#if defined(OS_CHROMEOS)
GetNativeWindow()->SetProperty(ash::kWindowPipTypeKey, true);
#endif // defined(OS_CHROMEOS)
is_initialized_ = true;
}
| 156042 | 181,943 | 46030759196152667072034644490239875717 | null | null | null |
| Chrome | fb83de09f2c986ee91741f3a2776feea0e18e3f6 | 0 |
gfx::Point OverlayWindowViews::close_image_position_for_testing() const {
return close_controls_view_->origin();
}
| 156058 | 181,944 | 104459102587256872729084319524681713097 | null | null | null |
| ImageMagick6 | dc070da861a015d3c97488fdcca6063b44d47a7b | 1 |
static MagickBooleanType GetEXIFProperty(const Image *image,
const char *property)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define EXIF_FMT_BYTE 1
#define EXIF_FMT_STRING 2
#define EXIF_FMT_USHORT 3
#define EXIF_FMT_ULONG 4
#define EXIF_FMT_URATIONAL 5
#define EXIF_FMT_SBYTE 6
#define EXIF_FMT_UNDEFINED 7
#define EXIF_FMT_SSHORT 8
#define EXIF_FMT_SLONG 9
#define EXIF_FMT_SRATIONAL 10
#define EXIF_FMT_SINGLE 11
#define EXIF_FMT_DOUBLE 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_GPS_OFFSET 0x8825
#define TAG_INTEROP_OFFSET 0xa005
#define EXIFMultipleValues(size,format,arg) \
{ \
ssize_t \
component; \
\
size_t \
length; \
\
unsigned char \
*p1; \
\
length=0; \
p1=p; \
for (component=0; component < components; component++) \
{ \
length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \
format", ",arg); \
if (length >= (MaxTextExtent-1)) \
length=MaxTextExtent-1; \
p1+=size; \
} \
if (length > 1) \
buffer[length-2]='\0'; \
value=AcquireString(buffer); \
}
#define EXIFMultipleFractions(size,format,arg1,arg2) \
{ \
ssize_t \
component; \
\
size_t \
length; \
\
unsigned char \
*p1; \
\
length=0; \
p1=p; \
for (component=0; component < components; component++) \
{ \
length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \
format", ",(arg1),(arg2)); \
if (length >= (MaxTextExtent-1)) \
length=MaxTextExtent-1; \
p1+=size; \
} \
if (length > 1) \
buffer[length-2]='\0'; \
value=AcquireString(buffer); \
}
typedef struct _DirectoryInfo
{
const unsigned char
*directory;
size_t
entry;
ssize_t
offset;
} DirectoryInfo;
typedef struct _TagInfo
{
size_t
tag;
const char
description[36];
} TagInfo;
static const TagInfo
EXIFTag[] =
{
{ 0x001, "exif:InteroperabilityIndex" },
{ 0x002, "exif:InteroperabilityVersion" },
{ 0x100, "exif:ImageWidth" },
{ 0x101, "exif:ImageLength" },
{ 0x102, "exif:BitsPerSample" },
{ 0x103, "exif:Compression" },
{ 0x106, "exif:PhotometricInterpretation" },
{ 0x10a, "exif:FillOrder" },
{ 0x10d, "exif:DocumentName" },
{ 0x10e, "exif:ImageDescription" },
{ 0x10f, "exif:Make" },
{ 0x110, "exif:Model" },
{ 0x111, "exif:StripOffsets" },
{ 0x112, "exif:Orientation" },
{ 0x115, "exif:SamplesPerPixel" },
{ 0x116, "exif:RowsPerStrip" },
{ 0x117, "exif:StripByteCounts" },
{ 0x11a, "exif:XResolution" },
{ 0x11b, "exif:YResolution" },
{ 0x11c, "exif:PlanarConfiguration" },
{ 0x11d, "exif:PageName" },
{ 0x11e, "exif:XPosition" },
{ 0x11f, "exif:YPosition" },
{ 0x118, "exif:MinSampleValue" },
{ 0x119, "exif:MaxSampleValue" },
{ 0x120, "exif:FreeOffsets" },
{ 0x121, "exif:FreeByteCounts" },
{ 0x122, "exif:GrayResponseUnit" },
{ 0x123, "exif:GrayResponseCurve" },
{ 0x124, "exif:T4Options" },
{ 0x125, "exif:T6Options" },
{ 0x128, "exif:ResolutionUnit" },
{ 0x12d, "exif:TransferFunction" },
{ 0x131, "exif:Software" },
{ 0x132, "exif:DateTime" },
{ 0x13b, "exif:Artist" },
{ 0x13e, "exif:WhitePoint" },
{ 0x13f, "exif:PrimaryChromaticities" },
{ 0x140, "exif:ColorMap" },
{ 0x141, "exif:HalfToneHints" },
{ 0x142, "exif:TileWidth" },
{ 0x143, "exif:TileLength" },
{ 0x144, "exif:TileOffsets" },
{ 0x145, "exif:TileByteCounts" },
{ 0x14a, "exif:SubIFD" },
{ 0x14c, "exif:InkSet" },
{ 0x14d, "exif:InkNames" },
{ 0x14e, "exif:NumberOfInks" },
{ 0x150, "exif:DotRange" },
{ 0x151, "exif:TargetPrinter" },
{ 0x152, "exif:ExtraSample" },
{ 0x153, "exif:SampleFormat" },
{ 0x154, "exif:SMinSampleValue" },
{ 0x155, "exif:SMaxSampleValue" },
{ 0x156, "exif:TransferRange" },
{ 0x157, "exif:ClipPath" },
{ 0x158, "exif:XClipPathUnits" },
{ 0x159, "exif:YClipPathUnits" },
{ 0x15a, "exif:Indexed" },
{ 0x15b, "exif:JPEGTables" },
{ 0x15f, "exif:OPIProxy" },
{ 0x200, "exif:JPEGProc" },
{ 0x201, "exif:JPEGInterchangeFormat" },
{ 0x202, "exif:JPEGInterchangeFormatLength" },
{ 0x203, "exif:JPEGRestartInterval" },
{ 0x205, "exif:JPEGLosslessPredictors" },
{ 0x206, "exif:JPEGPointTransforms" },
{ 0x207, "exif:JPEGQTables" },
{ 0x208, "exif:JPEGDCTables" },
{ 0x209, "exif:JPEGACTables" },
{ 0x211, "exif:YCbCrCoefficients" },
{ 0x212, "exif:YCbCrSubSampling" },
{ 0x213, "exif:YCbCrPositioning" },
{ 0x214, "exif:ReferenceBlackWhite" },
{ 0x2bc, "exif:ExtensibleMetadataPlatform" },
{ 0x301, "exif:Gamma" },
{ 0x302, "exif:ICCProfileDescriptor" },
{ 0x303, "exif:SRGBRenderingIntent" },
{ 0x320, "exif:ImageTitle" },
{ 0x5001, "exif:ResolutionXUnit" },
{ 0x5002, "exif:ResolutionYUnit" },
{ 0x5003, "exif:ResolutionXLengthUnit" },
{ 0x5004, "exif:ResolutionYLengthUnit" },
{ 0x5005, "exif:PrintFlags" },
{ 0x5006, "exif:PrintFlagsVersion" },
{ 0x5007, "exif:PrintFlagsCrop" },
{ 0x5008, "exif:PrintFlagsBleedWidth" },
{ 0x5009, "exif:PrintFlagsBleedWidthScale" },
{ 0x500A, "exif:HalftoneLPI" },
{ 0x500B, "exif:HalftoneLPIUnit" },
{ 0x500C, "exif:HalftoneDegree" },
{ 0x500D, "exif:HalftoneShape" },
{ 0x500E, "exif:HalftoneMisc" },
{ 0x500F, "exif:HalftoneScreen" },
{ 0x5010, "exif:JPEGQuality" },
{ 0x5011, "exif:GridSize" },
{ 0x5012, "exif:ThumbnailFormat" },
{ 0x5013, "exif:ThumbnailWidth" },
{ 0x5014, "exif:ThumbnailHeight" },
{ 0x5015, "exif:ThumbnailColorDepth" },
{ 0x5016, "exif:ThumbnailPlanes" },
{ 0x5017, "exif:ThumbnailRawBytes" },
{ 0x5018, "exif:ThumbnailSize" },
{ 0x5019, "exif:ThumbnailCompressedSize" },
{ 0x501a, "exif:ColorTransferFunction" },
{ 0x501b, "exif:ThumbnailData" },
{ 0x5020, "exif:ThumbnailImageWidth" },
{ 0x5021, "exif:ThumbnailImageHeight" },
{ 0x5022, "exif:ThumbnailBitsPerSample" },
{ 0x5023, "exif:ThumbnailCompression" },
{ 0x5024, "exif:ThumbnailPhotometricInterp" },
{ 0x5025, "exif:ThumbnailImageDescription" },
{ 0x5026, "exif:ThumbnailEquipMake" },
{ 0x5027, "exif:ThumbnailEquipModel" },
{ 0x5028, "exif:ThumbnailStripOffsets" },
{ 0x5029, "exif:ThumbnailOrientation" },
{ 0x502a, "exif:ThumbnailSamplesPerPixel" },
{ 0x502b, "exif:ThumbnailRowsPerStrip" },
{ 0x502c, "exif:ThumbnailStripBytesCount" },
{ 0x502d, "exif:ThumbnailResolutionX" },
{ 0x502e, "exif:ThumbnailResolutionY" },
{ 0x502f, "exif:ThumbnailPlanarConfig" },
{ 0x5030, "exif:ThumbnailResolutionUnit" },
{ 0x5031, "exif:ThumbnailTransferFunction" },
{ 0x5032, "exif:ThumbnailSoftwareUsed" },
{ 0x5033, "exif:ThumbnailDateTime" },
{ 0x5034, "exif:ThumbnailArtist" },
{ 0x5035, "exif:ThumbnailWhitePoint" },
{ 0x5036, "exif:ThumbnailPrimaryChromaticities" },
{ 0x5037, "exif:ThumbnailYCbCrCoefficients" },
{ 0x5038, "exif:ThumbnailYCbCrSubsampling" },
{ 0x5039, "exif:ThumbnailYCbCrPositioning" },
{ 0x503A, "exif:ThumbnailRefBlackWhite" },
{ 0x503B, "exif:ThumbnailCopyRight" },
{ 0x5090, "exif:LuminanceTable" },
{ 0x5091, "exif:ChrominanceTable" },
{ 0x5100, "exif:FrameDelay" },
{ 0x5101, "exif:LoopCount" },
{ 0x5110, "exif:PixelUnit" },
{ 0x5111, "exif:PixelPerUnitX" },
{ 0x5112, "exif:PixelPerUnitY" },
{ 0x5113, "exif:PaletteHistogram" },
{ 0x1000, "exif:RelatedImageFileFormat" },
{ 0x1001, "exif:RelatedImageLength" },
{ 0x1002, "exif:RelatedImageWidth" },
{ 0x800d, "exif:ImageID" },
{ 0x80e3, "exif:Matteing" },
{ 0x80e4, "exif:DataType" },
{ 0x80e5, "exif:ImageDepth" },
{ 0x80e6, "exif:TileDepth" },
{ 0x828d, "exif:CFARepeatPatternDim" },
{ 0x828e, "exif:CFAPattern2" },
{ 0x828f, "exif:BatteryLevel" },
{ 0x8298, "exif:Copyright" },
{ 0x829a, "exif:ExposureTime" },
{ 0x829d, "exif:FNumber" },
{ 0x83bb, "exif:IPTC/NAA" },
{ 0x84e3, "exif:IT8RasterPadding" },
{ 0x84e5, "exif:IT8ColorTable" },
{ 0x8649, "exif:ImageResourceInformation" },
{ 0x8769, "exif:ExifOffset" }, /* specs as "Exif IFD Pointer"? */
{ 0x8773, "exif:InterColorProfile" },
{ 0x8822, "exif:ExposureProgram" },
{ 0x8824, "exif:SpectralSensitivity" },
{ 0x8825, "exif:GPSInfo" }, /* specs as "GPSInfo IFD Pointer"? */
{ 0x8827, "exif:PhotographicSensitivity" },
{ 0x8828, "exif:OECF" },
{ 0x8829, "exif:Interlace" },
{ 0x882a, "exif:TimeZoneOffset" },
{ 0x882b, "exif:SelfTimerMode" },
{ 0x8830, "exif:SensitivityType" },
{ 0x8831, "exif:StandardOutputSensitivity" },
{ 0x8832, "exif:RecommendedExposureIndex" },
{ 0x8833, "exif:ISOSpeed" },
{ 0x8834, "exif:ISOSpeedLatitudeyyy" },
{ 0x8835, "exif:ISOSpeedLatitudezzz" },
{ 0x9000, "exif:ExifVersion" },
{ 0x9003, "exif:DateTimeOriginal" },
{ 0x9004, "exif:DateTimeDigitized" },
{ 0x9010, "exif:OffsetTime" },
{ 0x9011, "exif:OffsetTimeOriginal" },
{ 0x9012, "exif:OffsetTimeDigitized" },
{ 0x9101, "exif:ComponentsConfiguration" },
{ 0x9102, "exif:CompressedBitsPerPixel" },
{ 0x9201, "exif:ShutterSpeedValue" },
{ 0x9202, "exif:ApertureValue" },
{ 0x9203, "exif:BrightnessValue" },
{ 0x9204, "exif:ExposureBiasValue" },
{ 0x9205, "exif:MaxApertureValue" },
{ 0x9206, "exif:SubjectDistance" },
{ 0x9207, "exif:MeteringMode" },
{ 0x9208, "exif:LightSource" },
{ 0x9209, "exif:Flash" },
{ 0x920a, "exif:FocalLength" },
{ 0x920b, "exif:FlashEnergy" },
{ 0x920c, "exif:SpatialFrequencyResponse" },
{ 0x920d, "exif:Noise" },
{ 0x9214, "exif:SubjectArea" },
{ 0x9290, "exif:SubSecTime" },
{ 0x9291, "exif:SubSecTimeOriginal" },
{ 0x9292, "exif:SubSecTimeDigitized" },
{ 0x9211, "exif:ImageNumber" },
{ 0x9212, "exif:SecurityClassification" },
{ 0x9213, "exif:ImageHistory" },
{ 0x9214, "exif:SubjectArea" },
{ 0x9215, "exif:ExposureIndex" },
{ 0x9216, "exif:TIFF-EPStandardID" },
{ 0x927c, "exif:MakerNote" },
{ 0x9286, "exif:UserComment" },
{ 0x9290, "exif:SubSecTime" },
{ 0x9291, "exif:SubSecTimeOriginal" },
{ 0x9292, "exif:SubSecTimeDigitized" },
{ 0x9400, "exif:Temperature" },
{ 0x9401, "exif:Humidity" },
{ 0x9402, "exif:Pressure" },
{ 0x9403, "exif:WaterDepth" },
{ 0x9404, "exif:Acceleration" },
{ 0x9405, "exif:CameraElevationAngle" },
{ 0x9C9b, "exif:WinXP-Title" },
{ 0x9C9c, "exif:WinXP-Comments" },
{ 0x9C9d, "exif:WinXP-Author" },
{ 0x9C9e, "exif:WinXP-Keywords" },
{ 0x9C9f, "exif:WinXP-Subject" },
{ 0xa000, "exif:FlashPixVersion" },
{ 0xa001, "exif:ColorSpace" },
{ 0xa002, "exif:PixelXDimension" },
{ 0xa003, "exif:PixelYDimension" },
{ 0xa004, "exif:RelatedSoundFile" },
{ 0xa005, "exif:InteroperabilityOffset" },
{ 0xa20b, "exif:FlashEnergy" },
{ 0xa20c, "exif:SpatialFrequencyResponse" },
{ 0xa20d, "exif:Noise" },
{ 0xa20e, "exif:FocalPlaneXResolution" },
{ 0xa20f, "exif:FocalPlaneYResolution" },
{ 0xa210, "exif:FocalPlaneResolutionUnit" },
{ 0xa214, "exif:SubjectLocation" },
{ 0xa215, "exif:ExposureIndex" },
{ 0xa216, "exif:TIFF/EPStandardID" },
{ 0xa217, "exif:SensingMethod" },
{ 0xa300, "exif:FileSource" },
{ 0xa301, "exif:SceneType" },
{ 0xa302, "exif:CFAPattern" },
{ 0xa401, "exif:CustomRendered" },
{ 0xa402, "exif:ExposureMode" },
{ 0xa403, "exif:WhiteBalance" },
{ 0xa404, "exif:DigitalZoomRatio" },
{ 0xa405, "exif:FocalLengthIn35mmFilm" },
{ 0xa406, "exif:SceneCaptureType" },
{ 0xa407, "exif:GainControl" },
{ 0xa408, "exif:Contrast" },
{ 0xa409, "exif:Saturation" },
{ 0xa40a, "exif:Sharpness" },
{ 0xa40b, "exif:DeviceSettingDescription" },
{ 0xa40c, "exif:SubjectDistanceRange" },
{ 0xa420, "exif:ImageUniqueID" },
{ 0xa430, "exif:CameraOwnerName" },
{ 0xa431, "exif:BodySerialNumber" },
{ 0xa432, "exif:LensSpecification" },
{ 0xa433, "exif:LensMake" },
{ 0xa434, "exif:LensModel" },
{ 0xa435, "exif:LensSerialNumber" },
{ 0xc4a5, "exif:PrintImageMatching" },
{ 0xa500, "exif:Gamma" },
{ 0xc640, "exif:CR2Slice" },
{ 0x10000, "exif:GPSVersionID" },
{ 0x10001, "exif:GPSLatitudeRef" },
{ 0x10002, "exif:GPSLatitude" },
{ 0x10003, "exif:GPSLongitudeRef" },
{ 0x10004, "exif:GPSLongitude" },
{ 0x10005, "exif:GPSAltitudeRef" },
{ 0x10006, "exif:GPSAltitude" },
{ 0x10007, "exif:GPSTimeStamp" },
{ 0x10008, "exif:GPSSatellites" },
{ 0x10009, "exif:GPSStatus" },
{ 0x1000a, "exif:GPSMeasureMode" },
{ 0x1000b, "exif:GPSDop" },
{ 0x1000c, "exif:GPSSpeedRef" },
{ 0x1000d, "exif:GPSSpeed" },
{ 0x1000e, "exif:GPSTrackRef" },
{ 0x1000f, "exif:GPSTrack" },
{ 0x10010, "exif:GPSImgDirectionRef" },
{ 0x10011, "exif:GPSImgDirection" },
{ 0x10012, "exif:GPSMapDatum" },
{ 0x10013, "exif:GPSDestLatitudeRef" },
{ 0x10014, "exif:GPSDestLatitude" },
{ 0x10015, "exif:GPSDestLongitudeRef" },
{ 0x10016, "exif:GPSDestLongitude" },
{ 0x10017, "exif:GPSDestBearingRef" },
{ 0x10018, "exif:GPSDestBearing" },
{ 0x10019, "exif:GPSDestDistanceRef" },
{ 0x1001a, "exif:GPSDestDistance" },
{ 0x1001b, "exif:GPSProcessingMethod" },
{ 0x1001c, "exif:GPSAreaInformation" },
{ 0x1001d, "exif:GPSDateStamp" },
{ 0x1001e, "exif:GPSDifferential" },
{ 0x1001f, "exif:GPSHPositioningError" },
{ 0x00000, "" }
}; /* http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf */
const StringInfo
*profile;
const unsigned char
*directory,
*exif;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
MagickBooleanType
status;
ssize_t
i;
size_t
entry,
length,
number_entries,
tag,
tag_value;
SplayTreeInfo
*exif_resources;
ssize_t
all,
id,
level,
offset,
tag_offset;
static int
tag_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
/*
If EXIF data exists, then try to parse the request for a tag.
*/
profile=GetImageProfile(image,"exif");
if (profile == (const StringInfo *) NULL)
return(MagickFalse);
if ((property == (const char *) NULL) || (*property == '\0'))
return(MagickFalse);
while (isspace((int) ((unsigned char) *property)) != 0)
property++;
if (strlen(property) <= 5)
return(MagickFalse);
all=0;
tag=(~0UL);
switch (*(property+5))
{
case '*':
{
/*
Caller has asked for all the tags in the EXIF data.
*/
tag=0;
all=1; /* return the data in description=value format */
break;
}
case '!':
{
tag=0;
all=2; /* return the data in tagid=value format */
break;
}
case '#':
case '@':
{
int
c;
size_t
n;
/*
Check for a hex based tag specification first.
*/
tag=(*(property+5) == '@') ? 1UL : 0UL;
property+=6;
n=strlen(property);
if (n != 4)
return(MagickFalse);
/*
Parse tag specification as a hex number.
*/
n/=4;
do
{
for (i=(ssize_t) n-1L; i >= 0; i--)
{
c=(*property++);
tag<<=4;
if ((c >= '0') && (c <= '9'))
tag|=(c-'0');
else
if ((c >= 'A') && (c <= 'F'))
tag|=(c-('A'-10));
else
if ((c >= 'a') && (c <= 'f'))
tag|=(c-('a'-10));
else
return(MagickFalse);
}
} while (*property != '\0');
break;
}
default:
{
/*
Try to match the text with a tag name instead.
*/
for (i=0; ; i++)
{
if (EXIFTag[i].tag == 0)
break;
if (LocaleCompare(EXIFTag[i].description,property) == 0)
{
tag=(size_t) EXIFTag[i].tag;
break;
}
}
break;
}
}
if (tag == (~0UL))
return(MagickFalse);
length=GetStringInfoLength(profile);
if (length < 6)
return(MagickFalse);
exif=GetStringInfoDatum(profile);
while (length != 0)
{
if (ReadPropertyByte(&exif,&length) != 0x45)
continue;
if (ReadPropertyByte(&exif,&length) != 0x78)
continue;
if (ReadPropertyByte(&exif,&length) != 0x69)
continue;
if (ReadPropertyByte(&exif,&length) != 0x66)
continue;
if (ReadPropertyByte(&exif,&length) != 0x00)
continue;
if (ReadPropertyByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadPropertySignedShort(LSBEndian,exif);
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadPropertyUnsignedShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This is the offset to the first IFD.
*/
offset=(ssize_t) ReadPropertySignedLong(endian,exif+4);
if ((offset < 0) || (size_t) offset >= length)
return(MagickFalse);
/*
Set the pointer to the first IFD and follow it where it leads.
*/
status=MagickFalse;
directory=exif+offset;
level=0;
entry=0;
tag_offset=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
/*
If there is anything on the stack then pop it off.
*/
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
tag_offset=directory_stack[level].offset;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=(size_t) ReadPropertyUnsignedShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
unsigned char
*p,
*q;
size_t
format;
ssize_t
number_bytes,
components;
q=(unsigned char *) (directory+(12*entry)+2);
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(size_t) ReadPropertyUnsignedShort(endian,q)+tag_offset;
format=(size_t) ReadPropertyUnsignedShort(endian,q+2);
if (format >= (sizeof(tag_bytes)/sizeof(*tag_bytes)))
break;
if (format == 0)
break; /* corrupt EXIF */
components=(ssize_t) ReadPropertySignedLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*tag_bytes[format];
if (number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
ssize_t
dir_offset;
/*
The directory entry contains an offset.
*/
dir_offset=(ssize_t) ReadPropertySignedLong(endian,q+8);
if ((dir_offset < 0) || (size_t) dir_offset >= length)
continue;
if (((size_t) dir_offset+number_bytes) < (size_t) dir_offset)
continue; /* prevent overflow */
if (((size_t) dir_offset+number_bytes) > length)
continue;
p=(unsigned char *) (exif+dir_offset);
}
if ((all != 0) || (tag == (size_t) tag_value))
{
char
buffer[MaxTextExtent],
*value;
if ((p < exif) || (p > (exif+length-tag_bytes[format])))
break;
value=(char *) NULL;
*buffer='\0';
switch (format)
{
case EXIF_FMT_BYTE:
case EXIF_FMT_UNDEFINED:
{
value=(char *) NULL;
if (~((size_t) number_bytes) >= 1)
value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL,
sizeof(*value));
if (value != (char *) NULL)
{
for (i=0; i < (ssize_t) number_bytes; i++)
{
value[i]='.';
if (isprint((int) p[i]) != 0)
value[i]=(char) p[i];
}
value[i]='\0';
}
break;
}
case EXIF_FMT_SBYTE:
{
EXIFMultipleValues(1,"%.20g",(double) (*(signed char *) p1));
break;
}
case EXIF_FMT_SSHORT:
{
EXIFMultipleValues(2,"%hd",ReadPropertySignedShort(endian,p1));
break;
}
case EXIF_FMT_USHORT:
{
EXIFMultipleValues(2,"%hu",ReadPropertyUnsignedShort(endian,p1));
break;
}
case EXIF_FMT_ULONG:
{
EXIFMultipleValues(4,"%.20g",(double)
ReadPropertyUnsignedLong(endian,p1));
break;
}
case EXIF_FMT_SLONG:
{
EXIFMultipleValues(4,"%.20g",(double)
ReadPropertySignedLong(endian,p1));
break;
}
case EXIF_FMT_URATIONAL:
{
EXIFMultipleFractions(8,"%.20g/%.20g",(double)
ReadPropertyUnsignedLong(endian,p1),(double)
ReadPropertyUnsignedLong(endian,p1+4));
break;
}
case EXIF_FMT_SRATIONAL:
{
EXIFMultipleFractions(8,"%.20g/%.20g",(double)
ReadPropertySignedLong(endian,p1),(double)
ReadPropertySignedLong(endian,p1+4));
break;
}
case EXIF_FMT_SINGLE:
{
EXIFMultipleValues(4,"%f",(double) *(float *) p1);
break;
}
case EXIF_FMT_DOUBLE:
{
EXIFMultipleValues(8,"%f",*(double *) p1);
break;
}
case EXIF_FMT_STRING:
default:
{
if ((p < exif) || (p > (exif+length-number_bytes)))
break;
value=(char *) NULL;
if (~((size_t) number_bytes) >= 1)
value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL,
sizeof(*value));
if (value != (char *) NULL)
{
ssize_t
i;
for (i=0; i < (ssize_t) number_bytes; i++)
{
value[i]='.';
if ((isprint((int) p[i]) != 0) || (p[i] == '\0'))
value[i]=(char) p[i];
}
value[i]='\0';
}
break;
}
}
if (value != (char *) NULL)
{
char
*key;
const char
*p;
key=AcquireString(property);
switch (all)
{
case 1:
{
const char
*description;
ssize_t
i;
description="unknown";
for (i=0; ; i++)
{
if (EXIFTag[i].tag == 0)
break;
if (EXIFTag[i].tag == tag_value)
{
description=EXIFTag[i].description;
break;
}
}
(void) FormatLocaleString(key,MaxTextExtent,"%s",
description);
if (level == 2)
(void) SubstituteString(&key,"exif:","exif:thumbnail:");
break;
}
case 2:
{
if (tag_value < 0x10000)
(void) FormatLocaleString(key,MaxTextExtent,"#%04lx",
(unsigned long) tag_value);
else
if (tag_value < 0x20000)
(void) FormatLocaleString(key,MaxTextExtent,"@%04lx",
(unsigned long) (tag_value & 0xffff));
else
(void) FormatLocaleString(key,MaxTextExtent,"unknown");
break;
}
default:
{
if (level == 2)
(void) SubstituteString(&key,"exif:","exif:thumbnail:");
}
}
p=(const char *) NULL;
if (image->properties != (void *) NULL)
p=(const char *) GetValueFromSplayTree((SplayTreeInfo *)
image->properties,key);
if (p == (const char *) NULL)
(void) SetImageProperty((Image *) image,key,value);
value=DestroyString(value);
key=DestroyString(key);
status=MagickTrue;
}
}
if ((tag_value == TAG_EXIF_OFFSET) ||
(tag_value == TAG_INTEROP_OFFSET) || (tag_value == TAG_GPS_OFFSET))
{
ssize_t
offset;
offset=(ssize_t) ReadPropertySignedLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
ssize_t
tag_offset1;
tag_offset1=(ssize_t) ((tag_value == TAG_GPS_OFFSET) ? 0x10000 :
0);
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
directory_stack[level].offset=tag_offset;
level++;
/*
Check for duplicate tag.
*/
for (i=0; i < level; i++)
if (directory_stack[i].directory == (exif+tag_offset1))
break;
if (i < level)
break; /* duplicate tag */
directory_stack[level].directory=exif+offset;
directory_stack[level].offset=tag_offset1;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)+4) > (exif+length))
break;
offset=(ssize_t) ReadPropertySignedLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
directory_stack[level].offset=tag_offset1;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(status);
}
| null | null | 194,963 | 48271581080146755098392552117960858012 | 869 | https://github.com/ImageMagick/ImageMagick/pull/5034 | other |
| ImageMagick6 | 450949ed017f009b399c937cf362f0058eacc5fa | 1 |
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
const unsigned char
*p;
IndexPacket
*indexes;
PixelPacket
*q;
ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
indexes=GetAuthenticIndexQueue(image);
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
q++;
}
else
{
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit=0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
| null | null | 194,989 | 147803926290496005105703364084246921561 | 75 | Pull request: https://github.com/ImageMagick/ImageMagick/pull/4963 | other |
| tensorflow | c79ccba517dbb1a0ccb9b01ee3bd2a63748b60dd | 1 |
Status ImmutableExecutorState::Initialize(const Graph& graph) {
TF_RETURN_IF_ERROR(gview_.Initialize(&graph));
// Build the information about frames in this subgraph.
ControlFlowInfo cf_info;
TF_RETURN_IF_ERROR(BuildControlFlowInfo(&graph, &cf_info));
for (auto& it : cf_info.unique_frame_names) {
EnsureFrameInfo(it)->nodes =
absl::make_unique<std::vector<const NodeItem*>>();
}
root_frame_info_ = frame_info_[""].get();
pending_ids_.resize(gview_.num_nodes());
// Preprocess every node in the graph to create an instance of op
// kernel for each node.
requires_control_flow_ = false;
for (const Node* n : graph.nodes()) {
if (IsSink(n)) continue;
if (IsSwitch(n) || IsMerge(n) || IsEnter(n) || IsExit(n)) {
requires_control_flow_ = true;
} else if (IsRecv(n)) {
// A Recv node from a different device may produce dead tensors from
// non-local control-flow nodes.
//
// TODO(mrry): Track whether control flow was present in the
// pre-partitioned graph, and enable the caller (e.g.
// `DirectSession`) to relax this constraint.
string send_device;
string recv_device;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "send_device", &send_device));
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "recv_device", &recv_device));
if (send_device != recv_device) {
requires_control_flow_ = true;
}
}
const int id = n->id();
const string& frame_name = cf_info.frame_names[id];
FrameInfo* frame_info = EnsureFrameInfo(frame_name);
NodeItem* item = gview_.node(id);
item->node_id = id;
item->input_start = frame_info->total_inputs;
frame_info->total_inputs += n->num_inputs();
Status s = params_.create_kernel(n->properties(), &item->kernel);
if (!s.ok()) {
item->kernel = nullptr;
s = AttachDef(s, *n);
return s;
}
CHECK(item->kernel);
item->kernel_is_async = (item->kernel->AsAsync() != nullptr);
item->is_merge = IsMerge(n);
item->is_any_consumer_merge_or_control_trigger = false;
for (const Node* consumer : n->out_nodes()) {
if (IsMerge(consumer) || IsControlTrigger(consumer)) {
item->is_any_consumer_merge_or_control_trigger = true;
break;
}
}
const Tensor* const_tensor = item->kernel->const_tensor();
if (const_tensor) {
// Hold onto a shallow copy of the constant tensor in `*this` so that the
// reference count does not drop to 1. This prevents the constant tensor
// from being forwarded, and its buffer reused.
const_tensors_.emplace_back(*const_tensor);
}
item->const_tensor = const_tensor;
item->is_noop = (item->kernel->type_string_view() == "NoOp");
item->is_enter = IsEnter(n);
if (item->is_enter) {
bool is_constant_enter;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), "is_constant", &is_constant_enter));
item->is_constant_enter = is_constant_enter;
string frame_name;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "frame_name", &frame_name));
FrameInfo* frame_info = frame_info_[frame_name].get();
int parallel_iterations;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), "parallel_iterations", ¶llel_iterations));
if (frame_info->parallel_iterations == -1) {
frame_info->parallel_iterations = parallel_iterations;
} else if (frame_info->parallel_iterations != parallel_iterations) {
LOG(WARNING) << "Loop frame \"" << frame_name
<< "\" had two different values for parallel_iterations: "
<< frame_info->parallel_iterations << " vs. "
<< parallel_iterations << ".";
}
if (enter_frame_info_.size() <= id) {
enter_frame_info_.resize(id + 1);
}
enter_frame_info_[id] = frame_info;
} else {
item->is_constant_enter = false;
}
item->is_exit = IsExit(n);
item->is_control_trigger = IsControlTrigger(n);
item->is_source = IsSource(n);
item->is_enter_exit_or_next_iter =
(IsEnter(n) || IsExit(n) || IsNextIteration(n));
item->is_transfer_node = IsTransferNode(n);
item->is_initialization_op = IsInitializationOp(n);
item->is_recv_or_switch = IsRecv(n) || IsSwitch(n);
item->is_next_iteration = IsNextIteration(n);
item->is_distributed_communication = IsDistributedCommunication(n);
// Compute the maximum values we'll store for this node in the
// pending counts data structure, and allocate a handle in
// that frame's pending counts data structure that has enough
// space to store these maximal count values.
size_t max_pending, max_dead;
GetMaxPendingCounts(n, &max_pending, &max_dead);
pending_ids_[id] =
frame_info->pending_counts_layout.CreateHandle(max_pending, max_dead);
// See if this node is a root node, and if so, add item to root_nodes_.
if (n->in_edges().empty()) {
root_nodes_.push_back(item);
}
// Initialize static information about the frames in the graph.
frame_info->nodes->push_back(item);
if (item->is_enter) {
string enter_name;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "frame_name", &enter_name));
EnsureFrameInfo(enter_name)->input_count++;
}
// Record information about whether each output of the op is used.
std::unique_ptr<bool[]> outputs_required(new bool[n->num_outputs()]);
std::fill(&outputs_required[0], &outputs_required[n->num_outputs()], false);
int32_t unused_outputs = n->num_outputs();
for (const Edge* e : n->out_edges()) {
if (IsSink(e->dst())) continue;
if (e->src_output() >= 0) {
if (!outputs_required[e->src_output()]) {
--unused_outputs;
outputs_required[e->src_output()] = true;
}
}
}
if (unused_outputs > 0) {
for (int i = 0; i < n->num_outputs(); ++i) {
if (!outputs_required[i]) {
metrics::RecordUnusedOutput(n->type_string());
}
}
item->outputs_required = std::move(outputs_required);
}
}
// Rewrite each `EdgeInfo::input_slot` member to refer directly to the input
// location.
for (const Node* n : graph.nodes()) {
if (IsSink(n)) continue;
const int id = n->id();
NodeItem* item = gview_.node(id);
for (EdgeInfo& e : item->mutable_output_edges()) {
const int dst_id = e.dst_id;
NodeItem* dst_item = gview_.node(dst_id);
e.input_slot += dst_item->input_start;
}
}
// Initialize PendingCounts only after pending_ids_[node.id] is initialized
// for all nodes.
InitializePending(&graph, cf_info);
return gview_.SetAllocAttrs(&graph, params_.device);
}
| null | null | 194,994 | 282942385653353331018173647685166819573 | 179 |
Fix memory leak when a graph node is invalid.
If a graph node is invalid but a kernel is created then we set the kernel back to `nullptr` but we forget to delete it. Hence, we get a memory leak.
PiperOrigin-RevId: 408968108
Change-Id: I1d8a9d0d8988ed5e08be8b9f2004ce1b4cd11b7c
| other |
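
The commit message above pinpoints an error path that overwrites `item->kernel` with `nullptr` without releasing the allocation. Below is a minimal, self-contained C++ sketch of that pattern and its fix; `Kernel`, `Status`, and `CreateKernel` are invented stand-ins for illustration, not TensorFlow's types.

```cpp
#include <cstdlib>

struct Kernel {};            // stand-in for OpKernel
struct Status { bool ok; };  // stand-in for tensorflow::Status

// Like create_kernel in the function above, the factory may have allocated
// *out even though it reports failure (e.g. the graph node is invalid).
Status CreateKernel(Kernel** out) {
  *out = new Kernel();
  return Status{false};  // simulate the invalid-node failure
}

int main() {
  Kernel* kernel = nullptr;
  Status s = CreateKernel(&kernel);
  if (!s.ok) {
    delete kernel;     // the fix: free the allocation first
    kernel = nullptr;  // the vulnerable code did only this, leaking the kernel
    return EXIT_FAILURE;
  }
  delete kernel;
  return EXIT_SUCCESS;
}
```
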
| tensorflow | 4f38b1ac8e42727e18a2f0bde06d3bee8e77b250 | 1 |
Status GetInitOp(const string& export_dir, const MetaGraphDef& meta_graph_def,
string* init_op_name) {
const auto& sig_def_map = meta_graph_def.signature_def();
const auto& init_op_sig_it =
meta_graph_def.signature_def().find(kSavedModelInitOpSignatureKey);
if (init_op_sig_it != sig_def_map.end()) {
*init_op_name = init_op_sig_it->second.outputs()
.find(kSavedModelInitOpSignatureKey)
->second.name();
return Status::OK();
}
const auto& collection_def_map = meta_graph_def.collection_def();
string init_op_collection_key;
if (collection_def_map.find(kSavedModelMainOpKey) !=
collection_def_map.end()) {
init_op_collection_key = kSavedModelMainOpKey;
} else {
init_op_collection_key = kSavedModelLegacyInitOpKey;
}
const auto init_op_it = collection_def_map.find(init_op_collection_key);
if (init_op_it != collection_def_map.end()) {
if (init_op_it->second.node_list().value_size() != 1) {
return errors::FailedPrecondition(
strings::StrCat("Expected exactly one main op in : ", export_dir));
}
*init_op_name = init_op_it->second.node_list().value(0);
}
return Status::OK();
}
| null | null | 194,996 | 266489658062819110072215039330251041310 | 31 |
Prevent null dereference read in `GetInitOp`.
We have a map of maps. We test that the key exists in the first map but then we don't have any validation that this also means the second map has the needed key. In the scenarios where this is not the case, we'll dereference a nullptr, if we don't have this check
PiperOrigin-RevId: 408739325
Change-Id: If9bb7ed759aba1f3b56a34913f209508dbaf65ce
| other |
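
The message above describes a lookup in a map of maps where only the outer key is validated. Here is a minimal, self-contained sketch of the guarded form, with standard containers standing in for the protobuf maps; the key string is illustrative, not guaranteed to match TensorFlow's constant.

```cpp
#include <iostream>
#include <map>
#include <string>

int main() {
  // Outer map: signature name -> (output name -> node name), standing in
  // for meta_graph_def.signature_def() and its nested outputs() map.
  std::map<std::string, std::map<std::string, std::string>> sig_def_map;
  sig_def_map["init_op_key"] = {};  // outer key present, inner key absent

  auto outer = sig_def_map.find("init_op_key");
  if (outer != sig_def_map.end()) {
    // The fix: validate the inner lookup too, instead of dereferencing
    // the result of find() unconditionally.
    auto inner = outer->second.find("init_op_key");
    if (inner == outer->second.end()) {
      std::cerr << "init op output missing; refusing to dereference\n";
      return 1;  // report an ordinary error instead of crashing
    }
    std::cout << "init op: " << inner->second << "\n";
  }
  return 0;
}
```
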
| tensorflow | 240655511cd3e701155f944a972db71b6c0b1bb6 | 1 |
Status ConstantFolding::IsSimplifiableReshape(
const NodeDef& node, const GraphProperties& properties) const {
if (!IsReshape(node)) {
return errors::Internal("Node ", node.name(), " is not a Reshape node");
}
if (2 > node.input_size()) {
return errors::Internal("Node ", node.name(),
" must have at most 2 inputs but has ",
node.input_size());
}
const NodeDef* new_shape = node_map_->GetNode(node.input(1));
if (!IsReallyConstant(*new_shape)) {
return errors::Internal("Node ", node.name(), " has shape ",
new_shape->DebugString(),
" which is not a constant");
}
TensorVector outputs;
auto outputs_cleanup = gtl::MakeCleanup([&outputs] {
for (const auto& output : outputs) {
delete output.tensor;
}
});
Status s = EvaluateNode(*new_shape, TensorVector(), &outputs);
if (!s.ok()) {
return errors::Internal("Could not evaluate node ", node.name());
}
if (outputs.size() != 1) {
return errors::Internal("Node ", node.name(),
" must have exactly 1 output but has ",
outputs.size());
}
const std::vector<OpInfo::TensorProperties>& props =
properties.GetInputProperties(node.name());
if (props.empty()) {
return errors::Internal("Node ", node.name(), " has no properties");
}
const OpInfo::TensorProperties& prop = props[0];
if (prop.dtype() == DT_INVALID) {
return errors::Internal("Node ", node.name(), " has property ",
prop.DebugString(), " with invalid dtype");
}
const PartialTensorShape shape(prop.shape());
if (!shape.IsFullyDefined()) {
return errors::Internal("Node ", node.name(), " has property ",
prop.DebugString(), " with shape ",
shape.DebugString(), " which is not fully defined");
}
PartialTensorShape new_dims;
if (outputs[0]->dtype() == DT_INT32) {
std::vector<int32> shp;
for (int i = 0; i < outputs[0]->NumElements(); ++i) {
int32_t dim = outputs[0]->flat<int32>()(i);
shp.push_back(dim);
}
TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims));
} else {
std::vector<int64_t> shp;
for (int i = 0; i < outputs[0]->NumElements(); ++i) {
int64_t dim = outputs[0]->flat<int64_t>()(i);
shp.push_back(dim);
}
TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims));
}
if (!shape.IsCompatibleWith(new_dims)) {
return errors::Internal("Expected shape ", shape.DebugString(),
"to be compatible with ", new_dims.DebugString());
}
return Status::OK();
}
| null | null | 194,998 | 202372901582726306674507712947570819518 | 74 |
Eliminate `CHECK`-fails from `IsSimplifiableReshape` via `MakeShape(<invalid shape>)`
PiperOrigin-RevId: 409166738
Change-Id: I7f0a3590b8acae3f3e3e2fe636e1f5ef285693cf
| other |
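
Note that `IsSimplifiableReshape` above already reports malformed inputs via `errors::Internal(...)`, but the trailing `TF_CHECK_OK(TensorShapeUtils::MakeShape(...))` calls still abort the process on an invalid shape, which is the `CHECK`-fail the commit removes. A self-contained sketch of propagating the failure instead, with a toy `MakeShape` stand-in:

```cpp
#include <iostream>
#include <vector>

// Toy stand-in for TensorShapeUtils::MakeShape: rejects negative dims.
bool MakeShape(const std::vector<long long>& dims) {
  for (long long d : dims)
    if (d < 0) return false;
  return true;
}

// Propagate the failure as an ordinary error, mirroring the switch away
// from TF_CHECK_OK (which aborts the whole process, a denial of service
// when the shape tensor is attacker-controlled).
int SimplifiableReshape(const std::vector<long long>& requested) {
  if (!MakeShape(requested)) {
    std::cerr << "invalid requested shape; reshape not simplifiable\n";
    return 1;  // recoverable error instead of a CHECK-fail
  }
  return 0;
}

int main() { return SimplifiableReshape({2, -7, 3}); }
```
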
| gpac | ad18ece95fa064efc0995c4ab2c985f77fb166ec | 1 |
u32 GetHintFormat(GF_TrackBox *trak)
{
GF_HintMediaHeaderBox *hmhd = (GF_HintMediaHeaderBox *)trak->Media->information->InfoHeader;
if (hmhd->type != GF_ISOM_BOX_TYPE_HMHD)
return 0;
if (!hmhd || !hmhd->subType) {
GF_Box *a = (GF_Box *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, 0);
if (!hmhd) return a ? a->type : 0;
if (a) hmhd->subType = a->type;
return hmhd->subType;
}
return hmhd->subType;
}
| null | null | 195,017 | 296790611538125275779975149143922065747 | 14 | fixed #1904 | other |
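
For reference, the flaw in `GetHintFormat` above is one of ordering: `hmhd->type` is dereferenced before the `!hmhd` null test ever runs. A simplified, self-contained sketch of the corrected ordering, using a stand-in struct rather than GPAC's types and omitting the original's fallback to the first sample-description box:

```cpp
#include <cstdint>

struct HintMediaHeader {  // stand-in for GF_HintMediaHeaderBox
  uint32_t type;
  uint32_t subType;
};

constexpr uint32_t kBoxTypeHMHD = 0x686D6864;  // 'hmhd'

uint32_t GetHintFormatSafe(const HintMediaHeader* hmhd) {
  if (hmhd == nullptr)  // the null test must precede any dereference
    return 0;
  if (hmhd->type != kBoxTypeHMHD)
    return 0;
  return hmhd->subType;
}

int main() {
  return (int)GetHintFormatSafe(nullptr);  // returns 0 instead of crashing
}
```
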
| tensorflow | 6b5adc0877de832b2a7c189532dbbbc64622eeb6 | 1 |
Status ConstantFolding::EvaluateOneFoldable(const NodeDef& node,
std::vector<NodeDef>* outputs,
bool* result_too_large) {
TensorVector inputs;
TensorVector output_tensors;
auto inputs_cleanup = gtl::MakeCleanup([&inputs, &output_tensors] {
for (const auto& input : inputs) {
delete input.tensor;
}
for (const auto& output : output_tensors) {
if (output.tensor) {
delete output.tensor;
}
}
});
size_t total_inputs_size = 0;
for (const auto& input : node.input()) {
const TensorId input_tensor = ParseTensorName(input);
if (input_tensor.index() < 0) {
// Control dependency
break;
}
const NodeDef* input_node = node_map_->GetNode(input);
if (!IsReallyConstant(*input_node)) {
return Status(error::INVALID_ARGUMENT,
strings::StrCat("Can't fold ", node.name(), ", its ", input,
" isn't constant"));
}
TF_RETURN_IF_ERROR(CheckAttrExists(*input_node, "value"));
const TensorProto& raw_val = input_node->attr().at("value").tensor();
if (raw_val.dtype() == DT_INVALID) {
return Status(
error::INVALID_ARGUMENT,
strings::StrCat("A tensor in the input node, with TensorId of ",
input_tensor.ToString(),
" has a dtype of DT_INVALID."));
}
Tensor* value = new Tensor(raw_val.dtype(), raw_val.tensor_shape());
if (!value->FromProto(raw_val)) {
delete (value);
return errors::InvalidArgument("Unable to make Tensor from proto for ",
node.name(), " with shape ",
raw_val.tensor_shape().DebugString());
}
inputs.emplace_back(value);
total_inputs_size += value->TotalBytes();
}
TF_RETURN_IF_ERROR(EvaluateNode(node, inputs, &output_tensors));
if (output_tensors.empty()) {
return Status(error::INVALID_ARGUMENT, "Expected at least one output.");
}
outputs->resize(output_tensors.size());
for (size_t i = 0; i < output_tensors.size(); i++) {
string node_name = OptimizedNodeName(node, "-folded");
if (output_tensors.size() > 1) {
node_name = strings::StrCat(node_name, "-", i);
}
if (output_tensors[i].tensor) {
Status s = CreateNodeDef(node_name, output_tensors[i], &outputs->at(i),
total_inputs_size);
if (!s.ok()) {
*result_too_large = true;
return s;
}
} else {
// Create an empty NodeDef to identify dead outputs (e.g. the output of a
// switch that's not selected by the switch predicate).
outputs->at(i) = NodeDef();
}
}
return Status::OK();
}
| null | null | 195,019 | 297485749252295872832567439059697111493 | 75 |
Prevent `CHECK`-fail when building reference tensor.
The tensor constructor does not allow reference dtypes, as these should not show up explicitly. However, when passed these invalid types instead of building an invalid object the constructor crashes via a `CHECK`-fail. We have a static builder that properly handles this case but is not applicable given current usage.
Instead, before calling the constructor, we can check that the dtype is not a reference type and return an error otherwise, given that the dtype is user controlled so malicious users can trigger denial of service.
PiperOrigin-RevId: 409662503
Change-Id: I5892f831fde7f276cd7ab34519cf6b8061c71a59
| other |
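
A minimal sketch of the guard the message describes: reject reference dtypes with an ordinary error before reaching a constructor that would `CHECK`-fail on them. The enum values and the fixed offset between value and reference dtypes are illustrative assumptions, not TensorFlow's definitions.

```cpp
#include <iostream>

// Stand-in dtype enum; reference dtypes are assumed to sit at a fixed
// offset (100 here) above their value dtypes.
enum DataType : int { DT_FLOAT = 1, DT_FLOAT_REF = 101 };

bool IsRefType(DataType dt) { return dt >= 100; }

int MakeTensorFromProto(DataType dtype) {
  // The fix: the dtype is user-controlled and the tensor constructor
  // CHECK-fails (aborts) on reference types, so reject them up front.
  if (IsRefType(dtype)) {
    std::cerr << "tensor proto carries a reference dtype; rejecting\n";
    return 1;  // ordinary error instead of a process abort
  }
  // ... safe to construct the tensor here ...
  return 0;
}

int main() { return MakeTensorFromProto(DT_FLOAT_REF); }
```
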
| glewlwyd | 125281f1c0d4b6a8b49f7e55a757205a2ef01fbe | 1 |
int callback_glewlwyd_user_auth (const struct _u_request * request, struct _u_response * response, void * user_data) {
struct config_elements * config = (struct config_elements *)user_data;
json_t * j_param = ulfius_get_json_body_request(request, NULL), * j_result = NULL;
const char * ip_source = get_ip_source(request);
char * issued_for = get_client_hostname(request);
char * session_uid, expires[129];
time_t now;
struct tm ts;
time(&now);
now += GLEWLWYD_DEFAULT_SESSION_EXPIRATION_COOKIE;
gmtime_r(&now, &ts);
strftime(expires, 128, "%a, %d %b %Y %T %Z", &ts);
if (j_param != NULL) {
if (json_string_length(json_object_get(j_param, "username"))) {
if (json_object_get(j_param, "scheme_type") == NULL || 0 == o_strcmp(json_string_value(json_object_get(j_param, "scheme_type")), "password")) {
if (json_string_length(json_object_get(j_param, "password"))) {
j_result = auth_check_user_credentials(config, json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "password")));
if (check_result_value(j_result, G_OK)) {
if ((session_uid = get_session_id(config, request)) == NULL) {
session_uid = generate_session_id();
}
if (user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), NULL, 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (1)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' authenticated with password", json_string_value(json_object_get(j_param, "username")));
}
o_free(session_uid);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID_SCHEME, 1, "scheme_type", "password", NULL);
} else {
if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
y_log_message(Y_LOG_LEVEL_WARNING, "Security - Authorization invalid for username %s at IP Address %s", json_string_value(json_object_get(j_param, "username")), ip_source);
}
if ((session_uid = get_session_id(config, request)) != NULL && user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), NULL, 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (2)");
}
o_free(session_uid);
response->status = 401;
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID_SCHEME, 1, "scheme_type", "password", NULL);
}
json_decref(j_result);
} else if (json_object_get(j_param, "password") != NULL && !json_is_string(json_object_get(j_param, "password"))) {
ulfius_set_string_body_response(response, 400, "password must be a string");
} else {
session_uid = get_session_id(config, request);
j_result = get_users_for_session(config, session_uid);
if (check_result_value(j_result, G_OK)) {
// Refresh username to set as default
if (user_session_update(config, u_map_get(request->map_cookie, config->session_key), u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), NULL, 0) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (3)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
}
} else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
response->status = 401;
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error get_users_for_session");
response->status = 500;
}
o_free(session_uid);
json_decref(j_result);
}
} else {
if (json_string_length(json_object_get(j_param, "scheme_type")) && json_string_length(json_object_get(j_param, "scheme_name")) && json_is_object(json_object_get(j_param, "value"))) {
j_result = auth_check_user_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_string_value(json_object_get(j_param, "username")), json_object_get(j_param, "value"), request);
if (check_result_value(j_result, G_ERROR_PARAM)) {
ulfius_set_string_body_response(response, 400, "bad scheme response");
} else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
y_log_message(Y_LOG_LEVEL_WARNING, "Security - Authorization invalid for username %s at IP Address %s", json_string_value(json_object_get(j_param, "username")), ip_source);
response->status = 401;
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_INVALID_SCHEME, 1, "scheme_type", json_string_value(json_object_get(j_param, "scheme_type")), "scheme_name", json_string_value(json_object_get(j_param, "scheme_name")), NULL);
} else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else if (check_result_value(j_result, G_OK)) {
if ((session_uid = get_session_id(config, request)) == NULL) {
session_uid = generate_session_id();
}
if (user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "scheme_name")), 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (4)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' authenticated with scheme '%s/%s'", json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")));
}
o_free(session_uid);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID, 1, NULL);
glewlwyd_metrics_increment_counter_va(config, GLWD_METRICS_AUTH_USER_VALID_SCHEME, 1, "scheme_type", json_string_value(json_object_get(j_param, "scheme_type")), "scheme_name", json_string_value(json_object_get(j_param, "scheme_name")), NULL);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error auth_check_user_scheme");
response->status = 500;
}
json_decref(j_result);
} else {
ulfius_set_string_body_response(response, 400, "scheme_type, scheme_name and value are mandatory");
}
}
} else {
if (json_string_length(json_object_get(j_param, "scheme_type")) && json_string_length(json_object_get(j_param, "scheme_name")) && json_is_object(json_object_get(j_param, "value"))) {
j_result = auth_check_identify_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_object_get(j_param, "value"), request);
if (check_result_value(j_result, G_ERROR_PARAM)) {
ulfius_set_string_body_response(response, 400, "bad scheme response");
} else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
y_log_message(Y_LOG_LEVEL_WARNING, "Security - Authorization invalid for username <UNKNOWN> at IP Address %s", ip_source);
response->status = 401;
} else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else if (check_result_value(j_result, G_OK)) {
if ((session_uid = get_session_id(config, request)) == NULL) {
session_uid = generate_session_id();
}
if (user_session_update(config, session_uid, u_map_get_case(request->map_header, "user-agent"), issued_for, json_string_value(json_object_get(j_result, "username")), json_string_value(json_object_get(j_param, "scheme_name")), 1) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error user_session_update (4)");
response->status = 500;
} else {
ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' authenticated with scheme '%s/%s'", json_string_value(json_object_get(j_result, "username")), json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")));
}
o_free(session_uid);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth - Error auth_check_user_scheme");
response->status = 500;
}
json_decref(j_result);
} else {
ulfius_set_string_body_response(response, 400, "username is mandatory");
}
}
} else {
ulfius_set_string_body_response(response, 400, "Input parameters must be in JSON format");
}
json_decref(j_param);
o_free(issued_for);
return U_CALLBACK_CONTINUE;
}
| null | null | 195,022 | 285857209784991775346799142884643791412 | 141 | Fix update session when auth fail | other |
| tensorflow | a68f68061e263a88321c104a6c911fe5598050a8 | 1 |
void Compute(OpKernelContext* context) override {
const Tensor* input_indices;
const Tensor* input_values;
const Tensor* input_shape;
SparseTensorsMap* map;
OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices));
OP_REQUIRES_OK(context, context->input("sparse_values", &input_values));
OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape));
OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map));
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
input_indices->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
input_values->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
input_shape->shape().DebugString()));
OP_REQUIRES(
context,
input_values->shape().dim_size(0) == input_indices->shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices. ", "Got ",
input_values->shape().dim_size(0),
" values, indices shape: ", input_indices->shape().DebugString()));
OP_REQUIRES(
context,
input_shape->shape().dim_size(0) == input_indices->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", input_shape->shape().dim_size(0),
" dimensions, indices shape: ",
input_indices->shape().DebugString()));
int rank = input_shape->NumElements();
OP_REQUIRES(
context, rank > 1,
errors::InvalidArgument(
"Rank of input SparseTensor should be > 1, but saw rank: ", rank));
auto input_shape_vec = input_shape->vec<int64_t>();
int new_num_elements = 1;
bool overflow_ocurred = false;
for (int i = 0; i < input_shape_vec.size(); i++) {
new_num_elements =
MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i));
if (new_num_elements < 0) {
overflow_ocurred = true;
break;
}
}
OP_REQUIRES(
context, !overflow_ocurred,
errors::Internal("Encountered overflow from large input shape."));
TensorShape tensor_input_shape(input_shape_vec);
gtl::InlinedVector<int64_t, 8> std_order(rank);
std::iota(std_order.begin(), std_order.end(), 0);
SparseTensor input_st;
OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
tensor_input_shape, std_order,
&input_st));
const int64_t N = input_shape_vec(0);
Tensor sparse_handles(DT_INT64, TensorShape({N}));
auto sparse_handles_t = sparse_handles.vec<int64_t>();
OP_REQUIRES_OK(context, input_st.IndicesValid());
// We can generate the output shape proto string now, for all
// minibatch entries.
TensorShape output_shape;
OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
input_shape_vec.data() + 1,
input_shape->NumElements() - 1, &output_shape));
// Get groups by minibatch dimension
std::unordered_set<int64_t> visited;
sparse::GroupIterable minibatch = input_st.group({0});
for (const auto& subset : minibatch) {
const int64_t b = subset.group()[0];
visited.insert(b);
OP_REQUIRES(
context, b > -1 && b < N,
errors::InvalidArgument(
"Received unexpected column 0 value in input SparseTensor: ", b,
" < 0 or >= N (= ", N, ")"));
const auto indices = subset.indices();
const auto values = subset.values<T>();
const int64_t num_entries = values.size();
Tensor output_indices = Tensor(DT_INT64, {num_entries, rank - 1});
Tensor output_values = Tensor(DataTypeToEnum<T>::value, {num_entries});
auto output_indices_t = output_indices.matrix<int64_t>();
auto output_values_t = output_values.vec<T>();
for (int i = 0; i < num_entries; ++i) {
for (int d = 1; d < rank; ++d) {
output_indices_t(i, d - 1) = indices(i, d);
}
output_values_t(i) = values(i);
}
SparseTensor st_i;
OP_REQUIRES_OK(context,
SparseTensor::Create(output_indices, output_values,
output_shape, &st_i));
int64_t handle;
OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
sparse_handles_t(b) = handle;
}
// Fill in any gaps; we must provide an empty ST for batch entries
// the grouper didn't find.
if (visited.size() < N) {
Tensor empty_indices(DT_INT64, {0, rank - 1});
Tensor empty_values(DataTypeToEnum<T>::value, {0});
SparseTensor empty_st;
OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
output_shape, &empty_st));
for (int64_t b = 0; b < N; ++b) {
// We skipped this batch entry.
if (visited.find(b) == visited.end()) {
int64_t handle;
OP_REQUIRES_OK(context,
map->AddSparseTensor(context, empty_st, &handle));
sparse_handles_t(b) = handle;
}
}
}
context->set_output(0, sparse_handles);
}
| null | null | 195,023
|
139072167298589600029731810982356585607
| 144
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
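In sketch form: the faulty check above accumulates MultiplyWithoutOverflow results into a plain int, so a product that fits in int64 but not in int can slip through. Building the shape through the status-returning TensorShapeUtils::MakeShape helper (already used later in the same function) avoids the hand-rolled arithmetic; the exact builder used by the patch may differ from this:

    // Reject shapes whose element count overflows, without manual arithmetic.
    TensorShape tensor_input_shape;
    OP_REQUIRES_OK(context,
                   TensorShapeUtils::MakeShape(input_shape_vec.data(),
                                               input_shape->NumElements(),
                                               &tensor_input_shape));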
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 1
|
nfs4_file_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
struct dentry *dentry = file_dentry(filp);
struct dentry *parent = NULL;
struct inode *dir;
unsigned openflags = filp->f_flags;
struct iattr attr;
int err;
/*
* If no cached dentry exists or if it's negative, NFSv4 handled the
* opens in ->lookup() or ->create().
*
* We only get this far for a cached positive dentry. We skipped
* revalidation, so handle it here by dropping the dentry and returning
* -EOPENSTALE. The VFS will retry the lookup/create/open.
*/
dprintk("NFS: open file(%pd2)\n", dentry);
err = nfs_check_flags(openflags);
if (err)
return err;
if ((openflags & O_ACCMODE) == 3)
return nfs_open(inode, filp);
/* We can't create new files here */
openflags &= ~(O_CREAT|O_EXCL);
parent = dget_parent(dentry);
dir = d_inode(parent);
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;
attr.ia_valid = ATTR_OPEN;
if (openflags & O_TRUNC) {
attr.ia_valid |= ATTR_SIZE;
attr.ia_size = 0;
filemap_write_and_wait(inode->i_mapping);
}
inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
switch (err) {
default:
goto out_put_ctx;
case -ENOENT:
case -ESTALE:
case -EISDIR:
case -ENOTDIR:
case -ELOOP:
goto out_drop;
}
}
if (inode != d_inode(dentry))
goto out_drop;
nfs_file_set_open_context(filp, ctx);
nfs_fscache_open_file(inode, filp);
err = 0;
out_put_ctx:
put_nfs_open_context(ctx);
out:
dput(parent);
return err;
out_drop:
d_drop(dentry);
err = -EOPENSTALE;
goto out_put_ctx;
}
| null | null | 195,026
|
169109460952889627998429081302789014514
| 78
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After opening a file a second time with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference a NULL nfs4_state on lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
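As a sketch of what the revert restores (assuming the pre-44942b4e code path; names as in the function above), the special access mode is downgraded to a normal read/write open instead of being diverted through nfs_open(), so the open context always ends up with a valid nfs4_state:

	/* O_ACCMODE == 3 is the special Linux access mode; treat it as O_RDWR
	 * so a real open state is established and nfs4_valid_open_stateid()
	 * never dereferences a NULL state on a later lseek(). */
	if ((openflags & O_ACCMODE) == 3)
		openflags--;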
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 1
|
void DecodePngV2(OpKernelContext* context, StringPiece input) {
int channel_bits = (data_type_ == DataType::DT_UINT8) ? 8 : 16;
png::DecodeContext decode;
OP_REQUIRES(
context, png::CommonInitDecode(input, channels_, channel_bits, &decode),
errors::InvalidArgument("Invalid PNG. Failed to initialize decoder."));
// Verify that width and height are not too large:
// - verify width and height don't overflow int.
// - width can later be multiplied by channels_ and sizeof(uint16), so
// verify single dimension is not too large.
// - verify when width and height are multiplied together, there are a few
// bits to spare as well.
const int width = static_cast<int>(decode.width);
const int height = static_cast<int>(decode.height);
const int64_t total_size =
static_cast<int64_t>(width) * static_cast<int64_t>(height);
if (width != static_cast<int64_t>(decode.width) || width <= 0 ||
width >= (1LL << 27) || height != static_cast<int64_t>(decode.height) ||
height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) {
OP_REQUIRES(context, false,
errors::InvalidArgument("PNG size too large for int: ",
decode.width, " by ", decode.height));
}
Tensor* output = nullptr;
// By the existing API, we support decoding PNG with `DecodeGif` op.
// We need to make sure to return 4-D shapes when using `DecodeGif`.
if (op_type_ == "DecodeGif") {
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({1, height, width, decode.channels}), &output));
} else {
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({height, width, decode.channels}), &output));
}
if (op_type_ == "DecodeBmp") {
// TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
// because currently `decode_(jpeg|png|gif)` ops can decode any one of
// jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
// anything but bmp formats. This behavior needs to be revisited. For more
// details, please refer to the bug.
OP_REQUIRES(context, false,
errors::InvalidArgument(
"Trying to decode PNG format using DecodeBmp op. Use "
"`decode_png` or `decode_image` instead."));
} else if (op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"DecodeAndCropJpeg operation can run on JPEG only, but "
"detected PNG."));
}
if (data_type_ == DataType::DT_UINT8) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint8>().data()),
decode.channels * width * sizeof(uint8), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_UINT16) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint16>().data()),
decode.channels * width * sizeof(uint16), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_FLOAT) {
// `png::CommonFinishDecode` does not support `float`. First allocate
// uint16 buffer for the image and decode in uint16 (lossless). Wrap the
// buffer in `unique_ptr` so that we don't forget to delete the buffer.
std::unique_ptr<uint16[]> buffer(
new uint16[height * width * decode.channels]);
OP_REQUIRES(
context,
png::CommonFinishDecode(reinterpret_cast<png_bytep>(buffer.get()),
decode.channels * width * sizeof(uint16),
&decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
// Convert uint16 image data to desired data type.
// Use eigen threadpooling to speed up the copy operation.
const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
TTypes<uint16, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
decode.channels);
float scale = 1. / std::numeric_limits<uint16>::max();
// Fill output tensor with desired dtype.
output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
}
}
| null | null | 195,028
|
299107358683663029962127482414664269910
| 94
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
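A sketch of the leak fix on the failing size check (the patched version of this kernel appears later in this dataset; the real patch keeps the offending dimensions in the error message):

    if (/* ... the width/height/total_size checks above fail ... */ false) {
      png::CommonFreeDecode(&decode);  // release libpng state before erroring
      OP_REQUIRES(context, false,
                  errors::InvalidArgument("PNG size too large for int"));
    }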
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 1
|
void Node::RunForwardTypeInference() {
VLOG(4) << "Forward type inference: " << props_->node_def.DebugString();
if (props_->fwd_type_fn == nullptr) {
return;
}
std::vector<Node*> input_nodes(props_->input_types.size(), nullptr);
std::vector<int> input_idx(props_->input_types.size(), 0);
for (const auto& edge : in_edges_) {
if (edge->IsControlEdge()) {
continue;
}
DCHECK(edge->dst_input() < input_nodes.size()) << DebugString();
int i = edge->dst_input();
input_nodes.at(i) = edge->src();
input_idx.at(i) = edge->src_output();
}
// Note: technically, we could use a very generic type when some of the inputs
// are unknown. But there is an expectation that a node will have complete
// inputs soon, so updating intermediate types is largely unnecessary.
for (const auto* node : input_nodes) {
if (node == nullptr) {
// Incomplete inputs, bail.
ClearTypeInfo();
return;
}
}
static FullTypeDef* no_type = new FullTypeDef();
std::vector<std::reference_wrapper<const FullTypeDef>> input_types;
for (int i = 0; i < input_nodes.size(); i++) {
const auto* node = input_nodes[i];
if (node->def().has_experimental_type()) {
const auto& node_t = node->def().experimental_type();
if (node_t.type_id() != TFT_UNSET) {
int ix = input_idx[i];
DCHECK(ix < node_t.args_size())
<< "input " << i << " should have an output " << ix
<< " but instead only has " << node_t.args_size()
<< " outputs: " << node_t.DebugString();
input_types.emplace_back(node_t.args(ix));
} else {
input_types.emplace_back(*no_type);
}
} else {
// Incomplete inputs, bail.
ClearTypeInfo();
return;
}
}
const auto infer_type = props_->fwd_type_fn(input_types);
const FullTypeDef infer_typedef = infer_type.ValueOrDie();
if (infer_typedef.type_id() != TFT_UNSET) {
MaybeCopyOnWrite();
*(props_->node_def.mutable_experimental_type()) = infer_typedef;
}
}
| null | null | 195,029
|
154152553733466173941655321722110922102
| 62
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
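A sketch of non-crashing handling, assuming the inference function keeps returning a StatusOr (the actual patch may propagate the Status to the caller instead):

  const auto infer_type = props_->fwd_type_fn(input_types);
  if (!infer_type.ok()) {
    // Invalid inputs: drop stale type info instead of CHECK-failing
    // inside ValueOrDie().
    ClearTypeInfo();
    return;
  }
  const FullTypeDef& infer_typedef = infer_type.ValueOrDie();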
|
other
|
tensorflow
|
b51b82fe65ebace4475e3c54eb089c18a4403f1c
| 1
|
void Compute(OpKernelContext* context) override {
const Tensor* input_indices;
const Tensor* input_values;
const Tensor* input_shape;
SparseTensorsMap* map;
OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices));
OP_REQUIRES_OK(context, context->input("sparse_values", &input_values));
OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape));
OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map));
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
input_indices->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
input_values->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
input_shape->shape().DebugString()));
int rank = input_shape->NumElements();
OP_REQUIRES(
context, rank > 1,
errors::InvalidArgument(
"Rank of input SparseTensor should be > 1, but saw rank: ", rank));
auto input_shape_vec = input_shape->vec<int64_t>();
int new_num_elements = 1;
    bool overflow_occurred = false;
for (int i = 0; i < input_shape_vec.size(); i++) {
new_num_elements =
MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i));
if (new_num_elements < 0) {
        overflow_occurred = true;
break;
}
}
OP_REQUIRES(
        context, !overflow_occurred,
errors::Internal("Encountered overflow from large input shape."));
TensorShape tensor_input_shape(input_shape_vec);
gtl::InlinedVector<int64_t, 8> std_order(rank);
std::iota(std_order.begin(), std_order.end(), 0);
SparseTensor input_st;
OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
tensor_input_shape, std_order,
&input_st));
const int64_t N = input_shape_vec(0);
Tensor sparse_handles(DT_INT64, TensorShape({N}));
auto sparse_handles_t = sparse_handles.vec<int64_t>();
OP_REQUIRES_OK(context, input_st.IndicesValid());
// We can generate the output shape proto string now, for all
// minibatch entries.
TensorShape output_shape;
OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
input_shape_vec.data() + 1,
input_shape->NumElements() - 1, &output_shape));
// Get groups by minibatch dimension
std::unordered_set<int64_t> visited;
sparse::GroupIterable minibatch = input_st.group({0});
for (const auto& subset : minibatch) {
const int64_t b = subset.group()[0];
visited.insert(b);
OP_REQUIRES(
context, b > -1 && b < N,
errors::InvalidArgument(
"Received unexpected column 0 value in input SparseTensor: ", b,
" < 0 or >= N (= ", N, ")"));
const auto indices = subset.indices();
const auto values = subset.values<T>();
const int64_t num_entries = values.size();
Tensor output_indices = Tensor(DT_INT64, {num_entries, rank - 1});
Tensor output_values = Tensor(DataTypeToEnum<T>::value, {num_entries});
auto output_indices_t = output_indices.matrix<int64_t>();
auto output_values_t = output_values.vec<T>();
for (int i = 0; i < num_entries; ++i) {
for (int d = 1; d < rank; ++d) {
output_indices_t(i, d - 1) = indices(i, d);
}
output_values_t(i) = values(i);
}
SparseTensor st_i;
OP_REQUIRES_OK(context,
SparseTensor::Create(output_indices, output_values,
output_shape, &st_i));
int64_t handle;
OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
sparse_handles_t(b) = handle;
}
// Fill in any gaps; we must provide an empty ST for batch entries
// the grouper didn't find.
if (visited.size() < N) {
Tensor empty_indices(DT_INT64, {0, rank - 1});
Tensor empty_values(DataTypeToEnum<T>::value, {0});
SparseTensor empty_st;
OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
output_shape, &empty_st));
for (int64_t b = 0; b < N; ++b) {
// We skipped this batch entry.
if (visited.find(b) == visited.end()) {
int64_t handle;
OP_REQUIRES_OK(context,
map->AddSparseTensor(context, empty_st, &handle));
sparse_handles_t(b) = handle;
}
}
}
context->set_output(0, sparse_handles);
}
| null | null | 195,037
|
195340685438288442360248962876366345932
| 131
|
Add missing validation to `AddManySparseToTensorsMap`.
Sparse tensors have a set of requirements for the 3 components and not all of them were checked.
PiperOrigin-RevId: 415358027
Change-Id: I96cbb672999cd1da772c22fabbd15507e32e12dc
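The sibling record for this kernel earlier in this dataset shows the added checks in place; in abbreviated sketch form, the previously missing validation is:

    OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()),
                errors::InvalidArgument("Input indices should be a matrix"));
    OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()),
                errors::InvalidArgument("Input values should be a vector"));
    OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()),
                errors::InvalidArgument("Input shape should be a vector"));
    // Component sizes must agree before the SparseTensor is assembled.
    OP_REQUIRES(context,
                input_values->shape().dim_size(0) ==
                    input_indices->shape().dim_size(0),
                errors::InvalidArgument(
                    "Number of values must match first dimension of indices"));
    OP_REQUIRES(context,
                input_shape->shape().dim_size(0) ==
                    input_indices->shape().dim_size(1),
                errors::InvalidArgument("Number of dimensions must match "
                                        "second dimension of indices"));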
|
other
|
mruby
|
27d1e0132a0804581dca28df042e7047fd27eaa8
| 1
|
mrb_ary_shift_m(mrb_state *mrb, mrb_value self)
{
struct RArray *a = mrb_ary_ptr(self);
mrb_int len = ARY_LEN(a);
mrb_int n;
mrb_value val;
if (mrb_get_args(mrb, "|i", &n) == 0) {
return mrb_ary_shift(mrb, self);
  }
ary_modify_check(mrb, a);
if (len == 0 || n == 0) return mrb_ary_new(mrb);
if (n < 0) mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array shift");
if (n > len) n = len;
val = mrb_ary_new_from_values(mrb, n, ARY_PTR(a));
if (ARY_SHARED_P(a)) {
L_SHIFT:
a->as.heap.ptr+=n;
a->as.heap.len-=n;
return val;
}
if (len > ARY_SHIFT_SHARED_MIN) {
ary_make_shared(mrb, a);
goto L_SHIFT;
}
else if (len == n) {
ARY_SET_LEN(a, 0);
}
else {
mrb_value *ptr = ARY_PTR(a);
mrb_int size = len-n;
while (size--) {
*ptr = *(ptr+n);
++ptr;
}
ARY_SET_LEN(a, len-n);
}
return val;
}
| null | null | 195,038
|
186115206980918508383398425499895977362
| 40
|
array.c: fix `mrb_ary_shift_m` initialization bug.
The `ARY_PTR` and `ARY_LEN` may be modified in `mrb_get_args`.
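In sketch form (the reordering is the whole fix; names as in the function above), the array header fields are read only after mrb_get_args() has run, because argument conversion can execute Ruby code that reallocates the array:

  struct RArray *a;
  mrb_int len, n;
  if (mrb_get_args(mrb, "|i", &n) == 0) {
    return mrb_ary_shift(mrb, self);
  }
  a = mrb_ary_ptr(self);   /* read the header after argument parsing */
  len = ARY_LEN(a);        /* ARY_LEN/ARY_PTR are only trustworthy here */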
|
other
|
tensorflow
|
e7f497570abb6b4ae5af4970620cd880e4c0c904
| 1
|
void operator()(OpKernelContext* ctx, const Tensor& input,
const Tensor& filter, int row_stride, int col_stride,
int row_dilation, int col_dilation, const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format) {
DCHECK(data_format == FORMAT_NHWC)
<< "Grouped conv implementation only "
"supports NHWC tensor format for now.";
const int64_t in_depth = input.dim_size(3);
const int64_t patch_depth = filter.dim_size(2);
const int64_t num_groups = in_depth / patch_depth;
// Shuffle input/filter tensors to have group as a leading dimension.
std::array<int64_t, 5> shuffle({3, 0, 1, 2, 4});
    // Compute pre-shuffle dimensions.
auto pre_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
return {tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2),
num_groups, tensor.dim_size(3) / num_groups};
};
    // Compute post-shuffle dimensions.
auto post_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
return {num_groups, tensor.dim_size(0), tensor.dim_size(1),
tensor.dim_size(2), tensor.dim_size(3) / num_groups};
};
auto& device = ctx->eigen_device<CPUDevice>();
absl::BlockingCounter shuffles_completed(2);
auto on_shuffled = [&]() { shuffles_completed.DecrementCount(); };
// Shuffle input into temporary tensor.
Tensor input_shuffled(input.dtype(), TensorShape(post_shuffle(input)));
input_shuffled.tensor<T, 5>().device(device, on_shuffled) =
input.shaped<T, 5>(pre_shuffle(input)).shuffle(shuffle);
// Shuffle filter into temporary tensor.
Tensor filter_shuffled(filter.dtype(), TensorShape(post_shuffle(filter)));
filter_shuffled.tensor<T, 5>().device(device, on_shuffled) =
filter.shaped<T, 5>(pre_shuffle(filter)).shuffle(shuffle);
// Wait for the completion of input/filter shuffles.
shuffles_completed.Wait();
// Write group convolution results into temporary output tensor.
Tensor output_shuffled(output->dtype(), TensorShape(post_shuffle(*output)));
for (int64_t i = 0; i < num_groups; ++i) {
// TODO(ezhulenev): Run this loop using `parallelFor` (regular parallelFor
// will lead to deadlock, SpatialConvolution has to use async Eigen
// assignment). This requires small changes to Eigen to support async
      // execution for the tensor chipping operation.
// TODO(ezhulenev): Grouped convolution should also support 1x1 filter
// optimization.
auto input_slice = input_shuffled.tensor<T, 5>().template chip<0>(i);
auto filter_slice = filter_shuffled.tensor<T, 5>().template chip<0>(i);
auto output_slice = output_shuffled.tensor<T, 5>().template chip<0>(i);
if (padding == EXPLICIT) {
functor::SpatialConvolution<CPUDevice, T>()(
ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
filter_slice, row_stride, col_stride, row_dilation, col_dilation,
static_cast<int>(explicit_paddings[2]),
static_cast<int>(explicit_paddings[3]),
static_cast<int>(explicit_paddings[4]),
static_cast<int>(explicit_paddings[5]));
} else {
functor::SpatialConvolution<CPUDevice, T>()(
ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
filter_slice, row_stride, col_stride, row_dilation, col_dilation,
BrainPadding2EigenPadding(padding));
}
}
// Shuffle temporary output back into pre-shuffled shape.
std::array<int64_t, 5> rev_shuffle({1, 2, 3, 0, 4});
output->shaped<T, 5>(pre_shuffle(*output)).device(device) =
output_shuffled.tensor<T, 5>().shuffle(rev_shuffle);
}
| null | null | 195,039
|
123158977024485616158249331978168134352
| 83
|
Fix segfault on OOM in Conv2D.
PiperOrigin-RevId: 404655317
Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
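A sketch of the OOM-safe allocation pattern (assuming the patch routes the temporaries through allocate_temp; details may differ): a failed allocation then surfaces as a Status instead of leaving an empty Tensor whose buffer is dereferenced by the shuffle expressions:

    Tensor input_shuffled;
    OP_REQUIRES_OK(ctx, ctx->allocate_temp(input.dtype(),
                                           TensorShape(post_shuffle(input)),
                                           &input_shuffled));
    Tensor filter_shuffled;
    OP_REQUIRES_OK(ctx, ctx->allocate_temp(filter.dtype(),
                                           TensorShape(post_shuffle(filter)),
                                           &filter_shuffled));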
|
other
|
tensorflow
|
e21af685e1828f7ca65038307df5cc06de4479e8
| 1
|
Status BuildXlaCompilationCache(DeviceBase* device, FunctionLibraryRuntime* flr,
const XlaPlatformInfo& platform_info,
XlaCompilationCache** cache) {
if (platform_info.xla_device_metadata()) {
*cache = new XlaCompilationCache(
platform_info.xla_device_metadata()->client(),
platform_info.xla_device_metadata()->jit_device_type());
return Status::OK();
}
auto platform =
se::MultiPlatformManager::PlatformWithId(platform_info.platform_id());
if (!platform.ok()) {
return platform.status();
}
StatusOr<xla::Compiler*> compiler_for_platform =
xla::Compiler::GetForPlatform(platform.ValueOrDie());
if (!compiler_for_platform.ok()) {
// In some rare cases (usually in unit tests with very small clusters) we
// may end up transforming an XLA cluster with at least one GPU operation
// (which would normally force the cluster to be compiled using XLA:GPU)
// into an XLA cluster with no GPU operations (i.e. containing only CPU
    // operations). Such a cluster can fail compilation (in a way that
// MarkForCompilation could not have detected) if the CPU JIT is not linked
// in.
//
// So bail out of _XlaCompile in this case, and let the executor handle the
// situation for us.
const Status& status = compiler_for_platform.status();
if (status.code() == error::NOT_FOUND) {
return errors::Unimplemented("Could not find compiler for platform ",
platform.ValueOrDie()->Name(), ": ",
status.ToString());
}
}
xla::LocalClientOptions client_options;
client_options.set_platform(platform.ValueOrDie());
client_options.set_intra_op_parallelism_threads(
device->tensorflow_cpu_worker_threads()->num_threads);
string allowed_gpus =
flr->config_proto()->gpu_options().visible_device_list();
TF_ASSIGN_OR_RETURN(absl::optional<std::set<int>> gpu_ids,
ParseVisibleDeviceList(allowed_gpus));
client_options.set_allowed_devices(gpu_ids);
auto client = xla::ClientLibrary::GetOrCreateLocalClient(client_options);
if (!client.ok()) {
return client.status();
}
const XlaOpRegistry::DeviceRegistration* registration;
if (!XlaOpRegistry::GetCompilationDevice(platform_info.device_type().type(),
®istration)) {
return errors::InvalidArgument("No JIT device registered for ",
platform_info.device_type().type());
}
*cache = new XlaCompilationCache(
client.ValueOrDie(), DeviceType(registration->compilation_device_name));
return Status::OK();
}
| null | null | 195,040
|
186510393432452834538936646068986071997
| 62
|
Fix Null-pointer dereference in BuildXlaCompilationCache
If ConfigProto is not used, then use the default settings which is to allow all devices.
PiperOrigin-RevId: 420391800
Change-Id: I88161ad7042990aef678e77b597a2fb2c8f815be
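In sketch form, the null check the message implies (mirroring the guard described above; exact placement may differ): only read the visible-device list when a ConfigProto was actually supplied, otherwise keep the default of all devices allowed:

  if (flr->config_proto()) {
    string allowed_gpus =
        flr->config_proto()->gpu_options().visible_device_list();
    TF_ASSIGN_OR_RETURN(absl::optional<std::set<int>> gpu_ids,
                        ParseVisibleDeviceList(allowed_gpus));
    client_options.set_allowed_devices(gpu_ids);
  }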
|
other
|
tensorflow
|
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
| 1
|
void Compute(OpKernelContext* context) override {
const Tensor& indices = context->input(0);
const Tensor& values = context->input(1);
const Tensor& shape = context->input(2);
const Tensor& weights = context->input(3);
bool use_weights = weights.NumElements() > 0;
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(indices.shape()),
errors::InvalidArgument(
"Input indices must be a 2-dimensional tensor. Got: ",
indices.shape().DebugString()));
if (use_weights) {
OP_REQUIRES(
context, weights.shape() == values.shape(),
errors::InvalidArgument(
"Weights and values must have the same shape. Weight shape: ",
weights.shape().DebugString(),
"; values shape: ", values.shape().DebugString()));
}
OP_REQUIRES(context, shape.NumElements() != 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
bool is_1d = shape.NumElements() == 1;
auto shape_vector = shape.flat<int64_t>();
int num_batches = is_1d ? 1 : shape_vector(0);
int num_values = values.NumElements();
for (int b = 0; b < shape_vector.size(); b++) {
OP_REQUIRES(context, shape_vector(b) >= 0,
errors::InvalidArgument(
"Elements in dense_shape must be >= 0. Instead got:",
shape.DebugString()));
}
OP_REQUIRES(context, num_values == indices.shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices.",
"Got ", num_values,
" values, indices shape: ", indices.shape().DebugString()));
const auto indices_values = indices.matrix<int64_t>();
const auto values_values = values.flat<T>();
const auto weight_values = weights.flat<W>();
auto per_batch_counts = BatchedMap<W>(num_batches);
T max_value = 0;
OP_REQUIRES(context, num_values <= indices.shape().dim_size(0),
errors::InvalidArgument(
"The first dimension of indices must be equal to or "
"greather than number of values. ( ",
indices.shape().dim_size(0), " vs. ", num_values, " )"));
OP_REQUIRES(context, indices.shape().dim_size(1) > 0,
errors::InvalidArgument("The second dimension of indices must "
"be greater than 0. Received: ",
indices.shape().dim_size(1)));
for (int idx = 0; idx < num_values; ++idx) {
int batch = is_1d ? 0 : indices_values(idx, 0);
if (batch >= num_batches) {
OP_REQUIRES(context, batch < num_batches,
errors::InvalidArgument(
"Indices value along the first dimension must be ",
"lower than the first index of the shape.", "Got ",
batch, " as batch and ", num_batches,
" as the first dimension of the shape."));
}
const auto& value = values_values(idx);
if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) {
if (binary_output_) {
per_batch_counts[batch][value] = 1;
} else if (use_weights) {
per_batch_counts[batch][value] += weight_values(idx);
} else {
per_batch_counts[batch][value]++;
}
if (value > max_value) {
max_value = value;
}
}
}
int num_output_values = GetOutputSize(max_value, maxlength_, minlength_);
OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values,
is_1d, context));
}
| null | null | 195,055
|
7436579207726196641950130726785204925
| 90
|
Cleanup and remove duplicate validation in `SparseCount`.
We have validation that is duplicated, checking different conditions in different formats and failing to capture all cases. This should fix all the previous bugs.
PiperOrigin-RevId: 414886981
Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
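A sketch of what consolidation can look like (illustrative only; the patch rearranges existing checks rather than inventing new ones): each invariant verified once, up front, before any indexing:

    OP_REQUIRES(context, num_values == indices.shape().dim_size(0),
                errors::InvalidArgument(
                    "Number of values must match first dimension of indices"));
    OP_REQUIRES(context, is_1d || indices.shape().dim_size(1) > 0,
                errors::InvalidArgument("Indices need at least one column"));
    for (int idx = 0; idx < num_values; ++idx) {
      const int batch = is_1d ? 0 : indices_values(idx, 0);
      OP_REQUIRES(context, batch >= 0 && batch < num_batches,
                  errors::InvalidArgument("Batch index ", batch,
                                          " out of range [0, ", num_batches,
                                          ")"));
      // ... counting logic unchanged ...
    }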
|
other
|
tensorflow
|
8c6f391a2282684a25cbfec7687bd5d35261a209
| 1
|
inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
const float* bias_data, int array_size,
float* array_data) {
// Note: see b/132215220: in May 2019 we thought it would be OK to replace
// this with the Eigen one-liner:
// return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max).
// This turned out to severely regress performance: +4ms (i.e. 8%) on
// MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now.
TFLITE_DCHECK_EQ((array_size % bias_size), 0);
#ifdef USE_NEON
float* array_ptr = array_data;
float* array_end_ptr = array_ptr + array_size;
const auto clamp_min_vec = vdupq_n_f32(clamp_min);
const auto clamp_max_vec = vdupq_n_f32(clamp_max);
for (; array_ptr != array_end_ptr; array_ptr += bias_size) {
int i = 0;
for (; i <= bias_size - 16; i += 16) {
auto b0 = vld1q_f32(bias_data + i);
auto b1 = vld1q_f32(bias_data + i + 4);
auto b2 = vld1q_f32(bias_data + i + 8);
auto b3 = vld1q_f32(bias_data + i + 12);
auto a0 = vld1q_f32(array_ptr + i);
auto a1 = vld1q_f32(array_ptr + i + 4);
auto a2 = vld1q_f32(array_ptr + i + 8);
auto a3 = vld1q_f32(array_ptr + i + 12);
auto x0 = vaddq_f32(a0, b0);
auto x1 = vaddq_f32(a1, b1);
auto x2 = vaddq_f32(a2, b2);
auto x3 = vaddq_f32(a3, b3);
x0 = vmaxq_f32(clamp_min_vec, x0);
x1 = vmaxq_f32(clamp_min_vec, x1);
x2 = vmaxq_f32(clamp_min_vec, x2);
x3 = vmaxq_f32(clamp_min_vec, x3);
x0 = vminq_f32(clamp_max_vec, x0);
x1 = vminq_f32(clamp_max_vec, x1);
x2 = vminq_f32(clamp_max_vec, x2);
x3 = vminq_f32(clamp_max_vec, x3);
vst1q_f32(array_ptr + i, x0);
vst1q_f32(array_ptr + i + 4, x1);
vst1q_f32(array_ptr + i + 8, x2);
vst1q_f32(array_ptr + i + 12, x3);
}
for (; i <= bias_size - 4; i += 4) {
auto b = vld1q_f32(bias_data + i);
auto a = vld1q_f32(array_ptr + i);
auto x = vaddq_f32(a, b);
x = vmaxq_f32(clamp_min_vec, x);
x = vminq_f32(clamp_max_vec, x);
vst1q_f32(array_ptr + i, x);
}
for (; i < bias_size; i++) {
array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i],
clamp_min, clamp_max);
}
}
#else // not NEON
for (int array_offset = 0; array_offset < array_size;
array_offset += bias_size) {
for (int i = 0; i < bias_size; i++) {
array_data[array_offset + i] = ActivationFunctionWithMinMax(
array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max);
}
}
#endif
}
| null | null | 195,056
|
152147893540118444732409121016508718907
| 65
|
[lite] Add a check that bias_size is non-zero to avoid division by zero. This shouldn't happen for properly converted models; it is just a safety check.
PiperOrigin-RevId: 416383645
Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
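A sketch of the guard (assuming a debug check plus early return; the exact form in the patch may differ): the modulo and the per-row stride below must never see bias_size == 0:

  TFLITE_DCHECK_GT(bias_size, 0);
  if (bias_size == 0) return;  // defensive: malformed/unconverted model
  TFLITE_DCHECK_EQ((array_size % bias_size), 0);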
|
other
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 1
|
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
// The output values of this node may be needed.
return false;
}
if (node.input_size() < 1) {
// Node lacks input, is invalid
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
CHECK(input != nullptr) << "node = " << node.name()
<< " input = " << node.input(0);
// Don't remove Identity nodes corresponding to Variable reads or following
// Recv.
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
}
| null | null | 195,059
|
280470408197015060590448712190364740247
| 40
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
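In sketch form, the CHECK above becomes an ordinary failure path, so a graph with a dangling input name cannot crash the optimizer:

  const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
  if (input == nullptr) {
    VLOG(1) << "node = " << node.name() << " input = " << node.input(0);
    return false;  // conservatively keep the Identity node
  }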
|
other
|
gpac
|
5f2c2a16d30229b6241f02fa28e3d6b810d64858
| 1
|
GF_Err mpgviddmx_process(GF_Filter *filter)
{
GF_MPGVidDmxCtx *ctx = gf_filter_get_udta(filter);
GF_FilterPacket *pck, *dst_pck;
u64 byte_offset;
s64 vosh_start = -1;
s64 vosh_end = -1;
GF_Err e;
char *data;
u8 *start;
u32 pck_size;
s32 remain;
//always reparse duration
if (!ctx->duration.num)
mpgviddmx_check_dur(filter, ctx);
pck = gf_filter_pid_get_packet(ctx->ipid);
if (!pck) {
if (gf_filter_pid_is_eos(ctx->ipid)) {
mpgviddmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE, GF_TRUE);
if (ctx->opid)
gf_filter_pid_set_eos(ctx->opid);
if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
ctx->src_pck = NULL;
return GF_EOS;
}
return GF_OK;
}
data = (char *) gf_filter_pck_get_data(pck, &pck_size);
byte_offset = gf_filter_pck_get_byte_offset(pck);
start = data;
remain = pck_size;
//input pid sets some timescale - we flushed pending data , update cts
if (!ctx->resume_from && ctx->timescale) {
u64 ts = gf_filter_pck_get_cts(pck);
if (ts != GF_FILTER_NO_TS) {
if (!ctx->cts || !ctx->recompute_cts)
ctx->cts = ts;
}
ts = gf_filter_pck_get_dts(pck);
if (ts != GF_FILTER_NO_TS) {
if (!ctx->dts || !ctx->recompute_cts)
ctx->dts = ts;
if (!ctx->prev_dts) ctx->prev_dts = ts;
else if (ctx->prev_dts != ts) {
u64 diff = ts;
diff -= ctx->prev_dts;
if (!ctx->cur_fps.den) ctx->cur_fps.den = (u32) diff;
else if (ctx->cur_fps.den > diff)
ctx->cur_fps.den = (u32) diff;
}
}
gf_filter_pck_get_framing(pck, &ctx->input_is_au_start, &ctx->input_is_au_end);
		//this will force CTS recomputation for each frame
if (ctx->recompute_cts) ctx->input_is_au_start = GF_FALSE;
if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
ctx->src_pck = pck;
gf_filter_pck_ref_props(&ctx->src_pck);
}
//we stored some data to find the complete vosh, aggregate this packet with current one
if (!ctx->resume_from && ctx->hdr_store_size) {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size) {
ctx->hdr_store_alloc = ctx->hdr_store_size + pck_size;
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data, sizeof(char)*pck_size);
if (byte_offset != GF_FILTER_NO_BO) {
if (byte_offset >= ctx->hdr_store_size)
byte_offset -= ctx->hdr_store_size;
else
byte_offset = GF_FILTER_NO_BO;
}
ctx->hdr_store_size += pck_size;
start = data = ctx->hdr_store;
remain = pck_size = ctx->hdr_store_size;
}
if (ctx->resume_from) {
if (gf_filter_pid_would_block(ctx->opid))
return GF_OK;
//resume from data copied internally
if (ctx->hdr_store_size) {
assert(ctx->resume_from <= ctx->hdr_store_size);
start = data = ctx->hdr_store + ctx->resume_from;
remain = pck_size = ctx->hdr_store_size - ctx->resume_from;
} else {
assert(remain >= (s32) ctx->resume_from);
start += ctx->resume_from;
remain -= ctx->resume_from;
}
ctx->resume_from = 0;
}
if (!ctx->bs) {
ctx->bs = gf_bs_new(start, remain, GF_BITSTREAM_READ);
} else {
gf_bs_reassign_buffer(ctx->bs, start, remain);
}
if (!ctx->vparser) {
ctx->vparser = gf_m4v_parser_bs_new(ctx->bs, ctx->is_mpg12);
}
while (remain) {
Bool full_frame;
u8 *pck_data;
s32 current;
u8 sc_type, forced_sc_type=0;
Bool sc_type_forced = GF_FALSE;
Bool skip_pck = GF_FALSE;
u8 ftype;
u32 tinc;
u64 size=0;
u64 fstart;
Bool is_coded;
u32 bytes_from_store = 0;
u32 hdr_offset = 0;
Bool copy_last_bytes = GF_FALSE;
//not enough bytes to parse start code
if (remain<5) {
memcpy(ctx->hdr_store, start, remain);
ctx->bytes_in_header = remain;
break;
}
current = -1;
//we have some potential bytes of a start code in the store, copy some more bytes and check if valid start code.
//if not, dispatch these bytes as continuation of the data
if (ctx->bytes_in_header) {
memcpy(ctx->hdr_store + ctx->bytes_in_header, start, 8 - ctx->bytes_in_header);
current = mpgviddmx_next_start_code(ctx->hdr_store, 8);
//no start code in stored buffer
if ((current<0) || (current >= (s32) ctx->bytes_in_header) ) {
if (ctx->opid) {
dst_pck = gf_filter_pck_new_alloc(ctx->opid, ctx->bytes_in_header, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
memcpy(pck_data, ctx->hdr_store, ctx->bytes_in_header);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - ctx->bytes_in_header);
}
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
}
if (current<0) current = -1;
else current -= ctx->bytes_in_header;
ctx->bytes_in_header = 0;
} else {
//we have a valid start code, check which byte in our store or in the packet payload is the start code type
//and remember its location to reinit the parser from there
hdr_offset = 4 - ctx->bytes_in_header + current;
//bytes still to dispatch
bytes_from_store = ctx->bytes_in_header;
ctx->bytes_in_header = 0;
if (!hdr_offset) {
forced_sc_type = ctx->hdr_store[current+3];
} else {
forced_sc_type = start[hdr_offset-1];
}
sc_type_forced = GF_TRUE;
}
}
		//no start code in store, look for start code in packet
if (current == -1) {
//locate next start code
current = mpgviddmx_next_start_code(start, remain);
//no start code, dispatch the block
if (current<0) {
u8 b3, b2, b1;
if (! ctx->frame_started) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[MPGVid] no start code in block and no frame started, discarding data\n" ));
break;
}
size = remain;
b3 = start[remain-3];
b2 = start[remain-2];
b1 = start[remain-1];
//we may have a startcode at the end of the packet, store it and don't dispatch the last 3 bytes !
if (!b1 || !b2 || !b3) {
copy_last_bytes = GF_TRUE;
assert(size >= 3);
size -= 3;
ctx->bytes_in_header = 3;
}
dst_pck = gf_filter_pck_new_alloc(ctx->opid, (u32) size, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
memcpy(pck_data, start, (size_t) size);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset);
}
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
if (copy_last_bytes) {
memcpy(ctx->hdr_store, start+remain-3, 3);
}
break;
}
}
assert(current>=0);
//if we are in the middle of parsing the vosh, skip over bytes remaining from previous obj not parsed
if ((vosh_start>=0) && current) {
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
//also skip if no output pid
if (!ctx->opid && current) {
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
//dispatch remaining bytes
if (current>0) {
//flush remaining
dst_pck = gf_filter_pck_new_alloc(ctx->opid, current, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
gf_filter_pck_set_cts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_dts(dst_pck, GF_FILTER_NO_TS);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_TRUE);
//bytes were partly in store, partly in packet
if (bytes_from_store) {
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - bytes_from_store);
}
assert(bytes_from_store>=(u32) current);
bytes_from_store -= current;
memcpy(pck_data, ctx->hdr_store, current);
} else {
//bytes were only in packet
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset);
}
memcpy(pck_data, start, current);
assert(remain>=current);
start += current;
remain -= current;
current = 0;
}
gf_filter_pck_set_carousel_version(dst_pck, 1);
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
}
//parse headers
//we have a start code loaded, eg the data packet does not have a full start code at the beginning
if (sc_type_forced) {
gf_bs_reassign_buffer(ctx->bs, start + hdr_offset, remain - hdr_offset);
sc_type = forced_sc_type;
} else {
gf_bs_reassign_buffer(ctx->bs, start, remain);
gf_bs_read_int(ctx->bs, 24);
sc_type = gf_bs_read_int(ctx->bs, 8);
}
if (ctx->is_mpg12) {
switch (sc_type) {
case M2V_SEQ_START_CODE:
case M2V_EXT_START_CODE:
gf_bs_reassign_buffer(ctx->bs, start, remain);
e = gf_m4v_parse_config(ctx->vparser, &ctx->dsi);
//not enough data, accumulate until we can parse the full header
if (e==GF_EOS) {
if (vosh_start<0) vosh_start = 0;
if (data == ctx->hdr_store) {
memmove(ctx->hdr_store, start, remain);
ctx->hdr_store_size = remain;
} else {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size - vosh_start) {
ctx->hdr_store_alloc = (u32) (ctx->hdr_store_size + pck_size - vosh_start);
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data + vosh_start, (size_t) (pck_size - vosh_start) );
ctx->hdr_store_size += pck_size - (u32) vosh_start;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
} else if (e != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Failed to parse VOS header: %s\n", gf_error_to_string(e) ));
} else {
mpgviddmx_check_pid(filter, ctx, 0, NULL);
}
break;
case M2V_PIC_START_CODE:
break;
default:
break;
}
} else {
u8 PL;
switch (sc_type) {
case M4V_VOS_START_CODE:
ctx->dsi.VideoPL = (u8) gf_bs_read_u8(ctx->bs);
vosh_start = start - (u8 *)data;
skip_pck = GF_TRUE;
assert(remain>=5);
start += 5;
remain -= 5;
break;
case M4V_VOL_START_CODE:
gf_bs_reassign_buffer(ctx->bs, start, remain);
PL = ctx->dsi.VideoPL;
e = gf_m4v_parse_config(ctx->vparser, &ctx->dsi);
ctx->dsi.VideoPL = PL;
//not enough data, accumulate until we can parse the full header
if (e==GF_EOS) {
if (vosh_start<0) vosh_start = 0;
if (data == ctx->hdr_store) {
memmove(ctx->hdr_store, start, remain);
ctx->hdr_store_size = remain;
} else {
if (ctx->hdr_store_alloc < ctx->hdr_store_size + pck_size - vosh_start) {
ctx->hdr_store_alloc = (u32) (ctx->hdr_store_size + pck_size - (u32) vosh_start);
ctx->hdr_store = gf_realloc(ctx->hdr_store, sizeof(char)*ctx->hdr_store_alloc);
}
memcpy(ctx->hdr_store + ctx->hdr_store_size, data + vosh_start, (size_t) (pck_size - vosh_start) );
ctx->hdr_store_size += pck_size - (u32) vosh_start;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
} else if (e != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Failed to parse VOS header: %s\n", gf_error_to_string(e) ));
} else {
u32 obj_size = (u32) gf_m4v_get_object_start(ctx->vparser);
if (vosh_start<0) vosh_start = 0;
vosh_end = start - (u8 *)data + obj_size;
vosh_end -= vosh_start;
mpgviddmx_check_pid(filter, ctx,(u32) vosh_end, data+vosh_start);
skip_pck = GF_TRUE;
assert(remain>=(s32) obj_size);
start += obj_size;
remain -= obj_size;
}
break;
case M4V_VOP_START_CODE:
case M4V_GOV_START_CODE:
break;
case M4V_VO_START_CODE:
case M4V_VISOBJ_START_CODE:
default:
if (vosh_start>=0) {
skip_pck = GF_TRUE;
assert(remain>=4);
start += 4;
remain -= 4;
}
break;
}
}
if (skip_pck) {
continue;
}
if (!ctx->opid) {
assert(remain>=4);
start += 4;
remain -= 4;
continue;
}
if (!ctx->is_playing) {
ctx->resume_from = (u32) ((char *)start - (char *)data);
return GF_OK;
}
//at this point, we no longer reaggregate packets
ctx->hdr_store_size = 0;
if (ctx->in_seek) {
u64 nb_frames_at_seek = (u64) (ctx->start_range * ctx->cur_fps.num);
if (ctx->cts + ctx->cur_fps.den >= nb_frames_at_seek) {
//u32 samples_to_discard = (ctx->cts + ctx->dts_inc) - nb_samples_at_seek;
ctx->in_seek = GF_FALSE;
}
}
//may happen that after all our checks, only 4 bytes are left, continue to store these 4 bytes
if (remain<5)
continue;
//good to go
gf_m4v_parser_reset(ctx->vparser, sc_type_forced ? forced_sc_type + 1 : 0);
size = 0;
e = gf_m4v_parse_frame(ctx->vparser, &ctx->dsi, &ftype, &tinc, &size, &fstart, &is_coded);
//true if we strip VO and VISOBJ assert(!fstart);
//we skipped bytes already in store + end of start code present in packet, so the size of the first object
		//needs adjustment
if (bytes_from_store) {
size += bytes_from_store + hdr_offset;
}
if ((e == GF_EOS) && !ctx->input_is_au_end) {
u8 b3 = start[remain-3];
u8 b2 = start[remain-2];
u8 b1 = start[remain-1];
//we may have a startcode at the end of the packet, store it and don't dispatch the last 3 bytes !
if (!b1 || !b2 || !b3) {
copy_last_bytes = GF_TRUE;
assert(size >= 3);
size -= 3;
ctx->bytes_in_header = 3;
}
full_frame = GF_FALSE;
} else {
full_frame = GF_TRUE;
}
if (!is_coded) {
/*if prev is B and we're parsing a packed bitstream discard n-vop*/
if (ctx->forced_packed && ctx->b_frames) {
ctx->is_packed = GF_TRUE;
assert(remain>=size);
start += size;
remain -= (s32) size;
continue;
}
/*policy is to import at variable frame rate, skip*/
if (ctx->vfr) {
ctx->is_vfr = GF_TRUE;
mpgviddmx_update_time(ctx);
assert(remain>=size);
start += size;
remain -= (s32) size;
continue;
}
/*policy is to keep non coded frame (constant frame rate), add*/
}
if (ftype==2) {
//count number of B-frames since last ref
ctx->b_frames++;
ctx->nb_b++;
} else {
//flush all pending packets
mpgviddmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE, GF_FALSE);
			//remember the CTS of the last ref
ctx->last_ref_cts = ctx->cts;
if (ctx->max_b < ctx->b_frames) ctx->max_b = ctx->b_frames;
ctx->b_frames = 0;
if (ftype)
ctx->nb_p++;
else
ctx->nb_i++;
}
ctx->nb_frames++;
dst_pck = gf_filter_pck_new_alloc(ctx->opid, (u32) size, &pck_data);
if (!dst_pck) return GF_OUT_OF_MEM;
if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
//bytes come from both our store and the data packet
if (bytes_from_store) {
memcpy(pck_data, ctx->hdr_store+current, bytes_from_store);
assert(size >= bytes_from_store);
size -= bytes_from_store;
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset - bytes_from_store);
}
memcpy(pck_data + bytes_from_store, start, (size_t) size);
} else {
//bytes only come the data packet
memcpy(pck_data, start, (size_t) size);
if (byte_offset != GF_FILTER_NO_BO) {
gf_filter_pck_set_byte_offset(dst_pck, byte_offset + start - (u8 *) data);
}
}
assert(pck_data[0] == 0);
assert(pck_data[1] == 0);
assert(pck_data[2] == 0x01);
gf_filter_pck_set_framing(dst_pck, GF_TRUE, (full_frame || ctx->input_is_au_end) ? GF_TRUE : GF_FALSE);
gf_filter_pck_set_cts(dst_pck, ctx->cts);
gf_filter_pck_set_dts(dst_pck, ctx->dts);
if (ctx->input_is_au_start) {
ctx->input_is_au_start = GF_FALSE;
} else {
			//we use the carousel flag temporarily to indicate the cts must be recomputed
gf_filter_pck_set_carousel_version(dst_pck, 1);
}
gf_filter_pck_set_sap(dst_pck, ftype ? GF_FILTER_SAP_NONE : GF_FILTER_SAP_1);
gf_filter_pck_set_duration(dst_pck, ctx->cur_fps.den);
if (ctx->in_seek) gf_filter_pck_set_seek_flag(dst_pck, GF_TRUE);
ctx->frame_started = GF_TRUE;
mpgviddmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE, GF_FALSE);
mpgviddmx_update_time(ctx);
if (!full_frame) {
if (copy_last_bytes) {
memcpy(ctx->hdr_store, start+remain-3, 3);
}
break;
}
assert(remain>=size);
start += size;
remain -= (s32) size;
}
gf_filter_pid_drop_packet(ctx->ipid);
return GF_OK;
}
| null | null | 195,063
|
259164032268704415953276140717238966823
| 535
|
fixed #1905
|
other
|
tensorflow
|
8a513cec4bec15961fbfdedcaa5376522980455c
| 1
|
StatusOr<FullTypeDef> SpecializeType(const AttrSlice& attrs,
const OpDef& op_def) {
FullTypeDef ft;
ft.set_type_id(TFT_PRODUCT);
for (int i = 0; i < op_def.output_arg_size(); i++) {
auto* t = ft.add_args();
*t = op_def.output_arg(i).experimental_full_type();
// Resolve dependent types. The convention for op registrations is to use
// attributes as type variables.
// See https://www.tensorflow.org/guide/create_op#type_polymorphism.
// Once the op signature can be defined entirely in FullType, this
// convention can be deprecated.
//
// Note: While this code performs some basic verifications, it generally
// assumes consistent op defs and attributes. If more complete
    // verifications are needed, they should be done separately, and in a
// way that can be reused for type inference.
for (int j = 0; j < t->args_size(); j++) {
auto* arg = t->mutable_args(i);
if (arg->type_id() == TFT_VAR) {
const auto* attr = attrs.Find(arg->s());
DCHECK(attr != nullptr);
if (attr->value_case() == AttrValue::kList) {
const auto& attr_list = attr->list();
arg->set_type_id(TFT_PRODUCT);
for (int i = 0; i < attr_list.type_size(); i++) {
map_dtype_to_tensor(attr_list.type(i), arg->add_args());
}
} else if (attr->value_case() == AttrValue::kType) {
map_dtype_to_tensor(attr->type(), arg);
} else {
return Status(error::UNIMPLEMENTED,
absl::StrCat("unknown attribute type",
attrs.DebugString(), " key=", arg->s()));
}
arg->clear_s();
}
}
}
return ft;
}
| null | null | 195,067
|
127871006948263872569838116397374697164
| 48
|
Prevent null dereference read in `SpecializeType()`
For some adversarial protos, the attribute for a key might not exist.
PiperOrigin-RevId: 408382090
Change-Id: Ie7eabe532c9ff280fce5dce1f6cdb93c76c2e040
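A sketch of the guarded lookup (note the function above also indexes mutable_args(i) inside a loop over j, which the same adversarial inputs can expose): a missing attribute becomes an error Status rather than a DCHECK followed by a null dereference:

      auto* arg = t->mutable_args(j);
      if (arg->type_id() == TFT_VAR) {
        const auto* attr = attrs.Find(arg->s());
        if (attr == nullptr) {
          return Status(error::INVALID_ARGUMENT,
                        absl::StrCat("Could not find an attribute for key ",
                                     arg->s()));
        }
        // ... existing kList / kType handling ...
      }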
|
other
|
gpac
|
f1ae01d745200a258cdf62622f71754c37cb6c30
| 1
|
static s32 svc_parse_slice(GF_BitStream *bs, AVCState *avc, AVCSliceInfo *si)
{
s32 pps_id;
/*s->current_picture.reference= h->nal_ref_idc != 0;*/
gf_bs_read_ue_log(bs, "first_mb_in_slice");
si->slice_type = gf_bs_read_ue_log(bs, "slice_type");
if (si->slice_type > 9) return -1;
pps_id = gf_bs_read_ue_log(bs, "pps_id");
if (pps_id > 255)
return -1;
si->pps = &avc->pps[pps_id];
si->pps->id = pps_id;
if (!si->pps->slice_group_count)
return -2;
si->sps = &avc->sps[si->pps->sps_id + GF_SVC_SSPS_ID_SHIFT];
if (!si->sps->log2_max_frame_num)
return -2;
si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num");
si->field_pic_flag = 0;
if (si->sps->frame_mbs_only_flag) {
/*s->picture_structure= PICT_FRAME;*/
}
else {
si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag");
if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag");
}
if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE || si->NalHeader.idr_pic_flag)
si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id");
if (si->sps->poc_type == 0) {
si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb");
if (si->pps->pic_order_present && !si->field_pic_flag) {
si->delta_poc_bottom = gf_bs_read_se_log(bs, "delta_poc_bottom");
}
}
else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) {
si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0");
if ((si->pps->pic_order_present == 1) && !si->field_pic_flag)
si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1");
}
if (si->pps->redundant_pic_cnt_present) {
si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt");
}
return 0;
}
| null | null | 195,069
|
208044153905002020314691558039299026990
| 49
|
fixed #1900
|
other
|
tensorflow
|
e746adbfcfee15e9cfdb391ff746c765b99bdf9b
| 1
|
void DecodePngV2(OpKernelContext* context, StringPiece input) {
int channel_bits = (data_type_ == DataType::DT_UINT8) ? 8 : 16;
png::DecodeContext decode;
OP_REQUIRES(
context, png::CommonInitDecode(input, channels_, channel_bits, &decode),
errors::InvalidArgument("Invalid PNG. Failed to initialize decoder."));
// Verify that width and height are not too large:
// - verify width and height don't overflow int.
// - width can later be multiplied by channels_ and sizeof(uint16), so
// verify single dimension is not too large.
// - verify when width and height are multiplied together, there are a few
// bits to spare as well.
const int width = static_cast<int>(decode.width);
const int height = static_cast<int>(decode.height);
const int64_t total_size =
static_cast<int64_t>(width) * static_cast<int64_t>(height);
if (width != static_cast<int64_t>(decode.width) || width <= 0 ||
width >= (1LL << 27) || height != static_cast<int64_t>(decode.height) ||
height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) {
png::CommonFreeDecode(&decode);
OP_REQUIRES(context, false,
errors::InvalidArgument("PNG size too large for int: ",
decode.width, " by ", decode.height));
}
Tensor* output = nullptr;
Status status;
// By the existing API, we support decoding PNG with `DecodeGif` op.
// We need to make sure to return 4-D shapes when using `DecodeGif`.
if (op_type_ == "DecodeGif") {
status = context->allocate_output(
0, TensorShape({1, height, width, decode.channels}), &output);
} else {
status = context->allocate_output(
0, TensorShape({height, width, decode.channels}), &output);
}
if (op_type_ == "DecodeBmp") {
// TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
// because currently `decode_(jpeg|png|gif)` ops can decode any one of
// jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
// anything but bmp formats. This behavior needs to be revisited. For more
// details, please refer to the bug.
OP_REQUIRES(context, false,
errors::InvalidArgument(
"Trying to decode PNG format using DecodeBmp op. Use "
"`decode_png` or `decode_image` instead."));
} else if (op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"DecodeAndCropJpeg operation can run on JPEG only, but "
"detected PNG."));
}
if (!status.ok()) png::CommonFreeDecode(&decode);
OP_REQUIRES_OK(context, status);
if (data_type_ == DataType::DT_UINT8) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint8>().data()),
decode.channels * width * sizeof(uint8), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_UINT16) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint16>().data()),
decode.channels * width * sizeof(uint16), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_FLOAT) {
// `png::CommonFinishDecode` does not support `float`. First allocate
// uint16 buffer for the image and decode in uint16 (lossless). Wrap the
// buffer in `unique_ptr` so that we don't forget to delete the buffer.
std::unique_ptr<uint16[]> buffer(
new uint16[height * width * decode.channels]);
OP_REQUIRES(
context,
png::CommonFinishDecode(reinterpret_cast<png_bytep>(buffer.get()),
decode.channels * width * sizeof(uint16),
&decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
// Convert uint16 image data to desired data type.
// Use eigen threadpooling to speed up the copy operation.
const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
TTypes<uint16, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
decode.channels);
float scale = 1. / std::numeric_limits<uint16>::max();
// Fill output tensor with desired dtype.
output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
}
}
| null | null | 195,073
|
252612233053847900650928075257334441435
| 95
|
Prevent use after free in `DecodePng` kernel.
We are cleaning up the memory in `decode` and then we are using an `OP_REQUIRES` to check an invariant on the `decode` data.
PiperOrigin-RevId: 409299145
Change-Id: I4eb93aaca52483eb202e89b78df07fbb2f6cb254
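In sketch form: copy the dimensions needed for the message before freeing the decode context, so the error path never reads freed memory:

    const int64_t w = decode.width, h = decode.height;
    png::CommonFreeDecode(&decode);
    OP_REQUIRES(context, false,
                errors::InvalidArgument("PNG size too large for int: ", w,
                                        " by ", h));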
|
other
|
gpac
|
a69b567b8c95c72f9560c873c5ab348be058f340
| 1
|
GF_AV1Config *gf_odf_av1_cfg_read_bs_size(GF_BitStream *bs, u32 size)
{
#ifndef GPAC_DISABLE_AV_PARSERS
AV1State state;
u8 reserved;
GF_AV1Config *cfg;
if (!size) size = (u32) gf_bs_available(bs);
if (!size) return NULL;
cfg = gf_odf_av1_cfg_new();
gf_av1_init_state(&state);
state.config = cfg;
cfg->marker = gf_bs_read_int(bs, 1);
cfg->version = gf_bs_read_int(bs, 7);
cfg->seq_profile = gf_bs_read_int(bs, 3);
cfg->seq_level_idx_0 = gf_bs_read_int(bs, 5);
cfg->seq_tier_0 = gf_bs_read_int(bs, 1);
cfg->high_bitdepth = gf_bs_read_int(bs, 1);
cfg->twelve_bit = gf_bs_read_int(bs, 1);
cfg->monochrome = gf_bs_read_int(bs, 1);
cfg->chroma_subsampling_x = gf_bs_read_int(bs, 1);
cfg->chroma_subsampling_y = gf_bs_read_int(bs, 1);
cfg->chroma_sample_position = gf_bs_read_int(bs, 2);
reserved = gf_bs_read_int(bs, 3);
if (reserved != 0 || cfg->marker != 1 || cfg->version != 1) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] wrong avcC reserved %d / marker %d / version %d expecting 0 1 1\n", reserved, cfg->marker, cfg->version));
gf_odf_av1_cfg_del(cfg);
return NULL;
}
cfg->initial_presentation_delay_present = gf_bs_read_int(bs, 1);
if (cfg->initial_presentation_delay_present) {
cfg->initial_presentation_delay_minus_one = gf_bs_read_int(bs, 4);
} else {
/*reserved = */gf_bs_read_int(bs, 4);
cfg->initial_presentation_delay_minus_one = 0;
}
size -= 4;
while (size) {
u64 pos, obu_size;
ObuType obu_type;
GF_AV1_OBUArrayEntry *a;
pos = gf_bs_get_position(bs);
obu_size = 0;
if (gf_av1_parse_obu(bs, &obu_type, &obu_size, NULL, &state) != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] could not parse AV1 OBU at position "LLU". Leaving parsing.\n", pos));
break;
}
assert(obu_size == gf_bs_get_position(bs) - pos);
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] parsed AV1 OBU type=%u size="LLU" at position "LLU".\n", obu_type, obu_size, pos));
if (!av1_is_obu_header(obu_type)) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] AV1 unexpected OBU type=%u size="LLU" found at position "LLU". Forwarding.\n", pos));
}
GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry);
if (!a) break;
a->obu = gf_malloc((size_t)obu_size);
if (!a->obu) {
gf_free(a);
break;
}
gf_bs_seek(bs, pos);
gf_bs_read_data(bs, (char *) a->obu, (u32)obu_size);
a->obu_length = obu_size;
a->obu_type = obu_type;
gf_list_add(cfg->obu_array, a);
if (size<obu_size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] AV1 config misses %d bytes to fit the entire OBU\n", obu_size - size));
break;
}
size -= (u32) obu_size;
}
gf_av1_reset_state(& state, GF_TRUE);
return cfg;
#else
return NULL;
#endif
}
| null | null | 195,074
|
94331888032186617444846251549702110835
| 83
|
fixed #1895
|
other
|
linux
|
c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
| 1
|
void recalc_intercepts(struct vcpu_svm *svm)
{
struct vmcb_control_area *c, *h, *g;
unsigned int i;
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
if (!is_guest_mode(&svm->vcpu))
return;
c = &svm->vmcb->control;
h = &svm->vmcb01.ptr->control;
g = &svm->nested.ctl;
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] = h->intercepts[i];
if (g->int_ctl & V_INTR_MASKING_MASK) {
/* We only want the cr8 intercept bits of L1 */
vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
/*
* Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
* affect any interrupt we may want to inject; therefore,
* interrupt window vmexits are irrelevant to L0.
*/
vmcb_clr_intercept(c, INTERCEPT_VINTR);
}
/* We don't want to see VMMCALLs from a nested guest */
vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] |= g->intercepts[i];
/* If SMI is not intercepted, ignore guest SMI intercept as well */
if (!intercept_smi)
vmcb_clr_intercept(c, INTERCEPT_SMI);
}
| null | null | 195,082
|
336087104388780214431418689750592481904
| 40
|
KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656)
If L1 disables VMLOAD/VMSAVE intercepts, and doesn't enable
Virtual VMLOAD/VMSAVE (currently not supported for the nested hypervisor),
then VMLOAD/VMSAVE must operate on the L1 physical memory, which is only
possible by making L0 intercept these instructions.
Failure to do so allowed the nested guest to run VMLOAD/VMSAVE unintercepted,
and thus read/write portions of the host physical memory.
Fixes: 89c8a4984fc9 ("KVM: SVM: Enable Virtual VMLOAD VMSAVE feature")
Suggested-by: Paolo Bonzini <[email protected]>
Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
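A minimal sketch of what this implies for the recalc_intercepts() function shown above, to be applied after the "c->intercepts[i] |= g->intercepts[i]" merge. vmcb_set_intercept() and the INTERCEPT_* constants are real KVM SVM symbols; nested_virt_vmload_vmsave_enabled() is a hypothetical predicate standing in for the actual condition, so this is not the verbatim upstream patch.
/* Hedged sketch: unless Virtual VMLOAD/VMSAVE is genuinely in use,
 * force a VMEXIT to L0 so VMLOAD/VMSAVE cannot touch host memory,
 * even if L1 cleared the intercept bits. */
if (!nested_virt_vmload_vmsave_enabled(svm)) {   /* hypothetical helper */
	vmcb_set_intercept(c, INTERCEPT_VMLOAD);
	vmcb_set_intercept(c, INTERCEPT_VMSAVE);
}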
|
other
|
tensorflow
|
5b491cd5e41ad63735161cec9c2a568172c8b6a3
| 1
|
bool Tensor::FromProto(Allocator* a, const TensorProto& proto) {
CHECK_NOTNULL(a);
TensorBuffer* p = nullptr;
if (!TensorShape::IsValid(proto.tensor_shape())) return false;
if (proto.dtype() == DT_INVALID) return false;
TensorShape shape(proto.tensor_shape());
const int64_t N = shape.num_elements();
if (N > 0 && proto.dtype()) {
bool dtype_error = false;
if (!proto.tensor_content().empty()) {
const auto& content = proto.tensor_content();
CASES_WITH_DEFAULT(proto.dtype(), p = Helper<T>::Decode(a, content, N),
dtype_error = true, dtype_error = true);
} else {
CASES_WITH_DEFAULT(proto.dtype(), p = FromProtoField<T>(a, proto, N),
dtype_error = true, dtype_error = true);
}
if (dtype_error || p == nullptr) return false;
}
shape_ = shape;
set_dtype(proto.dtype());
UnrefIfNonNull(buf_);
buf_ = p;
// TODO(misard) add tracking of which kernels and steps are calling
// FromProto.
if (MemoryLoggingEnabled() && buf_ != nullptr && buf_->data() != nullptr) {
LogMemory::RecordTensorAllocation("Unknown (from Proto)",
LogMemory::UNKNOWN_STEP_ID, *this);
}
return true;
}
| null | null | 195,083
|
293865285807198215508518477303207322028
| 31
|
Validate `proto.dtype()` before calling `set_dtype()`.
This prevents a `DCHECK`-fail when the proto contains an invalid dtype for a tensor shape with 0 elements or for an incomplete tensor shape.
PiperOrigin-RevId: 408369083
Change-Id: Ia21a3e3d62a90d642a4561f08f3b543e5ad00c46
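A minimal sketch of the check this message describes, assuming the protobuf-generated DataType_IsValid() helper; the exact upstream placement inside Tensor::FromProto() is not reproduced here.
// Hedged sketch: reject invalid dtypes up front so set_dtype() never
// sees one, even when N == 0 and the decode branch above is skipped.
if (!DataType_IsValid(proto.dtype())) return false;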
|
other
|
flatpak
|
89ae9fe74c6d445bb1b3a40e568d77cf5de47e48
| 1
|
setup_seccomp (FlatpakBwrap *bwrap,
const char *arch,
gulong allowed_personality,
FlatpakRunFlags run_flags,
GError **error)
{
gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0;
gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0;
__attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL;
/**** BEGIN NOTE ON CODE SHARING
*
* There are today a number of different Linux container
* implementations. That will likely continue for long into the
* future. But we can still try to share code, and it's important
* to do so because it affects what library and application writers
* can do, and we should support code portability between different
* container tools.
*
* This syscall blocklist is copied from linux-user-chroot, which was in turn
* clearly influenced by the Sandstorm.io blocklist.
*
* If you make any changes here, I suggest sending the changes along
* to other sandbox maintainers. Using the libseccomp list is also
* an appropriate venue:
* https://groups.google.com/forum/#!forum/libseccomp
*
* A non-exhaustive list of links to container tooling that might
* want to share this blocklist:
*
* https://github.com/sandstorm-io/sandstorm
* in src/sandstorm/supervisor.c++
* https://github.com/flatpak/flatpak.git
* in common/flatpak-run.c
* https://git.gnome.org/browse/linux-user-chroot
* in src/setup-seccomp.c
*
**** END NOTE ON CODE SHARING
*/
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_blocklist[] = {
/* Block dmesg */
{SCMP_SYS (syslog), EPERM},
/* Useless old syscall */
{SCMP_SYS (uselib), EPERM},
/* Don't allow disabling accounting */
{SCMP_SYS (acct), EPERM},
/* 16-bit code is unnecessary in the sandbox, and modify_ldt is a
historic source of interesting information leaks. */
{SCMP_SYS (modify_ldt), EPERM},
/* Don't allow reading current quota use */
{SCMP_SYS (quotactl), EPERM},
/* Don't allow access to the kernel keyring */
{SCMP_SYS (add_key), EPERM},
{SCMP_SYS (keyctl), EPERM},
{SCMP_SYS (request_key), EPERM},
/* Scary VM/NUMA ops */
{SCMP_SYS (move_pages), EPERM},
{SCMP_SYS (mbind), EPERM},
{SCMP_SYS (get_mempolicy), EPERM},
{SCMP_SYS (set_mempolicy), EPERM},
{SCMP_SYS (migrate_pages), EPERM},
/* Don't allow subnamespace setups: */
{SCMP_SYS (unshare), EPERM},
{SCMP_SYS (mount), EPERM},
{SCMP_SYS (pivot_root), EPERM},
#if defined(__s390__) || defined(__s390x__) || defined(__CRIS__)
/* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack
* and flags arguments are reversed so the flags come second */
{SCMP_SYS (clone), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#else
/* Normally the flags come first */
{SCMP_SYS (clone), EPERM, &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#endif
/* Don't allow faking input to the controlling tty (CVE-2017-5226) */
{SCMP_SYS (ioctl), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)},
};
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_nondevel_blocklist[] = {
/* Profiling operations; we expect these to be done by tools from outside
* the sandbox. In particular perf has been the source of many CVEs.
*/
{SCMP_SYS (perf_event_open), EPERM},
/* Don't allow you to switch to bsd emulation or whatnot */
{SCMP_SYS (personality), EPERM, &SCMP_A0 (SCMP_CMP_NE, allowed_personality)},
{SCMP_SYS (ptrace), EPERM}
};
/* Blocklist all but unix, inet, inet6 and netlink */
struct
{
int family;
FlatpakRunFlags flags_mask;
} socket_family_allowlist[] = {
/* NOTE: Keep in numerical order */
{ AF_UNSPEC, 0 },
{ AF_LOCAL, 0 },
{ AF_INET, 0 },
{ AF_INET6, 0 },
{ AF_NETLINK, 0 },
{ AF_CAN, FLATPAK_RUN_FLAG_CANBUS },
{ AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH },
};
int last_allowed_family;
int i, r;
g_auto(GLnxTmpfile) seccomp_tmpf = { 0, };
seccomp = seccomp_init (SCMP_ACT_ALLOW);
if (!seccomp)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed"));
if (arch != NULL)
{
uint32_t arch_id = 0;
const uint32_t *extra_arches = NULL;
if (strcmp (arch, "i386") == 0)
{
arch_id = SCMP_ARCH_X86;
}
else if (strcmp (arch, "x86_64") == 0)
{
arch_id = SCMP_ARCH_X86_64;
extra_arches = seccomp_x86_64_extra_arches;
}
else if (strcmp (arch, "arm") == 0)
{
arch_id = SCMP_ARCH_ARM;
}
#ifdef SCMP_ARCH_AARCH64
else if (strcmp (arch, "aarch64") == 0)
{
arch_id = SCMP_ARCH_AARCH64;
extra_arches = seccomp_aarch64_extra_arches;
}
#endif
/* We only really need to handle arches on multiarch systems.
* If only one arch is supported the default is fine */
if (arch_id != 0)
{
/* This *adds* the target arch, instead of replacing the
native one. This is not ideal, because we'd like to only
allow the target arch, but we can't really disallow the
native arch at this point, because then bubblewrap
couldn't continue running. */
r = seccomp_arch_add (seccomp, arch_id);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter"));
if (multiarch && extra_arches != NULL)
{
for (i = 0; extra_arches[i] != 0; i++)
{
r = seccomp_arch_add (seccomp, extra_arches[i]);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter"));
}
}
}
}
/* TODO: Should we filter the kernel keyring syscalls in some way?
* We do want them to be used by desktop apps, but they could also perhaps
* leak system stuff or secrets from other apps.
*/
for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++)
{
int scall = syscall_blocklist[i].scall;
int errnum = syscall_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
if (!devel)
{
for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++)
{
int scall = syscall_nondevel_blocklist[i].scall;
int errnum = syscall_nondevel_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_nondevel_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_nondevel_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
}
/* Socket filtering doesn't work on e.g. i386, so ignore failures here
* However, we need to use seccomp_rule_add_exact to avoid libseccomp doing
* something else: https://github.com/seccomp/libseccomp/issues/8 */
last_allowed_family = -1;
for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++)
{
int family = socket_family_allowlist[i].family;
int disallowed;
if (socket_family_allowlist[i].flags_mask != 0 &&
(socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask)
continue;
for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++)
{
/* Blocklist the in-between valid families */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed));
}
last_allowed_family = family;
}
/* Blocklist the rest */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1));
if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error))
return FALSE;
if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf"));
lseek (seccomp_tmpf.fd, 0, SEEK_SET);
flatpak_bwrap_add_args_data_fd (bwrap,
"--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL);
return TRUE;
}
| null | null | 195,085
|
75659961309132277975542703793572192184
| 250
|
run: Add cross-references for some other seccomp syscall filters
Signed-off-by: Simon McVittie <[email protected]>
|
other
|
tensorflow
|
35f0fabb4c178253a964d7aabdbb15c6a398b69a
| 1
|
llvm::Optional<Value> simplifyBroadcast(ShapeComponentAnalysis& analysis,
ValueRange shapes, Location loc,
OpBuilder* builder) {
// First find the input shape with the largest rank.
SmallVector<ArrayRef<ShapeComponentAnalysis::SymbolicExpr>> shapes_found;
size_t maxRank = 0;
for (const auto &shape : llvm::enumerate(shapes)) {
auto found_shape = analysis.GetValueInfo(shape.value());
if (!found_shape) return {};
shapes_found.push_back(*found_shape);
maxRank = std::max(maxRank, found_shape->size());
}
SmallVector<const ShapeComponentAnalysis::SymbolicExpr*> joined_dimensions(
maxRank);
SmallVector<std::pair<Value, int64_t>> shape_and_rank_for_dim(maxRank);
for (const auto &shape : llvm::enumerate(shapes_found)) {
for (const auto &dim : llvm::enumerate(llvm::reverse(shape.value()))) {
// 1 dimensions don't contribute to the final result.
if (dim.value().isConstant(1)) continue;
// If it's not a 1 dimension it will be present in the result. Remember
// where it came from.
auto index = maxRank - dim.index() - 1;
if (!joined_dimensions[index]) {
joined_dimensions[index] = &dim.value();
shape_and_rank_for_dim[index] =
std::make_pair(shapes[shape.index()], shape.value().size());
continue;
}
// Bail if the dimensions are neither equal nor 1.
if (*joined_dimensions[index] != dim.value()) return {};
}
}
// If the output is the same as one of the inputs just return that.
if (llvm::is_splat(shape_and_rank_for_dim) &&
shape_and_rank_for_dim[0].first) {
return shape_and_rank_for_dim[0].first;
}
// Otherwise rematerialize the shape from the pieces we have.
SmallVector<Value> elements;
for (int i = 0; i != maxRank; ++i) {
// 1 dimensions are filtered above, recreate the constant.
if (!shape_and_rank_for_dim[i].first) {
auto one = builder->getIntegerAttr(
shapes[0].getType().cast<RankedTensorType>().getElementType(), 1);
elements.push_back(builder->create<ConstantOp>(loc, one));
continue;
}
// Extract from one of the shapes, accounting for the reverse indexing
// performed by broadcast.
Value index = builder->create<ConstantIndexOp>(
loc, i - maxRank + shape_and_rank_for_dim[i].second);
elements.push_back(builder->create<tensor::ExtractOp>(
loc, shape_and_rank_for_dim[i].first, index));
}
return Value(builder->create<tensor::FromElementsOp>(loc, elements));
}
| null | null | 195,091
|
30985650941665667852243683012863242334
| 57
|
Avoid Segfault for scalar shapes.
Calling tensor::FromElementsOp with an empty vector of elements and no type
causes a segfault. We need to let the FromElementsOp know which scalar type it
should have.
Also add back the DynamicBroadcastInDimOp canonicalization patterns, which
previously prevented this bug from happening.
Add a regression test that demonstrates the bug.
PiperOrigin-RevId: 417561444
Change-Id: I6d1d6cfb71aabbad6102422625a00bbe253ac95a
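A minimal sketch of the guard this message implies for the simplifyBroadcast() shown above; it assumes this MLIR revision has the FromElementsOp builder overload that takes an explicit result type.
// Hedged sketch: with maxRank == 0 the elements vector is empty, so give
// FromElementsOp an explicit result type instead of letting it infer one
// from zero operands (which segfaults).
auto elementTy =
    shapes[0].getType().cast<RankedTensorType>().getElementType();
auto resultTy =
    RankedTensorType::get({static_cast<int64_t>(maxRank)}, elementTy);
return Value(
    builder->create<tensor::FromElementsOp>(loc, resultTy, elements));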
|
other
|
hermes
|
55e1b2343f4deb1a1b5726cfe1e23b2068217ff2
| 1
|
Literal *hermes::evalUnaryOperator(
UnaryOperatorInst::OpKind kind,
IRBuilder &builder,
Literal *operand) {
switch (kind) {
case UnaryOperatorInst::OpKind::MinusKind:
// Negate constant integers.
switch (operand->getKind()) {
case ValueKind::LiteralNumberKind:
if (auto *literalNum = llvh::dyn_cast<LiteralNumber>(operand)) {
auto V = -literalNum->getValue();
return builder.getLiteralNumber(V);
}
break;
case ValueKind::LiteralUndefinedKind:
return builder.getLiteralNaN();
case ValueKind::LiteralBoolKind:
if (evalIsTrue(builder, operand)) {
return builder.getLiteralNumber(-1);
} else { // evalIsFalse(operand)
return builder.getLiteralNegativeZero();
}
case ValueKind::LiteralNullKind:
return builder.getLiteralNegativeZero();
default:
break;
}
break;
case UnaryOperatorInst::OpKind::TypeofKind:
switch (operand->getKind()) {
case ValueKind::GlobalObjectKind:
case ValueKind::LiteralNullKind:
return builder.getLiteralString("object");
case ValueKind::LiteralUndefinedKind:
return builder.getLiteralString("undefined");
case ValueKind::LiteralBoolKind:
return builder.getLiteralString("boolean");
case ValueKind::LiteralNumberKind:
return builder.getLiteralString("number");
case ValueKind::LiteralStringKind:
return builder.getLiteralString("string");
default:
llvm_unreachable("Invalid literal kind.");
}
break;
case UnaryOperatorInst::OpKind::BangKind:
if (evalIsTrue(builder, operand)) {
return builder.getLiteralBool(false);
}
if (evalIsFalse(builder, operand)) {
return builder.getLiteralBool(true);
}
break;
case UnaryOperatorInst::OpKind::VoidKind:
return builder.getLiteralUndefined();
default:
break;
}
return nullptr;
}
| null | null | 195,092
|
97081669521512223508213999568273283816
| 64
|
Handle typeof applied to empty in InstSimplify
Summary:
Do not simplify `typeof` if it is applied to an invalid type. This
handles a case like the one in the added test, where `typeof` is called
on a literal empty in unreachable code.
Reviewed By: kodafb
Differential Revision: D31000173
fbshipit-source-id: 2d7f69cbcc9c1bb0a916585c07171089444c85dc
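A minimal sketch of the change this summary describes, assuming only that evalUnaryOperator() keeps its "return nullptr when nothing folds" contract: the TypeofKind switch stops asserting on literal kinds it does not recognize and simply declines to simplify.
switch (operand->getKind()) {
// ... the literal cases shown above are unchanged ...
default:
  // Hedged sketch: no llvm_unreachable(); leave the typeof unsimplified
  // (e.g. for a literal empty in unreachable code) so the caller falls
  // through to "return nullptr".
  break;
}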
|
other
|
e2guardian
|
eae46a7e2a57103aadca903c4a24cca94dc502a2
| 1
|
int Socket::startSslClient(const std::string &certificate_path, String hostname)
{
if (isssl) {
stopSsl();
}
ERR_clear_error();
#if OPENSSL_VERSION_NUMBER < 0x10100000L
ctx = SSL_CTX_new(SSLv23_client_method());
#else
ctx = SSL_CTX_new(TLS_client_method());
#endif
if (ctx == NULL) {
#ifdef NETDEBUG
std::cout << thread_id << "Error ssl context is null (check that openssl has been inited)" << std::endl;
#endif
log_ssl_errors("Error ssl context is null for %s", hostname.c_str());
return -1;
}
//set the timeout for the ssl session
if (SSL_CTX_set_timeout(ctx, 130l) < 1) {
SSL_CTX_free(ctx);
ctx = NULL;
return -1;
}
//load certs
ERR_clear_error();
if (certificate_path.length()) {
if (!SSL_CTX_load_verify_locations(ctx, NULL, certificate_path.c_str())) {
#ifdef NETDEBUG
std::cout << thread_id << "couldnt load certificates" << std::endl;
#endif
log_ssl_errors("couldnt load certificates from %s", certificate_path.c_str());
//tidy up
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
} else if (!SSL_CTX_set_default_verify_paths(ctx)) //use default if no cert path given
{
#ifdef NETDEBUG
std::cout << thread_id << "couldnt load certificates" << std::endl;
#endif
log_ssl_errors("couldnt load default certificates for %s", hostname.c_str());
//tidy up
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
// add validation params
ERR_clear_error();
X509_VERIFY_PARAM *x509_param = X509_VERIFY_PARAM_new();
if (!x509_param) {
log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
//X509_VERIFY_PARAM_free(x509_param);
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
ERR_clear_error();
if (!X509_VERIFY_PARAM_set_flags(x509_param, X509_V_FLAG_TRUSTED_FIRST)) {
log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
X509_VERIFY_PARAM_free(x509_param);
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
ERR_clear_error();
if (!SSL_CTX_set1_param(ctx, x509_param)) {
log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
X509_VERIFY_PARAM_free(x509_param);
SSL_CTX_free(ctx);
ctx = NULL;
return -2;
}
X509_VERIFY_PARAM_free(x509_param); // try not freeing this as SSL_CTX_free seems to be trying to free it
//hand socket over to ssl lib
ERR_clear_error();
ssl = SSL_new(ctx);
SSL_set_options(ssl, SSL_OP_ALL);
SSL_set_mode(ssl, SSL_MODE_AUTO_RETRY);
SSL_set_connect_state(ssl);
//fcntl(this->getFD() ,F_SETFL, O_NONBLOCK); // blocking mode used currently
SSL_set_fd(ssl, this->getFD());
SSL_set_tlsext_host_name(ssl, hostname.c_str());
//make io non blocking as select wont tell us if we can do a read without blocking
//BIO_set_nbio(SSL_get_rbio(ssl),1l); // blocking mode used currently
//BIO_set_nbio(SSL_get_wbio(ssl),1l); // blocking mode used currently
ERR_clear_error();
int rc = SSL_connect(ssl);
if (rc < 0) {
log_ssl_errors("ssl_connect failed to %s", hostname.c_str());
#ifdef NETDEBUG
std::cout << thread_id << "ssl_connect failed with error " << SSL_get_error(ssl, rc) << std::endl;
#endif
// tidy up
SSL_free(ssl);
ssl = NULL;
SSL_CTX_free(ctx);
ctx = NULL;
return -3;
}
//should be safer to do this last as nothing will ever try to use a ssl socket that isnt fully setup
isssl = true;
issslserver = false;
return 0;
}
| null | null | 195,095
|
68333959152744630548805483157733462358
| 118
|
Fix bug #707 cert hostnames not being checked
- only happened when openssl v1.1 is used
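A minimal sketch of the missing hostname check, using standard OpenSSL 1.0.2+ verification APIs; exactly where it was inserted in startSslClient() above, and the flag choices, are assumptions rather than the verbatim fix.
// Hedged sketch: enable built-in hostname verification before SSL_connect().
X509_VERIFY_PARAM *param = SSL_get0_param(ssl);
X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
if (!X509_VERIFY_PARAM_set1_host(param, hostname.c_str(), 0))
    return -3;
SSL_set_verify(ssl, SSL_VERIFY_PEER, NULL);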
|
other
|
tensorflow
|
3d89911481ba6ebe8c88c1c0b595412121e6c645
| 1
|
Status BuildInputArgIndex(const OpDef::ArgDef& arg_def, AttrSlice attr_values,
const FunctionDef::ArgAttrs* arg_attrs,
bool ints_on_device,
int64_t resource_arg_unique_id) {
bool is_type_list;
DataTypeVector dtypes;
TF_RETURN_IF_ERROR(
ArgNumType(attr_values, arg_def, &is_type_list, &dtypes));
CHECK_GE(dtypes.size(), size_t{1});
int arg_index = result_.nodes.size();
TF_RETURN_IF_ERROR(
AddItem(arg_def.name(), {true, arg_index, 0, is_type_list, dtypes}));
// Creates dtypes.size() nodes in the graph.
for (size_t i = 0; i < dtypes.size(); ++i) {
TF_RETURN_IF_ERROR(AddItem(strings::StrCat(arg_def.name(), ":", i),
{true, arg_index, 0, false, {dtypes[i]}}));
DCHECK_EQ(arg_index, result_.nodes.size());
string name = arg_def.name();
if (dtypes.size() > 1) {
strings::StrAppend(&name, "_", i);
}
NodeDef* gnode = AddNode(name);
if (ints_on_device && dtypes[i] == DataType::DT_INT32) {
gnode->set_op(FunctionLibraryDefinition::kDeviceArgOp);
} else {
gnode->set_op(FunctionLibraryDefinition::kArgOp);
}
DataType dtype = arg_def.is_ref() ? MakeRefType(dtypes[i]) : dtypes[i];
AddAttr("T", dtype, gnode);
AddAttr("index", arg_index, gnode);
if (resource_arg_unique_id >= 0) {
AddAttr("_resource_arg_unique_id", resource_arg_unique_id, gnode);
}
if (arg_attrs) {
for (const auto& arg_attr : arg_attrs->attr()) {
AddAttr(arg_attr.first, arg_attr.second, gnode->mutable_attr());
}
}
result_.arg_types.push_back(dtypes[i]);
++arg_index;
}
return Status::OK();
}
| null | null | 195,216
|
29878528959453575292588491883602084734
| 43
|
Eliminate `CHECK`-fail from `function.cc`.
PiperOrigin-RevId: 409414744
Change-Id: Ic854e12ab2edb88b165d32e2d632c4ee654d71ad
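A minimal sketch of the replacement, matching the variant shown in the companion record later in this dump (Change-Id I8376ee464d434e9b970ff0ad49edfdaa2a273cfe):
// Hedged sketch: CHECK_GE(dtypes.size(), size_t{1}) becomes a
// recoverable error instead of a process abort.
if (dtypes.empty())
  return errors::Internal("Expected a list of at least one dtype");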
|
other
|
mruby
|
0849a2885f81cfd82134992c06df3ccd59052ac7
| 1
|
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val)
{
int idx;
int type = nint(tree->car);
switch (type) {
case NODE_GVAR:
case NODE_ARG:
case NODE_LVAR:
case NODE_IVAR:
case NODE_CVAR:
case NODE_CONST:
case NODE_NIL:
case NODE_MASGN:
if (rhs) {
codegen(s, rhs, VAL);
pop();
sp = cursp();
}
break;
case NODE_COLON2:
case NODE_CALL:
case NODE_SCALL:
/* keep evaluation order */
break;
case NODE_NVAR:
codegen_error(s, "Can't assign to numbered parameter");
break;
default:
codegen_error(s, "unknown lhs");
break;
}
tree = tree->cdr;
switch (type) {
case NODE_GVAR:
gen_setxv(s, OP_SETGV, sp, nsym(tree), val);
break;
case NODE_ARG:
case NODE_LVAR:
idx = lv_idx(s, nsym(tree));
if (idx > 0) {
if (idx != sp) {
gen_move(s, idx, sp, val);
}
break;
}
else { /* upvar */
gen_setupvar(s, sp, nsym(tree));
}
break;
case NODE_IVAR:
gen_setxv(s, OP_SETIV, sp, nsym(tree), val);
break;
case NODE_CVAR:
gen_setxv(s, OP_SETCV, sp, nsym(tree), val);
break;
case NODE_CONST:
gen_setxv(s, OP_SETCONST, sp, nsym(tree), val);
break;
case NODE_COLON2:
if (sp) {
gen_move(s, cursp(), sp, 0);
}
sp = cursp();
push();
codegen(s, tree->car, VAL);
if (rhs) {
codegen(s, rhs, VAL); pop();
gen_move(s, sp, cursp(), 0);
}
pop_n(2);
idx = new_sym(s, nsym(tree->cdr));
genop_2(s, OP_SETMCNST, sp, idx);
break;
case NODE_CALL:
case NODE_SCALL:
{
int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0;
mrb_sym mid = nsym(tree->cdr->car);
top = cursp();
if (val || sp == cursp()) {
push(); /* room for retval */
}
call = cursp();
if (!tree->car) {
noself = 1;
push();
}
else {
codegen(s, tree->car, VAL); /* receiver */
}
if (safe) {
int recv = cursp()-1;
gen_move(s, cursp(), recv, 1);
skip = genjmp2_0(s, OP_JMPNIL, cursp(), val);
}
tree = tree->cdr->cdr->car;
if (tree) {
if (tree->car) { /* positional arguments */
n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14);
if (n < 0) { /* variable length */
n = 15;
push();
}
}
if (tree->cdr->car) { /* keyword arguments */
gen_hash(s, tree->cdr->car->cdr, VAL, 0);
if (n < 14) {
n++;
push();
}
else {
pop();
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
}
}
if (rhs) {
codegen(s, rhs, VAL);
pop();
}
else {
gen_move(s, cursp(), sp, 0);
}
if (val) {
gen_move(s, top, cursp(), 1);
}
if (n < 14) {
n++;
}
else {
pop();
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
s->sp = call;
if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) {
genop_1(s, OP_SETIDX, cursp());
}
else {
genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n);
}
if (safe) {
dispatch(s, skip);
}
s->sp = top;
}
break;
case NODE_MASGN:
gen_vmassignment(s, tree->car, sp, val);
break;
/* splat without assignment */
case NODE_NIL:
break;
default:
codegen_error(s, "unknown lhs");
break;
}
if (val) push();
}
| null | null | 195,218
|
181597252020936416012057949618374135930
| 168
|
codegen.c: stack position may be wrong on assignments.
When `[]=` access includes keyword arguments.
|
other
|
tmate-ssh-server
|
1c020d1f5ca462f5b150b46a027aaa1bbe3c9596
| 1
|
int main(int argc, char **argv, char **envp)
{
int opt;
while ((opt = getopt(argc, argv, "b:h:k:p:q:w:z:xv")) != -1) {
switch (opt) {
case 'b':
tmate_settings->bind_addr = xstrdup(optarg);
break;
case 'h':
tmate_settings->tmate_host = xstrdup(optarg);
break;
case 'k':
tmate_settings->keys_dir = xstrdup(optarg);
break;
case 'p':
tmate_settings->ssh_port = atoi(optarg);
break;
case 'q':
tmate_settings->ssh_port_advertized = atoi(optarg);
break;
case 'w':
tmate_settings->websocket_hostname = xstrdup(optarg);
break;
case 'z':
tmate_settings->websocket_port = atoi(optarg);
break;
case 'x':
tmate_settings->use_proxy_protocol = true;
break;
case 'v':
tmate_settings->log_level++;
break;
default:
usage();
return 1;
}
}
init_logging(tmate_settings->log_level);
setup_locale();
if (!tmate_settings->tmate_host)
tmate_settings->tmate_host = get_full_hostname();
cmdline = *argv;
cmdline_end = *envp;
tmate_preload_trace_lib();
tmate_catch_sigsegv();
tmate_init_rand();
if ((mkdir(TMATE_WORKDIR, 0701) < 0 && errno != EEXIST) ||
(mkdir(TMATE_WORKDIR "/sessions", 0703) < 0 && errno != EEXIST) ||
(mkdir(TMATE_WORKDIR "/jail", 0700) < 0 && errno != EEXIST))
tmate_fatal("Cannot prepare session in " TMATE_WORKDIR);
/* The websocket server needs to access the /session dir to rename sockets */
if ((chmod(TMATE_WORKDIR, 0701) < 0) ||
(chmod(TMATE_WORKDIR "/sessions", 0703) < 0) ||
(chmod(TMATE_WORKDIR "/jail", 0700) < 0))
tmate_fatal("Cannot prepare session in " TMATE_WORKDIR);
tmate_ssh_server_main(tmate_session,
tmate_settings->keys_dir, tmate_settings->bind_addr, tmate_settings->ssh_port);
return 0;
}
| null | null | 195,220
|
103962397187887394013536935095033198609
| 68
|
Harden /tmp/tmate directory
Suggested by Matthias Gerstner
|
other
|
pjproject
|
f74c1fc22b760d2a24369aa72c74c4a9ab985859
| 1
|
void pjmedia_rtcp_xr_rx_rtcp_xr( pjmedia_rtcp_xr_session *sess,
const void *pkt,
pj_size_t size)
{
const pjmedia_rtcp_xr_pkt *rtcp_xr = (pjmedia_rtcp_xr_pkt*) pkt;
const pjmedia_rtcp_xr_rb_rr_time *rb_rr_time = NULL;
const pjmedia_rtcp_xr_rb_dlrr *rb_dlrr = NULL;
const pjmedia_rtcp_xr_rb_stats *rb_stats = NULL;
const pjmedia_rtcp_xr_rb_voip_mtc *rb_voip_mtc = NULL;
const pjmedia_rtcp_xr_rb_header *rb_hdr = (pjmedia_rtcp_xr_rb_header*)
rtcp_xr->buf;
unsigned pkt_len, rb_len;
if (rtcp_xr->common.pt != RTCP_XR)
return;
pkt_len = pj_ntohs((pj_uint16_t)rtcp_xr->common.length);
if ((pkt_len + 1) > (size / 4))
return;
/* Parse report rpt_types */
while ((pj_int32_t*)rb_hdr < (pj_int32_t*)pkt + pkt_len)
{
rb_len = pj_ntohs((pj_uint16_t)rb_hdr->length);
/* Just skip any block with length == 0 (no report content) */
if (rb_len) {
switch (rb_hdr->bt) {
case BT_RR_TIME:
rb_rr_time = (pjmedia_rtcp_xr_rb_rr_time*) rb_hdr;
break;
case BT_DLRR:
rb_dlrr = (pjmedia_rtcp_xr_rb_dlrr*) rb_hdr;
break;
case BT_STATS:
rb_stats = (pjmedia_rtcp_xr_rb_stats*) rb_hdr;
break;
case BT_VOIP_METRICS:
rb_voip_mtc = (pjmedia_rtcp_xr_rb_voip_mtc*) rb_hdr;
break;
default:
break;
}
}
rb_hdr = (pjmedia_rtcp_xr_rb_header*)
((pj_int32_t*)rb_hdr + rb_len + 1);
}
/* Receiving RR Time */
if (rb_rr_time) {
/* Save LRR from NTP timestamp of the RR time block report */
sess->rx_lrr = ((pj_ntohl(rb_rr_time->ntp_sec) & 0x0000FFFF) << 16) |
((pj_ntohl(rb_rr_time->ntp_frac) >> 16) & 0xFFFF);
/* Calculate RR arrival time for DLRR */
pj_get_timestamp(&sess->rx_lrr_time);
TRACE_((sess->name, "Rx RTCP SR: ntp_ts=%p", sess->rx_lrr,
(pj_uint32_t)(sess->rx_lrr_time.u64*65536/
sess->rtcp_session->ts_freq.u64)));
}
/* Receiving DLRR */
if (rb_dlrr) {
pj_uint32_t lrr, now, dlrr;
pj_uint64_t eedelay;
pjmedia_rtcp_ntp_rec ntp;
/* LRR is the middle 32bit of NTP. It has 1/65536 second
* resolution
*/
lrr = pj_ntohl(rb_dlrr->item.lrr);
/* DLRR is delay since LRR, also in 1/65536 resolution */
dlrr = pj_ntohl(rb_dlrr->item.dlrr);
/* Get current time, and convert to 1/65536 resolution */
pjmedia_rtcp_get_ntp_time(sess->rtcp_session, &ntp);
now = ((ntp.hi & 0xFFFF) << 16) + (ntp.lo >> 16);
/* End-to-end delay is (now-lrr-dlrr) */
eedelay = now - lrr - dlrr;
/* Convert end to end delay to usec (keeping the calculation in
* 64bit space)::
* sess->ee_delay = (eedelay * 1000) / 65536;
*/
if (eedelay < 4294) {
eedelay = (eedelay * 1000000) >> 16;
} else {
eedelay = (eedelay * 1000) >> 16;
eedelay *= 1000;
}
TRACE_((sess->name, "Rx RTCP XR DLRR: lrr=%p, dlrr=%p (%d:%03dms), "
"now=%p, rtt=%p",
lrr, dlrr, dlrr/65536, (dlrr%65536)*1000/65536,
now, (pj_uint32_t)eedelay));
/* Only save calculation if "now" is greater than lrr, or
* otherwise rtt will be invalid
*/
if (now-dlrr >= lrr) {
unsigned rtt = (pj_uint32_t)eedelay;
/* Check that eedelay value really makes sense.
* We allow up to 30 seconds RTT!
*/
if (eedelay <= 30 * 1000 * 1000UL) {
/* "Normalize" rtt value that is exceptionally high.
* For such values, "normalize" the rtt to be three times
* the average value.
*/
if (rtt>((unsigned)sess->stat.rtt.mean*3) && sess->stat.rtt.n!=0)
{
unsigned orig_rtt = rtt;
rtt = (unsigned)sess->stat.rtt.mean*3;
PJ_LOG(5,(sess->name,
"RTT value %d usec is normalized to %d usec",
orig_rtt, rtt));
}
TRACE_((sess->name, "RTCP RTT is set to %d usec", rtt));
pj_math_stat_update(&sess->stat.rtt, rtt);
}
} else {
PJ_LOG(5, (sess->name, "Internal RTCP NTP clock skew detected: "
"lrr=%p, now=%p, dlrr=%p (%d:%03dms), "
"diff=%d",
lrr, now, dlrr, dlrr/65536,
(dlrr%65536)*1000/65536,
dlrr-(now-lrr)));
}
}
/* Receiving Statistics Summary */
if (rb_stats) {
pj_uint8_t flags = rb_stats->header.specific;
pj_bzero(&sess->stat.tx.stat_sum, sizeof(sess->stat.tx.stat_sum));
/* Range of packets sequence reported in this blocks */
sess->stat.tx.stat_sum.begin_seq = pj_ntohs(rb_stats->begin_seq);
sess->stat.tx.stat_sum.end_seq = pj_ntohs(rb_stats->end_seq);
/* Get flags of valid fields */
sess->stat.tx.stat_sum.l = (flags & (1 << 7)) != 0;
sess->stat.tx.stat_sum.d = (flags & (1 << 6)) != 0;
sess->stat.tx.stat_sum.j = (flags & (1 << 5)) != 0;
sess->stat.tx.stat_sum.t = (flags & (3 << 3)) != 0;
/* Fetch the reports info */
if (sess->stat.tx.stat_sum.l) {
sess->stat.tx.stat_sum.lost = pj_ntohl(rb_stats->lost);
}
if (sess->stat.tx.stat_sum.d) {
sess->stat.tx.stat_sum.dup = pj_ntohl(rb_stats->dup);
}
if (sess->stat.tx.stat_sum.j) {
sess->stat.tx.stat_sum.jitter.min = pj_ntohl(rb_stats->jitter_min);
sess->stat.tx.stat_sum.jitter.max = pj_ntohl(rb_stats->jitter_max);
sess->stat.tx.stat_sum.jitter.mean= pj_ntohl(rb_stats->jitter_mean);
pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.jitter,
pj_ntohl(rb_stats->jitter_dev));
}
if (sess->stat.tx.stat_sum.t) {
sess->stat.tx.stat_sum.toh.min = rb_stats->toh_min;
sess->stat.tx.stat_sum.toh.max = rb_stats->toh_max;
sess->stat.tx.stat_sum.toh.mean= rb_stats->toh_mean;
pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.toh,
pj_ntohl(rb_stats->toh_dev));
}
pj_gettimeofday(&sess->stat.tx.stat_sum.update);
}
/* Receiving VoIP Metrics */
if (rb_voip_mtc) {
sess->stat.tx.voip_mtc.loss_rate = rb_voip_mtc->loss_rate;
sess->stat.tx.voip_mtc.discard_rate = rb_voip_mtc->discard_rate;
sess->stat.tx.voip_mtc.burst_den = rb_voip_mtc->burst_den;
sess->stat.tx.voip_mtc.gap_den = rb_voip_mtc->gap_den;
sess->stat.tx.voip_mtc.burst_dur = pj_ntohs(rb_voip_mtc->burst_dur);
sess->stat.tx.voip_mtc.gap_dur = pj_ntohs(rb_voip_mtc->gap_dur);
sess->stat.tx.voip_mtc.rnd_trip_delay =
pj_ntohs(rb_voip_mtc->rnd_trip_delay);
sess->stat.tx.voip_mtc.end_sys_delay =
pj_ntohs(rb_voip_mtc->end_sys_delay);
/* signal & noise level encoded in two's complement form */
sess->stat.tx.voip_mtc.signal_lvl = (pj_int8_t)
((rb_voip_mtc->signal_lvl > 127)?
((int)rb_voip_mtc->signal_lvl - 256) :
rb_voip_mtc->signal_lvl);
sess->stat.tx.voip_mtc.noise_lvl = (pj_int8_t)
((rb_voip_mtc->noise_lvl > 127)?
((int)rb_voip_mtc->noise_lvl - 256) :
rb_voip_mtc->noise_lvl);
sess->stat.tx.voip_mtc.rerl = rb_voip_mtc->rerl;
sess->stat.tx.voip_mtc.gmin = rb_voip_mtc->gmin;
sess->stat.tx.voip_mtc.r_factor = rb_voip_mtc->r_factor;
sess->stat.tx.voip_mtc.ext_r_factor = rb_voip_mtc->ext_r_factor;
sess->stat.tx.voip_mtc.mos_lq = rb_voip_mtc->mos_lq;
sess->stat.tx.voip_mtc.mos_cq = rb_voip_mtc->mos_cq;
sess->stat.tx.voip_mtc.rx_config = rb_voip_mtc->rx_config;
sess->stat.tx.voip_mtc.jb_nom = pj_ntohs(rb_voip_mtc->jb_nom);
sess->stat.tx.voip_mtc.jb_max = pj_ntohs(rb_voip_mtc->jb_max);
sess->stat.tx.voip_mtc.jb_abs_max = pj_ntohs(rb_voip_mtc->jb_abs_max);
pj_gettimeofday(&sess->stat.tx.voip_mtc.update);
}
}
| null | null | 195,230
|
85948404708100571851941114580767602190
| 215
|
Merge pull request from GHSA-r374-qrwv-86hh
|
other
|
gpac
|
893fb99b606eebfae46cde151846a980e689039b
| 1
|
s32 gf_avc_parse_nalu(GF_BitStream *bs, AVCState *avc)
{
u8 idr_flag;
s32 slice, ret;
u32 nal_hdr;
AVCSliceInfo n_state;
gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
nal_hdr = gf_bs_read_u8(bs);
slice = 0;
memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo));
avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F;
n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3;
idr_flag = 0;
switch (n_state.nal_unit_type) {
case GF_AVC_NALU_ACCESS_UNIT:
case GF_AVC_NALU_END_OF_SEQ:
case GF_AVC_NALU_END_OF_STREAM:
ret = 1;
break;
case GF_AVC_NALU_SVC_SLICE:
SVC_ReadNal_header_extension(bs, &n_state.NalHeader);
// slice buffer - read the info and compare.
/*ret = */svc_parse_slice(bs, avc, &n_state);
if (avc->s_info.nal_ref_idc) {
n_state.poc_lsb_prev = avc->s_info.poc_lsb;
n_state.poc_msb_prev = avc->s_info.poc_msb;
}
avc_compute_poc(&n_state);
if (avc->s_info.poc != n_state.poc) {
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return 1;
}
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return 0;
case GF_AVC_NALU_SVC_PREFIX_NALU:
SVC_ReadNal_header_extension(bs, &n_state.NalHeader);
return 0;
case GF_AVC_NALU_IDR_SLICE:
case GF_AVC_NALU_NON_IDR_SLICE:
case GF_AVC_NALU_DP_A_SLICE:
case GF_AVC_NALU_DP_B_SLICE:
case GF_AVC_NALU_DP_C_SLICE:
slice = 1;
/* slice buffer - read the info and compare.*/
ret = avc_parse_slice(bs, avc, idr_flag, &n_state);
if (ret < 0) return ret;
ret = 0;
if (
((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE))
&& (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE)
) {
break;
}
if (avc->s_info.frame_num != n_state.frame_num) {
ret = 1;
break;
}
if (avc->s_info.field_pic_flag != n_state.field_pic_flag) {
ret = 1;
break;
}
if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) &&
(!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) {
ret = 1;
break;
}
assert(avc->s_info.sps);
if (avc->s_info.sps->poc_type == n_state.sps->poc_type) {
if (!avc->s_info.sps->poc_type) {
if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) {
ret = 1;
break;
}
if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) {
ret = 1;
break;
}
}
else if (avc->s_info.sps->poc_type == 1) {
if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) {
ret = 1;
break;
}
if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) {
ret = 1;
break;
}
}
}
if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) {
if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/
ret = 1;
break;
}
else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/
ret = 1;
break;
}
}
break;
case GF_AVC_NALU_SEQ_PARAM:
avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_PIC_PARAM:
avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SEQ_PARAM_EXT:
avc->last_ps_idx = (s32) gf_bs_read_ue(bs);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SEI:
case GF_AVC_NALU_FILLER_DATA:
return 0;
default:
if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1;
//To detect change of AU when multiple sps and pps in stream
else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE)
ret = 1;
else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE)
ret = 1;
else
ret = 0;
break;
}
/* save _prev values */
if (ret && avc->s_info.sps) {
n_state.frame_num_offset_prev = avc->s_info.frame_num_offset;
if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0))
n_state.frame_num_prev = avc->s_info.frame_num;
if (avc->s_info.nal_ref_idc) {
n_state.poc_lsb_prev = avc->s_info.poc_lsb;
n_state.poc_msb_prev = avc->s_info.poc_msb;
}
}
if (slice)
avc_compute_poc(&n_state);
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return ret;
}
| null | null | 195,231
|
162649322913866055859668015587465898752
| 161
|
fixed #1902
|
other
|
tensorflow
|
97282c6d0d34476b6ba033f961590b783fa184cd
| 1
|
Status SetUnknownShape(const NodeDef* node, int output_port) {
shape_inference::ShapeHandle shape =
GetUnknownOutputShape(node, output_port);
InferenceContext* ctx = GetContext(node);
if (ctx == nullptr) {
return errors::InvalidArgument("Missing context");
}
ctx->set_output(output_port, shape);
return Status::OK();
}
| null | null | 195,233
|
9711966831872105766661377543363260224
| 10
|
Prevent a crash due to heap OOB write in grappler.
PiperOrigin-RevId: 408318417
Change-Id: If095feb8c001e3a8ac4a85b7387b81e8309df47d
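A minimal sketch of the kind of bounds check this message implies; num_outputs() is a real InferenceContext accessor, but the exact upstream validation is not reproduced here.
// Hedged sketch: refuse out-of-range ports instead of letting
// ctx->set_output() write past the outputs array.
if (output_port < 0 || output_port >= ctx->num_outputs()) {
  return errors::InvalidArgument("Invalid output port ", output_port,
                                 " for node ", node->name());
}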
|
other
|
tensorflow
|
dcc21c7bc972b10b6fb95c2fb0f4ab5a59680ec2
| 1
|
Status BuildInputArgIndex(const OpDef::ArgDef& arg_def, AttrSlice attr_values,
const FunctionDef::ArgAttrs* arg_attrs,
bool ints_on_device,
int64_t resource_arg_unique_id) {
bool is_type_list;
DataTypeVector dtypes;
TF_RETURN_IF_ERROR(
ArgNumType(attr_values, arg_def, &is_type_list, &dtypes));
if (dtypes.size() < size_t{1}) {
return errors::Internal("Expected a list of at least one dtype");
}
int arg_index = result_.nodes.size();
TF_RETURN_IF_ERROR(
AddItem(arg_def.name(), {true, arg_index, 0, is_type_list, dtypes}));
// Creates dtypes.size() nodes in the graph.
for (size_t i = 0; i < dtypes.size(); ++i) {
TF_RETURN_IF_ERROR(AddItem(strings::StrCat(arg_def.name(), ":", i),
{true, arg_index, 0, false, {dtypes[i]}}));
DCHECK_EQ(arg_index, result_.nodes.size());
string name = arg_def.name();
if (dtypes.size() > 1) {
strings::StrAppend(&name, "_", i);
}
NodeDef* gnode = AddNode(name);
if (ints_on_device && dtypes[i] == DataType::DT_INT32) {
gnode->set_op(FunctionLibraryDefinition::kDeviceArgOp);
} else {
gnode->set_op(FunctionLibraryDefinition::kArgOp);
}
DataType dtype = arg_def.is_ref() ? MakeRefType(dtypes[i]) : dtypes[i];
AddAttr("T", dtype, gnode);
AddAttr("index", arg_index, gnode);
if (resource_arg_unique_id >= 0) {
AddAttr("_resource_arg_unique_id", resource_arg_unique_id, gnode);
}
if (arg_attrs) {
for (const auto& arg_attr : arg_attrs->attr()) {
AddAttr(arg_attr.first, arg_attr.second, gnode->mutable_attr());
}
}
result_.arg_types.push_back(dtypes[i]);
++arg_index;
}
return Status::OK();
}
| null | null | 195,234
|
178235233323678897399616972223891327831
| 45
|
Eliminate debug `CHECK`-fail from `function.cc`
PiperOrigin-RevId: 409416119
Change-Id: I8376ee464d434e9b970ff0ad49edfdaa2a273cfe
|
other
|
ImageMagick
|
f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
| 1
|
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define CropBox "CropBox"
#define DeviceCMYK "DeviceCMYK"
#define MediaBox "MediaBox"
#define RenderPCLText " Rendering PCL... "
char
command[MagickPathExtent],
*density,
filename[MagickPathExtent],
geometry[MagickPathExtent],
*options,
input_filename[MagickPathExtent];
const DelegateInfo
*delegate_info;
Image
*image,
*next_image;
ImageInfo
*read_info;
MagickBooleanType
cmyk,
status;
PointInfo
delta;
RectangleInfo
bounding_box,
page;
char
*p;
ssize_t
c;
SegmentInfo
bounds;
size_t
height,
width;
ssize_t
count;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Open image file.
*/
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
if (status == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Set the page density.
*/
delta.x=DefaultResolution;
delta.y=DefaultResolution;
if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0))
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
if ((flags & RhoValue) != 0)
image->resolution.x=geometry_info.rho;
image->resolution.y=image->resolution.x;
if ((flags & SigmaValue) != 0)
image->resolution.y=geometry_info.sigma;
}
/*
Determine page geometry from the PCL media box.
*/
cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse;
count=0;
(void) memset(&bounding_box,0,sizeof(bounding_box));
(void) memset(&bounds,0,sizeof(bounds));
(void) memset(&page,0,sizeof(page));
(void) memset(command,0,sizeof(command));
p=command;
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
if (image_info->page != (char *) NULL)
continue;
/*
Note PCL elements.
*/
*p++=(char) c;
if ((c != (int) '/') && (c != '\n') &&
((size_t) (p-command) < (MagickPathExtent-1)))
continue;
*p='\0';
p=command;
/*
Is this a CMYK document?
*/
if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0)
{
/*
Note region defined by crop box.
*/
count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0)
{
/*
Note region defined by media box.
*/
count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (count != 4)
continue;
/*
Set PCL render geometry.
*/
width=(size_t) floor(bounds.x2-bounds.x1+0.5);
height=(size_t) floor(bounds.y2-bounds.y1+0.5);
if (width > page.width)
page.width=width;
if (height > page.height)
page.height=height;
}
(void) CloseBlob(image);
/*
Render PCL with the GhostPCL delegate.
*/
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
(void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",(double)
page.width,(double) page.height);
if (image_info->monochrome != MagickFalse)
delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception);
else
if (cmyk != MagickFalse)
delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception);
else
delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
image=DestroyImage(image);
return((Image *) NULL);
}
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
density=AcquireString("");
options=AcquireString("");
(void) FormatLocaleString(density,MagickPathExtent,"%gx%g",
image->resolution.x,image->resolution.y);
if (image_info->ping != MagickFalse)
(void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0");
page.width=(size_t) floor(page.width*image->resolution.x/delta.x+0.5);
page.height=(size_t) floor(page.height*image->resolution.y/delta.y+0.5);
(void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double)
page.width,(double) page.height);
image=DestroyImage(image);
read_info=CloneImageInfo(image_info);
*read_info->magick='\0';
if (read_info->number_scenes != 0)
{
if (read_info->number_scenes != 1)
(void) FormatLocaleString(options,MagickPathExtent,"-dLastPage=%.20g",
(double) (read_info->scene+read_info->number_scenes));
else
(void) FormatLocaleString(options,MagickPathExtent,
"-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1,
(double) (read_info->scene+read_info->number_scenes));
read_info->number_scenes=0;
if (read_info->scenes != (char *) NULL)
*read_info->scenes='\0';
}
(void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
(void) AcquireUniqueFilename(read_info->filename);
(void) FormatLocaleString(command,MagickPathExtent,
GetDelegateCommands(delegate_info),
read_info->antialias != MagickFalse ? 4 : 1,
read_info->antialias != MagickFalse ? 4 : 1,density,options,
read_info->filename,input_filename);
options=DestroyString(options);
density=DestroyString(density);
status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command,
(char *) NULL,exception) != 0 ? MagickTrue : MagickFalse;
image=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
(void) RelinquishUniqueFileResource(input_filename);
read_info=DestroyImageInfo(read_info);
if (image == (Image *) NULL)
ThrowReaderException(DelegateError,"PCLDelegateFailed");
if (LocaleCompare(image->magick,"BMP") == 0)
{
Image
*cmyk_image;
cmyk_image=ConsolidateCMYKImages(image,exception);
if (cmyk_image != (Image *) NULL)
{
image=DestroyImageList(image);
image=cmyk_image;
}
}
do
{
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
image->page=page;
if (image_info->ping != MagickFalse)
{
image->magick_columns*=image->resolution.x/2.0;
image->magick_rows*=image->resolution.y/2.0;
image->columns*=image->resolution.x/2.0;
image->rows*=image->resolution.y/2.0;
}
next_image=SyncNextImageInList(image);
if (next_image != (Image *) NULL)
image=next_image;
} while (next_image != (Image *) NULL);
return(GetFirstImageInList(image));
}
| null | null | 195,237
|
64211844764718959430074467543645938607
| 257
|
Fixes #4985: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299 (#4986)
* fix Division by zero in XMenuWidget() of MagickCore/widget.c
* Fix memory leak in AnimateImageCommand() of MagickWand/animate.c and DisplayImageCommand() of MagickWand/display.c
* fix Division by zero in ReadEnhMetaFile() of coders/emf.c
* Resolve conflicts
* fix issue: outside the range of representable values of type 'unsigned char' at coders/psd.c:1025
* fix error: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299
Co-authored-by: zhailiangliang <[email protected]> (see the sketch below for the pcl.c guard)
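A minimal sketch of the pcl.c guard described in the last bullet, assuming a simple clamp against SSIZE_MAX; the actual helper and error path used upstream are not shown here, and throwing at this point in ReadPCLImage() is an assumption.
/* Hedged sketch: clamp the computed page size before the size_t casts so
   a huge density/geometry product cannot overflow the conversion. */
double width=floor(page.width*image->resolution.x/delta.x+0.5);
double height=floor(page.height*image->resolution.y/delta.y+0.5);
if ((width > (double) SSIZE_MAX) || (height > (double) SSIZE_MAX))
  ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit");
page.width=(size_t) width;
page.height=(size_t) height;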
|
other
|
flatpak
|
e26ac7586c392b5eb35ff4609fe232c52523b2cf
| 1
|
setup_seccomp (FlatpakBwrap *bwrap,
const char *arch,
gulong allowed_personality,
FlatpakRunFlags run_flags,
GError **error)
{
gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0;
gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0;
__attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL;
/**** BEGIN NOTE ON CODE SHARING
*
* There are today a number of different Linux container
* implementations. That will likely continue for long into the
* future. But we can still try to share code, and it's important
* to do so because it affects what library and application writers
* can do, and we should support code portability between different
* container tools.
*
* This syscall blocklist is copied from linux-user-chroot, which was in turn
* clearly influenced by the Sandstorm.io blocklist.
*
* If you make any changes here, I suggest sending the changes along
* to other sandbox maintainers. Using the libseccomp list is also
* an appropriate venue:
* https://groups.google.com/forum/#!forum/libseccomp
*
* A non-exhaustive list of links to container tooling that might
* want to share this blocklist:
*
* https://github.com/sandstorm-io/sandstorm
* in src/sandstorm/supervisor.c++
* https://github.com/flatpak/flatpak.git
* in common/flatpak-run.c
* https://git.gnome.org/browse/linux-user-chroot
* in src/setup-seccomp.c
*
**** END NOTE ON CODE SHARING
*/
struct
{
int scall;
struct scmp_arg_cmp *arg;
} syscall_blocklist[] = {
/* Block dmesg */
{SCMP_SYS (syslog)},
/* Useless old syscall */
{SCMP_SYS (uselib)},
/* Don't allow disabling accounting */
{SCMP_SYS (acct)},
/* 16-bit code is unnecessary in the sandbox, and modify_ldt is a
historic source of interesting information leaks. */
{SCMP_SYS (modify_ldt)},
/* Don't allow reading current quota use */
{SCMP_SYS (quotactl)},
/* Don't allow access to the kernel keyring */
{SCMP_SYS (add_key)},
{SCMP_SYS (keyctl)},
{SCMP_SYS (request_key)},
/* Scary VM/NUMA ops */
{SCMP_SYS (move_pages)},
{SCMP_SYS (mbind)},
{SCMP_SYS (get_mempolicy)},
{SCMP_SYS (set_mempolicy)},
{SCMP_SYS (migrate_pages)},
/* Don't allow subnamespace setups: */
{SCMP_SYS (unshare)},
{SCMP_SYS (mount)},
{SCMP_SYS (pivot_root)},
#if defined(__s390__) || defined(__s390x__) || defined(__CRIS__)
/* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack
* and flags arguments are reversed so the flags come second */
{SCMP_SYS (clone), &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#else
/* Normally the flags come first */
{SCMP_SYS (clone), &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#endif
/* Don't allow faking input to the controlling tty (CVE-2017-5226) */
{SCMP_SYS (ioctl), &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)},
};
struct
{
int scall;
struct scmp_arg_cmp *arg;
} syscall_nondevel_blocklist[] = {
/* Profiling operations; we expect these to be done by tools from outside
* the sandbox. In particular perf has been the source of many CVEs.
*/
{SCMP_SYS (perf_event_open)},
/* Don't allow you to switch to bsd emulation or whatnot */
{SCMP_SYS (personality), &SCMP_A0 (SCMP_CMP_NE, allowed_personality)},
{SCMP_SYS (ptrace)}
};
/* Blocklist all but unix, inet, inet6 and netlink */
struct
{
int family;
FlatpakRunFlags flags_mask;
} socket_family_allowlist[] = {
/* NOTE: Keep in numerical order */
{ AF_UNSPEC, 0 },
{ AF_LOCAL, 0 },
{ AF_INET, 0 },
{ AF_INET6, 0 },
{ AF_NETLINK, 0 },
{ AF_CAN, FLATPAK_RUN_FLAG_CANBUS },
{ AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH },
};
int last_allowed_family;
int i, r;
g_auto(GLnxTmpfile) seccomp_tmpf = { 0, };
seccomp = seccomp_init (SCMP_ACT_ALLOW);
if (!seccomp)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed"));
if (arch != NULL)
{
uint32_t arch_id = 0;
const uint32_t *extra_arches = NULL;
if (strcmp (arch, "i386") == 0)
{
arch_id = SCMP_ARCH_X86;
}
else if (strcmp (arch, "x86_64") == 0)
{
arch_id = SCMP_ARCH_X86_64;
extra_arches = seccomp_x86_64_extra_arches;
}
else if (strcmp (arch, "arm") == 0)
{
arch_id = SCMP_ARCH_ARM;
}
#ifdef SCMP_ARCH_AARCH64
else if (strcmp (arch, "aarch64") == 0)
{
arch_id = SCMP_ARCH_AARCH64;
extra_arches = seccomp_aarch64_extra_arches;
}
#endif
/* We only really need to handle arches on multiarch systems.
* If only one arch is supported the default is fine */
if (arch_id != 0)
{
/* This *adds* the target arch, instead of replacing the
native one. This is not ideal, because we'd like to only
allow the target arch, but we can't really disallow the
native arch at this point, because then bubblewrap
couldn't continue running. */
r = seccomp_arch_add (seccomp, arch_id);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter"));
if (multiarch && extra_arches != NULL)
{
for (i = 0; extra_arches[i] != 0; i++)
{
r = seccomp_arch_add (seccomp, extra_arches[i]);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter"));
}
}
}
}
/* TODO: Should we filter the kernel keyring syscalls in some way?
* We do want them to be used by desktop apps, but they could also perhaps
* leak system stuff or secrets from other apps.
*/
for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++)
{
int scall = syscall_blocklist[i].scall;
if (syscall_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (EPERM), scall, 1, *syscall_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (EPERM), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
if (!devel)
{
for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++)
{
int scall = syscall_nondevel_blocklist[i].scall;
if (syscall_nondevel_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (EPERM), scall, 1, *syscall_nondevel_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (EPERM), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
}
/* Socket filtering doesn't work on e.g. i386, so ignore failures here
* However, we need to use seccomp_rule_add_exact to avoid libseccomp doing
* something else: https://github.com/seccomp/libseccomp/issues/8 */
last_allowed_family = -1;
for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++)
{
int family = socket_family_allowlist[i].family;
int disallowed;
if (socket_family_allowlist[i].flags_mask != 0 &&
(socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask)
continue;
for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++)
{
/* Blocklist the in-between valid families */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed));
}
last_allowed_family = family;
}
/* Blocklist the rest */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1));
if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error))
return FALSE;
if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf"));
lseek (seccomp_tmpf.fd, 0, SEEK_SET);
flatpak_bwrap_add_args_data_fd (bwrap,
"--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL);
return TRUE;
}
| null | null | 195,238
|
153668292473734614743830700124675831253
| 240
|
run: Add an errno value to seccomp filters
At the moment, if we block a syscall we always make it fail with EPERM,
but this is risky: user-space libraries can start to use new replacements
for old syscalls at any time, and will often treat EPERM as a fatal error.
For new syscalls, we should make the syscall fail with ENOSYS, which is
indistinguishable from running on an older kernel and will cause fallback
to an older implementation, for example clone3() to clone().
In future we should probably move from EPERM to ENOSYS for some of the
syscalls we already block, but for now keep the status quo.
This is a prerequisite for fixing the vulnerability tracked as
GHSA-67h7-w3jq-vh4q.
Signed-off-by: Simon McVittie <[email protected]>
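A minimal sketch of where this leads, mirroring the errnum-bearing table the other flatpak record in this dump already shows: each blocklist entry gains an errno, so a newly blocked syscall such as clone3() can fail with ENOSYS (looking like an old kernel) while legacy entries keep EPERM. SCMP_SYS (clone3) assumes new enough libseccomp/kernel headers.
/* Hedged sketch of the extended table shape and rule setup: */
struct { int scall; int errnum; struct scmp_arg_cmp *arg; } blocklist[] = {
  {SCMP_SYS (syslog), EPERM},   /* existing entries keep the old errno */
  {SCMP_SYS (clone3), ENOSYS},  /* new syscalls fall back like an old kernel */
};
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (blocklist[i].errnum),
                      blocklist[i].scall, 0);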
|
other
|
tensorflow
|
1b54cadd19391b60b6fcccd8d076426f7221d5e8
| 1
|
void Compute(OpKernelContext *ctx) override {
const Tensor *indices_t, *values_t, *shape_t, *dense_t;
OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t));
OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t));
// Validations.
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()),
errors::InvalidArgument(
"Input sp_indices should be a matrix but received shape: ",
indices_t->shape().DebugString()));
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(values_t->shape()) &&
TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument(
"Inputs sp_values and sp_shape should be vectors "
"but received shapes: ",
values_t->shape().DebugString(), " and ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, values_t->dim_size(0) == indices_t->dim_size(0),
errors::InvalidArgument(
"The first dimension of values and indices should match. (",
values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")"));
const auto indices_mat = indices_t->matrix<int64_t>();
const auto shape_vec = shape_t->vec<int64_t>();
const auto lhs_dims = BCast::FromShape(TensorShape(shape_vec));
const auto rhs_dims = BCast::FromShape(dense_t->shape());
BCast b(lhs_dims, rhs_dims, false); // false for keeping the same num dims.
// True iff (size(lhs) >= size(rhs)) and all dims in lhs is greater or equal
// to dims in rhs (from right to left).
auto VecGreaterEq = [](ArraySlice<int64_t> lhs, ArraySlice<int64_t> rhs) {
if (lhs.size() < rhs.size()) return false;
for (size_t i = 0; i < rhs.size(); ++i) {
if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false;
}
return true;
};
OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(),
errors::InvalidArgument(
"SparseDenseBinaryOpShared broadcasts dense to sparse "
"only; got incompatible shapes: [",
absl::StrJoin(lhs_dims, ","), "] vs. [",
absl::StrJoin(rhs_dims, ","), "]"));
Tensor *output_values = nullptr;
Tensor dense_gathered;
const int64_t nnz = indices_t->dim_size(0);
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({nnz}), &output_values));
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}),
&dense_gathered));
bool op_is_div = false;
if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) {
op_is_div = true;
}
// Pulls relevant entries from the dense side, with reshape and broadcasting
// *of the dense side* taken into account. Use a TensorRef to avoid blowing
// up memory.
//
// We can directly use the sparse indices to look up dense side, because
// "b.y_reshape()" and "b.y_bcast()" are guaranteed to have rank "ndims".
auto dense_gathered_flat = dense_gathered.flat<T>();
const int ndims = lhs_dims.size();
switch (ndims) {
#define CASE(NDIM) \
case NDIM: { \
TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \
dense_t->shaped<T, NDIM>(b.y_reshape()) \
.broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \
Eigen::array<Eigen::DenseIndex, NDIM> idx; \
bool indices_valid = true; \
for (int i = 0; i < nnz; ++i) { \
for (int d = 0; d < NDIM; ++d) { \
idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \
if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \
indices_valid = false; \
} \
} \
OP_REQUIRES( \
ctx, indices_valid, \
errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \
"dense side with broadcasted shape")); \
dense_gathered_flat(i) = rhs_ref.coeff(idx); \
if (op_is_div) { \
OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \
errors::InvalidArgument( \
"SparseDenseCwiseDiv cannot divide by zero," \
"but input dense tensor contains zero ")); \
} \
} \
break; \
}
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(5);
default:
OP_REQUIRES(
ctx, false,
errors::InvalidArgument("Only tensors with ranks between 1 and 5 "
"are currently supported. Tensor rank: ",
ndims));
#undef CASE
}
output_values->flat<T>().device(ctx->eigen_device<Device>()) =
values_t->flat<T>().binaryExpr(dense_gathered_flat,
typename Functor::func());
}
| null | null | 195,242
|
149037414387774871757110019685380440226
| 116
|
Add missing validation to sparse dense cwise ops.
PiperOrigin-RevId: 415543133
Change-Id: I5baf3284e919338afb96178c468ad3d3cb0d956c
|
other
|
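The validation added in the record above amounts to checking every sparse coordinate against the broadcasted dense extent before it is used to address memory. A self-contained sketch of that bounds-check pattern, with plain C++ stand-ins for the record's FastBoundsCheck call; all names here are illustrative:

#include <cstdint>
#include <vector>

// An index is usable only if 0 <= index[d] < dense_dims[d] for every
// dimension d, the same test FastBoundsCheck performs per coordinate.
bool AllIndicesInBounds(const std::vector<std::vector<int64_t>>& indices,
                        const std::vector<int64_t>& dense_dims) {
  for (const auto& index : indices) {
    for (size_t d = 0; d < dense_dims.size(); ++d) {
      if (index[d] < 0 || index[d] >= dense_dims[d])
        return false;  // out-of-bounds w.r.t. the dense side
    }
  }
  return true;
}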
gpac
|
cf6771c857eb9a290e2c19ddacfdd3ed98b27618
| 1
|
static s32 avc_parse_slice(GF_BitStream *bs, AVCState *avc, Bool svc_idr_flag, AVCSliceInfo *si)
{
s32 pps_id, num_ref_idx_l0_active_minus1 = 0, num_ref_idx_l1_active_minus1 = 0;
/*s->current_picture.reference= h->nal_ref_idc != 0;*/
gf_bs_read_ue_log(bs, "first_mb_in_slice");
si->slice_type = gf_bs_read_ue_log(bs, "slice_type");
if (si->slice_type > 9) return -1;
pps_id = gf_bs_read_ue_log(bs, "pps_id");
if (pps_id > 255) return -1;
si->pps = &avc->pps[pps_id];
if (!si->pps->slice_group_count) return -2;
si->sps = &avc->sps[si->pps->sps_id];
if (!si->sps->log2_max_frame_num) return -2;
avc->sps_active_idx = si->pps->sps_id;
avc->pps_active_idx = pps_id;
si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num");
si->field_pic_flag = 0;
si->bottom_field_flag = 0;
if (!si->sps->frame_mbs_only_flag) {
si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag");
if (si->field_pic_flag)
si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag");
}
if ((si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) || svc_idr_flag)
si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id");
if (si->sps->poc_type == 0) {
si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb");
if (si->pps->pic_order_present && !si->field_pic_flag) {
si->delta_poc_bottom = gf_bs_read_se_log(bs, "poc_lsb");
}
}
else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) {
si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0");
if ((si->pps->pic_order_present == 1) && !si->field_pic_flag)
si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1");
}
if (si->pps->redundant_pic_cnt_present) {
si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt");
}
if (si->slice_type % 5 == GF_AVC_TYPE_B) {
gf_bs_read_int_log(bs, 1, "direct_spatial_mv_pred_flag");
}
num_ref_idx_l0_active_minus1 = si->pps->num_ref_idx_l0_default_active_minus1;
num_ref_idx_l1_active_minus1 = si->pps->num_ref_idx_l1_default_active_minus1;
if (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_B) {
Bool num_ref_idx_active_override_flag = gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag");
if (num_ref_idx_active_override_flag) {
num_ref_idx_l0_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_active_minus1");
if (si->slice_type % 5 == GF_AVC_TYPE_B) {
num_ref_idx_l1_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_active_minus1");
}
}
}
if (si->nal_unit_type == 20 || si->nal_unit_type == 21) {
//ref_pic_list_mvc_modification(); /* specified in Annex H */
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] unimplemented ref_pic_list_mvc_modification() in slice header\n"));
assert(0);
return -1;
}
else {
ref_pic_list_modification(bs, si->slice_type);
}
if ((si->pps->weighted_pred_flag && (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP))
|| (si->pps->weighted_bipred_idc == 1 && si->slice_type % 5 == GF_AVC_TYPE_B)) {
pred_weight_table(bs, si->slice_type, si->sps->ChromaArrayType, num_ref_idx_l0_active_minus1, num_ref_idx_l1_active_minus1);
}
if (si->nal_ref_idc != 0) {
dec_ref_pic_marking(bs, (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE));
}
if (si->pps->entropy_coding_mode_flag && si->slice_type % 5 != GF_AVC_TYPE_I && si->slice_type % 5 != GF_AVC_TYPE_SI) {
gf_bs_read_ue_log(bs, "cabac_init_idc");
}
/*slice_qp_delta = */gf_bs_read_se(bs);
if (si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_SI) {
if (si->slice_type % 5 == GF_AVC_TYPE_SP) {
gf_bs_read_int_log(bs, 1, "sp_for_switch_flag");
}
gf_bs_read_se_log(bs, "slice_qs_delta");
}
if (si->pps->deblocking_filter_control_present_flag) {
if (gf_bs_read_ue_log(bs, "disable_deblocking_filter_idc") != 1) {
gf_bs_read_se_log(bs, "slice_alpha_c0_offset_div2");
gf_bs_read_se_log(bs, "slice_beta_offset_div2");
}
}
if (si->pps->slice_group_count > 1 && si->pps->mb_slice_group_map_type >= 3 && si->pps->mb_slice_group_map_type <= 5) {
gf_bs_read_int_log(bs, (u32)ceil(log1p((si->pps->pic_size_in_map_units_minus1 + 1) / (si->pps->slice_group_change_rate_minus1 + 1) ) / log(2)), "slice_group_change_cycle");
}
return 0;
}
| null | null | 195,246
|
37130545727332639316837539685864368945
| 107
|
fixed #1898
|
other
|
tensorflow
|
955059813cc325dc1db5e2daa6221271406d4439
| 1
|
Node* Graph::AddNode(NodeDef node_def, Status* status) {
const OpRegistrationData* op_reg_data;
status->Update(ops_.LookUp(node_def.op(), &op_reg_data));
if (!status->ok()) return nullptr;
DataTypeVector inputs;
DataTypeVector outputs;
status->Update(
InOutTypesForNode(node_def, op_reg_data->op_def, &inputs, &outputs));
if (!status->ok()) {
*status = AttachDef(*status, node_def);
return nullptr;
}
Node::NodeClass node_class = op_reg_data->is_function_op
? Node::NC_FUNCTION_OP
: Node::GetNodeClassForOp(node_def.op());
if (op_reg_data->type_ctor != nullptr) {
VLOG(3) << "AddNode: found type constructor for " << node_def.name();
const auto ctor_type =
full_type::SpecializeType(AttrSlice(node_def), op_reg_data->op_def);
const FullTypeDef ctor_typedef = ctor_type.ValueOrDie();
if (ctor_typedef.type_id() != TFT_UNSET) {
*(node_def.mutable_experimental_type()) = ctor_typedef;
}
} else {
VLOG(3) << "AddNode: no type constructor for " << node_def.name();
}
Node* node = AllocateNode(std::make_shared<NodeProperties>(
&op_reg_data->op_def, std::move(node_def),
inputs, outputs, op_reg_data->fwd_type_fn),
nullptr, node_class);
return node;
}
| null | null | 195,261
|
96876716692604842973877099610961161682
| 36
|
Check for type inference error on node construction.
PiperOrigin-RevId: 409415804
Change-Id: Ieb6e020906b96f522bf8e2fa103715ddbbdc434a
|
other
|
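The AddNode record above takes ctor_type.ValueOrDie() without first inspecting the status, which is exactly the unchecked path the commit message targets. A hedged sketch of guarding the StatusOr before use; it slots into the body shown above, and whether the real fix reads exactly like this is an assumption:

  const auto ctor_type =
      full_type::SpecializeType(AttrSlice(node_def), op_reg_data->op_def);
  if (!ctor_type.ok()) {
    // Surface the type-inference error instead of crashing in ValueOrDie().
    *status = ctor_type.status();
    return nullptr;
  }
  const FullTypeDef& ctor_typedef = ctor_type.ValueOrDie();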
pcre2
|
d4fa336fbcc388f89095b184ba6d99422cfc676c
| 1
|
static void compile_xclass_matchingpath(compiler_common *common, PCRE2_SPTR cc, jump_list **backtracks)
{
DEFINE_COMPILER;
jump_list *found = NULL;
jump_list **list = (cc[0] & XCL_NOT) == 0 ? &found : backtracks;
sljit_uw c, charoffset, max = 256, min = READ_CHAR_MAX;
struct sljit_jump *jump = NULL;
PCRE2_SPTR ccbegin;
int compares, invertcmp, numberofcmps;
#if defined SUPPORT_UNICODE && (PCRE2_CODE_UNIT_WIDTH == 8 || PCRE2_CODE_UNIT_WIDTH == 16)
BOOL utf = common->utf;
#endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == [8|16] */
#ifdef SUPPORT_UNICODE
sljit_u32 unicode_status = 0;
int typereg = TMP1;
const sljit_u32 *other_cases;
sljit_uw typeoffset;
#endif /* SUPPORT_UNICODE */
/* Scanning the necessary info. */
cc++;
ccbegin = cc;
compares = 0;
if (cc[-1] & XCL_MAP)
{
min = 0;
cc += 32 / sizeof(PCRE2_UCHAR);
}
while (*cc != XCL_END)
{
compares++;
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
if (c > max) max = c;
if (c < min) min = c;
#ifdef SUPPORT_UNICODE
unicode_status |= XCLASS_SAVE_CHAR;
#endif /* SUPPORT_UNICODE */
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
if (c < min) min = c;
GETCHARINCTEST(c, cc);
if (c > max) max = c;
#ifdef SUPPORT_UNICODE
unicode_status |= XCLASS_SAVE_CHAR;
#endif /* SUPPORT_UNICODE */
}
#ifdef SUPPORT_UNICODE
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_CLIST && *cc == XCL_PROP)
{
other_cases = PRIV(ucd_caseless_sets) + cc[1];
while (*other_cases != NOTACHAR)
{
if (*other_cases > max) max = *other_cases;
if (*other_cases < min) min = *other_cases;
other_cases++;
}
}
else
{
max = READ_CHAR_MAX;
min = 0;
}
switch(*cc)
{
case PT_ANY:
      /* PT_ANY either accepts everything or is ignored. */
if (cc[-1] == XCL_PROP)
{
compile_char1_matchingpath(common, OP_ALLANY, cc, backtracks, FALSE);
if (list == backtracks)
add_jump(compiler, backtracks, JUMP(SLJIT_JUMP));
return;
}
break;
case PT_LAMP:
case PT_GC:
case PT_PC:
case PT_ALNUM:
unicode_status |= XCLASS_HAS_TYPE;
break;
case PT_SCX:
unicode_status |= XCLASS_HAS_SCRIPT_EXTENSION;
if (cc[-1] == XCL_NOTPROP)
{
unicode_status |= XCLASS_SCRIPT_EXTENSION_NOTPROP;
break;
}
compares++;
/* Fall through */
case PT_SC:
unicode_status |= XCLASS_HAS_SCRIPT;
break;
case PT_SPACE:
case PT_PXSPACE:
case PT_WORD:
case PT_PXGRAPH:
case PT_PXPRINT:
case PT_PXPUNCT:
unicode_status |= XCLASS_SAVE_CHAR | XCLASS_HAS_TYPE;
break;
case PT_CLIST:
case PT_UCNC:
unicode_status |= XCLASS_SAVE_CHAR;
break;
case PT_BOOL:
unicode_status |= XCLASS_HAS_BOOL;
break;
case PT_BIDICL:
unicode_status |= XCLASS_HAS_BIDICL;
break;
default:
SLJIT_UNREACHABLE();
break;
}
cc += 2;
}
#endif /* SUPPORT_UNICODE */
}
SLJIT_ASSERT(compares > 0);
/* We are not necessarily in utf mode even in 8 bit mode. */
cc = ccbegin;
if ((cc[-1] & XCL_NOT) != 0)
read_char(common, min, max, backtracks, READ_CHAR_UPDATE_STR_PTR);
else
{
#ifdef SUPPORT_UNICODE
read_char(common, min, max, (unicode_status & XCLASS_NEEDS_UCD) ? backtracks : NULL, 0);
#else /* !SUPPORT_UNICODE */
read_char(common, min, max, NULL, 0);
#endif /* SUPPORT_UNICODE */
}
if ((cc[-1] & XCL_HASPROP) == 0)
{
if ((cc[-1] & XCL_MAP) != 0)
{
jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255);
if (!optimize_class(common, (const sljit_u8 *)cc, (((const sljit_u8 *)cc)[31] & 0x80) != 0, TRUE, &found))
{
OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7);
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0);
add_jump(compiler, &found, JUMP(SLJIT_NOT_ZERO));
}
add_jump(compiler, backtracks, JUMP(SLJIT_JUMP));
JUMPHERE(jump);
cc += 32 / sizeof(PCRE2_UCHAR);
}
else
{
OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, min);
add_jump(compiler, (cc[-1] & XCL_NOT) == 0 ? backtracks : &found, CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, max - min));
}
}
else if ((cc[-1] & XCL_MAP) != 0)
{
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0);
#ifdef SUPPORT_UNICODE
unicode_status |= XCLASS_CHAR_SAVED;
#endif /* SUPPORT_UNICODE */
if (!optimize_class(common, (const sljit_u8 *)cc, FALSE, TRUE, list))
{
#if PCRE2_CODE_UNIT_WIDTH == 8
jump = NULL;
if (common->utf)
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 */
jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255);
OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7);
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0);
add_jump(compiler, list, JUMP(SLJIT_NOT_ZERO));
#if PCRE2_CODE_UNIT_WIDTH == 8
if (common->utf)
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 */
JUMPHERE(jump);
}
OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0);
cc += 32 / sizeof(PCRE2_UCHAR);
}
#ifdef SUPPORT_UNICODE
if (unicode_status & XCLASS_NEEDS_UCD)
{
if ((unicode_status & (XCLASS_SAVE_CHAR | XCLASS_CHAR_SAVED)) == XCLASS_SAVE_CHAR)
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0);
#if PCRE2_CODE_UNIT_WIDTH == 32
if (!common->utf)
{
jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, MAX_UTF_CODE_POINT + 1);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, UNASSIGNED_UTF_CHAR);
JUMPHERE(jump);
}
#endif /* PCRE2_CODE_UNIT_WIDTH == 32 */
OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 1);
OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCD_BLOCK_SHIFT);
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0);
OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_stage2));
OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM2(TMP2, TMP1), 1);
OP2(SLJIT_SHL, TMP1, 0, TMP2, 0, SLJIT_IMM, 3);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 2);
OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0);
ccbegin = cc;
if (unicode_status & XCLASS_HAS_BIDICL)
{
OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass));
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BIDICLASS_SHIFT);
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_BIDICL)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
if (cc[-1] == XCL_NOTPROP)
invertcmp ^= 0x1;
jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]);
add_jump(compiler, compares > 0 ? list : backtracks, jump);
}
cc += 2;
}
}
cc = ccbegin;
}
if (unicode_status & XCLASS_HAS_BOOL)
{
OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, bprops));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BPROPS_MASK);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2);
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_BOOL)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
if (cc[-1] == XCL_NOTPROP)
invertcmp ^= 0x1;
OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_boolprop_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f));
add_jump(compiler, compares > 0 ? list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp));
}
cc += 2;
}
}
cc = ccbegin;
}
if (unicode_status & XCLASS_HAS_SCRIPT)
{
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script));
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
switch (*cc)
{
case PT_SCX:
if (cc[-1] == XCL_NOTPROP)
break;
/* Fall through */
case PT_SC:
compares--;
invertcmp = (compares == 0 && list != backtracks);
if (cc[-1] == XCL_NOTPROP)
invertcmp ^= 0x1;
add_jump(compiler, compares > 0 ? list : backtracks, CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]));
}
cc += 2;
}
}
cc = ccbegin;
}
if (unicode_status & XCLASS_HAS_SCRIPT_EXTENSION)
{
OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_SCRIPTX_MASK);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2);
if (unicode_status & XCLASS_SCRIPT_EXTENSION_NOTPROP)
{
if (unicode_status & XCLASS_HAS_TYPE)
{
if (unicode_status & XCLASS_SAVE_CHAR)
{
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, TMP2, 0);
unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0;
}
else
{
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP2, 0);
unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR;
}
}
OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script));
}
while (*cc != XCL_END)
{
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
GETCHARINCTEST(c, cc);
}
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
cc++;
if (*cc == PT_SCX)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
jump = NULL;
if (cc[-1] == XCL_NOTPROP)
{
jump = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, (int)cc[1]);
if (invertcmp)
{
add_jump(compiler, backtracks, jump);
jump = NULL;
}
invertcmp ^= 0x1;
}
OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_script_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f));
add_jump(compiler, compares > 0 ? list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp));
if (jump != NULL)
JUMPHERE(jump);
}
cc += 2;
}
}
if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0)
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
else if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR)
OP1(SLJIT_MOV, TMP2, 0, RETURN_ADDR, 0);
cc = ccbegin;
}
if (unicode_status & XCLASS_SAVE_CHAR)
OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0);
if (unicode_status & XCLASS_HAS_TYPE)
{
if (unicode_status & XCLASS_SAVE_CHAR)
typereg = RETURN_ADDR;
OP1(SLJIT_MOV_U8, typereg, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, chartype));
}
}
#endif /* SUPPORT_UNICODE */
/* Generating code. */
charoffset = 0;
numberofcmps = 0;
#ifdef SUPPORT_UNICODE
typeoffset = 0;
#endif /* SUPPORT_UNICODE */
while (*cc != XCL_END)
{
compares--;
invertcmp = (compares == 0 && list != backtracks);
jump = NULL;
if (*cc == XCL_SINGLE)
{
cc ++;
GETCHARINCTEST(c, cc);
if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE))
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
numberofcmps++;
}
else if (numberofcmps > 0)
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
numberofcmps = 0;
}
else
{
jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
numberofcmps = 0;
}
}
else if (*cc == XCL_RANGE)
{
cc ++;
GETCHARINCTEST(c, cc);
SET_CHAR_OFFSET(c);
GETCHARINCTEST(c, cc);
if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE))
{
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
numberofcmps++;
}
else if (numberofcmps > 0)
{
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
numberofcmps = 0;
}
else
{
jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
numberofcmps = 0;
}
}
#ifdef SUPPORT_UNICODE
else
{
SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
if (*cc == XCL_NOTPROP)
invertcmp ^= 0x1;
cc++;
switch(*cc)
{
case PT_ANY:
if (!invertcmp)
jump = JUMP(SLJIT_JUMP);
break;
case PT_LAMP:
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_GC:
c = PRIV(ucp_typerange)[(int)cc[1] * 2];
SET_TYPE_OFFSET(c);
jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, PRIV(ucp_typerange)[(int)cc[1] * 2 + 1] - c);
break;
case PT_PC:
jump = CMP(SLJIT_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, (int)cc[1] - typeoffset);
break;
case PT_SC:
case PT_SCX:
case PT_BOOL:
case PT_BIDICL:
compares++;
/* Do nothing. */
break;
case PT_SPACE:
case PT_PXSPACE:
SET_CHAR_OFFSET(9);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0xd - 0x9);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x85 - 0x9);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x9);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
SET_TYPE_OFFSET(ucp_Zl);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_WORD:
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_UNDERSCORE - charoffset));
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
/* Fall through. */
case PT_ALNUM:
SET_TYPE_OFFSET(ucp_Ll);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll);
OP_FLAGS((*cc == PT_ALNUM) ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
SET_TYPE_OFFSET(ucp_Nd);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_CLIST:
other_cases = PRIV(ucd_caseless_sets) + cc[1];
/* At least three characters are required.
Otherwise this case would be handled by the normal code path. */
SLJIT_ASSERT(other_cases[0] != NOTACHAR && other_cases[1] != NOTACHAR && other_cases[2] != NOTACHAR);
SLJIT_ASSERT(other_cases[0] < other_cases[1] && other_cases[1] < other_cases[2]);
/* Optimizing character pairs, if their difference is power of 2. */
if (is_powerof2(other_cases[1] ^ other_cases[0]))
{
if (charoffset == 0)
OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
else
{
OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset);
OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
}
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[1]);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
other_cases += 2;
}
else if (is_powerof2(other_cases[2] ^ other_cases[1]))
{
if (charoffset == 0)
OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[2] ^ other_cases[1]);
else
{
OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset);
OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
}
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[2]);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(other_cases[0] - charoffset));
OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL);
other_cases += 3;
}
else
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
}
while (*other_cases != NOTACHAR)
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL);
}
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_UCNC:
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_DOLLAR_SIGN - charoffset));
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_COMMERCIAL_AT - charoffset));
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_GRAVE_ACCENT - charoffset));
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
SET_CHAR_OFFSET(0xa0);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(0xd7ff - charoffset));
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
SET_CHAR_OFFSET(0);
OP2U(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0xe000 - 0);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_GREATER_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_PXGRAPH:
/* C and Z groups are the farthest two groups. */
SET_TYPE_OFFSET(ucp_Ll);
OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER);
jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll);
/* In case of ucp_Cf, we overwrite the result. */
SET_CHAR_OFFSET(0x2066);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
JUMPHERE(jump);
jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0);
break;
case PT_PXPRINT:
/* C and Z groups are the farthest two groups. */
SET_TYPE_OFFSET(ucp_Ll);
OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER);
OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll);
OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_NOT_EQUAL);
jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll);
/* In case of ucp_Cf, we overwrite the result. */
SET_CHAR_OFFSET(0x2066);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
JUMPHERE(jump);
jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0);
break;
case PT_PXPUNCT:
SET_TYPE_OFFSET(ucp_Sc);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc);
OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
SET_CHAR_OFFSET(0);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x7f);
OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_LESS_EQUAL);
SET_TYPE_OFFSET(ucp_Pc);
OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc);
OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
default:
SLJIT_UNREACHABLE();
break;
}
cc += 2;
}
#endif /* SUPPORT_UNICODE */
if (jump != NULL)
add_jump(compiler, compares > 0 ? list : backtracks, jump);
}
if (found != NULL)
set_jumps(found, LABEL());
}
| null | null | 195,264
|
308941058134977243475634714405852587736
| 734
|
Fix incorrect value reading in JIT.
|
other
|
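In the scanning loop of the pcre2 record above, the PT_CLIST branch reads `if (*cc == PT_CLIST && *cc == XCL_PROP)`, testing the same code unit against two different constants, so the condition can never hold as intended. A plausible reading of "incorrect value reading", offered only as a hedged sketch, is that the second operand should be the preceding code unit:

/* Sketch: the property type is at *cc, but XCL_PROP/XCL_NOTPROP was the
 * previous code unit, so it must be read as cc[-1]. */
if (*cc == PT_CLIST && cc[-1] == XCL_PROP)
  {
  other_cases = PRIV(ucd_caseless_sets) + cc[1];
  /* ... widen min/max over the caseless set as in the record ... */
  }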
tensorflow
|
0a365c029e437be0349c31f8d4c9926b69fa3fa1
| 1
|
bool ConstantFolding::MulConvPushDown(GraphDef* optimized_graph, NodeDef* node,
const GraphProperties& properties) {
// Push down multiplication on ConvND.
// * ConvND
// / \ / \
// ConvND C2 -- > X *
// / \ / \
// X C1 C1 C2
//
// where C1 and C2 are constants and X is non-constant.
//
// TODO(rmlarsen): Use PrepareConstantPushDown() to simplify this code.
if (!IsAnyMul(*node) || NumNonControlInputs(*node) != 2) return false;
NodeDef* mul_left_child = node_map_->GetNode(node->input(0));
NodeDef* mul_right_child = node_map_->GetNode(node->input(1));
// One child must be constant, and the second must be Conv op.
const bool left_child_is_constant = IsReallyConstant(*mul_left_child);
const bool right_child_is_constant = IsReallyConstant(*mul_right_child);
if (!left_child_is_constant && !right_child_is_constant) {
return false;
}
NodeDef* conv_node =
left_child_is_constant ? mul_right_child : mul_left_child;
if (!IsConv2D(*conv_node) && !IsConv3D(*conv_node)) {
return false;
}
if (node->device() != mul_left_child->device() ||
node->device() != mul_right_child->device()) {
return false;
}
// Make sure that it is safe to change the value of the convolution
// output.
if (conv_node->input_size() < 2 ||
NumNonControlOutputs(*conv_node, *node_map_) > 1 ||
nodes_to_preserve_.find(conv_node->name()) != nodes_to_preserve_.end()) {
return false;
}
// Identify the nodes to swap.
NodeDef* conv_left_child = node_map_->GetNode(conv_node->input(0));
NodeDef* conv_right_child = node_map_->GetNode(conv_node->input(1));
const bool conv_left_is_constant = IsReallyConstant(*conv_left_child);
const bool conv_right_is_constant = IsReallyConstant(*conv_right_child);
if (!conv_left_is_constant && !conv_right_is_constant) {
// At least one of the convolution inputs should be constant.
return false;
}
if (conv_left_is_constant && conv_right_is_constant) {
// Leverage regular constant folding to handle this.
return false;
}
const auto& mul_props = properties.GetOutputProperties(node->name());
const auto& conv_props = properties.GetOutputProperties(conv_node->name());
if (mul_props.empty() || conv_props.empty()) {
return false;
}
const auto& mul_shape = mul_props[0].shape();
const auto& conv_shape = conv_props[0].shape();
if (!ShapesSymbolicallyEqual(mul_shape, conv_shape)) {
return false;
}
const auto& input_props = properties.GetInputProperties(conv_node->name());
if (input_props.size() < 2) {
return false;
}
const auto& filter_shape = input_props[1].shape();
NodeDef* const_node =
left_child_is_constant ? mul_left_child : mul_right_child;
const auto& const_props = properties.GetOutputProperties(const_node->name());
if (const_props.empty()) {
return false;
}
const auto& const_shape = const_props[0].shape();
if (!IsValidConstShapeForMulConvPushDown(
conv_node->attr().at("data_format").s(), filter_shape, const_shape)) {
return false;
}
string mul_new_name = AddPrefixToNodeName("merged_input", conv_node->name());
if (node_map_->NodeExists(mul_new_name)) {
return false;
}
// Make sure we don't introduce loops in the graph by removing control
// dependencies from the conv2d node to c2.
string conv_const_input =
conv_left_is_constant ? conv_node->input(0) : conv_node->input(1);
if (MaybeRemoveControlInput(conv_node->name(), const_node, optimized_graph,
node_map_.get())) {
// Add a control dep from c1 to c2 to ensure c2 is in the right frame
MaybeAddControlInput(conv_const_input, const_node, optimized_graph,
node_map_.get());
}
conv_node->set_name(node->name());
node->set_name(mul_new_name);
if (conv_left_is_constant) {
node_map_->UpdateInput(conv_node->name(), node->input(0), mul_new_name);
conv_node->set_input(0, mul_new_name);
} else {
node_map_->UpdateInput(conv_node->name(), node->input(1), mul_new_name);
conv_node->set_input(1, mul_new_name);
}
NodeDef* conv_const_node =
conv_left_is_constant ? conv_left_child : conv_right_child;
if (left_child_is_constant) {
node->set_input(1, conv_const_node->name());
} else {
node->set_input(0, conv_const_node->name());
}
node_map_->AddNode(mul_new_name, node);
return true;
}
| null | null | 195,274
|
4285713778775675854453316748837747586
| 118
|
Prevent null pointer dereference in constant folding.
Under certain conditions, an invalid saved-model protobuf containing malformed nodes could be loaded. During the optimization phase, the Grappler optimizer would then dereference a null pointer.
PiperOrigin-RevId: 409683530
Change-Id: I1f10340a7ec384bc9bc587300390f1078cf5caa0
|
other
|
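The MulConvPushDown record above dereferences the results of node_map_->GetNode() for both multiplication children with no null check, matching the null-pointer dereference the message describes for invalid graphs. A minimal hedged sketch of the guard (that the real fix checks exactly these two pointers is an assumption):

  // A malformed GraphDef can name inputs that resolve to no node, so
  // GetNode() may return nullptr; bail out instead of dereferencing.
  NodeDef* mul_left_child = node_map_->GetNode(node->input(0));
  NodeDef* mul_right_child = node_map_->GetNode(node->input(1));
  if (mul_left_child == nullptr || mul_right_child == nullptr) {
    return false;
  }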
tensorflow
|
adbbabdb0d3abb3cdeac69e38a96de1d678b24b3
| 1
|
void Compute(OpKernelContext* context) override {
const Tensor& indices = context->input(0);
const Tensor& values = context->input(1);
const Tensor& shape = context->input(2);
const Tensor& weights = context->input(3);
bool use_weights = weights.NumElements() > 0;
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(indices.shape()),
errors::InvalidArgument(
"Input indices must be a 2-dimensional tensor. Got: ",
indices.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(values.shape()),
errors::InvalidArgument("Input values must be a vector. Got: ",
values.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(shape.shape()),
errors::InvalidArgument("Input shape must be a vector. Got: ",
shape.shape().DebugString()));
OP_REQUIRES(context,
values.shape().dim_size(0) == indices.shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices.",
"Got ", values.shape().dim_size(0),
" values, indices shape: ", indices.shape().DebugString()));
OP_REQUIRES(
context, shape.shape().dim_size(0) == indices.shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices.",
"Got ", shape.shape().dim_size(0),
" dimensions, indices shape: ", indices.shape().DebugString()));
OP_REQUIRES(context, shape.NumElements() > 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
if (use_weights) {
OP_REQUIRES(
context, weights.shape() == values.shape(),
errors::InvalidArgument(
"Weights and values must have the same shape. Weight shape: ",
weights.shape().DebugString(),
"; values shape: ", values.shape().DebugString()));
}
bool is_1d = shape.NumElements() == 1;
auto shape_vector = shape.flat<int64_t>();
int num_batches = is_1d ? 1 : shape_vector(0);
int num_values = values.NumElements();
const auto indices_values = indices.matrix<int64_t>();
const auto values_values = values.flat<T>();
const auto weight_values = weights.flat<W>();
auto per_batch_counts = BatchedMap<W>(num_batches);
T max_value = 0;
for (int idx = 0; idx < num_values; ++idx) {
int batch = is_1d ? 0 : indices_values(idx, 0);
if (batch >= num_batches) {
OP_REQUIRES(context, batch < num_batches,
errors::InvalidArgument(
"Indices value along the first dimension must be ",
"lower than the first index of the shape.", "Got ",
batch, " as batch and ", num_batches,
" as the first dimension of the shape."));
}
const auto& value = values_values(idx);
if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) {
if (binary_output_) {
per_batch_counts[batch][value] = 1;
} else if (use_weights) {
per_batch_counts[batch][value] += weight_values(idx);
} else {
per_batch_counts[batch][value]++;
}
if (value > max_value) {
max_value = value;
}
}
}
int num_output_values = GetOutputSize(max_value, maxlength_, minlength_);
OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values,
is_1d, context));
}
| null | null | 195,289
|
240895495183406743805424775757256484081
| 84
|
Further validate sparse tensor for `SparseCount`: indices must be valid within dense shape.
PiperOrigin-RevId: 414888122
Change-Id: I4552bd74c135ecd4bcb5448acc0a3ce9402d8286
|
other
|
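The record above range-checks only the batch coordinate of each sparse index (and not for negative values), while the message calls for indices to be valid within the whole dense shape. A hedged sketch of the fuller per-coordinate check, written to slot into the Compute body above; its exact placement in the real fix is an assumption:

    // Every coordinate of every sparse index must lie in [0, shape[dim]).
    for (int idx = 0; idx < num_values; ++idx) {
      for (int dim = 0; dim < indices.shape().dim_size(1); ++dim) {
        const int64_t coord = indices_values(idx, dim);
        OP_REQUIRES(context, coord >= 0 && coord < shape_vector(dim),
                    errors::InvalidArgument(
                        "Index coordinate ", coord, " at position (", idx,
                        ", ", dim, ") is outside the dense shape bound ",
                        shape_vector(dim)));
      }
    }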
tensorflow
|
ef1d027be116f25e25bb94a60da491c2cf55bd0b
| 1
|
void Compute(OpKernelContext* context) override {
const Tensor& rhs = context->input(1);
// We always return the input ref.
context->forward_ref_input_to_ref_output(0, 0);
// We can't always know how this value will be used downstream, so make
// conservative assumptions in specifying constraints on the memory
// allocation attributes, unless the Grappler graph analysis determined that
// it was safe not to.
AllocatorAttributes attr;
if (!relax_constraints_) {
attr.set_gpu_compatible(true);
attr.set_nic_compatible(true);
}
{
mutex_lock l(*context->input_ref_mutex(0));
const Tensor& old_lhs = context->mutable_input(0, /* lock_held */ true);
const bool same_shape = old_lhs.shape().IsSameSize(rhs.shape());
if (validate_shape_) {
OP_REQUIRES(context, same_shape,
errors::InvalidArgument(
"Assign requires shapes of both tensors to match. "
"lhs shape= ",
old_lhs.shape().DebugString(),
" rhs shape= ", rhs.shape().DebugString()));
}
// In the code below we try to minimize the amount of memory allocation
// and copying by trying the following two shortcuts:
// 1. If the lhs is initialized and has the same number of elements as
// the rhs we can avoid a memory allocation.
// 2. If we can reuse the rhs buffer we avoid both a memory allocation
// and copying.
// 1. Try to copy into an existing buffer.
if (old_lhs.IsInitialized() &&
old_lhs.shape().num_elements() == rhs.shape().num_elements()) {
// The existing lhs tensor has already been initialized and the right
// hand side can fit in the underlying buffer.
Tensor reshaped_old_lhs;
if (same_shape) {
reshaped_old_lhs = old_lhs;
} else {
CHECK(reshaped_old_lhs.CopyFrom(old_lhs, rhs.shape()));
context->replace_ref_input(0, reshaped_old_lhs,
/* lock_held */ true);
}
if (use_exclusive_lock_) {
Copy(context, &reshaped_old_lhs, rhs);
return;
}
} else {
// 2. Try to reuse the rhs.
std::unique_ptr<Tensor> input_alias = context->forward_input(
1, OpKernelContext::Params::kNoReservation /*output_index*/,
rhs.dtype(), rhs.shape(), DEVICE_MEMORY, attr);
if (input_alias != nullptr) {
// Update the ref to point to the new buffer.
context->replace_ref_input(0, *input_alias, /* lock_held */ true);
return;
}
// Otherwise, create a new tensor whose shape matches the
// right hand side, hand off to lhs and copy the rhs into it.
Tensor copy_tensor;
OP_REQUIRES_OK(context,
context->allocate_temp(old_lhs.dtype(), rhs.shape(),
©_tensor, attr));
// We track memory of variables in variable ops instead of in this
// assign op.
context->clear_recorded_memory();
context->replace_ref_input(0, copy_tensor, /* lock_held */ true);
if (use_exclusive_lock_) {
Copy(context, ©_tensor, rhs);
return;
}
}
}
// The tensor has already been initialized and the right hand side
// matches the left hand side's shape. We have been told to do the
// copy outside the lock.
Tensor old_unlocked_lhs = context->mutable_input(0, /* lock_held */ false);
Copy(context, &old_unlocked_lhs, rhs);
}
| null | null | 195,291
|
97167582643884347139190040205263847517
| 87
|
Prevent copying uninitialized data in `AssignOp`.
This prevents harder-to-debug undefined behaviors that cannot be traced back to the original tensor after assignments occur earlier in the graph execution. Several of these undefined behaviors are just reference bindings to null pointers, which are caught when running under ubsan/asan.
PiperOrigin-RevId: 408654780
Change-Id: Iad2ec40d43f5fd7ea016c20283356c12d5ddeab1
|
other
|
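The AssignOp record above copies from the right-hand side along several paths without confirming that it holds initialized data, which is the undefined behavior the message describes. A minimal hedged sketch of rejecting that input early; whether the real fix validates at exactly this point is an assumption:

  // Refuse to copy from an uninitialized tensor rather than propagating
  // undefined bytes through the graph.
  OP_REQUIRES(context, rhs.IsInitialized(),
              errors::FailedPrecondition(
                  "Right-hand side of Assign must be initialized"));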
mruby
|
ae3c99767a27f5c6c584162e2adc6a5d0eb2c54e
| 1
|
gen_hash(codegen_scope *s, node *tree, int val, int limit)
{
int slimit = GEN_VAL_STACK_MAX;
if (cursp() >= GEN_LIT_ARY_MAX) slimit = INT16_MAX;
int len = 0;
mrb_bool update = FALSE;
while (tree) {
if (nint(tree->car->car->car) == NODE_KW_REST_ARGS) {
if (len > 0) {
pop_n(len*2);
if (!update) {
genop_2(s, OP_HASH, cursp(), len);
}
else {
pop();
genop_2(s, OP_HASHADD, cursp(), len);
}
push();
}
codegen(s, tree->car->cdr, val);
if (len > 0 || update) {
pop(); pop();
genop_1(s, OP_HASHCAT, cursp());
push();
}
update = TRUE;
len = 0;
}
else {
codegen(s, tree->car->car, val);
codegen(s, tree->car->cdr, val);
len++;
}
tree = tree->cdr;
if (val && cursp() >= slimit) {
pop_n(len*2);
if (!update) {
genop_2(s, OP_HASH, cursp(), len);
}
else {
pop();
genop_2(s, OP_HASHADD, cursp(), len);
}
push();
update = TRUE;
len = 0;
}
}
if (update) {
if (val && len > 0) {
pop_n(len*2+1);
genop_2(s, OP_HASHADD, cursp(), len);
push();
}
return -1; /* variable length */
}
return len;
}
| null | null | 195,293
|
179799278733408012150569492196546099231
| 59
|
codegen.c: fixed a bug in hash code generation with `!val`.
|
other
|
tensorflow
|
f57315566d7094f322b784947093406c2aea0d7d
| 1
|
void Compute(OpKernelContext* ctx) override {
StagingMap<Ordered>* map = nullptr;
OP_REQUIRES_OK(ctx, GetStagingMap(ctx, def(), &map));
core::ScopedUnref scope(map);
typename StagingMap<Ordered>::OptionalTuple tuple;
const Tensor* key_tensor;
const Tensor* indices_tensor;
OpInputList values_tensor;
OP_REQUIRES_OK(ctx, ctx->input("key", &key_tensor));
OP_REQUIRES_OK(ctx, ctx->input("indices", &indices_tensor));
OP_REQUIRES_OK(ctx, ctx->input_list("values", &values_tensor));
OP_REQUIRES(ctx, key_tensor->NumElements() > 0,
errors::InvalidArgument("key must not be empty"));
// Create copy for insertion into Staging Area
Tensor key(*key_tensor);
// Create the tuple to store
for (std::size_t i = 0; i < values_tensor.size(); ++i) {
tuple.push_back(values_tensor[i]);
}
// Store the tuple in the map
OP_REQUIRES_OK(ctx, map->put(&key, indices_tensor, &tuple));
}
| null | null | 195,294
|
192365470107876975300656283112132229474
| 27
|
Add a check for Key being scalar tensor for MapStage and OrderedMapStage ops.
According to the documentation[1][2], the key must be an int64 scalar value, but this wasn't enforced, and the ops would fail with a check failure for a non-scalar key value.
[1]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/ordered-map-stage
[2]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/map-stage
PiperOrigin-RevId: 413822112
Change-Id: I9d118faf990e6361900aa32272eff486ad9f0e2e
|
other
|
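The MapStage record above only requires the key tensor to be non-empty, while the message says the key is documented as a scalar int64. A hedged sketch of the tightened check (the error text is illustrative):

  // A MapStage/OrderedMapStage key is a scalar int64; reject vectors and
  // higher-rank tensors up front instead of failing later with a CHECK.
  OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(key_tensor->shape()),
              errors::InvalidArgument(
                  "key must be an int64 scalar, got shape: ",
                  key_tensor->shape().DebugString()));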
mruby
|
c8c083cb750606b2da81582cd8e43b442bb143e6
| 1
|
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val)
{
int idx;
int type = nint(tree->car);
switch (type) {
case NODE_GVAR:
case NODE_ARG:
case NODE_LVAR:
case NODE_IVAR:
case NODE_CVAR:
case NODE_CONST:
case NODE_NIL:
case NODE_MASGN:
if (rhs) {
codegen(s, rhs, VAL);
pop();
sp = cursp();
}
break;
case NODE_COLON2:
case NODE_CALL:
case NODE_SCALL:
/* keep evaluation order */
break;
case NODE_NVAR:
codegen_error(s, "Can't assign to numbered parameter");
break;
default:
codegen_error(s, "unknown lhs");
break;
}
tree = tree->cdr;
switch (type) {
case NODE_GVAR:
gen_setxv(s, OP_SETGV, sp, nsym(tree), val);
break;
case NODE_ARG:
case NODE_LVAR:
idx = lv_idx(s, nsym(tree));
if (idx > 0) {
if (idx != sp) {
gen_move(s, idx, sp, val);
}
break;
}
else { /* upvar */
gen_setupvar(s, sp, nsym(tree));
}
break;
case NODE_IVAR:
gen_setxv(s, OP_SETIV, sp, nsym(tree), val);
break;
case NODE_CVAR:
gen_setxv(s, OP_SETCV, sp, nsym(tree), val);
break;
case NODE_CONST:
gen_setxv(s, OP_SETCONST, sp, nsym(tree), val);
break;
case NODE_COLON2:
if (sp) {
gen_move(s, cursp(), sp, 0);
}
sp = cursp();
push();
codegen(s, tree->car, VAL);
if (rhs) {
codegen(s, rhs, VAL); pop();
gen_move(s, sp, cursp(), 0);
}
pop_n(2);
idx = new_sym(s, nsym(tree->cdr));
genop_2(s, OP_SETMCNST, sp, idx);
break;
case NODE_CALL:
case NODE_SCALL:
{
int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0;
mrb_sym mid = nsym(tree->cdr->car);
top = cursp();
if (val || sp == cursp()) {
push(); /* room for retval */
}
call = cursp();
if (!tree->car) {
noself = 1;
push();
}
else {
codegen(s, tree->car, VAL); /* receiver */
}
if (safe) {
int recv = cursp()-1;
gen_move(s, cursp(), recv, 1);
skip = genjmp2_0(s, OP_JMPNIL, cursp(), val);
}
tree = tree->cdr->cdr->car;
if (tree) {
if (tree->car) { /* positional arguments */
n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14);
if (n < 0) { /* variable length */
n = 15;
push();
}
}
if (tree->cdr->car) { /* keyword arguments */
if (n == 14) {
pop_n(n);
genop_2(s, OP_ARRAY, cursp(), n);
push();
n = 15;
}
gen_hash(s, tree->cdr->car->cdr, VAL, 0);
if (n < 14) {
n++;
}
else {
pop_n(2);
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
push();
}
}
if (rhs) {
codegen(s, rhs, VAL);
pop();
}
else {
gen_move(s, cursp(), sp, 0);
}
if (val) {
gen_move(s, top, cursp(), 1);
}
if (n < 15) {
n++;
if (n == 15) {
pop_n(14);
genop_2(s, OP_ARRAY, cursp(), 15);
}
}
else {
pop();
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
s->sp = call;
if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) {
genop_1(s, OP_SETIDX, cursp());
}
else {
genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n);
}
if (safe) {
dispatch(s, skip);
}
s->sp = top;
}
break;
case NODE_MASGN:
gen_massignment(s, tree->car, sp, val);
break;
/* splat without assignment */
case NODE_NIL:
break;
default:
codegen_error(s, "unknown lhs");
break;
}
if (val) push();
}
| null | null | 195,295
|
301285559812644229995992820467941638273
| 178
|
codegen.c: need to pack argument when `n==13` too.
Because we have two extra arguments coming (kw and rhs).
|
other
|
uWebSockets
|
03fca626a95130ab80f86adada54b29d27242759
| 1
|
void publish(Topic *iterator, size_t start, size_t stop, std::string_view topic, std::pair<std::string_view, std::string_view> message) {
/* If we already have 64 triggered topics make sure to drain it here */
if (numTriggeredTopics == 64) {
drain();
}
/* Iterate over all segments in given topic */
for (; stop != std::string::npos; start = stop + 1) {
stop = topic.find('/', start);
std::string_view segment = topic.substr(start, stop - start);
/* It is very important to disallow wildcards when publishing.
* We will not catch EVERY misuse this lazy way, but enough to hinder
* explosive recursion.
* Terminating wildcards MAY still get triggered along the way, if for
* instance the error is found late while iterating the topic segments. */
if (segment.length() == 1) {
if (segment[0] == '+' || segment[0] == '#') {
return;
}
}
/* Do we have a terminating wildcard child? */
if (iterator->terminatingWildcardChild) {
iterator->terminatingWildcardChild->messages[messageId] = message;
/* Add this topic to triggered */
if (!iterator->terminatingWildcardChild->triggered) {
triggeredTopics[numTriggeredTopics++] = iterator->terminatingWildcardChild;
iterator->terminatingWildcardChild->triggered = true;
}
}
/* Do we have a wildcard child? */
if (iterator->wildcardChild) {
publish(iterator->wildcardChild, stop + 1, stop, topic, message);
}
std::map<std::string_view, Topic *>::iterator it = iterator->children.find(segment);
if (it == iterator->children.end()) {
/* Stop trying to match by exact string */
return;
}
iterator = it->second;
}
/* If we went all the way we matched exactly */
iterator->messages[messageId] = message;
/* Add this topic to triggered */
if (!iterator->triggered) {
triggeredTopics[numTriggeredTopics++] = iterator;
iterator->triggered = true;
}
}
| null | null | 195,296
|
277577479981175522099070212405779218657
| 56
|
Fix overflow of triggered topics
|
other
|
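The publish() record above drains triggeredTopics only on entry, but the recursive wildcard call can keep appending, so the fixed 64-entry array can overflow; that is the bug the message names. A hedged sketch of draining immediately before each append instead (names follow the record; the real fix may restructure differently):

/* Never write triggeredTopics[64]: drain whenever the array is full,
 * right before appending, so recursion cannot overflow it. */
void trigger(Topic *topic) {
    if (!topic->triggered) {
        if (numTriggeredTopics == 64) {
            drain();    /* resets numTriggeredTopics to 0 */
        }
        triggeredTopics[numTriggeredTopics++] = topic;
        topic->triggered = true;
    }
}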
radare2
|
37897226a1a31f982bfefdc4aeefc2e50355c73c
| 1
|
R_API bool r_io_bank_map_add_top(RIO *io, const ut32 bankid, const ut32 mapid) {
RIOBank *bank = r_io_bank_get (io, bankid);
RIOMap *map = r_io_map_get (io, mapid);
r_return_val_if_fail (io && bank && map, false);
RIOMapRef *mapref = _mapref_from_map (map);
if (!mapref) {
return false;
}
RIOSubMap *sm = r_io_submap_new (io, mapref);
if (!sm) {
free (mapref);
return false;
}
RRBNode *entry = _find_entry_submap_node (bank, sm);
if (!entry) {
// no intersection with any submap, so just insert
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
bank->last_used = NULL;
RIOSubMap *bd = (RIOSubMap *)entry->data;
if (r_io_submap_to (bd) == r_io_submap_to (sm) &&
r_io_submap_from (bd) >= r_io_submap_from (sm)) {
// _find_entry_submap_node guarantees, that there is no submap
// prior to bd in the range of sm, so instead of deleting and inserting
// we can just memcpy
memcpy (bd, sm, sizeof (RIOSubMap));
free (sm);
r_list_append (bank->maprefs, mapref);
return true;
}
if (r_io_submap_from (bd) < r_io_submap_from (sm) &&
r_io_submap_to (sm) < r_io_submap_to (bd)) {
// split bd into 2 maps => bd and bdsm
RIOSubMap *bdsm = R_NEWCOPY (RIOSubMap, bd);
if (!bdsm) {
free (sm);
free (mapref);
return false;
}
r_io_submap_set_from (bdsm, r_io_submap_to (sm) + 1);
r_io_submap_set_to (bd, r_io_submap_from (sm) - 1);
// TODO: insert and check return value, before adjusting sm size
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (bdsm);
free (mapref);
return false;
}
if (!r_crbtree_insert (bank->submaps, bdsm, _find_sm_by_from_vaddr_cb, NULL)) {
r_crbtree_delete (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL);
free (sm);
free (bdsm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
// guaranteed intersection
if (r_io_submap_from (bd) < r_io_submap_from (sm)) {
r_io_submap_set_to (bd, r_io_submap_from (sm) - 1);
entry = r_rbnode_next (entry);
}
while (entry && r_io_submap_to (((RIOSubMap *)entry->data)) <= r_io_submap_to (sm)) {
		//delete all submaps that are completely included in sm
RRBNode *next = r_rbnode_next (entry);
// this can be optimized, there is no need to do search here
r_crbtree_delete (bank->submaps, entry->data, _find_sm_by_from_vaddr_cb, NULL);
entry = next;
}
if (entry && r_io_submap_from (((RIOSubMap *)entry->data)) <= r_io_submap_to (sm)) {
bd = (RIOSubMap *)entry->data;
r_io_submap_set_from (bd, r_io_submap_to (sm) + 1);
}
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
| null | null | 195,302
|
166117759561842192488491492863600381325
| 89
|
Fix use-after-free in iobank rbtree usage ##io
* See havoc4 bin for reproducer
* Reported via huntr.dev by 'Cen Zhang'
|
other
|
flatpak
|
462fca2c666e0cd2b60d6d2593a7216a83047aaf
| 1
|
setup_seccomp (FlatpakBwrap *bwrap,
const char *arch,
gulong allowed_personality,
FlatpakRunFlags run_flags,
GError **error)
{
gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0;
gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0;
__attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL;
/**** BEGIN NOTE ON CODE SHARING
*
* There are today a number of different Linux container
* implementations. That will likely continue for long into the
* future. But we can still try to share code, and it's important
* to do so because it affects what library and application writers
* can do, and we should support code portability between different
* container tools.
*
* This syscall blocklist is copied from linux-user-chroot, which was in turn
* clearly influenced by the Sandstorm.io blocklist.
*
* If you make any changes here, I suggest sending the changes along
* to other sandbox maintainers. Using the libseccomp list is also
* an appropriate venue:
* https://groups.google.com/forum/#!forum/libseccomp
*
* A non-exhaustive list of links to container tooling that might
* want to share this blocklist:
*
* https://github.com/sandstorm-io/sandstorm
* in src/sandstorm/supervisor.c++
* https://github.com/flatpak/flatpak.git
* in common/flatpak-run.c
* https://git.gnome.org/browse/linux-user-chroot
* in src/setup-seccomp.c
*
* Other useful resources:
* https://github.com/systemd/systemd/blob/HEAD/src/shared/seccomp-util.c
* https://github.com/moby/moby/blob/HEAD/profiles/seccomp/default.json
*
**** END NOTE ON CODE SHARING
*/
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_blocklist[] = {
/* Block dmesg */
{SCMP_SYS (syslog), EPERM},
/* Useless old syscall */
{SCMP_SYS (uselib), EPERM},
/* Don't allow disabling accounting */
{SCMP_SYS (acct), EPERM},
/* 16-bit code is unnecessary in the sandbox, and modify_ldt is a
historic source of interesting information leaks. */
{SCMP_SYS (modify_ldt), EPERM},
/* Don't allow reading current quota use */
{SCMP_SYS (quotactl), EPERM},
/* Don't allow access to the kernel keyring */
{SCMP_SYS (add_key), EPERM},
{SCMP_SYS (keyctl), EPERM},
{SCMP_SYS (request_key), EPERM},
/* Scary VM/NUMA ops */
{SCMP_SYS (move_pages), EPERM},
{SCMP_SYS (mbind), EPERM},
{SCMP_SYS (get_mempolicy), EPERM},
{SCMP_SYS (set_mempolicy), EPERM},
{SCMP_SYS (migrate_pages), EPERM},
/* Don't allow subnamespace setups: */
{SCMP_SYS (unshare), EPERM},
{SCMP_SYS (setns), EPERM},
{SCMP_SYS (mount), EPERM},
{SCMP_SYS (umount), EPERM},
{SCMP_SYS (umount2), EPERM},
{SCMP_SYS (pivot_root), EPERM},
#if defined(__s390__) || defined(__s390x__) || defined(__CRIS__)
/* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack
* and flags arguments are reversed so the flags come second */
{SCMP_SYS (clone), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#else
/* Normally the flags come first */
{SCMP_SYS (clone), EPERM, &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#endif
/* Don't allow faking input to the controlling tty (CVE-2017-5226) */
{SCMP_SYS (ioctl), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)},
/* seccomp can't look into clone3()'s struct clone_args to check whether
* the flags are OK, so we have no choice but to block clone3().
* Return ENOSYS so user-space will fall back to clone().
* (GHSA-67h7-w3jq-vh4q; see also https://github.com/moby/moby/commit/9f6b562d) */
{SCMP_SYS (clone3), ENOSYS},
/* New mount manipulation APIs can also change our VFS. There's no
* legitimate reason to do these in the sandbox, so block all of them
* rather than thinking about which ones might be dangerous.
* (GHSA-67h7-w3jq-vh4q) */
{SCMP_SYS (open_tree), ENOSYS},
{SCMP_SYS (move_mount), ENOSYS},
{SCMP_SYS (fsopen), ENOSYS},
{SCMP_SYS (fsconfig), ENOSYS},
{SCMP_SYS (fsmount), ENOSYS},
{SCMP_SYS (fspick), ENOSYS},
{SCMP_SYS (mount_setattr), ENOSYS},
};
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_nondevel_blocklist[] = {
/* Profiling operations; we expect these to be done by tools from outside
* the sandbox. In particular perf has been the source of many CVEs.
*/
{SCMP_SYS (perf_event_open), EPERM},
/* Don't allow you to switch to bsd emulation or whatnot */
{SCMP_SYS (personality), EPERM, &SCMP_A0 (SCMP_CMP_NE, allowed_personality)},
{SCMP_SYS (ptrace), EPERM}
};
/* Blocklist all but unix, inet, inet6 and netlink */
struct
{
int family;
FlatpakRunFlags flags_mask;
} socket_family_allowlist[] = {
/* NOTE: Keep in numerical order */
{ AF_UNSPEC, 0 },
{ AF_LOCAL, 0 },
{ AF_INET, 0 },
{ AF_INET6, 0 },
{ AF_NETLINK, 0 },
{ AF_CAN, FLATPAK_RUN_FLAG_CANBUS },
{ AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH },
};
int last_allowed_family;
int i, r;
g_auto(GLnxTmpfile) seccomp_tmpf = { 0, };
seccomp = seccomp_init (SCMP_ACT_ALLOW);
if (!seccomp)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed"));
if (arch != NULL)
{
uint32_t arch_id = 0;
const uint32_t *extra_arches = NULL;
if (strcmp (arch, "i386") == 0)
{
arch_id = SCMP_ARCH_X86;
}
else if (strcmp (arch, "x86_64") == 0)
{
arch_id = SCMP_ARCH_X86_64;
extra_arches = seccomp_x86_64_extra_arches;
}
else if (strcmp (arch, "arm") == 0)
{
arch_id = SCMP_ARCH_ARM;
}
#ifdef SCMP_ARCH_AARCH64
else if (strcmp (arch, "aarch64") == 0)
{
arch_id = SCMP_ARCH_AARCH64;
extra_arches = seccomp_aarch64_extra_arches;
}
#endif
/* We only really need to handle arches on multiarch systems.
* If only one arch is supported the default is fine */
if (arch_id != 0)
{
/* This *adds* the target arch, instead of replacing the
native one. This is not ideal, because we'd like to only
allow the target arch, but we can't really disallow the
native arch at this point, because then bubblewrap
couldn't continue running. */
r = seccomp_arch_add (seccomp, arch_id);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter"));
if (multiarch && extra_arches != NULL)
{
for (i = 0; extra_arches[i] != 0; i++)
{
r = seccomp_arch_add (seccomp, extra_arches[i]);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter"));
}
}
}
}
/* TODO: Should we filter the kernel keyring syscalls in some way?
* We do want them to be used by desktop apps, but they could also perhaps
* leak system stuff or secrets from other apps.
*/
for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++)
{
int scall = syscall_blocklist[i].scall;
int errnum = syscall_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
if (!devel)
{
for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++)
{
int scall = syscall_nondevel_blocklist[i].scall;
int errnum = syscall_nondevel_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_nondevel_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_nondevel_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
}
/* Socket filtering doesn't work on e.g. i386, so ignore failures here
* However, we need to use seccomp_rule_add_exact to avoid libseccomp doing
* something else: https://github.com/seccomp/libseccomp/issues/8 */
last_allowed_family = -1;
for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++)
{
int family = socket_family_allowlist[i].family;
int disallowed;
if (socket_family_allowlist[i].flags_mask != 0 &&
(socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask)
continue;
for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++)
{
/* Blocklist the in-between valid families */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed));
}
last_allowed_family = family;
}
/* Blocklist the rest */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1));
if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error))
return FALSE;
if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf"));
lseek (seccomp_tmpf.fd, 0, SEEK_SET);
flatpak_bwrap_add_args_data_fd (bwrap,
"--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL);
return TRUE;
}
| null | null | 195,308
|
47398103208121456017040412728514126371
| 275
|
run: Don't allow chroot()
If we don't allow pivot_root() then there seems no reason why we should
allow chroot().
Partially fixes GHSA-67h7-w3jq-vh4q.
Signed-off-by: Simon McVittie <[email protected]>
|
other
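
The record above builds a default-allow seccomp filter and turns individual
syscalls into EPERM/ENOSYS errors; the commit extends that blocklist to
chroot(). A minimal standalone sketch of the same pattern, assuming libseccomp
is available (compile with -lseccomp); it is illustrative only, not Flatpak's
actual patch:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <seccomp.h>

int main(void)
{
    /* Default-allow filter, as in the record above */
    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
    if (!ctx)
        return 1;
    /* ENOSYS makes chroot() look unimplemented to the confined process */
    if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOSYS), SCMP_SYS(chroot), 0) < 0 ||
        seccomp_load(ctx) < 0) {
        seccomp_release(ctx);
        return 1;
    }
    seccomp_release(ctx); /* the loaded filter stays active in the kernel */
    printf("chroot blocked: %d\n", chroot("/") == -1 && errno == ENOSYS);
    return 0;
}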
|
squid
|
5e2ea2b13bd98f53e29964ca26bb0d602a8a12b9
| 1
|
gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
{
char *pos = inbuf;
char *lpos = NULL;
char *tline = NULL;
LOCAL_ARRAY(char, line, TEMP_BUF_SIZE);
LOCAL_ARRAY(char, tmpbuf, TEMP_BUF_SIZE);
char *name = NULL;
char *selector = NULL;
char *host = NULL;
char *port = NULL;
char *escaped_selector = NULL;
const char *icon_url = NULL;
char gtype;
StoreEntry *entry = NULL;
memset(tmpbuf, '\0', TEMP_BUF_SIZE);
memset(line, '\0', TEMP_BUF_SIZE);
entry = gopherState->entry;
if (gopherState->conversion == GopherStateData::HTML_INDEX_PAGE) {
char *html_url = html_quote(entry->url());
gopherHTMLHeader(entry, "Gopher Index %s", html_url);
storeAppendPrintf(entry,
"<p>This is a searchable Gopher index. Use the search\n"
"function of your browser to enter search terms.\n"
"<ISINDEX>\n");
gopherHTMLFooter(entry);
/* now let's start sending stuff to the client */
entry->flush();
gopherState->HTML_header_added = 1;
return;
}
if (gopherState->conversion == GopherStateData::HTML_CSO_PAGE) {
char *html_url = html_quote(entry->url());
gopherHTMLHeader(entry, "CSO Search of %s", html_url);
storeAppendPrintf(entry,
"<P>A CSO database usually contains a phonebook or\n"
"directory. Use the search function of your browser to enter\n"
"search terms.</P><ISINDEX>\n");
gopherHTMLFooter(entry);
/* now let's start sending stuff to the client */
entry->flush();
gopherState->HTML_header_added = 1;
return;
}
String outbuf;
if (!gopherState->HTML_header_added) {
if (gopherState->conversion == GopherStateData::HTML_CSO_RESULT)
gopherHTMLHeader(entry, "CSO Search Result", NULL);
else
gopherHTMLHeader(entry, "Gopher Menu", NULL);
outbuf.append ("<PRE>");
gopherState->HTML_header_added = 1;
gopherState->HTML_pre = 1;
}
while (pos < inbuf + len) {
int llen;
int left = len - (pos - inbuf);
lpos = (char *)memchr(pos, '\n', left);
if (lpos) {
++lpos; /* Next line is after \n */
llen = lpos - pos;
} else {
llen = left;
}
if (gopherState->len + llen >= TEMP_BUF_SIZE) {
debugs(10, DBG_IMPORTANT, "GopherHTML: Buffer overflow. Lost some data on URL: " << entry->url() );
llen = TEMP_BUF_SIZE - gopherState->len - 1;
gopherState->overflowed = true; // may already be true
}
if (!lpos) {
/* there is no complete line in inbuf */
/* copy it to temp buffer */
/* note: llen is adjusted above */
memcpy(gopherState->buf + gopherState->len, pos, llen);
gopherState->len += llen;
break;
}
if (gopherState->len != 0) {
/* there is something left from last tx. */
memcpy(line, gopherState->buf, gopherState->len);
memcpy(line + gopherState->len, pos, llen);
llen += gopherState->len;
gopherState->len = 0;
} else {
memcpy(line, pos, llen);
}
line[llen + 1] = '\0';
/* move input to next line */
pos = lpos;
/* at this point. We should have one line in buffer to process */
if (*line == '.') {
/* skip it */
memset(line, '\0', TEMP_BUF_SIZE);
continue;
}
switch (gopherState->conversion) {
case GopherStateData::HTML_INDEX_RESULT:
case GopherStateData::HTML_DIR: {
tline = line;
gtype = *tline;
++tline;
name = tline;
selector = strchr(tline, TAB);
if (selector) {
*selector = '\0';
++selector;
host = strchr(selector, TAB);
if (host) {
*host = '\0';
++host;
port = strchr(host, TAB);
if (port) {
char *junk;
port[0] = ':';
junk = strchr(host, TAB);
if (junk)
*junk++ = 0; /* Chop port */
else {
junk = strchr(host, '\r');
if (junk)
*junk++ = 0; /* Chop port */
else {
junk = strchr(host, '\n');
if (junk)
*junk++ = 0; /* Chop port */
}
}
if ((port[1] == '0') && (!port[2]))
port[0] = 0; /* 0 means none */
}
/* escape a selector here */
escaped_selector = xstrdup(rfc1738_escape_part(selector));
switch (gtype) {
case GOPHER_DIRECTORY:
icon_url = mimeGetIconURL("internal-menu");
break;
case GOPHER_HTML:
case GOPHER_FILE:
icon_url = mimeGetIconURL("internal-text");
break;
case GOPHER_INDEX:
case GOPHER_CSO:
icon_url = mimeGetIconURL("internal-index");
break;
case GOPHER_IMAGE:
case GOPHER_GIF:
case GOPHER_PLUS_IMAGE:
icon_url = mimeGetIconURL("internal-image");
break;
case GOPHER_SOUND:
case GOPHER_PLUS_SOUND:
icon_url = mimeGetIconURL("internal-sound");
break;
case GOPHER_PLUS_MOVIE:
icon_url = mimeGetIconURL("internal-movie");
break;
case GOPHER_TELNET:
case GOPHER_3270:
icon_url = mimeGetIconURL("internal-telnet");
break;
case GOPHER_BIN:
case GOPHER_MACBINHEX:
case GOPHER_DOSBIN:
case GOPHER_UUENCODED:
icon_url = mimeGetIconURL("internal-binary");
break;
case GOPHER_INFO:
icon_url = NULL;
break;
case GOPHER_WWW:
icon_url = mimeGetIconURL("internal-link");
break;
default:
icon_url = mimeGetIconURL("internal-unknown");
break;
}
memset(tmpbuf, '\0', TEMP_BUF_SIZE);
if ((gtype == GOPHER_TELNET) || (gtype == GOPHER_3270)) {
if (strlen(escaped_selector) != 0)
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s@%s%s%s/\">%s</A>\n",
icon_url, escaped_selector, rfc1738_escape_part(host),
*port ? ":" : "", port, html_quote(name));
else
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s%s%s/\">%s</A>\n",
icon_url, rfc1738_escape_part(host), *port ? ":" : "",
port, html_quote(name));
} else if (gtype == GOPHER_INFO) {
snprintf(tmpbuf, TEMP_BUF_SIZE, "\t%s\n", html_quote(name));
} else {
if (strncmp(selector, "GET /", 5) == 0) {
/* WWW link */
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"http://%s/%s\">%s</A>\n",
icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name));
} else if (gtype == GOPHER_WWW) {
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"%s\">%s</A>\n",
icon_url, rfc1738_escape_unescaped(selector), html_quote(name));
} else {
/* Standard link */
snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
icon_url, host, gtype, escaped_selector, html_quote(name));
}
}
safe_free(escaped_selector);
outbuf.append(tmpbuf);
} else {
memset(line, '\0', TEMP_BUF_SIZE);
continue;
}
} else {
memset(line, '\0', TEMP_BUF_SIZE);
continue;
}
break;
} /* HTML_DIR, HTML_INDEX_RESULT */
case GopherStateData::HTML_CSO_RESULT: {
if (line[0] == '-') {
int code, recno;
char *s_code, *s_recno, *result;
s_code = strtok(line + 1, ":\n");
s_recno = strtok(NULL, ":\n");
result = strtok(NULL, "\n");
if (!result)
break;
code = atoi(s_code);
recno = atoi(s_recno);
if (code != 200)
break;
if (gopherState->cso_recno != recno) {
snprintf(tmpbuf, TEMP_BUF_SIZE, "</PRE><HR noshade size=\"1px\"><H2>Record# %d<br><i>%s</i></H2>\n<PRE>", recno, html_quote(result));
gopherState->cso_recno = recno;
} else {
snprintf(tmpbuf, TEMP_BUF_SIZE, "%s\n", html_quote(result));
}
outbuf.append(tmpbuf);
break;
} else {
int code;
char *s_code, *result;
s_code = strtok(line, ":");
result = strtok(NULL, "\n");
if (!result)
break;
code = atoi(s_code);
switch (code) {
case 200: {
/* OK */
/* Do nothing here */
break;
}
case 102: /* Number of matches */
case 501: /* No Match */
case 502: { /* Too Many Matches */
/* Print the message the server returns */
snprintf(tmpbuf, TEMP_BUF_SIZE, "</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
outbuf.append(tmpbuf);
break;
}
}
}
break;
} /* HTML_CSO_RESULT */
default:
break; /* do nothing */
} /* switch */
} /* while loop */
if (outbuf.size() > 0) {
entry->append(outbuf.rawBuf(), outbuf.size());
/* now let's start sending stuff to the client */
entry->flush();
}
outbuf.clean();
return;
}
| null | null | 195,309
|
301420510121776719662993879220124365349
| 346
|
Improve handling of Gopher responses (#1022)
|
other
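
The Squid record above assembles Gopher response lines into a fixed
TEMP_BUF_SIZE buffer, clamping llen whenever the carried-over bytes plus the
new chunk would not fit. A small sketch of that bounded line-assembly pattern
in plain C (hypothetical names, not Squid's API):

#include <stdio.h>
#include <string.h>

#define TEMP_BUF_SIZE 4096

static char carry[TEMP_BUF_SIZE];
static size_t carry_len;

static void append_chunk(const char *chunk, size_t len)
{
    if (carry_len + len >= TEMP_BUF_SIZE) {
        /* Same recovery as the record above: truncate and log */
        len = TEMP_BUF_SIZE - carry_len - 1;
        fprintf(stderr, "line too long, truncating\n");
    }
    memcpy(carry + carry_len, chunk, len);
    carry_len += len;
    carry[carry_len] = '\0'; /* always NUL-terminate inside the buffer */
}

int main(void)
{
    char big[8192];
    memset(big, 'A', sizeof(big));
    append_chunk(big, sizeof(big)); /* truncated instead of overflowing */
    printf("kept %zu bytes\n", carry_len);
    return 0;
}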
|
gpac
|
30ac5e5236b790accd1f25347eebf2dc8c6c1bcb
| 1
|
char *gf_text_get_utf8_line(char *szLine, u32 lineSize, FILE *txt_in, s32 unicode_type)
{
u32 i, j, len;
char *sOK;
char szLineConv[1024];
unsigned short *sptr;
memset(szLine, 0, sizeof(char)*lineSize);
sOK = gf_fgets(szLine, lineSize, txt_in);
if (!sOK) return NULL;
if (unicode_type<=1) {
j=0;
len = (u32) strlen(szLine);
for (i=0; i<len; i++) {
if (!unicode_type && (szLine[i] & 0x80)) {
/*non UTF8 (likely some win-CP)*/
if ((szLine[i+1] & 0xc0) != 0x80) {
szLineConv[j] = 0xc0 | ( (szLine[i] >> 6) & 0x3 );
j++;
szLine[i] &= 0xbf;
}
/*UTF8 2 bytes char*/
else if ( (szLine[i] & 0xe0) == 0xc0) {
szLineConv[j] = szLine[i];
i++;
j++;
}
/*UTF8 3 bytes char*/
else if ( (szLine[i] & 0xf0) == 0xe0) {
szLineConv[j] = szLine[i];
i++;
j++;
szLineConv[j] = szLine[i];
i++;
j++;
}
/*UTF8 4 bytes char*/
else if ( (szLine[i] & 0xf8) == 0xf0) {
szLineConv[j] = szLine[i];
i++;
j++;
szLineConv[j] = szLine[i];
i++;
j++;
szLineConv[j] = szLine[i];
i++;
j++;
} else {
i+=1;
continue;
}
}
szLineConv[j] = szLine[i];
j++;
}
szLineConv[j] = 0;
strcpy(szLine, szLineConv);
return sOK;
}
#ifdef GPAC_BIG_ENDIAN
if (unicode_type==3)
#else
if (unicode_type==2)
#endif
{
i=0;
while (1) {
char c;
if (!szLine[i] && !szLine[i+1]) break;
c = szLine[i+1];
szLine[i+1] = szLine[i];
szLine[i] = c;
i+=2;
}
}
sptr = (u16 *)szLine;
i = (u32) gf_utf8_wcstombs(szLineConv, 1024, (const unsigned short **) &sptr);
szLineConv[i] = 0;
strcpy(szLine, szLineConv);
/*this is ugly indeed: since input is UTF16-LE, there is a good chance gf_fgets never reads the \0 after a \n*/
if (unicode_type==3) gf_fgetc(txt_in);
return sOK;
}
| null | null | 195,328
|
326851394099143406887426305839598235668
| 84
|
fixed #1897
|
other
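
gf_text_get_utf8_line above advances i past the continuation bytes of 2-, 3-
and 4-byte UTF-8 sequences while converting a line; the crash class fixed in
this area comes from consuming continuation bytes that are not actually in the
buffer. A bounds-checked sketch of multi-byte copying (plain C, illustrative
only, not GPAC's fix):

#include <stdio.h>
#include <string.h>

static size_t utf8_seq_len(unsigned char c)
{
    if ((c & 0x80) == 0x00) return 1;
    if ((c & 0xE0) == 0xC0) return 2;
    if ((c & 0xF0) == 0xE0) return 3;
    if ((c & 0xF8) == 0xF0) return 4;
    return 0; /* invalid lead byte */
}

static size_t copy_utf8(char *dst, const char *src, size_t len)
{
    size_t i = 0, j = 0;
    while (i < len) {
        size_t n = utf8_seq_len((unsigned char)src[i]);
        /* skip truncated or invalid sequences instead of reading past len */
        if (n == 0 || i + n > len) { i++; continue; }
        memcpy(dst + j, src + i, n);
        i += n; j += n;
    }
    dst[j] = '\0';
    return j;
}

int main(void)
{
    char out[16];
    /* "é" (0xC3 0xA9) followed by a dangling 3-byte lead */
    size_t n = copy_utf8(out, "\xC3\xA9\xE2", 3);
    printf("copied %zu bytes\n", n); /* 2: the truncated 0xE2 is dropped */
    return 0;
}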
|
tensorflow
|
08d7b00c0a5a20926363849f611729f53f3ec022
| 1
|
Status ConcatShapeHelper(InferenceContext* c, int start_value_index,
int end_value_index, int dim_index) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(dim_index), 0, &unused));
const Tensor* concat_dim_t = c->input_tensor(dim_index);
if (concat_dim_t == nullptr) {
// Return an unknown shape with same rank as inputs, or an unknown rank
// if no input's rank is known.
// Find rank.
int32_t rank = InferenceContext::kUnknownRank;
for (int i = start_value_index; i < end_value_index; ++i) {
if (rank == InferenceContext::kUnknownRank) rank = c->Rank(c->input(i));
if (rank != InferenceContext::kUnknownRank) {
break;
}
}
if (rank == InferenceContext::kUnknownRank) {
c->set_output(0, c->UnknownShape());
return Status::OK();
} else if (rank == 0) {
return errors::InvalidArgument(
"Can't concatenate scalars (use tf.stack instead)");
} else {
for (int i = start_value_index; i < end_value_index; ++i) {
// Check that all the inputs are of the correct rank.
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), rank, &unused));
}
}
// Build result of <rank> different unknown dims.
std::vector<DimensionHandle> dims;
dims.reserve(rank);
for (int i = 0; i < rank; ++i) dims.push_back(c->UnknownDim());
c->set_output(0, c->MakeShape(dims));
return Status::OK();
}
// Merge all the non-concat dims, and sum the concat dim to make an output
// shape.
int64_t concat_dim;
if (concat_dim_t->dtype() == DT_INT32) {
concat_dim = static_cast<int64_t>(concat_dim_t->flat<int32>()(0));
} else {
concat_dim = concat_dim_t->flat<int64_t>()(0);
}
// Minimum required number of dimensions.
const int min_rank = concat_dim < 0 ? -concat_dim : concat_dim + 1;
ShapeHandle output_before;
ShapeHandle output_after;
ShapeHandle input = c->input(end_value_index - 1);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &output_before));
DimensionHandle output_middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
output_after = c->Scalar(); // no dimensions.
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &output_after));
}
for (int i = end_value_index - 2; i >= start_value_index; --i) {
ShapeHandle before;
ShapeHandle after;
input = c->input(i);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &before));
DimensionHandle middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
after = c->Scalar();
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &after));
}
TF_RETURN_IF_ERROR(c->Merge(before, output_before, &output_before));
TF_RETURN_IF_ERROR(c->Add(output_middle, middle, &output_middle));
TF_RETURN_IF_ERROR(c->Merge(after, output_after, &output_after));
}
ShapeHandle s;
TF_RETURN_IF_ERROR(
c->Concatenate(output_before, c->Vector(output_middle), &s));
TF_RETURN_IF_ERROR(c->Concatenate(s, output_after, &s));
c->set_output(0, s);
return Status::OK();
}
| null | null | 195,331
|
199280692740720705081563144733122975970
| 87
|
Fix Segfault in Concat V2 shape function.
PiperOrigin-RevId: 412120654
Change-Id: I3ff915faea694f9ad8b00024e9af2de9909011be
|
other
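
The shape function above special-cases concat_dim == -1 and otherwise slices
each input at the concat axis; the fix title points at a segfault when the
axis is not validated against the rank. A tiny sketch of axis normalization in
plain C (not the TF shape-inference API):

#include <stdio.h>

static int normalize_axis(long axis, int rank, int *out)
{
    if (axis < -(long)rank || axis >= (long)rank)
        return -1;                 /* out of range: would index past the dims */
    *out = axis < 0 ? (int)(axis + rank) : (int)axis;
    return 0;
}

int main(void)
{
    int idx;
    printf("%d\n", normalize_axis(-1, 3, &idx)); /* 0, idx = 2 */
    printf("%d\n", normalize_axis(-5, 3, &idx)); /* -1: rejected */
    return 0;
}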
|
gpac
|
b03c9f252526bb42fbd1b87b9f5e339c3cf2390a
| 1
|
GF_Err iloc_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 item_count, extent_count, i, j;
GF_ItemLocationBox *ptr = (GF_ItemLocationBox *)s;
ISOM_DECREASE_SIZE(ptr, 2)
ptr->offset_size = gf_bs_read_int(bs, 4);
ptr->length_size = gf_bs_read_int(bs, 4);
ptr->base_offset_size = gf_bs_read_int(bs, 4);
if (ptr->version == 1 || ptr->version == 2) {
ptr->index_size = gf_bs_read_int(bs, 4);
} else {
gf_bs_read_int(bs, 4);
}
if (ptr->version < 2) {
ISOM_DECREASE_SIZE(ptr, 2)
item_count = gf_bs_read_u16(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
item_count = gf_bs_read_u32(bs);
}
for (i = 0; i < item_count; i++) {
GF_ItemLocationEntry *location_entry = (GF_ItemLocationEntry *)gf_malloc(sizeof(GF_ItemLocationEntry));
if (!location_entry) return GF_OUT_OF_MEM;
gf_list_add(ptr->location_entries, location_entry);
if (ptr->version < 2) {
ISOM_DECREASE_SIZE(ptr, 2)
location_entry->item_ID = gf_bs_read_u16(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
location_entry->item_ID = gf_bs_read_u32(bs);
}
if (ptr->version == 1 || ptr->version == 2) {
ISOM_DECREASE_SIZE(ptr, 2)
location_entry->construction_method = gf_bs_read_u16(bs);
}
else {
location_entry->construction_method = 0;
}
ISOM_DECREASE_SIZE(ptr, (2 + ptr->base_offset_size) )
location_entry->data_reference_index = gf_bs_read_u16(bs);
location_entry->base_offset = gf_bs_read_int(bs, 8*ptr->base_offset_size);
#ifndef GPAC_DISABLE_ISOM_WRITE
location_entry->original_base_offset = location_entry->base_offset;
#endif
ISOM_DECREASE_SIZE(ptr, 2)
extent_count = gf_bs_read_u16(bs);
location_entry->extent_entries = gf_list_new();
for (j = 0; j < extent_count; j++) {
GF_ItemExtentEntry *extent_entry = (GF_ItemExtentEntry *)gf_malloc(sizeof(GF_ItemExtentEntry));
if (!extent_entry) return GF_OUT_OF_MEM;
gf_list_add(location_entry->extent_entries, extent_entry);
if ((ptr->version == 1 || ptr->version == 2) && ptr->index_size > 0) {
ISOM_DECREASE_SIZE(ptr, ptr->index_size)
extent_entry->extent_index = gf_bs_read_int(bs, 8 * ptr->index_size);
}
else {
extent_entry->extent_index = 0;
}
ISOM_DECREASE_SIZE(ptr, (ptr->offset_size+ptr->length_size) )
extent_entry->extent_offset = gf_bs_read_int(bs, 8*ptr->offset_size);
extent_entry->extent_length = gf_bs_read_int(bs, 8*ptr->length_size);
#ifndef GPAC_DISABLE_ISOM_WRITE
extent_entry->original_extent_offset = extent_entry->extent_offset;
#endif
}
}
return GF_OK;
}
| null | null | 195,334
|
312103916874790257173149118731192381232
| 74
|
fixed #1890
|
other
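
iloc_box_read above wraps every field read in ISOM_DECREASE_SIZE so that
parsing fails once the declared box size is exhausted. The same idea as a
standalone bounded reader (hypothetical types, endianness ignored for
brevity):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef struct { const uint8_t *p; size_t left; } box_reader;

static int rd_bytes(box_reader *r, void *dst, size_t n)
{
    if (n > r->left) return -1;     /* would read past the declared box size */
    memcpy(dst, r->p, n);
    r->p += n; r->left -= n;
    return 0;
}

int main(void)
{
    uint8_t data[4] = {0, 2, 0, 0};
    box_reader r = { data, sizeof(data) };
    uint16_t item_count;
    if (rd_bytes(&r, &item_count, 2) == 0)
        printf("left after item_count: %zu\n", r.left);
    uint8_t big[8];
    printf("oversized read rejected: %d\n", rd_bytes(&r, big, 8) == -1);
    return 0;
}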
|
gpac
|
5ce0c906ed8599d218036b18b78e8126a496f137
| 1
|
static void naludmx_queue_param_set(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 ps_type, s32 ps_id)
{
GF_List *list = NULL, *alt_list = NULL;
GF_NALUFFParam *sl;
u32 i, count;
u32 crc = gf_crc_32(data, size);
if (ctx->codecid==GF_CODECID_HEVC) {
switch (ps_type) {
case GF_HEVC_NALU_VID_PARAM:
if (!ctx->vps) ctx->vps = gf_list_new();
list = ctx->vps;
break;
case GF_HEVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_HEVC_NALU_PIC_PARAM:
list = ctx->pps;
break;
default:
assert(0);
return;
}
} else if (ctx->codecid==GF_CODECID_VVC) {
switch (ps_type) {
case GF_VVC_NALU_VID_PARAM:
if (!ctx->vps) ctx->vps = gf_list_new();
list = ctx->vps;
break;
case GF_VVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_VVC_NALU_PIC_PARAM:
list = ctx->pps;
break;
case GF_VVC_NALU_DEC_PARAM:
if (!ctx->vvc_dci) ctx->vvc_dci = gf_list_new();
list = ctx->vvc_dci;
break;
case GF_VVC_NALU_APS_PREFIX:
if (!ctx->vvc_aps_pre) ctx->vvc_aps_pre = gf_list_new();
list = ctx->vvc_aps_pre;
break;
default:
assert(0);
return;
}
} else {
switch (ps_type) {
case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
case GF_AVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_AVC_NALU_PIC_PARAM:
list = ctx->pps;
alt_list = ctx->pps_svc;
break;
case GF_AVC_NALU_SEQ_PARAM_EXT:
if (!ctx->sps_ext) ctx->sps_ext = gf_list_new();
list = ctx->sps_ext;
break;
default:
assert(0);
return;
}
}
sl = NULL;
count = gf_list_count(list);
for (i=0; i<count; i++) {
sl = gf_list_get(list, i);
if (sl->id != ps_id) {
sl = NULL;
continue;
}
//same ID, same CRC, we don't change our state
if (sl->crc == crc) return;
break;
}
//handle alt PPS list for SVC
if (!sl && alt_list) {
count = gf_list_count(alt_list);
for (i=0; i<count; i++) {
sl = gf_list_get(alt_list, i);
if (sl->id != ps_id) {
sl = NULL;
continue;
}
//same ID, same CRC, we don't change our state
if (sl->crc == crc) return;
break;
}
}
if (sl) {
//otherwise we keep this new param set
sl->data = gf_realloc(sl->data, size);
memcpy(sl->data, data, size);
sl->size = size;
sl->crc = crc;
ctx->ps_modified = GF_TRUE;
return;
}
//TODO we might want to purge the list after a while !!
GF_SAFEALLOC(sl, GF_NALUFFParam);
if (!sl) return;
sl->data = gf_malloc(sizeof(char) * size);
if (!sl->data) {
gf_free(sl);
return;
}
memcpy(sl->data, data, size);
sl->size = size;
sl->id = ps_id;
sl->crc = crc;
ctx->ps_modified = GF_TRUE;
gf_list_add(list, sl);
}
| null | null | 195,338
|
132316141068759731368278245393080210909
| 119
|
fixed #1892
|
other
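
naludmx_queue_param_set above keeps one entry per parameter-set id and skips
the update when the payload CRC is unchanged, otherwise reallocating in place
or appending. A compact sketch of that replace-or-insert cache (hypothetical
structs, toy hash standing in for gf_crc_32):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int id; uint32_t crc; size_t size; uint8_t *data; } ps_entry;

static ps_entry table[32];
static size_t table_len;

/* FNV-1a as a toy stand-in for gf_crc_32 */
static uint32_t hash_toy(const uint8_t *d, size_t n)
{
    uint32_t h = 2166136261u;
    for (size_t i = 0; i < n; i++) h = (h ^ d[i]) * 16777619u;
    return h;
}

static int queue_ps(int id, const uint8_t *data, size_t size)
{
    uint32_t crc = hash_toy(data, size);
    for (size_t i = 0; i < table_len; i++) {
        if (table[i].id != id) continue;
        if (table[i].crc == crc) return 0;         /* unchanged: keep state */
        uint8_t *nd = realloc(table[i].data, size);
        if (!nd) return -1;
        memcpy(nd, data, size);
        table[i].data = nd; table[i].size = size; table[i].crc = crc;
        return 1;                                   /* replaced */
    }
    if (table_len == 32) return -1;                 /* cache full */
    ps_entry *e = &table[table_len];
    e->data = malloc(size);
    if (!e->data) return -1;
    memcpy(e->data, data, size);
    e->id = id; e->size = size; e->crc = crc;
    table_len++;
    return 1;                                       /* inserted */
}

int main(void)
{
    uint8_t sps[4] = {1, 2, 3, 4};
    printf("%d %d\n", queue_ps(0, sps, 4), queue_ps(0, sps, 4)); /* 1 0 */
    return 0;
}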
|
tensorflow
|
e952a89b7026b98fe8cbe626514a93ed68b7c510
| 1
|
void Compute(OpKernelContext *ctx) override {
const Tensor *indices_t, *values_t, *shape_t, *dense_t;
OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t));
OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t));
// Validations.
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()),
errors::InvalidArgument(
"Input sp_indices should be a matrix but received shape: ",
indices_t->shape().DebugString()));
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(values_t->shape()) &&
TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument(
"Inputs sp_values and sp_shape should be vectors "
"but received shapes: ",
values_t->shape().DebugString(), " and ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument("Input sp_shape must be a vector. Got: ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, values_t->dim_size(0) == indices_t->dim_size(0),
errors::InvalidArgument(
"The first dimension of values and indices should match. (",
values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")"));
OP_REQUIRES(
ctx, shape_t->shape().dim_size(0) == indices_t->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", shape_t->shape().dim_size(0),
" dimensions, indices shape: ", indices_t->shape().DebugString()));
OP_REQUIRES(ctx, shape_t->NumElements() > 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
const auto indices_mat = indices_t->matrix<int64_t>();
const auto shape_vec = shape_t->vec<int64_t>();
const auto lhs_dims = BCast::FromShape(TensorShape(shape_vec));
const auto rhs_dims = BCast::FromShape(dense_t->shape());
BCast b(lhs_dims, rhs_dims, false); // false for keeping the same num dims.
// True iff (size(lhs) >= size(rhs)) and all dims in lhs is greater or equal
// to dims in rhs (from right to left).
auto VecGreaterEq = [](ArraySlice<int64_t> lhs, ArraySlice<int64_t> rhs) {
if (lhs.size() < rhs.size()) return false;
for (size_t i = 0; i < rhs.size(); ++i) {
if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false;
}
return true;
};
OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(),
errors::InvalidArgument(
"SparseDenseBinaryOpShared broadcasts dense to sparse "
"only; got incompatible shapes: [",
absl::StrJoin(lhs_dims, ","), "] vs. [",
absl::StrJoin(rhs_dims, ","), "]"));
Tensor *output_values = nullptr;
Tensor dense_gathered;
const int64_t nnz = indices_t->dim_size(0);
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({nnz}), &output_values));
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}),
&dense_gathered));
bool op_is_div = false;
if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) {
op_is_div = true;
}
// Pulls relevant entries from the dense side, with reshape and broadcasting
// *of the dense side* taken into account. Use a TensorRef to avoid blowing
// up memory.
//
// We can directly use the sparse indices to look up dense side, because
// "b.y_reshape()" and "b.y_bcast()" are guaranteed to have rank "ndims".
auto dense_gathered_flat = dense_gathered.flat<T>();
const int ndims = lhs_dims.size();
switch (ndims) {
#define CASE(NDIM) \
case NDIM: { \
TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \
dense_t->shaped<T, NDIM>(b.y_reshape()) \
.broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \
Eigen::array<Eigen::DenseIndex, NDIM> idx; \
bool indices_valid = true; \
for (int i = 0; i < nnz; ++i) { \
for (int d = 0; d < NDIM; ++d) { \
idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \
if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \
indices_valid = false; \
} \
} \
OP_REQUIRES( \
ctx, indices_valid, \
errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \
"dense side with broadcasted shape")); \
dense_gathered_flat(i) = rhs_ref.coeff(idx); \
if (op_is_div) { \
OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \
errors::InvalidArgument( \
"SparseDenseCwiseDiv cannot divide by zero, " \
"but input dense tensor contains zero ")); \
} \
} \
break; \
}
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(5);
default:
OP_REQUIRES(
ctx, false,
errors::InvalidArgument("Only tensors with ranks between 1 and 5 "
"are currently supported. Tensor rank: ",
ndims));
#undef CASE
}
output_values->flat<T>().device(ctx->eigen_device<Device>()) =
values_t->flat<T>().binaryExpr(dense_gathered_flat,
typename Functor::func());
}
| null | null | 195,340
|
314838534816877520594003996398254571856
| 129
|
Prevent overflow in sparse dense cwise ops.
PiperOrigin-RevId: 415543171
Change-Id: I22dab7c41be2121ab5efe5403ca0e2f9b7cb24b8
|
other
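
The kernel above validates every sparse index against the broadcasted dense
shape before gathering, rejecting out-of-bounds coordinates instead of reading
through them. The core check as a standalone C sketch:

#include <stdio.h>
#include <stdint.h>

static int gather_checked(const double *dense, const int64_t *dims, int ndims,
                          const int64_t *idx, double *out)
{
    int64_t flat = 0;
    for (int d = 0; d < ndims; d++) {
        if (idx[d] < 0 || idx[d] >= dims[d])
            return -1;              /* out-of-bounds index: reject, don't read */
        flat = flat * dims[d] + idx[d];  /* row-major flattening */
    }
    *out = dense[flat];
    return 0;
}

int main(void)
{
    double dense[6] = {0, 1, 2, 3, 4, 5};
    int64_t dims[2] = {2, 3};
    int64_t good[2] = {1, 2}, bad[2] = {1, 7};
    double v;
    printf("%d %g\n", gather_checked(dense, dims, 2, good, &v), v); /* 0 5 */
    printf("%d\n", gather_checked(dense, dims, 2, bad, &v));        /* -1 */
    return 0;
}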
|
tensorflow
|
b9bd6cfd1c50e6807846af9a86f9b83cafc9c8ae
| 1
|
int64_t OpLevelCostEstimator::CalculateOutputSize(const OpInfo& op_info,
bool* found_unknown_shapes) {
int64_t total_output_size = 0;
// Use float as default for calculations.
for (const auto& output : op_info.outputs()) {
DataType dt = output.dtype();
const auto& original_output_shape = output.shape();
int64_t output_size = DataTypeSize(BaseType(dt));
int num_dims = std::max(1, original_output_shape.dim_size());
auto output_shape = MaybeGetMinimumShape(original_output_shape, num_dims,
found_unknown_shapes);
for (const auto& dim : output_shape.dim()) {
output_size *= dim.size();
}
total_output_size += output_size;
VLOG(1) << "Output Size: " << output_size
<< " Total Output Size:" << total_output_size;
}
return total_output_size;
}
| null | null | 195,341
|
311478021714143493416324497647348491294
| 20
|
Prevent integer overflow in `OpLevelCostEstimator::CalculateOutputSize`.
In order to not change the API, we return a negative value in case of overflow. A better fix is to change the API to return a status instead.
PiperOrigin-RevId: 408701427
Change-Id: Idf31e7f0bf18ca824d084fdd355e1f653f145c20
|
other
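
The fix described above makes CalculateOutputSize report overflow with a
negative value instead of silently wrapping the int64 accumulator. A sketch of
the checked multiply, assuming the GCC/Clang overflow builtins are available:

#include <stdint.h>
#include <stdio.h>

static int64_t output_size_checked(const int64_t *dims, int n, int64_t elem_size)
{
    int64_t size = elem_size;
    for (int i = 0; i < n; i++) {
        if (__builtin_mul_overflow(size, dims[i], &size))
            return -1;              /* overflow: signal instead of wrapping */
    }
    return size;
}

int main(void)
{
    int64_t ok[2] = {1024, 1024};
    int64_t huge[2] = {INT64_MAX / 2, 4};
    printf("%lld\n", (long long)output_size_checked(ok, 2, 4));   /* 4194304 */
    printf("%lld\n", (long long)output_size_checked(huge, 2, 4)); /* -1 */
    return 0;
}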
|
tensorflow
|
002408c3696b173863228223d535f9de72a101a9
| 1
|
void Compute(OpKernelContext* context) override {
// Here's the basic idea:
// Batch and depth dimension are independent from row and col dimension. And
// because FractionalAvgPool currently only support pooling along row and
// col, we can basically think of this 4D tensor backpropagation as
// operation of a series of 2D planes.
//
// For each element of a 'slice' (2D plane) of output_backprop, we need to
// figure out its contributors when doing FractionalAvgPool operation. This
// can be done based on row_pooling_sequence, col_pooling_seq and
// overlapping.
// Once we figure out the original contributors, we just need to evenly
// divide the value of this element among these contributors.
//
// Internally, we divide the out_backprop tensor and store it in a temporary
// tensor of double type. And cast it to the corresponding type.
typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
ConstEigenMatrixMap;
typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>>
EigenDoubleMatrixMap;
// Grab the inputs.
const Tensor& orig_input_tensor_shape = context->input(0);
OP_REQUIRES(context,
orig_input_tensor_shape.dims() == 1 &&
orig_input_tensor_shape.NumElements() == 4,
errors::InvalidArgument("original input tensor shape must be "
"1-dimensional and 4 elements"));
const Tensor& out_backprop = context->input(1);
const Tensor& row_seq_tensor = context->input(2);
const Tensor& col_seq_tensor = context->input(3);
const int64_t out_batch = out_backprop.dim_size(0);
const int64_t out_rows = out_backprop.dim_size(1);
const int64_t out_cols = out_backprop.dim_size(2);
const int64_t out_depth = out_backprop.dim_size(3);
OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", row_seq_tensor must have at least ",
out_rows + 1, " elements, but got ",
row_seq_tensor.NumElements()));
OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", col_seq_tensor must have at least ",
out_cols + 1, " elements, but got ",
col_seq_tensor.NumElements()));
auto row_seq_tensor_flat = row_seq_tensor.flat<int64_t>();
auto col_seq_tensor_flat = col_seq_tensor.flat<int64_t>();
auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64_t>();
const int64_t in_batch = orig_input_tensor_shape_flat(0);
const int64_t in_rows = orig_input_tensor_shape_flat(1);
const int64_t in_cols = orig_input_tensor_shape_flat(2);
const int64_t in_depth = orig_input_tensor_shape_flat(3);
OP_REQUIRES(
context, in_batch != 0,
errors::InvalidArgument("Batch dimension of input must not be 0"));
OP_REQUIRES(
context, in_rows != 0,
errors::InvalidArgument("Rows dimension of input must not be 0"));
OP_REQUIRES(
context, in_cols != 0,
errors::InvalidArgument("Columns dimension of input must not be 0"));
OP_REQUIRES(
context, in_depth != 0,
errors::InvalidArgument("Depth dimension of input must not be 0"));
constexpr int tensor_in_and_out_dims = 4;
// Transform orig_input_tensor_shape into TensorShape
TensorShape in_shape;
for (auto i = 0; i < tensor_in_and_out_dims; ++i) {
in_shape.AddDim(orig_input_tensor_shape_flat(i));
}
// Create intermediate in_backprop.
Tensor in_backprop_tensor_temp;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp(
{0}, DataTypeToEnum<double>::v(), in_shape,
&in_backprop_tensor_temp));
in_backprop_tensor_temp.flat<double>().setZero();
// Transform 4D tensor to 2D matrix.
EigenDoubleMatrixMap in_backprop_tensor_temp_mat(
in_backprop_tensor_temp.flat<double>().data(), in_depth,
in_cols * in_rows * in_batch);
ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(),
out_depth,
out_cols * out_rows * out_batch);
// Loop through each element of out_backprop and evenly distribute the
// element to the corresponding pooling cell.
const int64_t in_max_row_index = in_rows - 1;
const int64_t in_max_col_index = in_cols - 1;
for (int64_t b = 0; b < out_batch; ++b) {
for (int64_t r = 0; r < out_rows; ++r) {
const int64_t in_row_start = row_seq_tensor_flat(r);
int64_t in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1)
: row_seq_tensor_flat(r + 1) - 1;
in_row_end = std::min(in_row_end, in_max_row_index);
for (int64_t c = 0; c < out_cols; ++c) {
const int64_t in_col_start = col_seq_tensor_flat(c);
int64_t in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1)
: col_seq_tensor_flat(c + 1) - 1;
in_col_end = std::min(in_col_end, in_max_col_index);
const int64_t num_elements_in_pooling_cell =
(in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1);
const int64_t out_index = (b * out_rows + r) * out_cols + c;
// Now we can evenly distribute out_backprop(b, h, w, *) to
// in_backprop(b, hs:he, ws:we, *).
for (int64_t in_r = in_row_start; in_r <= in_row_end; ++in_r) {
for (int64_t in_c = in_col_start; in_c <= in_col_end; ++in_c) {
const int64_t in_index = (b * in_rows + in_r) * in_cols + in_c;
// Walk through each channel (depth).
for (int64_t d = 0; d < out_depth; ++d) {
const double out_backprop_element = static_cast<double>(
out_backprop_mat.coeffRef(d, out_index));
double& in_backprop_ref =
in_backprop_tensor_temp_mat.coeffRef(d, in_index);
in_backprop_ref +=
out_backprop_element / num_elements_in_pooling_cell;
}
}
}
}
}
}
// Depending on the type, cast double to type T.
Tensor* in_backprop_tensor = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 0, in_shape, &in_backprop_tensor));
auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>();
auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>();
for (int64_t i = 0; i < in_backprop_tensor_flat.size(); ++i) {
in_backprop_tensor_flat(i) =
static_cast<T>(in_backprop_tensor_temp_flat(i));
}
}
| null | null | 195,343
|
317653769327632791074029336205336280427
| 141
|
Add negative bound check for row and column pooling_sequence in FractionalAvgPoolGrad op to avoid out of bound heap access
PiperOrigin-RevId: 413837346
Change-Id: I2b86034101df31bee161abcb781755e236c7bccd
|
other
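
The fix above adds a negative-bound check on row_pooling_sequence and
col_pooling_sequence so that pooling-cell start indices cannot address memory
before the input plane. The check in isolation, as a C sketch with
hypothetical bounds:

#include <stdint.h>
#include <stdio.h>

/* seq has out_len + 1 entries: starts for each output cell plus the end. */
static int check_pooling_seq(const int64_t *seq, int64_t out_len, int64_t in_max)
{
    for (int64_t i = 0; i <= out_len; i++) {
        if (seq[i] < 0 || seq[i] > in_max + 1)
            return -1;   /* would address memory before/after the input plane */
    }
    return 0;
}

int main(void)
{
    int64_t good[4] = {0, 2, 4, 6};
    int64_t bad[4]  = {0, -3, 4, 6};
    printf("%d %d\n", check_pooling_seq(good, 3, 5),
           check_pooling_seq(bad, 3, 5)); /* 0 -1 */
    return 0;
}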
|
flatpak
|
65cbfac982cb1c83993a9e19aa424daee8e9f042
| 1
|
flatpak_dir_ensure_bundle_remote (FlatpakDir *self,
GFile *file,
GBytes *extra_gpg_data,
FlatpakDecomposed **out_ref,
char **out_checksum,
char **out_metadata,
gboolean *out_created_remote,
GCancellable *cancellable,
GError **error)
{
g_autoptr(FlatpakDecomposed) ref = NULL;
gboolean created_remote = FALSE;
g_autoptr(GBytes) deploy_data = NULL;
g_autoptr(GVariant) metadata = NULL;
g_autofree char *origin = NULL;
g_autofree char *fp_metadata = NULL;
g_autofree char *basename = NULL;
g_autoptr(GBytes) included_gpg_data = NULL;
GBytes *gpg_data = NULL;
g_autofree char *to_checksum = NULL;
g_autofree char *remote = NULL;
g_autofree char *collection_id = NULL;
if (!flatpak_dir_ensure_repo (self, cancellable, error))
return NULL;
metadata = flatpak_bundle_load (file, &to_checksum,
&ref,
&origin,
NULL, &fp_metadata, NULL,
&included_gpg_data,
&collection_id,
error);
if (metadata == NULL)
return NULL;
gpg_data = extra_gpg_data ? extra_gpg_data : included_gpg_data;
deploy_data = flatpak_dir_get_deploy_data (self, ref, FLATPAK_DEPLOY_VERSION_ANY, cancellable, NULL);
if (deploy_data != NULL)
{
remote = g_strdup (flatpak_deploy_data_get_origin (deploy_data));
/* We need to import any gpg keys because otherwise the pull will fail */
if (gpg_data != NULL)
{
g_autoptr(GKeyFile) new_config = NULL;
new_config = ostree_repo_copy_config (flatpak_dir_get_repo (self));
if (!flatpak_dir_modify_remote (self, remote, new_config,
gpg_data, cancellable, error))
return NULL;
}
}
else
{
g_autofree char *id = flatpak_decomposed_dup_id (ref);
/* Add a remote for later updates */
basename = g_file_get_basename (file);
remote = flatpak_dir_create_origin_remote (self,
origin,
id,
basename,
flatpak_decomposed_get_ref (ref),
gpg_data,
collection_id,
&created_remote,
cancellable,
error);
if (remote == NULL)
return NULL;
}
if (out_created_remote)
*out_created_remote = created_remote;
if (out_ref)
*out_ref = g_steal_pointer (&ref);
if (out_checksum)
*out_checksum = g_steal_pointer (&to_checksum);
if (out_metadata)
*out_metadata = g_steal_pointer (&fp_metadata);
return g_steal_pointer (&remote);
}
| null | null | 195,385
|
228799429357940261115009589207592722070
| 89
|
Ensure that bundles have metadata on install
If we have a bundle without metadata, we won't properly present
the permissions in the transaction.
|
other
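
The commit message above says the point of the change is that a bundle
without metadata must not slip through to install, since the transaction
could not show its permissions. A minimal sketch of such a guard
(hypothetical helper, not Flatpak's API):

#include <stdio.h>

static int ensure_bundle_metadata(const char *metadata)
{
    if (metadata == NULL || metadata[0] == '\0') {
        /* refuse early: permissions must be presentable before install */
        fprintf(stderr, "bundle has no metadata; refusing to install\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", ensure_bundle_metadata("[Application]\nname=org.example.App\n"));
    printf("%d\n", ensure_bundle_metadata(NULL));
    return 0;
}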
|
postgres
|
160c0258802d10b0600d7671b1bbea55d8e17d45
| 1
|
PQconnectPoll(PGconn *conn)
{
bool reset_connection_state_machine = false;
bool need_new_connection = false;
PGresult *res;
char sebuf[PG_STRERROR_R_BUFLEN];
int optval;
if (conn == NULL)
return PGRES_POLLING_FAILED;
/* Get the new data */
switch (conn->status)
{
/*
* We really shouldn't have been polled in these two cases, but we
* can handle it.
*/
case CONNECTION_BAD:
return PGRES_POLLING_FAILED;
case CONNECTION_OK:
return PGRES_POLLING_OK;
/* These are reading states */
case CONNECTION_AWAITING_RESPONSE:
case CONNECTION_AUTH_OK:
case CONNECTION_CHECK_WRITABLE:
case CONNECTION_CONSUME:
case CONNECTION_CHECK_STANDBY:
{
/* Load waiting data */
int n = pqReadData(conn);
if (n < 0)
goto error_return;
if (n == 0)
return PGRES_POLLING_READING;
break;
}
/* These are writing states, so we just proceed. */
case CONNECTION_STARTED:
case CONNECTION_MADE:
break;
/* Special cases: proceed without waiting. */
case CONNECTION_SSL_STARTUP:
case CONNECTION_NEEDED:
case CONNECTION_GSS_STARTUP:
case CONNECTION_CHECK_TARGET:
break;
default:
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("invalid connection state, probably indicative of memory corruption\n"));
goto error_return;
}
keep_going: /* We will come back to here until there is
* nothing left to do. */
/* Time to advance to next address, or next host if no more addresses? */
if (conn->try_next_addr)
{
if (conn->addr_cur && conn->addr_cur->ai_next)
{
conn->addr_cur = conn->addr_cur->ai_next;
reset_connection_state_machine = true;
}
else
conn->try_next_host = true;
conn->try_next_addr = false;
}
/* Time to advance to next connhost[] entry? */
if (conn->try_next_host)
{
pg_conn_host *ch;
struct addrinfo hint;
int thisport;
int ret;
char portstr[MAXPGPATH];
if (conn->whichhost + 1 < conn->nconnhost)
conn->whichhost++;
else
{
/*
* Oops, no more hosts.
*
* If we are trying to connect in "prefer-standby" mode, then drop
* the standby requirement and start over.
*
* Otherwise, an appropriate error message is already set up, so
* we just need to set the right status.
*/
if (conn->target_server_type == SERVER_TYPE_PREFER_STANDBY &&
conn->nconnhost > 0)
{
conn->target_server_type = SERVER_TYPE_PREFER_STANDBY_PASS2;
conn->whichhost = 0;
}
else
goto error_return;
}
/* Drop any address info for previous host */
release_conn_addrinfo(conn);
/*
* Look up info for the new host. On failure, log the problem in
* conn->errorMessage, then loop around to try the next host. (Note
* we don't clear try_next_host until we've succeeded.)
*/
ch = &conn->connhost[conn->whichhost];
/* Initialize hint structure */
MemSet(&hint, 0, sizeof(hint));
hint.ai_socktype = SOCK_STREAM;
conn->addrlist_family = hint.ai_family = AF_UNSPEC;
/* Figure out the port number we're going to use. */
if (ch->port == NULL || ch->port[0] == '\0')
thisport = DEF_PGPORT;
else
{
if (!parse_int_param(ch->port, &thisport, conn, "port"))
goto error_return;
if (thisport < 1 || thisport > 65535)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("invalid port number: \"%s\"\n"),
ch->port);
goto keep_going;
}
}
snprintf(portstr, sizeof(portstr), "%d", thisport);
/* Use pg_getaddrinfo_all() to resolve the address */
switch (ch->type)
{
case CHT_HOST_NAME:
ret = pg_getaddrinfo_all(ch->host, portstr, &hint,
&conn->addrlist);
if (ret || !conn->addrlist)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not translate host name \"%s\" to address: %s\n"),
ch->host, gai_strerror(ret));
goto keep_going;
}
break;
case CHT_HOST_ADDRESS:
hint.ai_flags = AI_NUMERICHOST;
ret = pg_getaddrinfo_all(ch->hostaddr, portstr, &hint,
&conn->addrlist);
if (ret || !conn->addrlist)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not parse network address \"%s\": %s\n"),
ch->hostaddr, gai_strerror(ret));
goto keep_going;
}
break;
case CHT_UNIX_SOCKET:
#ifdef HAVE_UNIX_SOCKETS
conn->addrlist_family = hint.ai_family = AF_UNIX;
UNIXSOCK_PATH(portstr, thisport, ch->host);
if (strlen(portstr) >= UNIXSOCK_PATH_BUFLEN)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"),
portstr,
(int) (UNIXSOCK_PATH_BUFLEN - 1));
goto keep_going;
}
/*
* NULL hostname tells pg_getaddrinfo_all to parse the service
* name as a Unix-domain socket path.
*/
ret = pg_getaddrinfo_all(NULL, portstr, &hint,
&conn->addrlist);
if (ret || !conn->addrlist)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"),
portstr, gai_strerror(ret));
goto keep_going;
}
#else
Assert(false);
#endif
break;
}
/* OK, scan this addrlist for a working server address */
conn->addr_cur = conn->addrlist;
reset_connection_state_machine = true;
conn->try_next_host = false;
}
/* Reset connection state machine? */
if (reset_connection_state_machine)
{
/*
* (Re) initialize our connection control variables for a set of
* connection attempts to a single server address. These variables
* must persist across individual connection attempts, but we must
* reset them when we start to consider a new server.
*/
conn->pversion = PG_PROTOCOL(3, 0);
conn->send_appname = true;
#ifdef USE_SSL
/* initialize these values based on SSL mode */
conn->allow_ssl_try = (conn->sslmode[0] != 'd'); /* "disable" */
conn->wait_ssl_try = (conn->sslmode[0] == 'a'); /* "allow" */
#endif
#ifdef ENABLE_GSS
conn->try_gss = (conn->gssencmode[0] != 'd'); /* "disable" */
#endif
reset_connection_state_machine = false;
need_new_connection = true;
}
/* Force a new connection (perhaps to the same server as before)? */
if (need_new_connection)
{
/* Drop any existing connection */
pqDropConnection(conn, true);
/* Reset all state obtained from old server */
pqDropServerData(conn);
/* Drop any PGresult we might have, too */
conn->asyncStatus = PGASYNC_IDLE;
conn->xactStatus = PQTRANS_IDLE;
conn->pipelineStatus = PQ_PIPELINE_OFF;
pqClearAsyncResult(conn);
/* Reset conn->status to put the state machine in the right state */
conn->status = CONNECTION_NEEDED;
need_new_connection = false;
}
/* Now try to advance the state machine for this connection */
switch (conn->status)
{
case CONNECTION_NEEDED:
{
/*
* Try to initiate a connection to one of the addresses
* returned by pg_getaddrinfo_all(). conn->addr_cur is the
* next one to try.
*
* The extra level of braces here is historical. It's not
* worth reindenting this whole switch case to remove 'em.
*/
{
struct addrinfo *addr_cur = conn->addr_cur;
char host_addr[NI_MAXHOST];
/*
* Advance to next possible host, if we've tried all of
* the addresses for the current host.
*/
if (addr_cur == NULL)
{
conn->try_next_host = true;
goto keep_going;
}
/* Remember current address for possible use later */
memcpy(&conn->raddr.addr, addr_cur->ai_addr,
addr_cur->ai_addrlen);
conn->raddr.salen = addr_cur->ai_addrlen;
/*
* Set connip, too. Note we purposely ignore strdup
* failure; not a big problem if it fails.
*/
if (conn->connip != NULL)
{
free(conn->connip);
conn->connip = NULL;
}
getHostaddr(conn, host_addr, NI_MAXHOST);
if (host_addr[0])
conn->connip = strdup(host_addr);
/* Try to create the socket */
conn->sock = socket(addr_cur->ai_family, SOCK_STREAM, 0);
if (conn->sock == PGINVALID_SOCKET)
{
int errorno = SOCK_ERRNO;
/*
* Silently ignore socket() failure if we have more
* addresses to try; this reduces useless chatter in
* cases where the address list includes both IPv4 and
* IPv6 but kernel only accepts one family.
*/
if (addr_cur->ai_next != NULL ||
conn->whichhost + 1 < conn->nconnhost)
{
conn->try_next_addr = true;
goto keep_going;
}
emitHostIdentityInfo(conn, host_addr);
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not create socket: %s\n"),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)));
goto error_return;
}
/*
* Once we've identified a target address, all errors
* except the preceding socket()-failure case should be
* prefixed with host-identity information. (If the
* connection succeeds, the contents of conn->errorMessage
* won't matter, so this is harmless.)
*/
emitHostIdentityInfo(conn, host_addr);
/*
* Select socket options: no delay of outgoing data for
* TCP sockets, nonblock mode, close-on-exec. Try the
* next address if any of this fails.
*/
if (!IS_AF_UNIX(addr_cur->ai_family))
{
if (!connectNoDelay(conn))
{
/* error message already created */
conn->try_next_addr = true;
goto keep_going;
}
}
if (!pg_set_noblock(conn->sock))
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not set socket to nonblocking mode: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
conn->try_next_addr = true;
goto keep_going;
}
#ifdef F_SETFD
if (fcntl(conn->sock, F_SETFD, FD_CLOEXEC) == -1)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not set socket to close-on-exec mode: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
conn->try_next_addr = true;
goto keep_going;
}
#endif /* F_SETFD */
if (!IS_AF_UNIX(addr_cur->ai_family))
{
#ifndef WIN32
int on = 1;
#endif
int usekeepalives = useKeepalives(conn);
int err = 0;
if (usekeepalives < 0)
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("keepalives parameter must be an integer\n"));
err = 1;
}
else if (usekeepalives == 0)
{
/* Do nothing */
}
#ifndef WIN32
else if (setsockopt(conn->sock,
SOL_SOCKET, SO_KEEPALIVE,
(char *) &on, sizeof(on)) < 0)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("%s(%s) failed: %s\n"),
"setsockopt",
"SO_KEEPALIVE",
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
err = 1;
}
else if (!setKeepalivesIdle(conn)
|| !setKeepalivesInterval(conn)
|| !setKeepalivesCount(conn))
err = 1;
#else /* WIN32 */
#ifdef SIO_KEEPALIVE_VALS
else if (!setKeepalivesWin32(conn))
err = 1;
#endif /* SIO_KEEPALIVE_VALS */
#endif /* WIN32 */
else if (!setTCPUserTimeout(conn))
err = 1;
if (err)
{
conn->try_next_addr = true;
goto keep_going;
}
}
/*----------
* We have three methods of blocking SIGPIPE during
* send() calls to this socket:
*
* - setsockopt(sock, SO_NOSIGPIPE)
* - send(sock, ..., MSG_NOSIGNAL)
* - setting the signal mask to SIG_IGN during send()
*
* The third method requires three syscalls per send,
* so we prefer either of the first two, but they are
* less portable. The state is tracked in the following
* members of PGconn:
*
* conn->sigpipe_so - we have set up SO_NOSIGPIPE
* conn->sigpipe_flag - we're specifying MSG_NOSIGNAL
*
* If we can use SO_NOSIGPIPE, then set sigpipe_so here
* and we're done. Otherwise, set sigpipe_flag so that
* we will try MSG_NOSIGNAL on sends. If we get an error
* with MSG_NOSIGNAL, we'll clear that flag and revert to
* signal masking.
*----------
*/
conn->sigpipe_so = false;
#ifdef MSG_NOSIGNAL
conn->sigpipe_flag = true;
#else
conn->sigpipe_flag = false;
#endif /* MSG_NOSIGNAL */
#ifdef SO_NOSIGPIPE
optval = 1;
if (setsockopt(conn->sock, SOL_SOCKET, SO_NOSIGPIPE,
(char *) &optval, sizeof(optval)) == 0)
{
conn->sigpipe_so = true;
conn->sigpipe_flag = false;
}
#endif /* SO_NOSIGPIPE */
/*
* Start/make connection. This should not block, since we
* are in nonblock mode. If it does, well, too bad.
*/
if (connect(conn->sock, addr_cur->ai_addr,
addr_cur->ai_addrlen) < 0)
{
if (SOCK_ERRNO == EINPROGRESS ||
#ifdef WIN32
SOCK_ERRNO == EWOULDBLOCK ||
#endif
SOCK_ERRNO == EINTR)
{
/*
* This is fine - we're in non-blocking mode, and
* the connection is in progress. Tell caller to
* wait for write-ready on socket.
*/
conn->status = CONNECTION_STARTED;
return PGRES_POLLING_WRITING;
}
/* otherwise, trouble */
}
else
{
/*
* Hm, we're connected already --- seems the "nonblock
* connection" wasn't. Advance the state machine and
* go do the next stuff.
*/
conn->status = CONNECTION_STARTED;
goto keep_going;
}
/*
* This connection failed. Add the error report to
* conn->errorMessage, then try the next address if any.
*/
connectFailureMessage(conn, SOCK_ERRNO);
conn->try_next_addr = true;
goto keep_going;
}
}
case CONNECTION_STARTED:
{
ACCEPT_TYPE_ARG3 optlen = sizeof(optval);
/*
* Write ready, since we've made it here, so the connection
* has been made ... or has failed.
*/
/*
* Now check (using getsockopt) that there is not an error
* state waiting for us on the socket.
*/
if (getsockopt(conn->sock, SOL_SOCKET, SO_ERROR,
(char *) &optval, &optlen) == -1)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get socket error status: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
else if (optval != 0)
{
/*
* When using a nonblocking connect, we will typically see
* connect failures at this point, so provide a friendly
* error message.
*/
connectFailureMessage(conn, optval);
/*
* Try the next address if any, just as in the case where
* connect() returned failure immediately.
*/
conn->try_next_addr = true;
goto keep_going;
}
/* Fill in the client address */
conn->laddr.salen = sizeof(conn->laddr.addr);
if (getsockname(conn->sock,
(struct sockaddr *) &conn->laddr.addr,
&conn->laddr.salen) < 0)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get client address from socket: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/*
* Make sure we can write before advancing to next step.
*/
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
case CONNECTION_MADE:
{
char *startpacket;
int packetlen;
/*
* Implement requirepeer check, if requested and it's a
* Unix-domain socket.
*/
if (conn->requirepeer && conn->requirepeer[0] &&
IS_AF_UNIX(conn->raddr.addr.ss_family))
{
#ifndef WIN32
char pwdbuf[BUFSIZ];
struct passwd pass_buf;
struct passwd *pass;
int passerr;
#endif
uid_t uid;
gid_t gid;
errno = 0;
if (getpeereid(conn->sock, &uid, &gid) != 0)
{
/*
* Provide special error message if getpeereid is a
* stub
*/
if (errno == ENOSYS)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("requirepeer parameter is not supported on this platform\n"));
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get peer credentials: %s\n"),
strerror_r(errno, sebuf, sizeof(sebuf)));
goto error_return;
}
#ifndef WIN32
passerr = pqGetpwuid(uid, &pass_buf, pwdbuf, sizeof(pwdbuf), &pass);
if (pass == NULL)
{
if (passerr != 0)
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not look up local user ID %d: %s\n"),
(int) uid,
strerror_r(passerr, sebuf, sizeof(sebuf)));
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("local user with ID %d does not exist\n"),
(int) uid);
goto error_return;
}
if (strcmp(pass->pw_name, conn->requirepeer) != 0)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n"),
conn->requirepeer, pass->pw_name);
goto error_return;
}
#else /* WIN32 */
/* should have failed with ENOSYS above */
Assert(false);
#endif /* WIN32 */
}
if (IS_AF_UNIX(conn->raddr.addr.ss_family))
{
/* Don't request SSL or GSSAPI over Unix sockets */
#ifdef USE_SSL
conn->allow_ssl_try = false;
#endif
#ifdef ENABLE_GSS
conn->try_gss = false;
#endif
}
#ifdef ENABLE_GSS
/*
* If GSSAPI encryption is enabled, then call
* pg_GSS_have_cred_cache() which will return true if we can
* acquire credentials (and give us a handle to use in
* conn->gcred), and then send a packet to the server asking
* for GSSAPI Encryption (and skip past SSL negotiation and
* regular startup below).
*/
if (conn->try_gss && !conn->gctx)
conn->try_gss = pg_GSS_have_cred_cache(&conn->gcred);
if (conn->try_gss && !conn->gctx)
{
ProtocolVersion pv = pg_hton32(NEGOTIATE_GSS_CODE);
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send GSSAPI negotiation packet: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/* Ok, wait for response */
conn->status = CONNECTION_GSS_STARTUP;
return PGRES_POLLING_READING;
}
else if (!conn->gctx && conn->gssencmode[0] == 'r')
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("GSSAPI encryption required but was impossible (possibly no credential cache, no server support, or using a local socket)\n"));
goto error_return;
}
#endif
#ifdef USE_SSL
/*
* Enable the libcrypto callbacks before checking if SSL needs
* to be done. This is done before sending the startup packet
* as depending on the type of authentication done, like MD5
* or SCRAM that use cryptohashes, the callbacks would be
* required even without a SSL connection
*/
if (pqsecure_initialize(conn, false, true) < 0)
goto error_return;
/*
* If SSL is enabled and we haven't already got encryption of
* some sort running, request SSL instead of sending the
* startup message.
*/
if (conn->allow_ssl_try && !conn->wait_ssl_try &&
!conn->ssl_in_use
#ifdef ENABLE_GSS
&& !conn->gssenc
#endif
)
{
ProtocolVersion pv;
/*
* Send the SSL request packet.
*
* Theoretically, this could block, but it really
* shouldn't since we only got here if the socket is
* write-ready.
*/
pv = pg_hton32(NEGOTIATE_SSL_CODE);
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send SSL negotiation packet: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/* Ok, wait for response */
conn->status = CONNECTION_SSL_STARTUP;
return PGRES_POLLING_READING;
}
#endif /* USE_SSL */
/*
* Build the startup packet.
*/
startpacket = pqBuildStartupPacket3(conn, &packetlen,
EnvironmentOptions);
if (!startpacket)
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("out of memory\n"));
goto error_return;
}
/*
* Send the startup packet.
*
* Theoretically, this could block, but it really shouldn't
* since we only got here if the socket is write-ready.
*/
if (pqPacketSend(conn, 0, startpacket, packetlen) != STATUS_OK)
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send startup packet: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
free(startpacket);
goto error_return;
}
free(startpacket);
conn->status = CONNECTION_AWAITING_RESPONSE;
return PGRES_POLLING_READING;
}
/*
* Handle SSL negotiation: wait for postmaster messages and
* respond as necessary.
*/
case CONNECTION_SSL_STARTUP:
{
#ifdef USE_SSL
PostgresPollingStatusType pollres;
/*
* On first time through, get the postmaster's response to our
* SSL negotiation packet.
*/
if (!conn->ssl_in_use)
{
/*
* We use pqReadData here since it has the logic to
* distinguish no-data-yet from connection closure. Since
* conn->ssl isn't set, a plain recv() will occur.
*/
char SSLok;
int rdresult;
rdresult = pqReadData(conn);
if (rdresult < 0)
{
/* errorMessage is already filled in */
goto error_return;
}
if (rdresult == 0)
{
/* caller failed to wait for data */
return PGRES_POLLING_READING;
}
if (pqGetc(&SSLok, conn) < 0)
{
/* should not happen really */
return PGRES_POLLING_READING;
}
if (SSLok == 'S')
{
/* mark byte consumed */
conn->inStart = conn->inCursor;
/*
* Set up global SSL state if required. The crypto
* state has already been set if libpq took care of
* doing that, so there is no need to make that happen
* again.
*/
if (pqsecure_initialize(conn, true, false) != 0)
goto error_return;
}
else if (SSLok == 'N')
{
/* mark byte consumed */
conn->inStart = conn->inCursor;
/* OK to do without SSL? */
if (conn->sslmode[0] == 'r' || /* "require" */
conn->sslmode[0] == 'v') /* "verify-ca" or
* "verify-full" */
{
/* Require SSL, but server does not want it */
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server does not support SSL, but SSL was required\n"));
goto error_return;
}
/* Otherwise, proceed with normal startup */
conn->allow_ssl_try = false;
/* We can proceed using this connection */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
else if (SSLok == 'E')
{
/*
* Server failure of some sort, such as failure to
* fork a backend process. We need to process and
* report the error message, which might be formatted
* according to either protocol 2 or protocol 3.
* Rather than duplicate the code for that, we flip
* into AWAITING_RESPONSE state and let the code there
* deal with it. Note we have *not* consumed the "E"
* byte here.
*/
conn->status = CONNECTION_AWAITING_RESPONSE;
goto keep_going;
}
else
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("received invalid response to SSL negotiation: %c\n"),
SSLok);
goto error_return;
}
}
/*
* Begin or continue the SSL negotiation process.
*/
pollres = pqsecure_open_client(conn);
if (pollres == PGRES_POLLING_OK)
{
/* SSL handshake done, ready to send startup packet */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
if (pollres == PGRES_POLLING_FAILED)
{
/*
* Failed ... if sslmode is "prefer" then do a non-SSL
* retry
*/
if (conn->sslmode[0] == 'p' /* "prefer" */
&& conn->allow_ssl_try /* redundant? */
&& !conn->wait_ssl_try) /* redundant? */
{
/* only retry once */
conn->allow_ssl_try = false;
need_new_connection = true;
goto keep_going;
}
/* Else it's a hard failure */
goto error_return;
}
/* Else, return POLLING_READING or POLLING_WRITING status */
return pollres;
#else /* !USE_SSL */
/* can't get here */
goto error_return;
#endif /* USE_SSL */
}
case CONNECTION_GSS_STARTUP:
{
#ifdef ENABLE_GSS
PostgresPollingStatusType pollres;
/*
* If we haven't yet, get the postmaster's response to our
* negotiation packet
*/
if (conn->try_gss && !conn->gctx)
{
char gss_ok;
int rdresult = pqReadData(conn);
if (rdresult < 0)
/* pqReadData fills in error message */
goto error_return;
else if (rdresult == 0)
/* caller failed to wait for data */
return PGRES_POLLING_READING;
if (pqGetc(&gss_ok, conn) < 0)
/* shouldn't happen... */
return PGRES_POLLING_READING;
if (gss_ok == 'E')
{
/*
* Server failure of some sort. Assume it's a
* protocol version support failure, and let's see if
* we can't recover (if it's not, we'll get a better
* error message on retry). Server gets fussy if we
* don't hang up the socket, though.
*/
conn->try_gss = false;
need_new_connection = true;
goto keep_going;
}
/* mark byte consumed */
conn->inStart = conn->inCursor;
if (gss_ok == 'N')
{
/* Server doesn't want GSSAPI; fall back if we can */
if (conn->gssencmode[0] == 'r')
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server doesn't support GSSAPI encryption, but it was required\n"));
goto error_return;
}
conn->try_gss = false;
/* We can proceed using this connection */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
else if (gss_ok != 'G')
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("received invalid response to GSSAPI negotiation: %c\n"),
gss_ok);
goto error_return;
}
}
/* Begin or continue GSSAPI negotiation */
pollres = pqsecure_open_gss(conn);
if (pollres == PGRES_POLLING_OK)
{
/* All set for startup packet */
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
else if (pollres == PGRES_POLLING_FAILED &&
conn->gssencmode[0] == 'p')
{
/*
* We failed, but we can retry on "prefer". Have to drop
* the current connection to do so, though.
*/
conn->try_gss = false;
need_new_connection = true;
goto keep_going;
}
return pollres;
#else /* !ENABLE_GSS */
/* unreachable */
goto error_return;
#endif /* ENABLE_GSS */
}
/*
* Handle authentication exchange: wait for postmaster messages
* and respond as necessary.
*/
case CONNECTION_AWAITING_RESPONSE:
{
char beresp;
int msgLength;
int avail;
AuthRequest areq;
int res;
/*
* Scan the message from current point (note that if we find
* the message is incomplete, we will return without advancing
* inStart, and resume here next time).
*/
conn->inCursor = conn->inStart;
/* Read type byte */
if (pqGetc(&beresp, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/*
* Validate message type: we expect only an authentication
* request or an error here. Anything else probably means
* it's not Postgres on the other end at all.
*/
if (!(beresp == 'R' || beresp == 'E'))
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("expected authentication request from server, but received %c\n"),
beresp);
goto error_return;
}
/* Read message length word */
if (pqGetInt(&msgLength, 4, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/*
* Try to validate message length before using it.
* Authentication requests can't be very large, although GSS
* auth requests may not be that small. Errors can be a
* little larger, but not huge. If we see a large apparent
* length in an error, it means we're really talking to a
* pre-3.0-protocol server; cope. (Before version 14, the
* server also used the old protocol for errors that happened
* before processing the startup packet.)
*/
if (beresp == 'R' && (msgLength < 8 || msgLength > 2000))
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("expected authentication request from server, but received %c\n"),
beresp);
goto error_return;
}
if (beresp == 'E' && (msgLength < 8 || msgLength > 30000))
{
/* Handle error from a pre-3.0 server */
conn->inCursor = conn->inStart + 1; /* reread data */
if (pqGets_append(&conn->errorMessage, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/* OK, we read the message; mark data consumed */
conn->inStart = conn->inCursor;
/*
* Before 7.2, the postmaster didn't always end its
* messages with a newline, so add one if needed to
* conform to libpq conventions.
*/
if (conn->errorMessage.len == 0 ||
conn->errorMessage.data[conn->errorMessage.len - 1] != '\n')
{
appendPQExpBufferChar(&conn->errorMessage, '\n');
}
goto error_return;
}
/*
* Can't process if message body isn't all here yet.
*/
msgLength -= 4;
avail = conn->inEnd - conn->inCursor;
if (avail < msgLength)
{
/*
* Before returning, try to enlarge the input buffer if
* needed to hold the whole message; see notes in
* pqParseInput3.
*/
if (pqCheckInBufferSpace(conn->inCursor + (size_t) msgLength,
conn))
goto error_return;
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/* Handle errors. */
if (beresp == 'E')
{
if (pqGetErrorNotice3(conn, true))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
/* OK, we read the message; mark data consumed */
conn->inStart = conn->inCursor;
/*
* If error is "cannot connect now", try the next host if
* any (but we don't want to consider additional addresses
* for this host, nor is there much point in changing SSL
* or GSS mode). This is helpful when dealing with
* standby servers that might not be in hot-standby state.
*/
if (strcmp(conn->last_sqlstate,
ERRCODE_CANNOT_CONNECT_NOW) == 0)
{
conn->try_next_host = true;
goto keep_going;
}
/* Check to see if we should mention pgpassfile */
pgpassfileWarning(conn);
#ifdef ENABLE_GSS
/*
* If gssencmode is "prefer" and we're using GSSAPI, retry
* without it.
*/
if (conn->gssenc && conn->gssencmode[0] == 'p')
{
/* only retry once */
conn->try_gss = false;
need_new_connection = true;
goto keep_going;
}
#endif
#ifdef USE_SSL
/*
* if sslmode is "allow" and we haven't tried an SSL
* connection already, then retry with an SSL connection
*/
if (conn->sslmode[0] == 'a' /* "allow" */
&& !conn->ssl_in_use
&& conn->allow_ssl_try
&& conn->wait_ssl_try)
{
/* only retry once */
conn->wait_ssl_try = false;
need_new_connection = true;
goto keep_going;
}
/*
* if sslmode is "prefer" and we're in an SSL connection,
* then do a non-SSL retry
*/
if (conn->sslmode[0] == 'p' /* "prefer" */
&& conn->ssl_in_use
&& conn->allow_ssl_try /* redundant? */
&& !conn->wait_ssl_try) /* redundant? */
{
/* only retry once */
conn->allow_ssl_try = false;
need_new_connection = true;
goto keep_going;
}
#endif
goto error_return;
}
/* It is an authentication request. */
conn->auth_req_received = true;
/* Get the type of request. */
if (pqGetInt((int *) &areq, 4, conn))
{
/* We'll come back when there is more data */
return PGRES_POLLING_READING;
}
msgLength -= 4;
/*
* Process the rest of the authentication request message, and
* respond to it if necessary.
*
* Note that conn->pghost must be non-NULL if we are going to
* avoid the Kerberos code doing a hostname look-up.
*/
res = pg_fe_sendauth(areq, msgLength, conn);
/* OK, we have processed the message; mark data consumed */
conn->inStart = conn->inCursor;
if (res != STATUS_OK)
goto error_return;
/*
* Just make sure that any data sent by pg_fe_sendauth is
* flushed out. Although this theoretically could block, it
* really shouldn't since we don't send large auth responses.
*/
if (pqFlush(conn))
goto error_return;
if (areq == AUTH_REQ_OK)
{
/* We are done with authentication exchange */
conn->status = CONNECTION_AUTH_OK;
/*
* Set asyncStatus so that PQgetResult will think that
* what comes back next is the result of a query. See
* below.
*/
conn->asyncStatus = PGASYNC_BUSY;
}
/* Look to see if we have more data yet. */
goto keep_going;
}
case CONNECTION_AUTH_OK:
{
/*
* Now we expect to hear from the backend. A ReadyForQuery
* message indicates that startup is successful, but we might
* also get an Error message indicating failure. (Notice
* messages indicating nonfatal warnings are also allowed by
* the protocol, as are ParameterStatus and BackendKeyData
* messages.) Easiest way to handle this is to let
* PQgetResult() read the messages. We just have to fake it
* out about the state of the connection, by setting
* asyncStatus = PGASYNC_BUSY (done above).
*/
if (PQisBusy(conn))
return PGRES_POLLING_READING;
res = PQgetResult(conn);
/*
* NULL return indicating we have gone to IDLE state is
* expected
*/
if (res)
{
if (res->resultStatus != PGRES_FATAL_ERROR)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("unexpected message from server during startup\n"));
else if (conn->send_appname &&
(conn->appname || conn->fbappname))
{
/*
* If we tried to send application_name, check to see
* if the error is about that --- pre-9.0 servers will
* reject it at this stage of the process. If so,
* close the connection and retry without sending
* application_name. We could possibly get a false
* SQLSTATE match here and retry uselessly, but there
* seems no great harm in that; we'll just get the
* same error again if it's unrelated.
*/
const char *sqlstate;
sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
if (sqlstate &&
strcmp(sqlstate, ERRCODE_APPNAME_UNKNOWN) == 0)
{
PQclear(res);
conn->send_appname = false;
need_new_connection = true;
goto keep_going;
}
}
/*
* if the resultStatus is FATAL, then conn->errorMessage
* already has a copy of the error; needn't copy it back.
* But add a newline if it's not there already, since
* postmaster error messages may not have one.
*/
if (conn->errorMessage.len <= 0 ||
conn->errorMessage.data[conn->errorMessage.len - 1] != '\n')
appendPQExpBufferChar(&conn->errorMessage, '\n');
PQclear(res);
goto error_return;
}
/* Almost there now ... */
conn->status = CONNECTION_CHECK_TARGET;
goto keep_going;
}
case CONNECTION_CHECK_TARGET:
{
/*
* If a read-write, read-only, primary, or standby connection
* is required, see if we have one.
*/
if (conn->target_server_type == SERVER_TYPE_READ_WRITE ||
conn->target_server_type == SERVER_TYPE_READ_ONLY)
{
bool read_only_server;
/*
* If the server didn't report
* "default_transaction_read_only" or "in_hot_standby" at
* startup, we must determine its state by sending the
* query "SHOW transaction_read_only". This GUC exists in
* all server versions that support 3.0 protocol.
*/
if (conn->default_transaction_read_only == PG_BOOL_UNKNOWN ||
conn->in_hot_standby == PG_BOOL_UNKNOWN)
{
/*
* We use PQsendQueryContinue so that
* conn->errorMessage does not get cleared. We need
* to preserve any error messages related to previous
* hosts we have tried and failed to connect to.
*/
conn->status = CONNECTION_OK;
if (!PQsendQueryContinue(conn,
"SHOW transaction_read_only"))
goto error_return;
/* We'll return to this state when we have the answer */
conn->status = CONNECTION_CHECK_WRITABLE;
return PGRES_POLLING_READING;
}
/* OK, we can make the test */
read_only_server =
(conn->default_transaction_read_only == PG_BOOL_YES ||
conn->in_hot_standby == PG_BOOL_YES);
if ((conn->target_server_type == SERVER_TYPE_READ_WRITE) ?
read_only_server : !read_only_server)
{
/* Wrong server state, reject and try the next host */
if (conn->target_server_type == SERVER_TYPE_READ_WRITE)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("session is read-only\n"));
else
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("session is not read-only\n"));
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/*
* Try next host if any, but we don't want to consider
* additional addresses for this host.
*/
conn->try_next_host = true;
goto keep_going;
}
}
else if (conn->target_server_type == SERVER_TYPE_PRIMARY ||
conn->target_server_type == SERVER_TYPE_STANDBY ||
conn->target_server_type == SERVER_TYPE_PREFER_STANDBY)
{
/*
* If the server didn't report "in_hot_standby" at
* startup, we must determine its state by sending the
* query "SELECT pg_catalog.pg_is_in_recovery()". Servers
* before 9.0 don't have that function, but by the same
* token they don't have any standby mode, so we may just
* assume the result.
*/
if (conn->sversion < 90000)
conn->in_hot_standby = PG_BOOL_NO;
if (conn->in_hot_standby == PG_BOOL_UNKNOWN)
{
/*
* We use PQsendQueryContinue so that
* conn->errorMessage does not get cleared. We need
* to preserve any error messages related to previous
* hosts we have tried and failed to connect to.
*/
conn->status = CONNECTION_OK;
if (!PQsendQueryContinue(conn,
"SELECT pg_catalog.pg_is_in_recovery()"))
goto error_return;
/* We'll return to this state when we have the answer */
conn->status = CONNECTION_CHECK_STANDBY;
return PGRES_POLLING_READING;
}
/* OK, we can make the test */
if ((conn->target_server_type == SERVER_TYPE_PRIMARY) ?
(conn->in_hot_standby == PG_BOOL_YES) :
(conn->in_hot_standby == PG_BOOL_NO))
{
/* Wrong server state, reject and try the next host */
if (conn->target_server_type == SERVER_TYPE_PRIMARY)
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server is in hot standby mode\n"));
else
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("server is not in hot standby mode\n"));
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/*
* Try next host if any, but we don't want to consider
* additional addresses for this host.
*/
conn->try_next_host = true;
goto keep_going;
}
}
/* We can release the address list now. */
release_conn_addrinfo(conn);
/*
* Contents of conn->errorMessage are no longer interesting
* (and it seems some clients expect it to be empty after a
* successful connection).
*/
resetPQExpBuffer(&conn->errorMessage);
/* We are open for business! */
conn->status = CONNECTION_OK;
return PGRES_POLLING_OK;
}
case CONNECTION_CONSUME:
{
/*
* This state just makes sure the connection is idle after
* we've obtained the result of a SHOW or SELECT query. Once
* we're clear, return to CONNECTION_CHECK_TARGET state to
* decide what to do next. We must transiently set status =
* CONNECTION_OK in order to use the result-consuming
* subroutines.
*/
conn->status = CONNECTION_OK;
if (!PQconsumeInput(conn))
goto error_return;
if (PQisBusy(conn))
{
conn->status = CONNECTION_CONSUME;
return PGRES_POLLING_READING;
}
/* Call PQgetResult() again until we get a NULL result */
res = PQgetResult(conn);
if (res != NULL)
{
PQclear(res);
conn->status = CONNECTION_CONSUME;
return PGRES_POLLING_READING;
}
conn->status = CONNECTION_CHECK_TARGET;
goto keep_going;
}
case CONNECTION_CHECK_WRITABLE:
{
/*
* Waiting for result of "SHOW transaction_read_only". We
* must transiently set status = CONNECTION_OK in order to use
* the result-consuming subroutines.
*/
conn->status = CONNECTION_OK;
if (!PQconsumeInput(conn))
goto error_return;
if (PQisBusy(conn))
{
conn->status = CONNECTION_CHECK_WRITABLE;
return PGRES_POLLING_READING;
}
res = PQgetResult(conn);
if (res && PQresultStatus(res) == PGRES_TUPLES_OK &&
PQntuples(res) == 1)
{
char *val = PQgetvalue(res, 0, 0);
/*
* "transaction_read_only = on" proves that at least one
* of default_transaction_read_only and in_hot_standby is
* on, but we don't actually know which. We don't care
* though for the purpose of identifying a read-only
* session, so satisfy the CONNECTION_CHECK_TARGET code by
* claiming they are both on. On the other hand, if it's
* a read-write session, they are certainly both off.
*/
if (strncmp(val, "on", 2) == 0)
{
conn->default_transaction_read_only = PG_BOOL_YES;
conn->in_hot_standby = PG_BOOL_YES;
}
else
{
conn->default_transaction_read_only = PG_BOOL_NO;
conn->in_hot_standby = PG_BOOL_NO;
}
PQclear(res);
/* Finish reading messages before continuing */
conn->status = CONNECTION_CONSUME;
goto keep_going;
}
/* Something went wrong with "SHOW transaction_read_only". */
if (res)
PQclear(res);
/* Append error report to conn->errorMessage. */
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("\"%s\" failed\n"),
"SHOW transaction_read_only");
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/* Try next host. */
conn->try_next_host = true;
goto keep_going;
}
case CONNECTION_CHECK_STANDBY:
{
/*
* Waiting for result of "SELECT pg_is_in_recovery()". We
* must transiently set status = CONNECTION_OK in order to use
* the result-consuming subroutines.
*/
conn->status = CONNECTION_OK;
if (!PQconsumeInput(conn))
goto error_return;
if (PQisBusy(conn))
{
conn->status = CONNECTION_CHECK_STANDBY;
return PGRES_POLLING_READING;
}
res = PQgetResult(conn);
if (res && PQresultStatus(res) == PGRES_TUPLES_OK &&
PQntuples(res) == 1)
{
char *val = PQgetvalue(res, 0, 0);
if (strncmp(val, "t", 1) == 0)
conn->in_hot_standby = PG_BOOL_YES;
else
conn->in_hot_standby = PG_BOOL_NO;
PQclear(res);
/* Finish reading messages before continuing */
conn->status = CONNECTION_CONSUME;
goto keep_going;
}
/* Something went wrong with "SELECT pg_is_in_recovery()". */
if (res)
PQclear(res);
/* Append error report to conn->errorMessage. */
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("\"%s\" failed\n"),
"SELECT pg_is_in_recovery()");
/* Close connection politely. */
conn->status = CONNECTION_OK;
sendTerminateConn(conn);
/* Try next host. */
conn->try_next_host = true;
goto keep_going;
}
default:
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("invalid connection state %d, "
"probably indicative of memory corruption\n"),
conn->status);
goto error_return;
}
/* Unreachable */
error_return:
/*
* We used to close the socket at this point, but that makes it awkward
* for those above us if they wish to remove this socket from their own
* records (an fd_set for example). We'll just have this socket closed
* when PQfinish is called (which is compulsory even after an error, since
* the connection structure must be freed).
*/
conn->status = CONNECTION_BAD;
return PGRES_POLLING_FAILED;
}
| null | null | 195,388
|
137054181205669688597707960003696124857
| 1,597
|
libpq: reject extraneous data after SSL or GSS encryption handshake.
libpq collects up to a bufferload of data whenever it reads data from
the socket. When SSL or GSS encryption is requested during startup,
any additional data received with the server's yes-or-no reply
remained in the buffer, and would be treated as already-decrypted data
once the encryption handshake completed. Thus, a man-in-the-middle
with the ability to inject data into the TCP connection could stuff
some cleartext data into the start of a supposedly encryption-protected
database session.
This could probably be abused to inject faked responses to the
client's first few queries, although other details of libpq's behavior
make that harder than it sounds. A different line of attack is to
exfiltrate the client's password, or other sensitive data that might
be sent early in the session. That has been shown to be possible with
a server vulnerable to CVE-2021-23214.
To fix, throw a protocol-violation error if the internal buffer
is not empty after the encryption handshake.
Our thanks to Jacob Champion for reporting this problem.
Security: CVE-2021-23222
|
other
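A minimal sketch of the mitigation the message describes, in libpq's own style: once the server accepts the SSL (or GSS) request, any bytes already sitting in the input buffer were received before encryption started, so they must be rejected. Field and helper names follow the `PQconnectPoll` code above; the exact placement of the check is an assumption.

/* CVE-2021-23222 sketch: leftover buffered bytes at this point were
 * injected before the handshake completed, so treat them as a
 * protocol violation rather than as already-decrypted data. */
if (conn->inCursor != conn->inEnd)
{
    appendPQExpBufferStr(&conn->errorMessage,
                         libpq_gettext("received unencrypted data after SSL response\n"));
    goto error_return;
}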
|
tensorflow
|
c2b31ff2d3151acb230edc3f5b1832d2c713a9e0
| 1
|
bool RepeatedAttrDefEqual(
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1,
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) {
std::unordered_map<string, const OpDef::AttrDef*> a1_set;
for (const OpDef::AttrDef& def : a1) {
DCHECK(a1_set.find(def.name()) == a1_set.end())
<< "AttrDef names must be unique, but '" << def.name()
<< "' appears more than once";
a1_set[def.name()] = &def;
}
for (const OpDef::AttrDef& def : a2) {
auto iter = a1_set.find(def.name());
if (iter == a1_set.end()) return false;
if (!AttrDefEqual(*iter->second, def)) return false;
a1_set.erase(iter);
}
if (!a1_set.empty()) return false;
return true;
}
| null | null | 195,389
|
224872497264513344433480490903932145183
| 19
|
Remove a `DCHECK`-fail, log an error instead.
`DCHECK` in debug mode results in crashes. TensorFlow has had multiple vulnerabilities due to this.
Outside of debug mode, `DCHECK` is a no-op.
A better alternative is to report an error to the log buffer and continue. This should happen both in debug mode and in prod mode.
PiperOrigin-RevId: 408375925
Change-Id: Id5b3e19c73f3fbe0cc4bba26ca44ff9607bb6356
|
other
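A sketch of the replacement pattern the message advocates for the `RepeatedAttrDefEqual` function above: log the problem in all build modes instead of `DCHECK`-failing. Whether to keep going or bail out after logging is an assumption; this sketch bails out.

for (const OpDef::AttrDef& def : a1) {
  if (a1_set.find(def.name()) != a1_set.end()) {
    // Fires in both debug and prod builds, unlike DCHECK.
    LOG(ERROR) << "AttrDef names must be unique, but '" << def.name()
               << "' appears more than once";
    return false;  // assumption: treat malformed input as "not equal"
  }
  a1_set[def.name()] = &def;
}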
|
tensorflow
|
f68fdab93fb7f4ddb4eb438c8fe052753c9413e8
| 1
|
void Compute(tensorflow::OpKernelContext* context) override {
for (int ngram_width : ngram_widths_) {
OP_REQUIRES(
context, ngram_width > 0,
errors::InvalidArgument("ngram_widths must contain positive values"));
}
const tensorflow::Tensor* data;
OP_REQUIRES_OK(context, context->input("data", &data));
const auto& input_data = data->flat<tstring>().data();
const tensorflow::Tensor* splits;
OP_REQUIRES_OK(context, context->input("data_splits", &splits));
const auto& splits_vec = splits->flat<SPLITS_TYPE>();
// Validate that the splits are valid indices into data, only if there are
// splits specified.
const int input_data_size = data->flat<tstring>().size();
const int splits_vec_size = splits_vec.size();
if (splits_vec_size > 0) {
int prev_split = splits_vec(0);
OP_REQUIRES(context, prev_split == 0,
errors::InvalidArgument("First split value must be 0, got ",
prev_split));
for (int i = 1; i < splits_vec_size; ++i) {
bool valid_splits = splits_vec(i) >= prev_split;
valid_splits = valid_splits && (splits_vec(i) <= input_data_size);
OP_REQUIRES(context, valid_splits,
errors::InvalidArgument(
"Invalid split value ", splits_vec(i), ", must be in [",
prev_split, ", ", input_data_size, "]"));
prev_split = splits_vec(i);
}
OP_REQUIRES(context, prev_split == input_data_size,
errors::InvalidArgument(
"Last split value must be data size. Expected ",
input_data_size, ", got ", prev_split));
}
int num_batch_items = splits_vec.size() - 1;
tensorflow::Tensor* ngrams_splits;
OP_REQUIRES_OK(
context, context->allocate_output(1, splits->shape(), &ngrams_splits));
auto ngrams_splits_data = ngrams_splits->flat<SPLITS_TYPE>().data();
// If there is no data or size, return an empty RT.
if (data->flat<tstring>().size() == 0 || splits_vec.size() == 0) {
tensorflow::Tensor* empty;
OP_REQUIRES_OK(context,
context->allocate_output(0, data->shape(), &empty));
for (int i = 0; i <= num_batch_items; ++i) {
ngrams_splits_data[i] = 0;
}
return;
}
ngrams_splits_data[0] = 0;
for (int i = 1; i <= num_batch_items; ++i) {
int length = splits_vec(i) - splits_vec(i - 1);
int num_ngrams = 0;
for (int ngram_width : ngram_widths_)
num_ngrams += get_num_ngrams(length, ngram_width);
if (preserve_short_ && length > 0 && num_ngrams == 0) {
num_ngrams = 1;
}
ngrams_splits_data[i] = ngrams_splits_data[i - 1] + num_ngrams;
}
tensorflow::Tensor* ngrams;
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({ngrams_splits_data[num_batch_items]}), &ngrams));
auto ngrams_data = ngrams->flat<tstring>().data();
for (int i = 0; i < num_batch_items; ++i) {
auto data_start = &input_data[splits_vec(i)];
int output_start_idx = ngrams_splits_data[i];
for (int ngram_width : ngram_widths_) {
auto output_start = &ngrams_data[output_start_idx];
int length = splits_vec(i + 1) - splits_vec(i);
int num_ngrams = get_num_ngrams(length, ngram_width);
CreateNgrams(data_start, output_start, num_ngrams, ngram_width);
output_start_idx += num_ngrams;
}
// If we're preserving short sequences, check to see if no sequence was
// generated by comparing the current output start idx to the original
// one (ngram_splits_data). If no ngrams were generated, then they will
// be equal (since we increment output_start_idx by num_ngrams every
// time we create a set of ngrams.)
if (preserve_short_ && output_start_idx == ngrams_splits_data[i]) {
int data_length = splits_vec(i + 1) - splits_vec(i);
// One legitimate reason to not have any ngrams when preserve_short_
// is true is if the sequence itself is empty. In that case, move on.
if (data_length == 0) {
continue;
}
// We don't have to worry about dynamic padding sizes here: if padding
// was dynamic, every sequence would have had sufficient padding to
// generate at least one ngram.
int ngram_width = data_length + 2 * pad_width_;
auto output_start = &ngrams_data[output_start_idx];
int num_ngrams = 1;
CreateNgrams(data_start, output_start, num_ngrams, ngram_width);
}
}
}
| null | null | 195,391
|
47970521259378054197444704767683650255
| 107
|
Add a check for pad width to be a positive value.
PiperOrigin-RevId: 413275853
Change-Id: I261a8db9dabf5ce48a806a9e58129080c9fac619
|
other
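The check the message describes amounts to one `OP_REQUIRES` alongside the existing `ngram_widths` validation in `Compute` above. The message says "positive", but zero padding is usually legal, so the exact bound and the error text shown here are assumptions.

OP_REQUIRES(context, pad_width_ >= 0,
            errors::InvalidArgument("Pad width should be nonnegative, got ",
                                    pad_width_));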
|
v4l2loopback
|
e4cd225557486c420f6a34411f98c575effd43dd
| 1
|
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file);
int labellen = (sizeof(cap->card) < sizeof(dev->card_label)) ?
sizeof(cap->card) :
sizeof(dev->card_label);
int device_nr =
((struct v4l2loopback_private *)video_get_drvdata(dev->vdev))
->device_nr;
__u32 capabilities = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
strlcpy(cap->driver, "v4l2 loopback", sizeof(cap->driver));
snprintf(cap->card, labellen, dev->card_label);
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:v4l2loopback-%03d", device_nr);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
/* since 3.1.0, the v4l2-core system is supposed to set the version */
cap->version = V4L2LOOPBACK_VERSION_CODE;
#endif
#ifdef V4L2_CAP_VIDEO_M2M
capabilities |= V4L2_CAP_VIDEO_M2M;
#endif /* V4L2_CAP_VIDEO_M2M */
if (dev->announce_all_caps) {
capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
} else {
if (dev->ready_for_capture) {
capabilities |= V4L2_CAP_VIDEO_CAPTURE;
}
if (dev->ready_for_output) {
capabilities |= V4L2_CAP_VIDEO_OUTPUT;
}
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
dev->vdev->device_caps =
#endif /* >=linux-4.7.0 */
cap->device_caps = cap->capabilities = capabilities;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
cap->capabilities |= V4L2_CAP_DEVICE_CAPS;
#endif
memset(cap->reserved, 0, sizeof(cap->reserved));
return 0;
}
| null | null | 195,398
|
315173664575668559594492834827107741837
| 49
|
add explicit format specifier to printf() invocations
CWE-134
|
other
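For the record above, the CWE-134 issue is the `snprintf(cap->card, labellen, dev->card_label)` call, which uses a device-controlled label as the format string. The fix named in the message is to supply an explicit format specifier:

/* The label becomes an argument, never the format string, so any '%'
 * characters in card_label are copied literally instead of interpreted. */
snprintf(cap->card, labellen, "%s", dev->card_label);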
|
tensorflow
|
045deec1cbdebb27d817008ad5df94d96a08b1bf
| 1
|
bool IsIdentityConsumingSwitch(const MutableGraphView& graph,
const NodeDef& node) {
if ((IsIdentity(node) || IsIdentityNSingleInput(node)) &&
node.input_size() > 0) {
TensorId tensor_id = ParseTensorName(node.input(0));
if (IsTensorIdControlling(tensor_id)) {
return false;
}
NodeDef* input_node = graph.GetNode(tensor_id.node());
return IsSwitch(*input_node);
}
return false;
}
| null | null | 195,399
|
285381432180800261456018739544418866483
| 14
|
Prevent null pointer dereference in `mutable_graph_view`
PiperOrigin-RevId: 409684472
Change-Id: I577eb9d9ac470fcec0501423171e739a4ec0cb5c
|
other
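A sketch of the guard the message implies for `IsIdentityConsumingSwitch` above: `graph.GetNode` returns null when the tensor name does not resolve to a node, so the result must be checked before dereferencing (the exact shape of the upstream fix is assumed).

NodeDef* input_node = graph.GetNode(tensor_id.node());
if (input_node == nullptr) {
  return false;  // dangling input: cannot be consuming a Switch
}
return IsSwitch(*input_node);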
|
tensorflow
|
a1e1511dde36b3f8aa27a6ec630838e7ea40e091
| 1
|
int TfLiteIntArrayGetSizeInBytes(int size) {
static TfLiteIntArray dummy;
int computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
// Context for why this is needed is in http://b/189926408#comment21
computed_size -= sizeof(dummy.data[0]);
#endif
return computed_size;
}
| null | null | 195,402
|
57995846542337948463298638540979134332
| 10
|
[lite] Update TfLiteIntArrayCreate to return size_t
PiperOrigin-RevId: 416439896
Change-Id: I847f69b68d1ddaff4b1e925a09b8b69c1756653b
|
other
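Per the commit title, the signed `int` arithmetic above can overflow for large `size`; here is a sketch of a `size_t` variant. The negative-size guard and its 0 sentinel are assumptions, not the actual upstream code.

size_t TfLiteIntArrayGetSizeInBytes(int size) {
  static TfLiteIntArray dummy;
  if (size < 0) return 0;  // assumption: report invalid sizes as 0
  size_t computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
  // Context for why this is needed is in http://b/189926408#comment21
  computed_size -= sizeof(dummy.data[0]);
#endif
  return computed_size;
}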
|
tensorflow
|
a1e1511dde36b3f8aa27a6ec630838e7ea40e091
| 1
|
TfLiteIntArray* TfLiteIntArrayCreate(int size) {
int alloc_size = TfLiteIntArrayGetSizeInBytes(size);
if (alloc_size <= 0) return NULL;
TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size);
if (!ret) return ret;
ret->size = size;
return ret;
}
| null | null | 195,403
|
231945924034172266841118434454343948993
| 8
|
[lite] Update TfLiteIntArrayCreate to return size_t
PiperOrigin-RevId: 416439896
Change-Id: I847f69b68d1ddaff4b1e925a09b8b69c1756653b
|
other
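The matching allocation-side sketch: once the helper returns an unsigned type, the `alloc_size <= 0` test above can no longer catch anything, so failure is signalled via the 0 sentinel assumed in the previous sketch.

TfLiteIntArray* TfLiteIntArrayCreate(int size) {
  size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size);
  if (alloc_size == 0) return NULL;  // invalid size (see sketch above)
  TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size);
  if (!ret) return ret;
  ret->size = size;
  return ret;
}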
|
tensorflow
|
ba4e8ac4dc2991e350d5cc407f8598c8d4ee70fb
| 1
|
void Compute(OpKernelContext* context) override {
typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
ConstEigenMatrixMap;
typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
EigenMatrixMap;
constexpr int tensor_in_and_out_dims = 4;
const Tensor& tensor_in = context->input(0);
OP_REQUIRES(context, tensor_in.dims() == tensor_in_and_out_dims,
errors::InvalidArgument("tensor_in must be 4-dimensional"));
std::vector<int> input_size(tensor_in_and_out_dims);
std::vector<int> output_size(tensor_in_and_out_dims);
for (int i = 0; i < tensor_in_and_out_dims; ++i) {
input_size[i] = tensor_in.dim_size(i);
}
// Output size.
for (int i = 0; i < tensor_in_and_out_dims; ++i) {
// This must match the same logic in the shape function in
// core/ops/nn_ops.cc.
output_size[i] =
static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i]));
DCHECK_GT(output_size[i], 0);
}
// Generate pooling sequence.
std::vector<int64_t> height_cum_seq;
std::vector<int64_t> width_cum_seq;
GuardedPhiloxRandom generator;
generator.Init(seed_, seed2_);
height_cum_seq = GeneratePoolingSequence(input_size[1], output_size[1],
&generator, pseudo_random_);
width_cum_seq = GeneratePoolingSequence(input_size[2], output_size[2],
&generator, pseudo_random_);
// Prepare output.
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(
0,
TensorShape({output_size[0], output_size[1],
output_size[2], output_size[3]}),
&output_tensor));
Tensor* output_height_seq_tensor = nullptr;
OP_REQUIRES_OK(
context,
context->allocate_output(
1, TensorShape({static_cast<int64_t>(height_cum_seq.size())}),
&output_height_seq_tensor));
Tensor* output_width_seq_tensor = nullptr;
OP_REQUIRES_OK(
context,
context->allocate_output(
2, TensorShape({static_cast<int64_t>(width_cum_seq.size())}),
&output_width_seq_tensor));
ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), input_size[3],
input_size[2] * input_size[1] * input_size[0]);
EigenMatrixMap out_mat(output_tensor->flat<T>().data(), output_size[3],
output_size[2] * output_size[1] * output_size[0]);
// Initializes the output tensor with MIN<T>.
output_tensor->flat<T>().setConstant(Eigen::NumTraits<T>::lowest());
auto output_height_seq_flat = output_height_seq_tensor->flat<int64_t>();
auto output_width_seq_flat = output_width_seq_tensor->flat<int64_t>();
// Set output tensors.
for (int i = 0; i < height_cum_seq.size(); ++i) {
output_height_seq_flat(i) = height_cum_seq[i];
}
for (int i = 0; i < width_cum_seq.size(); ++i) {
output_width_seq_flat(i) = width_cum_seq[i];
}
// For both input and output,
// 0: batch
// 1: height / row
// 2: width / col
// 3: depth / channel
const int64_t height_max = input_size[1] - 1;
const int64_t width_max = input_size[2] - 1;
for (int64_t b = 0; b < input_size[0]; ++b) {
// height sequence.
for (int64_t hs = 0; hs < height_cum_seq.size() - 1; ++hs) {
// height start and end.
const int64_t height_start = height_cum_seq[hs];
int64_t height_end =
overlapping_ ? height_cum_seq[hs + 1] : height_cum_seq[hs + 1] - 1;
height_end = std::min(height_end, height_max);
// width sequence.
for (int64_t ws = 0; ws < width_cum_seq.size() - 1; ++ws) {
const int64_t out_offset =
(b * output_size[1] + hs) * output_size[2] + ws;
// width start and end.
const int64_t width_start = width_cum_seq[ws];
int64_t width_end =
overlapping_ ? width_cum_seq[ws + 1] : width_cum_seq[ws + 1] - 1;
width_end = std::min(width_end, width_max);
for (int64_t h = height_start; h <= height_end; ++h) {
for (int64_t w = width_start; w <= width_end; ++w) {
const int64_t in_offset =
(b * input_size[1] + h) * input_size[2] + w;
out_mat.col(out_offset) =
out_mat.col(out_offset).cwiseMax(in_mat.col(in_offset));
}
}
}
}
}
}
| null | null | 195,404
|
258575790704643448800507962005081633594
| 114
|
Fix a potential divide-by-zero error when executing FractionalMaxPool with a pooling ratio higher than the input size for a particular dimension.
PiperOrigin-RevId: 412151722
Change-Id: I06e57cbb8eca43816eff79eac264fa7aae8f7163
|
other
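A sketch of the validation the message describes, replacing the debug-only `DCHECK_GT` in the output-size loop above with a check that also runs in production (the error text is an assumption):

output_size[i] =
    static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i]));
OP_REQUIRES(context, output_size[i] > 0,
            errors::InvalidArgument(
                "FractionalMaxPool: pooling ratio is higher than input "
                "size for dimension ", i, ", so output size would be 0"));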
|
ImageMagick6
|
29c8abce0da56b536542f76a9ddfebdaab5b2943
| 1
|
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define CropBox "CropBox"
#define DeviceCMYK "DeviceCMYK"
#define MediaBox "MediaBox"
#define RenderPCLText " Rendering PCL... "
char
command[MaxTextExtent],
*density,
filename[MaxTextExtent],
geometry[MaxTextExtent],
*options,
input_filename[MaxTextExtent];
const DelegateInfo
*delegate_info;
Image
*image,
*next_image;
ImageInfo
*read_info;
int
c;
MagickBooleanType
cmyk,
status;
PointInfo
delta;
RectangleInfo
bounding_box,
page;
char
*p;
SegmentInfo
bounds;
size_t
height,
width;
ssize_t
count;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Open image file.
*/
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
if (status == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Set the page density.
*/
delta.x=DefaultResolution;
delta.y=DefaultResolution;
if ((image->x_resolution == 0.0) || (image->y_resolution == 0.0))
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
if ((flags & RhoValue) != 0)
image->x_resolution=geometry_info.rho;
image->y_resolution=image->x_resolution;
if ((flags & SigmaValue) != 0)
image->y_resolution=geometry_info.sigma;
}
/*
Determine page geometry from the PCL media box.
*/
cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse;
count=0;
(void) memset(&bounding_box,0,sizeof(bounding_box));
(void) memset(&bounds,0,sizeof(bounds));
(void) memset(&page,0,sizeof(page));
(void) memset(command,0,sizeof(command));
p=command;
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
if (image_info->page != (char *) NULL)
continue;
/*
Note PCL elements.
*/
*p++=(char) c;
if ((c != (int) '/') && (c != '\n') &&
((size_t) (p-command) < (MaxTextExtent-1)))
continue;
*p='\0';
p=command;
/*
Is this a CMYK document?
*/
if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0)
{
/*
Note region defined by crop box.
*/
count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0)
{
/*
Note region defined by media box.
*/
count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (count != 4)
continue;
/*
Set PCL render geometry.
*/
width=(size_t) floor(bounds.x2-bounds.x1+0.5);
height=(size_t) floor(bounds.y2-bounds.y1+0.5);
if (width > page.width)
page.width=width;
if (height > page.height)
page.height=height;
}
(void) CloseBlob(image);
/*
Render PCL with the GhostPCL delegate.
*/
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
(void) FormatLocaleString(geometry,MaxTextExtent,"%.20gx%.20g",(double)
page.width,(double) page.height);
if (image_info->monochrome != MagickFalse)
delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception);
else
if (cmyk != MagickFalse)
delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception);
else
delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
image=DestroyImage(image);
return((Image *) NULL);
}
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
density=AcquireString("");
options=AcquireString("");
(void) FormatLocaleString(density,MaxTextExtent,"%gx%g",
image->x_resolution,image->y_resolution);
if (image_info->ping != MagickFalse)
(void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0");
page.width=(size_t) floor((double) page.width*image->x_resolution/delta.x+
0.5);
page.height=(size_t) floor((double) page.height*image->y_resolution/delta.y+
0.5);
(void) FormatLocaleString(options,MaxTextExtent,"-g%.20gx%.20g ",(double)
page.width,(double) page.height);
image=DestroyImage(image);
read_info=CloneImageInfo(image_info);
*read_info->magick='\0';
if (read_info->number_scenes != 0)
{
if (read_info->number_scenes != 1)
(void) FormatLocaleString(options,MaxTextExtent,"-dLastPage=%.20g",
(double) (read_info->scene+read_info->number_scenes));
else
(void) FormatLocaleString(options,MaxTextExtent,
"-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1,
(double) (read_info->scene+read_info->number_scenes));
read_info->number_scenes=0;
if (read_info->scenes != (char *) NULL)
*read_info->scenes='\0';
}
(void) CopyMagickString(filename,read_info->filename,MaxTextExtent);
(void) AcquireUniqueFilename(read_info->filename);
(void) FormatLocaleString(command,MaxTextExtent,
GetDelegateCommands(delegate_info),
read_info->antialias != MagickFalse ? 4 : 1,
read_info->antialias != MagickFalse ? 4 : 1,density,options,
read_info->filename,input_filename);
options=DestroyString(options);
density=DestroyString(density);
status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command,
(char *) NULL,exception) != 0 ? MagickTrue : MagickFalse;
image=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
(void) RelinquishUniqueFileResource(input_filename);
read_info=DestroyImageInfo(read_info);
if (image == (Image *) NULL)
ThrowReaderException(DelegateError,"PCLDelegateFailed");
if (LocaleCompare(image->magick,"BMP") == 0)
{
Image
*cmyk_image;
cmyk_image=ConsolidateCMYKImages(image,&image->exception);
if (cmyk_image != (Image *) NULL)
{
image=DestroyImageList(image);
image=cmyk_image;
}
}
do
{
(void) CopyMagickString(image->filename,filename,MaxTextExtent);
image->page=page;
if (image_info->ping != MagickFalse)
{
image->magick_columns*=image->x_resolution/2.0;
image->magick_rows*=image->y_resolution/2.0;
image->columns*=image->x_resolution/2.0;
image->rows*=image->y_resolution/2.0;
}
next_image=SyncNextImageInList(image);
if (next_image != (Image *) NULL)
image=next_image;
} while (next_image != (Image *) NULL);
return(GetFirstImageInList(image));
}
| null | null | 195,405
|
25930169199725305650091152672892679233
| 259
|
https://github.com/ImageMagick/ImageMagick/pull/4986
|
other
|
gpac
|
64a2e1b799352ac7d7aad1989bc06e7b0f2b01db
| 1
|
void gitn_box_del(GF_Box *s)
{
u32 i;
GroupIdToNameBox *ptr = (GroupIdToNameBox *)s;
if (ptr == NULL) return;
for (i=0; i<ptr->nb_entries; i++) {
if (ptr->entries[i].name) gf_free(ptr->entries[i].name);
}
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr);
}
| null | null | 195,409
|
43001760750779411383718379614700541077
| 11
|
fixed #2092
|
other
|
tensorflow
|
965b97e4a9650495cda5a8c210ef6684b4b9eceb
| 1
|
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
// Create a new SparseTensorSliceDatasetOp::Dataset, insert it in
// the step container, and return it as the output.
const Tensor* indices;
OP_REQUIRES_OK(ctx, ctx->input("indices", &indices));
const Tensor* values;
OP_REQUIRES_OK(ctx, ctx->input("values", &values));
const Tensor* dense_shape;
OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
indices->shape().DebugString()));
const auto num_indices = indices->NumElements();
const auto num_values = values->NumElements();
if (num_indices == 0 || num_values == 0) {
OP_REQUIRES(ctx, num_indices == num_values,
errors::InvalidArgument(
"If indices or values are empty, the other one must also "
"be. Got indices of shape ",
indices->shape().DebugString(), " and values of shape ",
values->shape().DebugString()));
}
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
indices->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
dense_shape->shape().DebugString()));
// We currently ensure that `sparse_tensor` is ordered in the
// batch dimension.
// TODO(mrry): Investigate ways to avoid this unconditional check
// if we can be sure that the sparse tensor was produced in an
// appropriate order (e.g. by `tf.parse_example()` or a Dataset
// that batches elements into rows of a SparseTensor).
int64_t previous_batch_index = -1;
for (int64_t i = 0; i < indices->dim_size(0); ++i) {
int64_t next_batch_index = indices->matrix<int64_t>()(i, 0);
OP_REQUIRES(
ctx, next_batch_index >= previous_batch_index,
errors::Unimplemented("The SparseTensor must be ordered in the batch "
"dimension; handling arbitrarily ordered input "
"is not currently supported."));
previous_batch_index = next_batch_index;
}
gtl::InlinedVector<int64_t, 8> std_order(dense_shape->NumElements(), 0);
sparse::SparseTensor tensor;
OP_REQUIRES_OK(
ctx, sparse::SparseTensor::Create(
*indices, *values, TensorShape(dense_shape->vec<int64_t>()),
std_order, &tensor));
*output = new Dataset<T>(ctx, std::move(tensor));
}
| null | null | 195,410
|
265741127438637665990195597689736087682
| 58
|
Properly validate sparse tensor in `SparseTensorSliceDataset`
Existing validation was incomplete.
PiperOrigin-RevId: 415375048
Change-Id: I14cd18f29ede73286f3ffac35171bd15828997e9
|
other
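One plausible reading of "existing validation was incomplete" is that the three inputs above were never cross-checked against each other. The following consistency checks are an assumption about what the commit added, written in the style of the surrounding code:

OP_REQUIRES(ctx, indices->dim_size(0) == num_values,
            errors::InvalidArgument(
                "Number of index rows (", indices->dim_size(0),
                ") must match number of values (", num_values, ")"));
OP_REQUIRES(ctx, indices->dim_size(1) == dense_shape->NumElements(),
            errors::InvalidArgument(
                "Index rank (", indices->dim_size(1),
                ") must match dense_shape length (",
                dense_shape->NumElements(), ")"));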
|
weechat
|
9904cb6d2eb40f679d8ff6557c22d53a3e3dc75a
| 1
|
IRC_PROTOCOL_CALLBACK(352)
{
char *pos_attr, *pos_hopcount, *pos_realname, *str_host;
int arg_start, length;
struct t_irc_channel *ptr_channel;
struct t_irc_nick *ptr_nick;
IRC_PROTOCOL_MIN_ARGS(5);
/* silently ignore malformed 352 message (missing infos) */
if (argc < 8)
return WEECHAT_RC_OK;
pos_attr = NULL;
pos_hopcount = NULL;
pos_realname = NULL;
if (argc > 8)
{
arg_start = (strcmp (argv[8], "*") == 0) ? 9 : 8;
if (argv[arg_start][0] == ':')
{
pos_attr = NULL;
pos_hopcount = (argc > arg_start) ? argv[arg_start] + 1 : NULL;
pos_realname = (argc > arg_start + 1) ? argv_eol[arg_start + 1] : NULL;
}
else
{
pos_attr = argv[arg_start];
pos_hopcount = (argc > arg_start + 1) ? argv[arg_start + 1] + 1 : NULL;
pos_realname = (argc > arg_start + 2) ? argv_eol[arg_start + 2] : NULL;
}
}
ptr_channel = irc_channel_search (server, argv[3]);
ptr_nick = (ptr_channel) ?
irc_nick_search (server, ptr_channel, argv[7]) : NULL;
/* update host in nick */
if (ptr_nick)
{
length = strlen (argv[4]) + 1 + strlen (argv[5]) + 1;
str_host = malloc (length);
if (str_host)
{
snprintf (str_host, length, "%s@%s", argv[4], argv[5]);
irc_nick_set_host (ptr_nick, str_host);
free (str_host);
}
}
/* update away flag in nick */
if (ptr_channel && ptr_nick && pos_attr)
{
irc_nick_set_away (server, ptr_channel, ptr_nick,
(pos_attr[0] == 'G') ? 1 : 0);
}
/* update realname in nick */
if (ptr_channel && ptr_nick && pos_realname)
{
if (ptr_nick->realname)
free (ptr_nick->realname);
if (pos_realname &&
weechat_hashtable_has_key (server->cap_list, "extended-join"))
{
ptr_nick->realname = strdup (pos_realname);
}
else
{
ptr_nick->realname = NULL;
}
}
/* display output of who (manual who from user) */
if (!ptr_channel || (ptr_channel->checking_whox <= 0))
{
weechat_printf_date_tags (
irc_msgbuffer_get_target_buffer (
server, NULL, command, "who", NULL),
date,
irc_protocol_tags (command, "irc_numeric", NULL, NULL),
"%s%s[%s%s%s] %s%s %s(%s%s@%s%s)%s %s%s%s%s(%s)",
weechat_prefix ("network"),
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_CHAT_CHANNEL,
argv[3],
IRC_COLOR_CHAT_DELIMITERS,
irc_nick_color_for_msg (server, 1, NULL, argv[7]),
argv[7],
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_CHAT_HOST,
argv[4],
argv[5],
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_RESET,
(pos_attr) ? pos_attr : "",
(pos_attr) ? " " : "",
(pos_hopcount) ? pos_hopcount : "",
(pos_hopcount) ? " " : "",
(pos_realname) ? pos_realname : "");
}
return WEECHAT_RC_OK;
}
| null | null | 195,471
|
327988380999970817884274968968322444182
| 105
|
irc: fix crash when receiving a malformed message 352 (who)
Thanks to Stuart Nevans Locke for reporting the issue.
|
other
|
hhvm
|
dabd48caf74995e605f1700344f1ff4a5d83441d
| 1
|
bool JSON_parser(Variant &z, const char *p, int length, bool const assoc,
int depth, int64_t options) {
// No GC safepoints during JSON parsing, please. Code is not re-entrant.
NoHandleSurpriseScope no_surprise(SafepointFlags);
json_parser *json = s_json_parser.get(); /* the parser state */
// Clear and reuse the thread-local string buffers. They are only freed if
// they exceed kMaxPersistentStringBufferCapacity at exit or if the thread
// is explicitly flushed (e.g., due to being idle).
json->initSb(length);
SCOPE_EXIT {
constexpr int kMaxPersistentStringBufferCapacity = 256 * 1024;
if (json->sb_cap > kMaxPersistentStringBufferCapacity) json->flushSb();
};
// SimpleParser only handles the most common set of options. Also, only use it
// if its array nesting depth check is *more* restrictive than what the user
// asks for, to ensure that the precise semantics of the general case is
// applied for all nesting overflows.
if (assoc &&
options == (options & (k_JSON_FB_LOOSE |
k_JSON_FB_DARRAYS |
k_JSON_FB_DARRAYS_AND_VARRAYS |
k_JSON_FB_HACK_ARRAYS |
k_JSON_FB_THRIFT_SIMPLE_JSON |
k_JSON_FB_LEGACY_HACK_ARRAYS)) &&
depth >= SimpleParser::kMaxArrayDepth &&
length <= RuntimeOption::EvalSimpleJsonMaxLength &&
SimpleParser::TryParse(p, length, json->tl_buffer.tv, z,
get_container_type_from_options(options),
options & k_JSON_FB_THRIFT_SIMPLE_JSON)) {
return true;
}
int b; /* the next character */
int c; /* the next character class */
int s; /* the next state */
int state = 0;
/*<fb>*/
bool const loose = options & k_JSON_FB_LOOSE;
JSONContainerType const container_type =
get_container_type_from_options(options);
int qchr = 0;
int8_t const *byte_class;
int8_t const (*next_state_table)[32];
if (loose) {
byte_class = loose_ascii_class;
next_state_table = loose_state_transition_table;
} else {
byte_class = ascii_class;
next_state_table = state_transition_table;
}
/*</fb>*/
UncheckedBuffer *buf = &json->sb_buf;
UncheckedBuffer *key = &json->sb_key;
DataType type = kInvalidDataType;
unsigned short escaped_bytes = 0;
auto reset_type = [&] { type = kInvalidDataType; };
json->depth = depth;
// Since the stack is maintainined on a per request basis, for performance
// reasons, it only makes sense to expand if necessary and cycles are wasted
// contracting. Calls with a depth other than default should be rare.
if (depth > json->stack.size()) {
json->stack.resize(depth);
}
SCOPE_EXIT {
if (json->stack.empty()) return;
for (int i = 0; i <= json->mark; i++) {
json->stack[i].key.reset();
json->stack[i].val.unset();
}
json->mark = -1;
};
json->mark = json->top = -1;
push(json, Mode::DONE);
UTF8To16Decoder decoder(p, length, loose);
for (;;) {
b = decoder.decode();
// Fast-case most common transition: append a simple string character.
if (state == 3 && type == KindOfString) {
while (b != '\"' && b != '\\' && b != '\'' && b <= 127 && b >= ' ') {
buf->append((char)b);
b = decoder.decode();
}
}
if (b == UTF8_END) break; // UTF-8 decoding finishes successfully.
if (b == UTF8_ERROR) {
s_json_parser->error_code = JSON_ERROR_UTF8;
return false;
}
assertx(b >= 0);
if ((b & 127) == b) {
/*<fb>*/
c = byte_class[b];
/*</fb>*/
if (c <= S_ERR) {
s_json_parser->error_code = JSON_ERROR_CTRL_CHAR;
return false;
}
} else {
c = S_ETC;
}
/*
Get the next state from the transition table.
*/
/*<fb>*/
s = next_state_table[state][c];
if (s == -4) {
if (b != qchr) {
s = 3;
} else {
qchr = 0;
}
}
/*</fb>*/
if (s < 0) {
/*
Perform one of the predefined actions.
*/
switch (s) {
/*
empty }
*/
case -9:
/*<fb>*/
if (json->top == 1) z = json->stack[json->top].val;
else {
/*</fb>*/
attach_zval(json, json->stack[json->top].key, assoc, container_type);
/*<fb>*/
}
/*</fb>*/
if (!pop(json, Mode::KEY)) {
return false;
}
state = 9;
break;
/*
{
*/
case -8:
if (!push(json, Mode::KEY)) {
s_json_parser->error_code = JSON_ERROR_DEPTH;
return false;
}
state = 1;
if (json->top > 0) {
Variant &top = json->stack[json->top].val;
/*<fb>*/
if (container_type == JSONContainerType::COLLECTIONS) {
// stable_maps is meaningless
top = req::make<c_Map>();
} else {
/*</fb>*/
if (!assoc) {
top = SystemLib::AllocStdClassObject();
/* <fb> */
} else if (container_type == JSONContainerType::HACK_ARRAYS) {
top = Array::CreateDict();
} else if (container_type == JSONContainerType::DARRAYS ||
container_type == JSONContainerType::DARRAYS_AND_VARRAYS)
{
top = Array::CreateDArray();
/* </fb> */
} else if (
container_type == JSONContainerType::LEGACY_HACK_ARRAYS) {
auto arr = staticEmptyDictArray()->copy();
arr->setLegacyArray(true);
top = arr;
} else {
top = Array::CreateDArray();
}
/*<fb>*/
}
/*</fb>*/
json->stack[json->top].key = copy_and_clear(*key);
reset_type();
}
break;
/*
}
*/
case -7:
/*** BEGIN Facebook: json_utf8_loose ***/
/*
If this is a trailing comma in an object definition,
we're in Mode::KEY. In that case, throw that off the
stack and restore Mode::OBJECT so that we pretend the
trailing comma just didn't happen.
*/
if (loose) {
if (pop(json, Mode::KEY)) {
push(json, Mode::OBJECT);
}
}
/*** END Facebook: json_utf8_loose ***/
if (type != kInvalidDataType &&
json->stack[json->top].mode == Mode::OBJECT) {
Variant mval;
json_create_zval(mval, *buf, type, options);
Variant &top = json->stack[json->top].val;
object_set(json, top, copy_and_clear(*key),
mval, assoc, container_type);
buf->clear();
reset_type();
}
/*<fb>*/
if (json->top == 1) z = json->stack[json->top].val;
else {
/*</fb>*/
attach_zval(json, json->stack[json->top].key,
assoc, container_type);
/*<fb>*/
}
/*</fb>*/
if (!pop(json, Mode::OBJECT)) {
s_json_parser->error_code = JSON_ERROR_STATE_MISMATCH;
return false;
}
state = 9;
break;
/*
[
*/
case -6:
if (!push(json, Mode::ARRAY)) {
s_json_parser->error_code = JSON_ERROR_DEPTH;
return false;
}
state = 2;
if (json->top > 0) {
Variant &top = json->stack[json->top].val;
/*<fb>*/
if (container_type == JSONContainerType::COLLECTIONS) {
top = req::make<c_Vector>();
} else if (container_type == JSONContainerType::HACK_ARRAYS) {
top = Array::CreateVec();
} else if (container_type == JSONContainerType::DARRAYS_AND_VARRAYS) {
top = Array::CreateVArray();
} else if (container_type == JSONContainerType::DARRAYS) {
top = Array::CreateDArray();
} else if (container_type == JSONContainerType::LEGACY_HACK_ARRAYS) {
auto arr = staticEmptyVecArray()->copy();
arr->setLegacyArray(true);
top = arr;
} else {
top = Array::CreateDArray();
}
/*</fb>*/
json->stack[json->top].key = copy_and_clear(*key);
reset_type();
}
break;
/*
]
*/
case -5:
{
if (type != kInvalidDataType &&
json->stack[json->top].mode == Mode::ARRAY) {
Variant mval;
json_create_zval(mval, *buf, type, options);
auto& top = json->stack[json->top].val;
if (container_type == JSONContainerType::COLLECTIONS) {
collections::append(top.getObjectData(), mval.asTypedValue());
} else {
top.asArrRef().append(mval);
}
buf->clear();
reset_type();
}
/*<fb>*/
if (json->top == 1) z = json->stack[json->top].val;
else {
/*</fb>*/
attach_zval(json, json->stack[json->top].key, assoc,
container_type);
/*<fb>*/
}
/*</fb>*/
if (!pop(json, Mode::ARRAY)) {
s_json_parser->error_code = JSON_ERROR_STATE_MISMATCH;
return false;
}
state = 9;
}
break;
/*
"
*/
case -4:
switch (json->stack[json->top].mode) {
case Mode::KEY:
state = 27;
std::swap(buf, key);
reset_type();
break;
case Mode::ARRAY:
case Mode::OBJECT:
state = 9;
break;
case Mode::DONE:
if (type == KindOfString) {
z = copy_and_clear(*buf);
state = 9;
break;
}
/* fall through if not KindOfString */
default:
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
break;
/*
,
*/
case -3:
{
Variant mval;
if (type != kInvalidDataType &&
(json->stack[json->top].mode == Mode::OBJECT ||
json->stack[json->top].mode == Mode::ARRAY)) {
json_create_zval(mval, *buf, type, options);
}
switch (json->stack[json->top].mode) {
case Mode::OBJECT:
if (pop(json, Mode::OBJECT) &&
push(json, Mode::KEY)) {
if (type != kInvalidDataType) {
Variant &top = json->stack[json->top].val;
object_set(
json,
top,
copy_and_clear(*key),
mval,
assoc,
container_type
);
}
state = 29;
}
break;
case Mode::ARRAY:
if (type != kInvalidDataType) {
auto& top = json->stack[json->top].val;
if (container_type == JSONContainerType::COLLECTIONS) {
collections::append(top.getObjectData(), mval.asTypedValue());
} else {
top.asArrRef().append(mval);
}
}
state = 28;
break;
default:
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
buf->clear();
reset_type();
check_non_safepoint_surprise();
}
break;
/*<fb>*/
/*
: (after unquoted string)
*/
case -10:
if (json->stack[json->top].mode == Mode::KEY) {
state = 27;
std::swap(buf, key);
reset_type();
s = -2;
} else {
s = 3;
break;
}
/*</fb>*/
/*
:
*/
case -2:
if (pop(json, Mode::KEY) && push(json, Mode::OBJECT)) {
state = 28;
break;
}
/*
syntax error
*/
case -1:
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
} else {
/*
Change the state and iterate.
*/
bool is_tsimplejson = options & k_JSON_FB_THRIFT_SIMPLE_JSON;
if (type == KindOfString) {
if (/*<fb>*/(/*</fb>*/s == 3/*<fb>*/ || s == 30)/*</fb>*/ &&
state != 8) {
if (state != 4) {
utf16_to_utf8(*buf, b);
} else {
switch (b) {
case 'b': buf->append('\b'); break;
case 't': buf->append('\t'); break;
case 'n': buf->append('\n'); break;
case 'f': buf->append('\f'); break;
case 'r': buf->append('\r'); break;
default:
utf16_to_utf8(*buf, b);
break;
}
}
} else if (s == 6) {
if (UNLIKELY(is_tsimplejson)) {
if (UNLIKELY(b != '0')) {
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
escaped_bytes = 0;
} else {
escaped_bytes = dehexchar(b) << 12;
}
} else if (s == 7) {
if (UNLIKELY(is_tsimplejson)) {
if (UNLIKELY(b != '0')) {
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
} else {
escaped_bytes += dehexchar(b) << 8;
}
} else if (s == 8) {
escaped_bytes += dehexchar(b) << 4;
} else if (s == 3 && state == 8) {
escaped_bytes += dehexchar(b);
if (UNLIKELY(is_tsimplejson)) {
buf->append((char)escaped_bytes);
} else {
utf16_to_utf8(*buf, escaped_bytes);
}
}
} else if ((type == kInvalidDataType || type == KindOfNull) &&
(c == S_DIG || c == S_ZER)) {
type = KindOfInt64;
buf->append((char)b);
} else if (type == KindOfInt64 && s == 24) {
type = KindOfDouble;
buf->append((char)b);
} else if ((type == kInvalidDataType || type == KindOfNull ||
type == KindOfInt64) &&
c == S_DOT) {
type = KindOfDouble;
buf->append((char)b);
} else if (type != KindOfString && c == S_QUO) {
type = KindOfString;
/*<fb>*/qchr = b;/*</fb>*/
} else if ((type == kInvalidDataType || type == KindOfNull ||
type == KindOfInt64 || type == KindOfDouble) &&
((state == 12 && s == 9) ||
(state == 16 && s == 9))) {
type = KindOfBoolean;
} else if (type == kInvalidDataType && state == 19 && s == 9) {
type = KindOfNull;
} else if (type != KindOfString && c > S_WSP) {
utf16_to_utf8(*buf, b);
}
state = s;
}
}
if (state == 9 && pop(json, Mode::DONE)) {
s_json_parser->error_code = JSON_ERROR_NONE;
return true;
}
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
| null | null | 195,549
|
230969768529717165037239975546250419627
| 499
|
Fix a json_decode crash when depth==0
Summary:
Setting depth=0 is an error, and should result in NULL, but we weren't
checking for it, so in the case of a single, top-level string, we
would read the -1th element of the stack.
Differential Revision: D19609959
fbshipit-source-id: 04ca1e0965e04b44df2d5c806a73c3da99ff66fb
|
other
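The guard the summary describes can be sketched at the top of `JSON_parser` above; the specific error code used is an assumption:

// depth == 0 leaves json->stack empty, so the push(json, Mode::DONE) call
// below would touch stack[-1]; reject the bad depth before parsing starts.
if (depth <= 0) {
  s_json_parser->error_code = JSON_ERROR_DEPTH;
  return false;
}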
|
hhvm
|
1888810e77b446a79a7674784d5f139fcfa605e2
| 1
|
bool WddxPacket::recursiveAddVar(const String& varName,
const Variant& varVariant,
bool hasVarTag) {
bool isArray = varVariant.isArray();
bool isObject = varVariant.isObject();
if (isArray || isObject) {
if (hasVarTag) {
m_packetString.append("<var name='");
m_packetString.append(varName.data());
m_packetString.append("'>");
}
Array varAsArray;
Object varAsObject = varVariant.toObject();
if (isArray) varAsArray = varVariant.toArray();
if (isObject) varAsArray = varAsObject.toArray();
int length = varAsArray.length();
if (length > 0) {
ArrayIter it = ArrayIter(varAsArray);
if (it.first().isString()) isObject = true;
if (isObject) {
m_packetString.append("<struct>");
if (!isArray) {
m_packetString.append("<var name='php_class_name'><string>");
m_packetString.append(varAsObject->getClassName());
m_packetString.append("</string></var>");
}
} else {
m_packetString.append("<array length='");
m_packetString.append(std::to_string(length));
m_packetString.append("'>");
}
for (ArrayIter it(varAsArray); it; ++it) {
Variant key = it.first();
Variant value = it.second();
recursiveAddVar(key.toString(), value, isObject);
}
if (isObject) {
m_packetString.append("</struct>");
}
else {
m_packetString.append("</array>");
}
}
else {
//empty object
if (isObject) {
m_packetString.append("<struct>");
if (!isArray) {
m_packetString.append("<var name='php_class_name'><string>");
m_packetString.append(varAsObject->getClassName());
m_packetString.append("</string></var>");
}
m_packetString.append("</struct>");
}
}
if (hasVarTag) {
m_packetString.append("</var>");
}
return true;
}
String varType = getDataTypeString(varVariant.getType());
if (!getWddxEncoded(varType, "", varName, false).empty()) {
String varValue;
if (varType.compare("boolean") == 0) {
varValue = varVariant.toBoolean() ? "true" : "false";
} else {
varValue = StringUtil::HtmlEncode(varVariant.toString(),
StringUtil::QuoteStyle::Double,
"UTF-8", false, false).toCppString();
}
m_packetString.append(
getWddxEncoded(varType, varValue, varName, hasVarTag));
return true;
}
return false;
}
| null | null | 195,551
|
257002953542231292524849386209084454732
| 82
|
Fix infinite recursion in wddx
Summary:
It wasn't checking for infinite recursion due to references or self-referential
objects. As it turns out, closures always return themselves when converted to an
array. Raising a warning and returning is how PHP-src deals with this problem;
nothing special is done for closures.
Reviewed By: alexmalyshev
Differential Revision: D3465655
fbshipit-source-id: a42bc34d30cf4825faf33596139c0c05f8e4f5f1
|
other
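A depth-limited sketch of the guard the summary describes for `recursiveAddVar`. The added parameter, the limit constant, and the warning text are all assumptions; `raise_warning` is HHVM's standard way to surface a PHP warning.

// Assumption: thread a depth counter through the recursion and bail out
// once it exceeds a fixed bound, mirroring how PHP-src handles the case.
constexpr int kMaxWddxDepth = 256;

bool WddxPacket::recursiveAddVar(const String& varName,
                                 const Variant& varVariant,
                                 bool hasVarTag,
                                 int depth /* new parameter, default 0 */) {
  if (depth > kMaxWddxDepth) {
    raise_warning("wddx: maximum recursion depth exceeded, variable skipped");
    return false;
  }
  // ... body as above, with nested calls passing depth + 1 ...
}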
|
hhvm
|
dbeb9a56a638e3fdcef8b691c2a2967132dae692
| 1
|
String string_number_format(double d, int dec,
const String& dec_point,
const String& thousand_sep) {
char *tmpbuf = nullptr, *resbuf;
char *s, *t; /* source, target */
char *dp;
int integral;
int tmplen, reslen=0;
int count=0;
int is_negative=0;
if (d < 0) {
is_negative = 1;
d = -d;
}
if (dec < 0) dec = 0;
d = php_math_round(d, dec);
// departure from PHP: we got rid of dependencies on spprintf() here.
String tmpstr(63, ReserveString);
tmpbuf = tmpstr.mutableData();
tmplen = snprintf(tmpbuf, 64, "%.*F", dec, d);
if (tmplen < 0) return empty_string();
if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) {
tmpstr.setSize(tmplen);
return tmpstr;
}
if (tmplen >= 64) {
// Uncommon, asked for more than 64 chars worth of precision
tmpstr = String(tmplen, ReserveString);
tmpbuf = tmpstr.mutableData();
tmplen = snprintf(tmpbuf, tmplen + 1, "%.*F", dec, d);
if (tmplen < 0) return empty_string();
if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) {
tmpstr.setSize(tmplen);
return tmpstr;
}
}
/* find decimal point, if expected */
if (dec) {
dp = strpbrk(tmpbuf, ".,");
} else {
dp = nullptr;
}
/* calculate the length of the return buffer */
if (dp) {
integral = dp - tmpbuf;
} else {
/* no decimal point was found */
integral = tmplen;
}
/* allow for thousand separators */
if (!thousand_sep.empty()) {
if (integral + thousand_sep.size() * ((integral-1) / 3) < integral) {
/* overflow */
raise_error("String overflow");
}
integral += ((integral-1) / 3) * thousand_sep.size();
}
reslen = integral;
if (dec) {
reslen += dec;
if (!dec_point.empty()) {
if (reslen + dec_point.size() < dec_point.size()) {
/* overflow */
raise_error("String overflow");
}
reslen += dec_point.size();
}
}
/* add a byte for minus sign */
if (is_negative) {
reslen++;
}
String resstr(reslen, ReserveString);
resbuf = resstr.mutableData();
s = tmpbuf+tmplen-1;
t = resbuf+reslen-1;
/* copy the decimal places.
* Take care, as the sprintf implementation may return less places than
* we requested due to internal buffer limitations */
if (dec) {
int declen = dp ? s - dp : 0;
int topad = dec > declen ? dec - declen : 0;
/* pad with '0's */
while (topad--) {
*t-- = '0';
}
if (dp) {
s -= declen + 1; /* +1 to skip the point */
t -= declen;
/* now copy the chars after the point */
memcpy(t + 1, dp + 1, declen);
}
/* add decimal point */
if (!dec_point.empty()) {
memcpy(t + (1 - dec_point.size()), dec_point.data(), dec_point.size());
t -= dec_point.size();
}
}
/* copy the numbers before the decimal point, adding thousand
* separator every three digits */
while(s >= tmpbuf) {
*t-- = *s--;
if (thousand_sep && (++count%3)==0 && s>=tmpbuf) {
memcpy(t + (1 - thousand_sep.size()),
thousand_sep.data(),
thousand_sep.size());
t -= thousand_sep.size();
}
}
/* and a minus sign, if needed */
if (is_negative) {
*t-- = '-';
}
resstr.setSize(reslen);
return resstr;
}
| null | null | 195,565
|
181665636095174982884879031927056078145
| 136
|
string_number_format: Correctly handles return value of snprintf
Summary: `snprintf` can return a value greater than the number of bytes copied. In case the first byte of the string is not a digit (it could be '-'), the size of `tmpstr` was being updated without checking `tmplen`. This resulted in either an assertion error or a heap overflow, depending on whether the assertion is compiled or not.
Reviewed By: mofarrell, qianxuweiren
Differential Revision: D17327899
fbshipit-source-id: ee53875d21e02608c6d870388eecf1464de24ff1
|
other
|
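The contract being mishandled is that snprintf returns the length the formatted output would have had, which can exceed the buffer; that value must be range-checked before it is used as a string size. A hedged sketch of the check-and-retry pattern (format_fixed is an illustrative name, not the HHVM function):

#include <cstdio>
#include <string>

std::string format_fixed(double d, int dec) {
    char buf[64];
    int n = snprintf(buf, sizeof buf, "%.*F", dec, d);
    if (n < 0) return {};                                 // encoding error
    if (static_cast<size_t>(n) < sizeof buf) {
        return std::string(buf, static_cast<size_t>(n));  // n is the real length
    }
    // Truncated: n is the required length, so retry with enough room.
    std::string big(static_cast<size_t>(n) + 1, '\0');
    snprintf(&big[0], big.size(), "%.*F", dec, d);
    big.pop_back();                                       // drop the terminator slot
    return big;
}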
qemu
|
7882080388be5088e72c425b02223c02e6cb4295
| 1
|
static size_t send_control_msg(VirtIOSerial *vser, void *buf, size_t len)
{
VirtQueueElement elem;
VirtQueue *vq;
vq = vser->c_ivq;
if (!virtio_queue_ready(vq)) {
return 0;
}
if (!virtqueue_pop(vq, &elem)) {
return 0;
}
memcpy(elem.in_sg[0].iov_base, buf, len);
virtqueue_push(vq, &elem, len);
virtio_notify(VIRTIO_DEVICE(vser), vq);
return len;
}
| null | null | 195,626
|
6672846869563615468107463227186545579
| 19
|
virtio-serial: fix ANY_LAYOUT
Don't assume a specific layout for control messages.
Required by virtio 1.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Amit Shah <[email protected]>
Reviewed-by: Jason Wang <[email protected]>
|
other
|
scylladb
|
1c2eef384da439b0457b6d71c7e37d7268e471cb
| 1
|
future<fragmented_temporary_buffer> cql_server::connection::read_and_decompress_frame(size_t length, uint8_t flags)
{
using namespace compression_buffers;
if (flags & cql_frame_flags::compression) {
if (_compression == cql_compression::lz4) {
if (length < 4) {
throw std::runtime_error(fmt::format("CQL frame truncated: expected to have at least 4 bytes, got {}", length));
}
return _buffer_reader.read_exactly(_read_buf, length).then([this] (fragmented_temporary_buffer buf) {
auto linearization_buffer = bytes_ostream();
int32_t uncomp_len = request_reader(buf.get_istream(), linearization_buffer).read_int();
if (uncomp_len < 0) {
throw std::runtime_error("CQL frame uncompressed length is negative: " + std::to_string(uncomp_len));
}
buf.remove_prefix(4);
auto in = input_buffer.get_linearized_view(fragmented_temporary_buffer::view(buf));
auto uncomp = output_buffer.make_fragmented_temporary_buffer(uncomp_len, fragmented_temporary_buffer::default_fragment_size, [&] (bytes_mutable_view out) {
auto ret = LZ4_decompress_safe(reinterpret_cast<const char*>(in.data()), reinterpret_cast<char*>(out.data()),
in.size(), out.size());
if (ret < 0) {
throw std::runtime_error("CQL frame LZ4 uncompression failure");
}
return out.size();
});
on_compression_buffer_use();
return uncomp;
});
} else if (_compression == cql_compression::snappy) {
return _buffer_reader.read_exactly(_read_buf, length).then([this] (fragmented_temporary_buffer buf) {
auto in = input_buffer.get_linearized_view(fragmented_temporary_buffer::view(buf));
size_t uncomp_len;
if (snappy_uncompressed_length(reinterpret_cast<const char*>(in.data()), in.size(), &uncomp_len) != SNAPPY_OK) {
throw std::runtime_error("CQL frame Snappy uncompressed size is unknown");
}
auto uncomp = output_buffer.make_fragmented_temporary_buffer(uncomp_len, fragmented_temporary_buffer::default_fragment_size, [&] (bytes_mutable_view out) {
size_t output_len = out.size();
if (snappy_uncompress(reinterpret_cast<const char*>(in.data()), in.size(), reinterpret_cast<char*>(out.data()), &output_len) != SNAPPY_OK) {
throw std::runtime_error("CQL frame Snappy uncompression failure");
}
return output_len;
});
on_compression_buffer_use();
return uncomp;
});
} else {
throw exceptions::protocol_exception(format("Unknown compression algorithm"));
}
}
return _buffer_reader.read_exactly(_read_buf, length);
}
| null | null | 195,628
|
259153815550716489753062819304574368086
| 50
|
transport/server.cc: Return correct size of decompressed lz4 buffer
An incorrect size is returned from the function, which could lead to
crashes or undefined behavior. Fix by erroring out in these cases.
Fixes #11476
|
other
|
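The safe pattern is to trust the decompressor's return value rather than the frame's declared uncompressed length, and to error out when the two disagree. A sketch assuming liblz4 and plain pointer buffers in place of Scylla's fragmented buffers:

#include <stdexcept>
#include <lz4.h>

size_t decompress_checked(const char* in, int in_len,
                          char* out, int declared_len) {
    int ret = LZ4_decompress_safe(in, out, in_len, declared_len);
    if (ret < 0) {
        throw std::runtime_error("LZ4 decompression failure");
    }
    if (ret != declared_len) {
        // A frame whose real size differs from its header is malformed;
        // silently returning declared_len would expose uninitialized bytes.
        throw std::runtime_error("LZ4 size mismatch");
    }
    return static_cast<size_t>(ret);
}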
tensorflow
|
a5b89cd68c02329d793356bda85d079e9e69b4e7
| 1
|
Status GetDeviceForInput(const EagerOperation& op, const EagerContext& ctx,
TensorHandle* tensor_handle, Device** result) {
Device* cpu_device = ctx.HostCPU();
string device_name;
if (tensor_handle->Type() != TensorHandle::LOCAL) {
Device* device = tensor_handle->device();
device_name = device != nullptr ? device->name() : cpu_device->name();
*result = (device == nullptr ? cpu_device : device);
} else if (tensor_handle->dtype == DT_RESOURCE) {
// Use the resource's actual device because it is the device that will
// influence partitioning the multi-device function.
const Tensor* tensor;
// TODO(fishx): Avoid blocking here.
TF_RETURN_IF_ERROR(tensor_handle->Tensor(&tensor));
const ResourceHandle& handle = tensor->flat<ResourceHandle>()(0);
device_name = handle.device();
Device* input_device;
TF_RETURN_IF_ERROR(
ctx.FindDeviceFromName(device_name.c_str(), &input_device));
*result = input_device;
} else {
Device* device = tensor_handle->device();
const bool is_tpu = device != nullptr && device->device_type() == "TPU";
// int32 return values can be placed on TPUs.
const bool use_host_memory =
is_tpu ? MTypeFromDTypeIntsOnDevice(tensor_handle->dtype)
: MTypeFromDType(tensor_handle->dtype);
if (use_host_memory) {
*result = cpu_device;
} else {
// Eager ops executing as functions should have their preferred inputs set
// to the op's device. This allows us to avoid expensive D2H copies if a
// mirror of the tensor already exists on the op's device.
if (!op.is_function() && device != nullptr && device != cpu_device) {
device = absl::get<Device*>(op.Device());
}
*result = (device == nullptr ? cpu_device : device);
}
}
return Status::OK();
}
| null | null | 195,629
|
313132395886519540072250498373398294291
| 42
|
Fix empty resource handle vulnerability.
Some ops that attempt to extract a resource handle from user input
can lead to nullptr dereferences. This returns an error in such
a case.
PiperOrigin-RevId: 445571938
|
other
|
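The guard amounts to checking the element count before reading element zero of the resource tensor. A toy sketch with invented stand-ins for Tensor and ResourceHandle; the real fix returns a TensorFlow error status rather than an optional:

#include <optional>
#include <string>
#include <vector>

struct ResourceHandle { std::string device; };
struct Tensor { std::vector<ResourceHandle> elems; };

std::optional<std::string> device_of_resource(const Tensor& t) {
    if (t.elems.empty()) {
        // An empty resource tensor has no handle to read; indexing
        // element (0) here is the dereference the fix guards against.
        return std::nullopt;  // caller surfaces an invalid-argument error
    }
    return t.elems[0].device;
}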
glewlwyd
|
4c5597c155bfbaf6491cf6b83479d241ae66940a
| 1
|
static int check_assertion(struct config_module * config, json_t * j_params, const char * username, json_t * j_scheme_data, json_t * j_assertion) {
int ret, res;
unsigned char * client_data = NULL, * challenge_b64 = NULL, * auth_data = NULL, rpid_hash[32] = {0}, * flags, cdata_hash[32] = {0},
data_signed[128] = {0}, sig[128] = {0}, * counter;
char * challenge_hash = NULL;
const char * rpid = NULL;
size_t client_data_len, challenge_b64_len, auth_data_len, rpid_hash_len = 32, cdata_hash_len = 32, sig_len = 128, counter_value = 0, rpid_len = 0;
json_t * j_client_data = NULL, * j_credential = NULL, * j_query;
gnutls_pubkey_t pubkey = NULL;
gnutls_datum_t pubkey_dat, data, signature;
if (j_scheme_data != NULL && j_assertion != NULL) {
do {
ret = G_OK;
if (!json_is_string(json_object_get(json_object_get(j_scheme_data, "credential"), "rawId")) || !json_string_length(json_object_get(json_object_get(j_scheme_data, "credential"), "rawId"))) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - rawId missing");
ret = G_ERROR_PARAM;
break;
}
j_credential = get_credential(config, j_params, username, json_string_value(json_object_get(json_object_get(j_scheme_data, "credential"), "rawId")));
if (check_result_value(j_credential, G_ERROR_NOT_FOUND)) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - credential ID not found");
ret = G_ERROR_UNAUTHORIZED;
break;
}
if (!json_is_string(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "clientDataJSON")) || !json_string_length(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "clientDataJSON"))) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - clientDataJSON mandatory");
ret = G_ERROR_PARAM;
break;
}
if ((client_data = o_malloc(json_string_length(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "clientDataJSON"))+1)) == NULL) {
y_log_message(Y_LOG_LEVEL_ERROR, "check_assertion - Error allocating resources for client_data");
ret = G_ERROR_MEMORY;
break;
}
if (!o_base64_decode((const unsigned char *)json_string_value(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "clientDataJSON")), json_string_length(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "clientDataJSON")), client_data, &client_data_len)) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - Error o_base64_decode client_data");
ret = G_ERROR_PARAM;
break;
}
client_data[client_data_len] = '\0';
j_client_data = json_loads((const char *)client_data, JSON_DECODE_ANY, NULL);
if (j_client_data == NULL) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - Error parsing JSON client data %s", client_data);
ret = G_ERROR_PARAM;
break;
}
// Step 7
if (0 != o_strcmp("webauthn.get", json_string_value(json_object_get(j_client_data, "type")))) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - clientDataJSON.type invalid");
ret = G_ERROR_PARAM;
break;
}
// Step 8
if (!json_string_length(json_object_get(j_client_data, "challenge"))) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - clientDataJSON.challenge mandatory");
ret = G_ERROR_PARAM;
break;
}
if ((challenge_b64 = o_malloc(json_string_length(json_object_get(j_client_data, "challenge"))+3)) == NULL) {
y_log_message(Y_LOG_LEVEL_ERROR, "register_new_attestation - Error allocating resources for challenge_b64");
ret = G_ERROR_MEMORY;
break;
}
if (!o_base64url_2_base64((unsigned char *)json_string_value(json_object_get(j_client_data, "challenge")), json_string_length(json_object_get(j_client_data, "challenge")), challenge_b64, &challenge_b64_len)) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - clientDataJSON.challenge invalid base64");
ret = G_ERROR_PARAM;
break;
}
challenge_b64[challenge_b64_len] = '\0';
if ((challenge_hash = generate_hash(config->hash_algorithm, (const char *)challenge_b64)) == NULL) {
y_log_message(Y_LOG_LEVEL_ERROR, "register_new_attestation - Error generate_hash for challenge_b64");
ret = G_ERROR;
break;
}
if (0 != o_strcmp(challenge_hash, json_string_value(json_object_get(j_assertion, "challenge_hash")))) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - clientDataJSON.challenge invalid");
ret = G_ERROR_PARAM;
break;
}
// Step 9
if (!json_string_length(json_object_get(j_client_data, "origin"))) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - clientDataJSON.origin mandatory");
ret = G_ERROR_PARAM;
break;
}
if (0 != o_strcmp(json_string_value(json_object_get(j_params, "rp-origin")), json_string_value(json_object_get(j_client_data, "origin")))) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - clientDataJSON.origin invalid - Client send %s, required %s", json_string_value(json_object_get(j_client_data, "origin")), json_string_value(json_object_get(j_params, "rp-origin")));
ret = G_ERROR_PARAM;
break;
}
// Step 10 ??
// Step 11
if (!json_string_length(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "authenticatorData"))) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - authenticatorData mandatory");
ret = G_ERROR_PARAM;
break;
}
if ((auth_data = o_malloc(json_string_length(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "authenticatorData"))+1)) == NULL) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - Error allocating resources for auth_data");
ret = G_ERROR_PARAM;
break;
}
if (!o_base64_decode((const unsigned char *)json_string_value(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "authenticatorData")), json_string_length(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "authenticatorData")), auth_data, &auth_data_len)) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - Error o_base64_decode auth_data");
ret = G_ERROR_PARAM;
break;
}
if (auth_data_len < 37) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - Error authenticatorData invalid");
ret = G_ERROR_PARAM;
break;
}
if (o_strstr(json_string_value(json_object_get(j_params, "rp-origin")), "://") != NULL) {
rpid = o_strstr(json_string_value(json_object_get(j_params, "rp-origin")), "://")+3;
} else {
rpid = json_string_value(json_object_get(j_params, "rp-origin"));
}
if (o_strchr(rpid, ':') != NULL) {
rpid_len = o_strchr(rpid, ':') - rpid;
} else {
rpid_len = o_strlen(rpid);
}
if (!generate_digest_raw(digest_SHA256, (unsigned char *)rpid, rpid_len, rpid_hash, &rpid_hash_len)) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - Error generate_digest_raw for rpid_hash");
ret = G_ERROR_PARAM;
break;
}
if (0 != memcmp(auth_data, rpid_hash, rpid_hash_len)) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - authData.rpIdHash invalid");
ret = G_ERROR_PARAM;
break;
}
flags = auth_data + FLAGS_OFFSET;
// Step 12
if (!(*flags & FLAG_USER_PRESENT)) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - authData.userPresent not set");
ret = G_ERROR_PARAM;
break;
}
// Step 13 ignored for now
//y_log_message(Y_LOG_LEVEL_DEBUG, "authData.userVerified: %d", !!(*flags & FLAG_USER_VERIFY));
// Step 14 ignored for now (no extension)
//y_log_message(Y_LOG_LEVEL_DEBUG, "authData.Extension: %d", !!(*flags & FLAG_ED));
// Step 15
if (!generate_digest_raw(digest_SHA256, client_data, client_data_len, cdata_hash, &cdata_hash_len)) {
y_log_message(Y_LOG_LEVEL_ERROR, "check_assertion - Error generate_digest_raw for cdata_hash");
ret = G_ERROR_PARAM;
break;
}
counter = auth_data + COUNTER_OFFSET;
counter_value = counter[3] | (counter[2] << 8) | (counter[1] << 16) | (counter[0] << 24);
if (gnutls_pubkey_init(&pubkey) < 0) {
y_log_message(Y_LOG_LEVEL_ERROR, "check_assertion - Error gnutls_pubkey_init");
ret = G_ERROR;
break;
}
pubkey_dat.data = (unsigned char *)json_string_value(json_object_get(json_object_get(j_credential, "credential"), "public_key"));
pubkey_dat.size = json_string_length(json_object_get(json_object_get(j_credential, "credential"), "public_key"));
if ((ret = gnutls_pubkey_import(pubkey, &pubkey_dat, GNUTLS_X509_FMT_PEM)) < 0) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - Error gnutls_pubkey_import: %d", ret);
ret = G_ERROR;
break;
}
if (!o_base64url_decode((const unsigned char *)json_string_value(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "signature")), json_string_length(json_object_get(json_object_get(json_object_get(j_scheme_data, "credential"), "response"), "signature")), sig, &sig_len)) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - Error o_base64url_decode signature");
ret = G_ERROR_PARAM;
break;
}
memcpy(data_signed, auth_data, auth_data_len);
memcpy(data_signed+auth_data_len, cdata_hash, cdata_hash_len);
// Let's verify sig over data_signed
data.data = data_signed;
data.size = (auth_data_len+cdata_hash_len);
signature.data = sig;
signature.size = sig_len;
if ((res = gnutls_pubkey_verify_data2(pubkey, GNUTLS_SIGN_ECDSA_SHA256, 0, &data, &signature)) < 0) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - Invalid signature: %d", res);
ret = G_ERROR_UNAUTHORIZED;
break;
}
if ((json_integer_value(json_object_get(json_object_get(j_credential, "credential"), "counter")) || counter_value) && counter_value <= (size_t)json_integer_value(json_object_get(json_object_get(j_credential, "credential"), "counter"))) {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_assertion - counter invalid");
ret = G_ERROR_UNAUTHORIZED;
break;
}
} while (0); // This is not a loop, but a structure where you can easily cancel the rest of the process with breaks
if (ret == G_OK) {
// Update assertion
j_query = json_pack("{sss{sisi}s{sO}}",
"table",
G_TABLE_WEBAUTHN_ASSERTION,
"set",
"gswa_counter",
counter_value,
"gswa_status",
1,
"where",
"gswa_id",
json_object_get(j_assertion, "gswa_id"));
res = h_update(config->conn, j_query, NULL);
json_decref(j_query);
if (res != H_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "check_assertion - Error executing j_query (1)");
config->glewlwyd_module_callback_metrics_increment_counter(config, GLWD_METRICS_DATABSE_ERROR, 1, NULL);
ret = G_ERROR_DB;
} else {
// Update counter in credential if necessary
if (counter) {
j_query = json_pack("{sss{si}s{sO}}",
"table",
G_TABLE_WEBAUTHN_CREDENTIAL,
"set",
"gswc_counter",
counter_value,
"where",
"gswc_id",
json_object_get(json_object_get(j_credential, "credential"), "gswc_id"));
res = h_update(config->conn, j_query, NULL);
json_decref(j_query);
if (res != H_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "check_assertion - Error executing j_query (2)");
config->glewlwyd_module_callback_metrics_increment_counter(config, GLWD_METRICS_DATABSE_ERROR, 1, NULL);
ret = G_ERROR_DB;
}
}
}
} else if (ret == G_ERROR_PARAM) {
j_query = json_pack("{sss{sisi}s{sO}}",
"table",
G_TABLE_WEBAUTHN_ASSERTION,
"set",
"gswa_counter",
counter_value,
"gswa_status",
2,
"where",
"gswa_id",
json_object_get(j_assertion, "gswa_id"));
res = h_update(config->conn, j_query, NULL);
json_decref(j_query);
if (res != H_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "check_assertion - Error executing j_query (3)");
config->glewlwyd_module_callback_metrics_increment_counter(config, GLWD_METRICS_DATABSE_ERROR, 1, NULL);
ret = G_ERROR_DB;
}
} else {
j_query = json_pack("{sss{sisi}s{sO}}",
"table",
G_TABLE_WEBAUTHN_ASSERTION,
"set",
"gswa_counter",
counter_value,
"gswa_status",
3,
"where",
"gswa_id",
json_object_get(j_assertion, "gswa_id"));
res = h_update(config->conn, j_query, NULL);
json_decref(j_query);
if (res != H_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "check_assertion - Error executing j_query (4)");
config->glewlwyd_module_callback_metrics_increment_counter(config, GLWD_METRICS_DATABSE_ERROR, 1, NULL);
ret = G_ERROR_DB;
}
}
o_free(client_data);
o_free(challenge_b64);
o_free(challenge_hash);
o_free(auth_data);
json_decref(j_client_data);
json_decref(j_credential);
gnutls_pubkey_deinit(pubkey);
} else {
ret = G_ERROR_PARAM;
}
return ret;
}
| null | null | 195,661
|
53228481570344946466629404953720331120
| 295
|
Fix possible buffer overflow
|
other
|
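The overflow risk in the function above is the memcpy of decoded, attacker-sized authenticator data into the fixed 128-byte data_signed array. Sizing the signed-data buffer dynamically (or bounds-checking first) removes it; a sketch with an illustrative helper name, not Glewlwyd's API:

#include <cstdint>
#include <cstring>
#include <vector>

std::vector<uint8_t> build_signed_data(const uint8_t* auth_data,
                                       size_t auth_data_len,
                                       const uint8_t cdata_hash[32]) {
    // Allocate exactly what is needed: authenticator data followed by
    // the 32-byte client-data hash, as the WebAuthn signature base.
    std::vector<uint8_t> signed_data(auth_data_len + 32);
    memcpy(signed_data.data(), auth_data, auth_data_len);
    memcpy(signed_data.data() + auth_data_len, cdata_hash, 32);
    return signed_data;  // pass .data()/.size() to the verifier
}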
njs
|
2e00e95473861846aa8538be87db07699d9f676d
| 1
|
njs_array_prototype_splice(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
njs_index_t unused)
{
int64_t i, n, start, length, items, delta, delete;
njs_int_t ret;
njs_value_t *this, value, del_object;
njs_array_t *array, *deleted;
this = njs_argument(args, 0);
ret = njs_value_to_object(vm, this);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = njs_object_length(vm, this, &length);
if (njs_slow_path(ret == NJS_ERROR)) {
return ret;
}
ret = njs_value_to_integer(vm, njs_arg(args, nargs, 1), &start);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
start = (start < 0) ? njs_max(length + start, 0) : njs_min(start, length);
items = 0;
delete = 0;
if (nargs == 2) {
delete = length - start;
} else if (nargs > 2) {
items = nargs - 3;
ret = njs_value_to_integer(vm, njs_arg(args, nargs, 2), &delete);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
delete = njs_min(njs_max(delete, 0), length - start);
}
delta = items - delete;
if (njs_slow_path((length + delta) > NJS_MAX_LENGTH)) {
njs_type_error(vm, "Invalid length");
return NJS_ERROR;
}
/* TODO: ArraySpeciesCreate(). */
deleted = njs_array_alloc(vm, 0, delete, 0);
if (njs_slow_path(deleted == NULL)) {
return NJS_ERROR;
}
if (njs_fast_path(njs_is_fast_array(this) && deleted->object.fast_array)) {
array = njs_array(this);
for (i = 0, n = start; i < delete; i++, n++) {
deleted->start[i] = array->start[n];
}
} else {
njs_set_array(&del_object, deleted);
for (i = 0, n = start; i < delete; i++, n++) {
ret = njs_value_property_i64(vm, this, n, &value);
if (njs_slow_path(ret == NJS_ERROR)) {
return NJS_ERROR;
}
if (ret == NJS_OK) {
/* TODO: CreateDataPropertyOrThrow(). */
ret = njs_value_property_i64_set(vm, &del_object, i, &value);
if (njs_slow_path(ret == NJS_ERROR)) {
return ret;
}
}
}
ret = njs_object_length_set(vm, &del_object, delete);
if (njs_slow_path(ret != NJS_OK)) {
return NJS_ERROR;
}
}
if (njs_fast_path(njs_is_fast_array(this))) {
array = njs_array(this);
if (delta != 0) {
/*
* Relocate the rest of items.
* Index of the first item is in "n".
*/
if (delta > 0) {
ret = njs_array_expand(vm, array, 0, delta);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
}
ret = njs_array_copy_within(vm, this, start + items, start + delete,
array->length - (start + delete), 0);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
array->length += delta;
}
/* Copy new items. */
if (items > 0) {
memcpy(&array->start[start], &args[3],
items * sizeof(njs_value_t));
}
} else {
if (delta != 0) {
ret = njs_array_copy_within(vm, this, start + items, start + delete,
length - (start + delete), delta < 0);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
for (i = length - 1; i >= length + delta; i--) {
ret = njs_value_property_i64_delete(vm, this, i, NULL);
if (njs_slow_path(ret == NJS_ERROR)) {
return NJS_ERROR;
}
}
}
/* Copy new items. */
for (i = 3, n = start; items-- > 0; i++, n++) {
ret = njs_value_property_i64_set(vm, this, n, &args[i]);
if (njs_slow_path(ret == NJS_ERROR)) {
return NJS_ERROR;
}
}
ret = njs_object_length_set(vm, this, length + delta);
if (njs_slow_path(ret != NJS_OK)) {
return NJS_ERROR;
}
}
njs_set_array(&vm->retval, deleted);
return NJS_OK;
}
| null | null | 195,665
|
245501748906215422216555410777131634826
| 155
|
Fixed Array.prototype.splice() with slow "this" argument.
Previously, when the "this" argument was not a fast array but the "deleted" array
was a fast array, the "deleted" array could be left in an uninitialized state if
the "this" argument had gaps.
This fix ensures that "deleted" is properly initialized.
This fixes issue #485 on GitHub.
|
other
|
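The underlying rule is that every slot of the destination array must be written even when the source has gaps, e.g. by pre-filling with an explicit sentinel for absent elements. A simplified C++ sketch using std::optional as the "undefined" sentinel, not njs's value representation:

#include <optional>
#include <vector>

std::vector<std::optional<int>>
copy_range(const std::vector<std::optional<int>>& src,
           size_t start, size_t count) {
    // Pre-initialize every slot so gaps never leave garbage behind.
    std::vector<std::optional<int>> deleted(count, std::nullopt);
    for (size_t i = 0; i < count; i++) {
        if (start + i < src.size() && src[start + i]) {
            deleted[i] = src[start + i];  // present element: copy it
        }
        // absent element: the slot stays at the sentinel instead of
        // remaining uninitialized memory
    }
    return deleted;
}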
mruby
|
38b164ace7d6ae1c367883a3d67d7f559783faad
| 1
|
gen_values(codegen_scope *s, node *t, int val, int limit)
{
int n = 0;
int first = 1;
int slimit = GEN_VAL_STACK_MAX;
if (limit == 0) limit = GEN_LIT_ARY_MAX;
if (cursp() >= slimit) slimit = INT16_MAX;
if (!val) {
while (t) {
codegen(s, t->car, NOVAL);
n++;
t = t->cdr;
}
return n;
}
while (t) {
int is_splat = nint(t->car->car) == NODE_SPLAT;
if (is_splat || n > limit || cursp() >= slimit) { /* flush stack */
pop_n(n);
if (first) {
if (n == 0) {
genop_1(s, OP_LOADNIL, cursp());
}
else {
genop_2(s, OP_ARRAY, cursp(), n);
}
push();
first = 0;
limit = GEN_LIT_ARY_MAX;
}
else if (n > 0) {
pop();
genop_2(s, OP_ARYPUSH, cursp(), n);
push();
}
n = 0;
}
codegen(s, t->car, val);
if (is_splat) {
pop(); pop();
genop_1(s, OP_ARYCAT, cursp());
push();
}
else {
n++;
}
t = t->cdr;
}
if (!first) {
pop();
if (n > 0) {
pop_n(n);
genop_2(s, OP_ARYPUSH, cursp(), n);
}
return -1; /* variable length */
}
return n;
}
| null | null | 195,668
|
65023506442959031648756130424951703203
| 62
|
codegen.c: fix a bug in `gen_values()`.
- Fix limit handling that fails 15-argument method calls.
- Fix too early argument packing in arrays.
|
other
|
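The message points at a boundary condition in chunked packing: values accumulate and are flushed into an array once a limit is reached, and flushing one element late (or early) breaks calls at exactly the limit. A loose sketch of the pattern only, not mruby's code generator; the flush point and output are illustrative:

#include <cstdio>
#include <vector>

void pack_values(const std::vector<int>& items, int limit) {
    int n = 0;
    for (int v : items) {
        if (n == limit) {                 // flush exactly at the limit,
            printf("ARRAY of %d values\n", n);  // not one element past it
            n = 0;
        }
        printf("push %d\n", v);
        n++;
    }
    if (n > 0) printf("ARRAY of %d values\n", n);  // final partial chunk
}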
pjproject
|
856f87c2e97a27b256482dbe0d748b1194355a21
| 1
|
static pj_xml_node *xml_parse_node( pj_pool_t *pool, pj_scanner *scanner)
{
pj_xml_node *node;
pj_str_t end_name;
PJ_CHECK_STACK();
if (*scanner->curptr != '<')
on_syntax_error(scanner);
    /* Handle Processing Instruction (PI) construct (i.e. "<?") */
if (*scanner->curptr == '<' && *(scanner->curptr+1) == '?') {
pj_scan_advance_n(scanner, 2, PJ_FALSE);
for (;;) {
pj_str_t dummy;
pj_scan_get_until_ch(scanner, '?', &dummy);
if (*scanner->curptr=='?' && *(scanner->curptr+1)=='>') {
pj_scan_advance_n(scanner, 2, PJ_TRUE);
break;
} else {
pj_scan_advance_n(scanner, 1, PJ_FALSE);
}
}
return xml_parse_node(pool, scanner);
}
/* Handle comments construct (i.e. "<!") */
if (pj_scan_strcmp(scanner, "<!", 2) == 0) {
pj_scan_advance_n(scanner, 2, PJ_FALSE);
for (;;) {
pj_str_t dummy;
pj_scan_get_until_ch(scanner, '>', &dummy);
if (pj_scan_strcmp(scanner, ">", 1) == 0) {
pj_scan_advance_n(scanner, 1, PJ_TRUE);
break;
} else {
pj_scan_advance_n(scanner, 1, PJ_FALSE);
}
}
return xml_parse_node(pool, scanner);
}
/* Alloc node. */
node = alloc_node(pool);
/* Get '<' */
pj_scan_get_char(scanner);
/* Get node name. */
pj_scan_get_until_chr( scanner, " />\t\r\n", &node->name);
/* Get attributes. */
while (*scanner->curptr != '>' && *scanner->curptr != '/') {
pj_xml_attr *attr = alloc_attr(pool);
pj_scan_get_until_chr( scanner, "=> \t\r\n", &attr->name);
if (*scanner->curptr == '=') {
pj_scan_get_char( scanner );
pj_scan_get_quotes(scanner, "\"'", "\"'", 2, &attr->value);
/* remove quote characters */
++attr->value.ptr;
attr->value.slen -= 2;
}
pj_list_push_back( &node->attr_head, attr );
}
if (*scanner->curptr == '/') {
pj_scan_get_char(scanner);
if (pj_scan_get_char(scanner) != '>')
on_syntax_error(scanner);
return node;
}
/* Enclosing bracket. */
if (pj_scan_get_char(scanner) != '>')
on_syntax_error(scanner);
/* Sub nodes. */
while (*scanner->curptr == '<' && *(scanner->curptr+1) != '/'
&& *(scanner->curptr+1) != '!')
{
pj_xml_node *sub_node = xml_parse_node(pool, scanner);
pj_list_push_back( &node->node_head, sub_node );
}
/* Content. */
if (!pj_scan_is_eof(scanner) && *scanner->curptr != '<') {
pj_scan_get_until_ch(scanner, '<', &node->content);
}
/* CDATA content. */
if (*scanner->curptr == '<' && *(scanner->curptr+1) == '!' &&
pj_scan_strcmp(scanner, "<![CDATA[", 9) == 0)
{
pj_scan_advance_n(scanner, 9, PJ_FALSE);
pj_scan_get_until_ch(scanner, ']', &node->content);
while (pj_scan_strcmp(scanner, "]]>", 3)) {
pj_str_t dummy;
pj_scan_get_until_ch(scanner, ']', &dummy);
}
node->content.slen = scanner->curptr - node->content.ptr;
pj_scan_advance_n(scanner, 3, PJ_TRUE);
}
/* Enclosing node. */
if (pj_scan_get_char(scanner) != '<' || pj_scan_get_char(scanner) != '/')
on_syntax_error(scanner);
pj_scan_get_until_chr(scanner, " \t>", &end_name);
/* Compare name. */
if (pj_stricmp(&node->name, &end_name) != 0)
on_syntax_error(scanner);
/* Enclosing '>' */
if (pj_scan_get_char(scanner) != '>')
on_syntax_error(scanner);
return node;
}
| null | null | 195,670
|
137312951595763938703339557990324963725
| 121
|
Merge pull request from GHSA-5x45-qp78-g4p4
* Prevent infinite loop in scanning xml content
* Simplify scanning method
* Optimization
|
other
|
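The hang comes from scan loops that search for a closing token ("?>", "]]>") without checking for end-of-input, so truncated content can spin without ever advancing. A sketch of the bounded variant; Scanner is a stand-in for pj_scanner, not pjproject's API:

#include <cstddef>

struct Scanner { const char* cur; const char* end; };

bool skip_until_pi_end(Scanner* s) {
    while (s->cur + 1 < s->end) {        // bounded by remaining input
        if (s->cur[0] == '?' && s->cur[1] == '>') {
            s->cur += 2;
            return true;                 // found "?>"
        }
        s->cur++;
    }
    return false;  // EOF reached: report a syntax error instead of hanging
}

Making the loop condition depend on the remaining input length guarantees termination regardless of content, which is exactly the property the advisory fix restores.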