One record per extracted Python function, with the following fields:

| Field | Type |
|---|---|
| nwo | string (5–86 chars) |
| sha | string (40 chars) |
| path | string (4–189 chars) |
| language | string (1 class: `python`) |
| identifier | string (1–94 chars) |
| parameters | string (2–4.03k chars) |
| argument_list | string (1 class) |
| return_statement | string (0–11.5k chars) |
| docstring | string (1–33.2k chars) |
| docstring_summary | string (0–5.15k chars) |
| docstring_tokens | list |
| function | string (34–151k chars) |
| function_tokens | list |
| url | string (90–278 chars) |
---

**jeog/TDAmeritradeAPI** · `set_certificate_bundle_path(path)`
path: `python/tdma_api/auth.py` · language: python · sha: `91c738afd7d57b54f6231170bd64c2550fafd34d`
summary: Set certificate bundle file (.pem) path for ssl/tls host authentication.

```python
def set_certificate_bundle_path(path):
    """Set certificate bundle file(.pem) path for ssl/tls host authentication.

    If library is built against default ssl/tls library the default certificate
    store should be used. If not(a connection error is returned) you'll have to
    provide a certificate bundle to the connection libraries.

    def set_certificate_bundle_path(path);

        path    ::  str  ::  path to the certificate bundle file(.pem)

        returns -> None
        throws  -> LibraryNotLoaded, CLibException
    """
    clib.set_str('SetCertificateBundlePath_ABI', path)
```

Source: https://github.com/jeog/TDAmeritradeAPI/blob/91c738afd7d57b54f6231170bd64c2550fafd34d/python/tdma_api/auth.py#L145-L160
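A minimal usage sketch, assuming the package imports as `tdma_api` (matching the record's path) and that the underlying C/C++ library is already loaded; the bundle location is a hypothetical example:

```python
from tdma_api import auth

# Hypothetical CA bundle location; any .pem bundle accepted by the
# connection libraries works here.
BUNDLE = "/etc/ssl/certs/ca-certificates.crt"

# Per the docstring: returns None, raises LibraryNotLoaded / CLibException.
auth.set_certificate_bundle_path(BUNDLE)
```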
---

**su2code/SU2** · `obj_df(x,project)`
path: `SU2_PY/SU2/opt/scipy_tools.py` · language: python · sha: `72b2fa977b64b9683a388920f05298a40d39e5c5`
summary: `dobj = obj_df(x,project)`: Objective Function Gradients, SU2 Project interface to scipy.fmin_slsqp.

```python
def obj_df(x,project):
    """ dobj = obj_df(x,project)

        Objective Function Gradients
        SU2 Project interface to scipy.fmin_slsqp

        su2:         df(x), list[nobj x dim]
        scipy_slsqp: df(x), ndarray[dim]
    """

    dobj_list = project.obj_df(x)
    dobj=[0.0]*len(dobj_list[0])

    for this_dobj in dobj_list:
        idv=0
        for this_dv_dobj in this_dobj:
            dobj[idv] = dobj[idv]+this_dv_dobj;
            idv+=1
    dobj = array( dobj )

    return dobj
```

Source: https://github.com/su2code/SU2/blob/72b2fa977b64b9683a388920f05298a40d39e5c5/SU2_PY/SU2/opt/scipy_tools.py#L390-L410
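The loop above sums the per-objective gradient rows (`list[nobj x dim]`) into a single flat gradient (`ndarray[dim]`). A self-contained sketch of the same reduction, with `numpy` standing in for the module's `array` import and a made-up gradient list in place of `project.obj_df(x)`:

```python
import numpy as np

# Two objectives, three design variables: list[nobj x dim].
dobj_list = [[1.0, 2.0, 3.0],
             [0.5, 0.5, 0.5]]

# Column-wise sum, equivalent to the accumulation loop in obj_df.
dobj = np.sum(np.asarray(dobj_list), axis=0)
print(dobj)  # [1.5 2.5 3.5]
```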
---

**hanpfei/chromium-net** · `BaseModel.deserialize(self, content)`
path: `third_party/catapult/third_party/apiclient/googleapiclient/model.py` · language: python · sha: `392cc1fa3a8f92f42e4071ab6e674d8e0482f83f`
summary: Perform the actual deserialization from response string to Python object.

```python
def deserialize(self, content):
    """Perform the actual deserialization from response string to Python
    object.
    Args:
      content: string, the body of the HTTP response
    Returns:
      The body de-serialized as a Python object.
    """
    _abstract()
```

Source: https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/apiclient/googleapiclient/model.py#L223-L233
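`_abstract()` marks this as a hook for subclasses to override. A minimal concrete override in the same spirit (a sketch, not the library's actual JSON model class):

```python
import json

class JsonLikeModel:
    def deserialize(self, content):
        # HTTP response body -> Python object.
        return json.loads(content)

print(JsonLikeModel().deserialize('{"ok": true}'))  # {'ok': True}
```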
---

**nucleic/atom** · `Person.debug_print(self, change)`
path: `examples/tutorial/person.py` · language: python · sha: `9f0cb2a8101dd63c354a98ebc7489b2c616dc82a`
summary: Prints out a debug message whenever the person's age changes.

```python
def debug_print(self, change):
        """Prints out a debug message whenever the person's age changes."""
        if self.debug:
            templ = "{first} {last} is {age} years old."
            s = templ.format(
                first=self.first_name,
                last=self.last_name,
                age=self.age,
            )
            print(s)
```

Source: https://github.com/nucleic/atom/blob/9f0cb2a8101dd63c354a98ebc7489b2c616dc82a/examples/tutorial/person.py#L26-L35
---

**catboost/catboost** · `try_iter_content(types, depth=0)`
path: `contrib/python/jedi/jedi/evaluate/arguments.py` · language: python · sha: `167f64f237114a4d10b2b4ee42adb4569137debe`
summary: Helper method for static analysis.

```python
def try_iter_content(types, depth=0):
    """Helper method for static analysis."""
    if depth > 10:
        # It's possible that a loop has references on itself (especially with
        # CompiledObject). Therefore don't loop infinitely.
        return
    for typ in types:
        try:
            f = typ.py__iter__
        except AttributeError:
            pass
        else:
            for lazy_context in f():
                try_iter_content(lazy_context.infer(), depth + 1)
```

Source: https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/jedi/jedi/evaluate/arguments.py#L16-L30
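The `depth > 10` guard exists because inferred values can reference themselves. A tiny self-contained sketch of the same pattern on an explicitly cyclic structure (the `Node` class is invented for illustration):

```python
class Node:
    def __init__(self):
        self.children = []

a = Node()
a.children.append(a)  # a structure that references itself

def walk(node, depth=0):
    """Count visits with the same depth cutoff try_iter_content uses."""
    if depth > 10:
        return 0  # don't loop infinitely
    return 1 + sum(walk(c, depth + 1) for c in node.children)

print(walk(a))  # 11: the guard terminates an otherwise infinite recursion
```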
---

**wxWidgets/wxPython-Classic** · `DateSpan.SetWeeks(*args, **kwargs)`
path: `src/osx_carbon/_misc.py` · language: python · sha: `19571e1ae65f1ac445f5491474121998c97a1bf0`
summary: SetWeeks(self, int n) -> DateSpan

```python
def SetWeeks(*args, **kwargs):
        """SetWeeks(self, int n) -> DateSpan"""
        return _misc_.DateSpan_SetWeeks(*args, **kwargs)
```

Source: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L4661-L4663
---

**hanpfei/chromium-net** · `Layer1.describe_activity_type(self, domain, activity_name, activity_version)`
path: `third_party/catapult/third_party/gsutil/third_party/boto/boto/swf/layer1.py` · language: python · sha: `392cc1fa3a8f92f42e4071ab6e674d8e0482f83f`
summary: Returns information about the specified activity type, including configuration settings provided at registration time as well as other general information about the type.

```python
def describe_activity_type(self, domain, activity_name, activity_version):
        """
        Returns information about the specified activity type. This
        includes configuration settings provided at registration time
        as well as other general information about the type.
        :type domain: string
        :param domain: The name of the domain in which the activity
            type is registered.
        :type activity_name: string
        :param activity_name: The name of this activity.
        :type activity_version: string
        :param activity_version: The version of this activity.
        :raises: UnknownResourceFault, SWFOperationNotPermittedError
        """
        return self.json_request('DescribeActivityType', {
            'domain': domain,
            'activityType': {'name': activity_name,
                             'version': activity_version}
        })
```

Source: https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/swf/layer1.py#L909-L931
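A hedged usage sketch for the call above, assuming AWS credentials are configured and a matching activity type is registered; the domain, name, and version strings are hypothetical:

```python
from boto.swf.layer1 import Layer1

conn = Layer1()  # picks up credentials from the environment / boto config
info = conn.describe_activity_type('my-domain', 'resize-image', '1')

# SWF DescribeActivityType responses carry 'typeInfo' and 'configuration'.
print(info['typeInfo']['status'])
```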
---

**wxWidgets/wxPython-Classic** · `PyWizardPage.DoGetSize(*args, **kwargs)`
path: `src/osx_cocoa/wizard.py` · language: python · sha: `19571e1ae65f1ac445f5491474121998c97a1bf0`
summary: DoGetSize() -> (width, height)

```python
def DoGetSize(*args, **kwargs):
        """DoGetSize() -> (width, height)"""
        return _wizard.PyWizardPage_DoGetSize(*args, **kwargs)
```

Source: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/wizard.py#L167-L169
---

**hanpfei/chromium-net** · `DocComment.GetTargetToken(self)`
path: `third_party/catapult/third_party/closure_linter/closure_linter/statetracker.py` · language: python · sha: `392cc1fa3a8f92f42e4071ab6e674d8e0482f83f`
summary: Get this comment's target token.

```python
def GetTargetToken(self):
    """Get this comment's target token.
    Returns:
      The token that is the target of this comment, or None if there isn't one.
    """
    # File overviews describe the file, not a token.
    if self.HasFlag('fileoverview'):
      return
    skip_types = frozenset([
        Type.WHITESPACE,
        Type.BLANK_LINE,
        Type.START_PAREN])
    target_types = frozenset([
        Type.FUNCTION_NAME,
        Type.IDENTIFIER,
        Type.SIMPLE_LVALUE])
    token = self.end_token.next
    while token:
      if token.type in target_types:
        return token
      # Handles the case of a comment on "var foo = ...'
      if token.IsKeyword('var'):
        next_code_token = tokenutil.CustomSearch(
            token,
            lambda t: t.type not in Type.NON_CODE_TYPES)
        if (next_code_token and
            next_code_token.IsType(Type.SIMPLE_LVALUE)):
          return next_code_token
        return
      # Handles the case of a comment on "function foo () {}"
      if token.type is Type.FUNCTION_DECLARATION:
        next_code_token = tokenutil.CustomSearch(
            token,
            lambda t: t.type not in Type.NON_CODE_TYPES)
        if next_code_token.IsType(Type.FUNCTION_NAME):
          return next_code_token
        return
      # Skip types will end the search.
      if token.type not in skip_types:
        return
      token = token.next
```

Source: https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/closure_linter/closure_linter/statetracker.py#L452-L505
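The method is a forward scan over a linked token list: skip layout tokens, stop at the first plausible target, bail out on anything else. A stripped-down sketch of that scan pattern (the `Token` class and type names here are invented and far simpler than the linter's):

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class Token:
    type: str
    next: Optional["Token"] = None

SKIP_TYPES = {"whitespace", "blank_line"}
TARGET_TYPES = {"identifier", "function_name"}

def target_of(comment_end: Token) -> Optional[Token]:
    token = comment_end.next
    while token:
        if token.type in TARGET_TYPES:
            return token        # found what the comment documents
        if token.type not in SKIP_TYPES:
            return None         # unexpected token ends the search
        token = token.next
    return None

ident = Token("identifier")
ws = Token("whitespace", next=ident)
print(target_of(Token("comment", next=ws)).type)  # identifier
```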
---

**nlohmann/json** · `IsBlankLine(line)`
path: `third_party/cpplint/cpplint.py` · language: python · sha: `eb2182414749825be086c825edb5229e5c28503d`
summary: Returns true if the given line is blank.

```python
def IsBlankLine(line):
  """Returns true if the given line is blank.
  We consider a line to be blank if the line is empty or consists of
  only white spaces.
  Args:
    line: A line of a string.
  Returns:
    True, if the given line is blank.
  """
  return not line or line.isspace()
```

Source: https://github.com/nlohmann/json/blob/eb2182414749825be086c825edb5229e5c28503d/third_party/cpplint/cpplint.py#L3513-L3525
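A quick check of the two cases the docstring names (empty, whitespace-only), assuming `IsBlankLine` from the record above is in scope:

```python
print(IsBlankLine(''))        # True: empty line
print(IsBlankLine(' \t '))    # True: only white spaces
print(IsBlankLine('int x;'))  # False
```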
---

**ricardoquesada/Spidermonkey** · `Parser.p_Ellipsis(self, p)`
path: `dom/bindings/parser/WebIDL.py` · language: python · sha: `4a75ea2543408bd1b2c515aa95901523eeef7858`
summary: Ellipsis : ELLIPSIS

```python
def p_Ellipsis(self, p):
        """
            Ellipsis : ELLIPSIS
        """
        p[0] = True
```

Source: https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/dom/bindings/parser/WebIDL.py#L5016-L5020
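In PLY (the parser generator WebIDL.py is built on), a `p_*` method's docstring is the grammar production itself, and `p[0]` is the semantic value of the left-hand side, so matching `ELLIPSIS` yields `True`. A minimal standalone PLY sketch of the same convention, with an invented one-token grammar:

```python
import ply.lex as lex
import ply.yacc as yacc

tokens = ('ELLIPSIS',)
t_ELLIPSIS = r'\.\.\.'
t_ignore = ' '

def t_error(t):
    t.lexer.skip(1)

def p_Ellipsis(p):
    """
        Ellipsis : ELLIPSIS
    """
    p[0] = True  # presence of '...' becomes the boolean semantic value

def p_error(p):
    pass

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('...'))  # True
```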
---

**aws/lumberyard** · `Part.from_field(cls, field, encoding)`
path: `dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/requests_toolbelt/multipart/encoder.py` · language: python · sha: `f85344403c1c2e77ec8c75deb2c116e97b713217`
summary: Create a part from a Request Field generated by urllib3.

```python
def from_field(cls, field, encoding):
        """Create a part from a Request Field generated by urllib3."""
        headers = encode_with(field.render_headers(), encoding)
        body = coerce_data(field.data, encoding)
        return cls(headers, body)
```

Source: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/requests_toolbelt/multipart/encoder.py#L485-L489
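The `field` argument is a `urllib3` `RequestField`. A small sketch of constructing one and rendering the headers that `from_field` passes through `encode_with` (the name, filename, and content below are arbitrary):

```python
from urllib3.fields import RequestField

field = RequestField(name="file", data=b"hello", filename="hello.txt")
field.make_multipart(content_type="text/plain")

# These are the headers Part.from_field encodes for the multipart body.
print(field.render_headers())
```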
---

**Illumina/strelka** · `validateFixExistingDirArg(argDir,label)`
path: `src/python/lib/configureUtil.py` · language: python · sha: `d7377443b62319f7c7bd70c241c4b2df3459e29a`
summary: Convert directory arg to absolute path and check that it exists.

```python
def validateFixExistingDirArg(argDir,label) :
    """
    convert directory arg to absolute path and check that it exists
    """
    return _validateFixArgHelper(argDir,label,os.path.isdir)
```

Source: https://github.com/Illumina/strelka/blob/d7377443b62319f7c7bd70c241c4b2df3459e29a/src/python/lib/configureUtil.py#L186-L190
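`_validateFixArgHelper` is not part of this record; judging from the name and docstring, a plausible standalone equivalent (an assumption, not Strelka's actual helper) would be:

```python
import os

def validate_existing_dir(arg_dir, label):
    """Convert a directory argument to an absolute path and check it exists."""
    arg_dir = os.path.abspath(arg_dir)
    if not os.path.isdir(arg_dir):
        raise OSError("can't find %s directory: %s" % (label, arg_dir))
    return arg_dir

print(validate_existing_dir(".", "work"))
```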
---

**ApolloAuto/apollo** · `ArgManager.args(self)`
path: `scripts/record_map_data.py` · language: python · sha: `463fb82f9e979d02dcb25044e60931293ab2dba0`
summary: Get parsed args.

```python
def args(self):
        """Get parsed args."""
        if self._args is None:
            self._args = self.parser.parse_args()
        return self._args
```

Source: https://github.com/ApolloAuto/apollo/blob/463fb82f9e979d02dcb25044e60931293ab2dba0/scripts/record_map_data.py#L94-L98
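The record shows the classic parse-once, cache-forever accessor. A runnable sketch with an assumed constructor (the record only shows the accessor):

```python
import argparse

class ArgManager:
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument('--name', default='demo')
        self._args = None

    def args(self):
        """Get parsed args, parsing lazily on first access."""
        if self._args is None:
            self._args = self.parser.parse_args()
        return self._args

print(ArgManager().args().name)  # 'demo' unless --name is passed
```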
---

**OSGeo/gdal** · `MajorObject.SetMetadata(self, *args)`
path: `swig/python/osgeo/ogr.py` · language: python · sha: `3748fc4ba4fba727492774b2b908a2130c864a83`
summary: `SetMetadata(MajorObject self, char ** papszMetadata, char const * pszDomain="") -> CPLErr` / `SetMetadata(MajorObject self, char * pszMetadataString, char const * pszDomain="") -> CPLErr`

```python
def SetMetadata(self, *args):
        r"""
        SetMetadata(MajorObject self, char ** papszMetadata, char const * pszDomain="") -> CPLErr
        SetMetadata(MajorObject self, char * pszMetadataString, char const * pszDomain="") -> CPLErr
        """
        return _ogr.MajorObject_SetMetadata(self, *args)
```

Source: https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/osgeo/ogr.py#L436-L441
---

**panda3d/panda3d** · `AIDGameObject.announceGenerate(self)`
path: `samples/networking/03-distributed-node/AIDGameObject.py` · language: python · sha: `833ad89ebad58395d0af0b7ec08538e5e4308265`
summary: The AI has created this object, so we send its distributed object ID over to the client; the client can then grab the object and use it to communicate with the AI (alternatively, store it in the Client Repository in self.cr).

```python
def announceGenerate(self):
        """ The AI has created this object, so we send it's distributed object ID
        over to the client.  That way the client can actually grab the object
        and use it to communicate with the AI.  Alternatively store it in the
        Client Repository in self.cr """
        base.messenger.send(self.cr.uniqueName('AIDGameObjectGenerated'), [self.doId])
        # call the base class method
        DistributedObject.announceGenerate(self)
```

Source: https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/samples/networking/03-distributed-node/AIDGameObject.py#L10-L17
---

**ChromiumWebApps/chromium** · `_CompilerDispatcher.__ProcessResult(self, result)`
path: `tools/clang/scripts/run_tool.py` · language: python · sha: `c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7`
summary: Handles result processing.

```python
def __ProcessResult(self, result):
    """Handles result processing.
    Args:
      result: The result dictionary returned by _ExecuteTool.
    """
    if result['status']:
      self.__success_count += 1
      for k, v in result['edits'].iteritems():
        self.__edits[k].extend(v)
    else:
      self.__failed_count += 1
      sys.stdout.write('\nFailed to process %s\n' % result['filename'])
      sys.stdout.write(result['stderr'])
      sys.stdout.write('\n')
    percentage = (
        float(self.__success_count + self.__failed_count) /
        len(self.__filenames)) * 100
    sys.stdout.write('Succeeded: %d, Failed: %d [%.2f%%]\r' % (
        self.__success_count, self.__failed_count, percentage))
    sys.stdout.flush()
```

Source: https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/clang/scripts/run_tool.py#L163-L183
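The original is Python 2 (`iteritems`). Its progress-reporting idea, a carriage return with no newline so each update overwrites the previous one, works the same in Python 3; a self-contained sketch with fake results:

```python
import sys

results = [True, True, False, True]
succeeded = failed = 0
for ok in results:
    if ok:
        succeeded += 1
    else:
        failed += 1
    percentage = float(succeeded + failed) / len(results) * 100
    sys.stdout.write('Succeeded: %d, Failed: %d [%.2f%%]\r'
                     % (succeeded, failed, percentage))
    sys.stdout.flush()  # '\r' keeps rewriting the same line
sys.stdout.write('\n')
```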
---

**BlzFans/wke** · `build_py.find_data_files(self, package, src_dir)`
path: `cygwin/lib/python2.6/distutils/command/build_py.py` · language: python · sha: `b0fa21158312e40c5fbd84682d643022b6c34a93`
summary: Return filenames for package's data files in 'src_dir'.

```python
def find_data_files (self, package, src_dir):
        """Return filenames for package's data files in 'src_dir'"""
        globs = (self.package_data.get('', [])
                 + self.package_data.get(package, []))
        files = []
        for pattern in globs:
            # Each pattern has to be converted to a platform-specific path
            filelist = glob(os.path.join(src_dir, convert_path(pattern)))
            # Files that match more than one pattern are only added once
            files.extend([fn for fn in filelist if fn not in files])
        return files
```

Source: https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/distutils/command/build_py.py#L129-L139
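The method merges the matches of several glob patterns while keeping only the first occurrence of each file. The same logic detached from distutils (directory and patterns below are arbitrary):

```python
import os
from glob import glob

def find_data_files(src_dir, patterns):
    files = []
    for pattern in patterns:
        filelist = glob(os.path.join(src_dir, pattern))
        # Files that match more than one pattern are only added once.
        files.extend(fn for fn in filelist if fn not in files)
    return files

print(find_data_files(".", ["*.py", "*.p*"]))
```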
---

**kichik/nsis** · `pch_emitter(target, source, env)`
path: `SCons/Tools/mstoolkit.py` · language: python · sha: `e39fe70400b823ac3d00321e338cf3410634b10a`
summary: Sets up the PDB dependencies for a pch file, and adds the object file target.

```python
def pch_emitter(target, source, env):
	"""Sets up the PDB dependencies for a pch file, and adds the object
	file target."""
	validate_vars(env)
	pch = None
	obj = None
	for t in target:
		if SCons.Util.splitext(str(t))[1] == '.pch':
			pch = t
		if SCons.Util.splitext(str(t))[1] == '.obj':
			obj = t
	if not obj:
		obj = SCons.Util.splitext(str(pch))[0]+'.obj'
	target = [pch, obj] # pch must be first, and obj second for the PCHCOM to work
	if 'PDB' in env and env['PDB']:
		env.SideEffect(env['PDB'], target)
		env.Precious(env['PDB'])
	return (target, source)
```

Source: https://github.com/kichik/nsis/blob/e39fe70400b823ac3d00321e338cf3410634b10a/SCons/Tools/mstoolkit.py#L104-L128
---

**wxWidgets/wxPython-Classic** · `WindowCreateEvent.__init__(self, *args, **kwargs)`
path: `src/osx_carbon/_core.py` · language: python · sha: `19571e1ae65f1ac445f5491474121998c97a1bf0`
summary: __init__(self, Window win=None) -> WindowCreateEvent

```python
def __init__(self, *args, **kwargs):
        """
        __init__(self, Window win=None) -> WindowCreateEvent
        The EVT_WINDOW_CREATE event is sent as soon as the window object (the
        underlying GUI object) exists.
        """
        _core_.WindowCreateEvent_swiginit(self,_core_.new_WindowCreateEvent(*args, **kwargs))
```

Source: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L7332-L7339
---

**Polidea/SiriusObfuscator** · `SBValue.__eol_test__(val)`
path: `SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py` · language: python · sha: `b0e590d8130e97856afe578869b83a209e2b19be`
summary: Default function for end of list test takes an SBValue object.

```python
def __eol_test__(val):
        """Default function for end of list test takes an SBValue object.
        Return True if val is invalid or it corresponds to a null pointer.
        Otherwise, return False.
        """
        if not val or val.GetValueAsUnsigned() == 0:
            return True
        else:
            return False
```

Source: https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L11724-L11733
---

**apple/swift-lldb** · `SBDebugger.SetErrorFileHandle(self, f, transfer_ownership)`
path: `scripts/Python/static-binding/lldb.py` · language: python · sha: `d74be846ef3e62de946df343e8c234bde93a8912`
summary: SetErrorFileHandle(SBDebugger self, FILE * f, bool transfer_ownership)

```python
def SetErrorFileHandle(self, f, transfer_ownership):
        """SetErrorFileHandle(SBDebugger self, FILE * f, bool transfer_ownership)"""
        return _lldb.SBDebugger_SetErrorFileHandle(self, f, transfer_ownership)
```

Source: https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L3887-L3889
---

**miyosuda/TensorFlowAndroidMNIST** · `ReaderSource.__init__`
path: `jni-build/jni/include/tensorflow/contrib/learn/python/learn/dataframe/transforms/reader_source.py` · language: python · sha: `7b5a4603d2780a8a2834575706e9001977524007`
summary: Initializes a ReaderSource.

```python
def __init__(self,
               reader_cls,
               work_units,
               reader_kwargs=None,
               enqueue_size=None,
               batch_size=1,
               queue_capacity=None,
               shuffle=False,
               min_after_dequeue=None,
               num_threads=1,
               seed=None):
    """Initializes a ReaderSource.
    Args:
      reader_cls: A subclass of `tesorflow.ReaderBase` that will be used to read
        from `work_units`.
      work_units: A list that describes the source(s) of data to read.
        Typically, this is a list of filenames.
      reader_kwargs: A dictionary of kwargs to be passed to `reader_cls` when it
        is constructed.
      enqueue_size: block size for each read operation.
      batch_size: The desired batch size of output. Defaults to 1.
      queue_capacity: Capacity of the queue. Defaults to 10 * `batch_size`.
      shuffle: Whether records will be shuffled before returning. Defaults to
        false.
      min_after_dequeue: Minimum number of elements in the queue to allow a
        dequeue operation. Only used when `shuffle` is true. Defaults to
        `queue_capacity` / 4.
      num_threads: Number of threads that will be used for reading. Each thread
        has its own instance of `reader_cls`.
      seed: A seed used for shuffling. Only used if `shuffle` is true.
    """
    super(ReaderSource, self).__init__()
    self._reader_cls = reader_cls
    self._reader_kwargs = reader_kwargs
    self._work_units = work_units
    self._reader_kwargs = {} if reader_kwargs is None else reader_kwargs
    if enqueue_size is None:
      enqueue_size = max(1, int(batch_size / num_threads))
    self._enqueue_size = enqueue_size
    self._batch_size = batch_size
    self._queue_capacity = (batch_size * 10 if queue_capacity is None else
                            queue_capacity)
    self._shuffle = shuffle
    self._min_after_dequeue = int(self.queue_capacity / 4 if min_after_dequeue
                                  is None else min_after_dequeue)
    self._num_threads = num_threads
    self._seed = seed
```

Source: https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/learn/python/learn/dataframe/transforms/reader_source.py#L29-L76
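Most of this constructor is default arithmetic: `enqueue_size` from batch size and thread count, `queue_capacity` from batch size, `min_after_dequeue` from capacity. That arithmetic isolated into a runnable sketch:

```python
def derived_defaults(batch_size=1, num_threads=1, enqueue_size=None,
                     queue_capacity=None, min_after_dequeue=None):
    """Reproduces just the default computations from ReaderSource.__init__."""
    if enqueue_size is None:
        enqueue_size = max(1, int(batch_size / num_threads))
    if queue_capacity is None:
        queue_capacity = batch_size * 10
    if min_after_dequeue is None:
        min_after_dequeue = int(queue_capacity / 4)
    return enqueue_size, queue_capacity, min_after_dequeue

print(derived_defaults(batch_size=32, num_threads=4))  # (8, 320, 80)
```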
---

**aws/lumberyard** · `Policy.header_fetch_parse(self, name, value)`
path: `dev/Tools/Python/3.7.10/windows/Lib/email/_policybase.py` · language: python · sha: `f85344403c1c2e77ec8c75deb2c116e97b713217`
summary: Given the header name and the value from the model, return the value to be returned to the application program that is requesting that header.

```python
def header_fetch_parse(self, name, value):
        """Given the header name and the value from the model, return the value
        to be returned to the application program that is requesting that
        header.  The value passed in by the email package may contain
        surrogateescaped binary data if the lines were parsed by a BytesParser.
        The returned value should not contain any surrogateescaped data.
        """
        raise NotImplementedError
```

Source: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/email/_policybase.py#L238-L246
---

**zju3dv/clean-pvnet** · `is_same_transform(matrix0, matrix1)`
path: `lib/utils/meshrenderer/pysixd/transform.py` · language: python · sha: `5870c509e3cc205e1bb28910a7b1a9a3c8add9a8`
summary: Return True if two matrices perform same transformation.

```python
def is_same_transform(matrix0, matrix1):
    """Return True if two matrices perform same transformation.
    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
    True
    >>> is_same_transform(numpy.identity(4), random_rotation_matrix())
    False
    """
    matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    matrix0 /= matrix0[3, 3]
    matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    matrix1 /= matrix1[3, 3]
    return numpy.allclose(matrix0, matrix1)
```

Source: https://github.com/zju3dv/clean-pvnet/blob/5870c509e3cc205e1bb28910a7b1a9a3c8add9a8/lib/utils/meshrenderer/pysixd/transform.py#L1861-L1874
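Because both matrices are normalized by their `[3, 3]` element, two homogeneous matrices that differ only by a scalar factor compare equal. Assuming `is_same_transform` from the record above is in scope:

```python
import numpy

m = numpy.identity(4)
print(is_same_transform(m, 2.0 * m))  # True: scaling cancels at [3, 3]
print(is_same_transform(m, m + 1))    # False: a genuinely different transform
```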
---

**flink-extended/dl-on-flink** · `_get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1)`
path: `flink-ml-tensorflow/python/flink_ml_tensorflow/gpu_info.py` · language: python · sha: `60646aa9520f49619b64e9ff03ce73959e8a3858`
summary: Get available GPUs according to utilization thresholds.

```python
def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1):
    """Get available GPUs according to utilization thresholds.
    Args:
      :max_gpu_utilization: percent utilization threshold to consider a GPU "free"
      :min_free_memory: percent free memory to consider a GPU "free"
      :num_gpu: number of requested GPUs
    Returns:
      A tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids, and minimum_free_memory
      is the lowest amount of free memory available on the available_gpus.
    """
    def get_gpu_info():
        # Get the gpu information
        gpu_info = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits",
                                            "--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu"]).decode()
        gpu_info = gpu_info.split('\n')
        gpu_info_array = []
        # Check each gpu
        for line in gpu_info:
            if len(line) > 0:
                gpu_id, total_memory, free_memory, used_memory, gpu_util = line.split(',')
                gpu_memory_util = float(used_memory) / float(total_memory)
                gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id))
        return (gpu_info_array)
    # Read the gpu information multiple times
    num_times_to_average = 5
    current_array = []
    for ind in range(num_times_to_average):
        current_array.append(get_gpu_info())
        time.sleep(1)
    # Get number of gpus
    num_gpus = len(current_array[0])
    # Average the gpu information
    avg_array = [(0, 0, str(x)) for x in range(num_gpus)]
    for ind in range(num_times_to_average):
        for gpu_ind in range(num_gpus):
            avg_array[gpu_ind] = (avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0],
                                  avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1], avg_array[gpu_ind][2])
    for gpu_ind in range(num_gpus):
        avg_array[gpu_ind] = (
        float(avg_array[gpu_ind][0]) / num_times_to_average, float(avg_array[gpu_ind][1]) / num_times_to_average,
        avg_array[gpu_ind][2])
    avg_array.sort()
    gpus_found = 0
    gpus_to_use = ""
    free_memory = 1.0
    # Return the least utilized GPUs if it's utilized less than max_gpu_utilization and amount of free memory is at least min_free_memory
    # Otherwise, run in cpu only mode
    for current_gpu in avg_array:
        if current_gpu[0] < max_gpu_utilization and (1 - current_gpu[1]) > min_free_memory:
            if gpus_found == 0:
                gpus_to_use = current_gpu[2]
                free_memory = 1 - current_gpu[1]
            else:
                gpus_to_use = gpus_to_use + "," + current_gpu[2]
                free_memory = min(free_memory, 1 - current_gpu[1])
            gpus_found = gpus_found + 1
        if gpus_found == num_gpu:
            break
    return gpus_to_use, free_memory
```

Source: https://github.com/flink-extended/dl-on-flink/blob/60646aa9520f49619b64e9ff03ce73959e8a3858/flink-ml-tensorflow/python/flink_ml_tensorflow/gpu_info.py#L128-L202
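The inner `get_gpu_info` parses `nvidia-smi` CSV output. That parsing step on a canned sample, so it runs without a GPU; the two fake lines follow the `--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu` column order:

```python
sample = "0, 16384, 12000, 4384, 7\n1, 16384, 2000, 14384, 93\n"

gpu_info_array = []
for line in sample.split('\n'):
    if len(line) > 0:
        gpu_id, total, free, used, util = line.split(',')
        gpu_info_array.append((float(util), float(used) / float(total),
                               gpu_id.strip()))

# Tuples sort by (gpu utilization, memory utilization): idlest GPU first.
print(sorted(gpu_info_array)[0][2])  # '0'
```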
---

**aws/lumberyard** · `validate_argsort_with_ascending(ascending, args, kwargs)`
path: `dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/compat/numpy/function.py` · language: python · sha: `f85344403c1c2e77ec8c75deb2c116e97b713217`
summary: If 'Categorical.argsort' is called via the 'numpy' library, the first parameter in its signature is 'axis', which takes either an integer or 'None', so check if the 'ascending' parameter has either integer type or is None, since 'ascending' itself should be a boolean.

```python
def validate_argsort_with_ascending(ascending, args, kwargs):
    """
    If 'Categorical.argsort' is called via the 'numpy' library, the
    first parameter in its signature is 'axis', which takes either
    an integer or 'None', so check if the 'ascending' parameter has
    either integer type or is None, since 'ascending' itself should
    be a boolean
    """
    if is_integer(ascending) or ascending is None:
        args = (ascending,) + args
        ascending = True
    validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
    return ascending
```

Source: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/compat/numpy/function.py#L133-L147
| 
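A minimal sketch of the compat shim in action, assuming an older numpy that forwards `axis`, `kind`, and `order` positionally (the situation this validator targets):

    import numpy as np
    import pandas as pd

    cat = pd.Categorical(["b", "a", "c"])
    # Dispatched through numpy, the 'axis' value lands in the 'ascending'
    # slot; the validator shifts it into *args, restores ascending=True,
    # and the result matches cat.argsort().
    order = np.argsort(cat)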
	miyosuda/TensorFlowAndroidDemo | 
	35903e0221aa5f109ea2dbef27f20b52e317f42d | 
	jni-build/jni/include/tensorflow/contrib/graph_editor/util.py | 
	python | 
	get_tensors | 
	(graph) | 
	return ts | 
	get all the tensors which are input or output of an op in the graph.
  Args:
    graph: a tf.Graph.
  Returns:
    A list of tf.Tensor.
  Raises:
    TypeError: if graph is not a tf.Graph. | 
	get all the tensors which are input or output of an op in the graph. | 
	[
  "get",
  "all",
  "the",
  "tensors",
  "which",
  "are",
  "input",
  "or",
  "output",
  "of",
  "an",
  "op",
  "in",
  "the",
  "graph",
  "."
] | 
	def get_tensors(graph):
  """get all the tensors which are input or output of an op in the graph.
  Args:
    graph: a tf.Graph.
  Returns:
    A list of tf.Tensor.
  Raises:
    TypeError: if graph is not a tf.Graph.
  """
  if not isinstance(graph, tf_ops.Graph):
    raise TypeError("Expected a graph, got: {}".format(type(graph)))
  ts = []
  for op in graph.get_operations():
    concatenate_unique(ts, op.inputs)
    concatenate_unique(ts, op.outputs)
  return ts | 
	[
  "def",
  "get_tensors",
  "(",
  "graph",
  ")",
  ":",
  "if",
  "not",
  "isinstance",
  "(",
  "graph",
  ",",
  "tf_ops",
  ".",
  "Graph",
  ")",
  ":",
  "raise",
  "TypeError",
  "(",
  "\"Expected a graph, got: {}\"",
  ".",
  "format",
  "(",
  "type",
  "(",
  "graph",
  ")",
  ")",
  ")",
  "ts",
  "=",
  "[",
  "]",
  "for",
  "op",
  "in",
  "graph",
  ".",
  "get_operations",
  "(",
  ")",
  ":",
  "concatenate_unique",
  "(",
  "ts",
  ",",
  "op",
  ".",
  "inputs",
  ")",
  "concatenate_unique",
  "(",
  "ts",
  ",",
  "op",
  ".",
  "outputs",
  ")",
  "return",
  "ts"
] | 
	https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/graph_editor/util.py#L152-L168 | |
| 
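A short usage sketch, assuming a TF 1.x install where contrib still ships (the module path is taken from the record's own URL):

    import tensorflow as tf
    from tensorflow.contrib.graph_editor import util

    g = tf.Graph()
    with g.as_default():
        a = tf.constant(1.0, name="a")
        b = tf.constant(2.0, name="b")
        c = tf.add(a, b, name="c")

    # Every tensor that is an input or output of some op, deduplicated by
    # concatenate_unique: here [a:0, b:0, c:0].
    ts = util.get_tensors(g)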
	pytorch/pytorch | 
	7176c92687d3cc847cc046bf002269c6949a21c2 | 
	torch/ao/ns/fx/graph_passes.py | 
	python | 
	create_a_shadows_b | 
	(
    name_a: str,
    gm_a: GraphModule,
    name_b: str,
    gm_b: GraphModule,
    matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]],
    logger_cls: Callable,
    should_log_inputs: bool,
    node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) | 
	return gm_c | 
	Creates a new GraphModule consisting of the graph of C, with the meaningful
    nodes of A shadowing the corresponding nodes of B.  For example,
    Graph A:
    a0 -> op0_fp32 -> a1 -> op1_fp32 -> a2
    Graph B:
    b0 -> op0_int8 -> b1 -> op1_int8 -> b2
    matched_node_pairs: {'op0': (op0_fp32, op0_int8), 'op1': (op1_fp32, op1_int8)}
    Graph C (A shadows B):
        / dequant0 -> op0_fp32 -> logger_a_0  / dequant_1 -> op1_fp32 -> logger_a_1
       /                                     /
    b0 -------------> op0_int8 -> logger_b_0 --------------> op1_int8 -> logger_b_1
    In a nutshell, this function does the following for each node pair:
    * copies the necessary attributes and modules from gm_a to gm_b,
      keeping names unique
    * adds a dtype cast op (dequant, quant, etc)
    * adds a copy of node_a in gm_b's graph
    * adds loggers to the outputs of node_a and node_b | 
	Creates a new GraphModule consisting of the graph of C, with the meaningful
    nodes of A shadowing the corresponding nodes of B.  For example, | 
	[
  "Creates",
  "a",
  "new",
  "GraphModule",
  "consisting",
  "of",
  "the",
  "graph",
  "of",
  "C",
  "with",
  "the",
  "meaningful",
  "nodes",
  "of",
  "A",
  "shadowing",
  "the",
  "corresponding",
  "nodes",
  "of",
  "B",
  ".",
  "For",
  "example"
] | 
	def create_a_shadows_b(
    name_a: str,
    gm_a: GraphModule,
    name_b: str,
    gm_b: GraphModule,
    matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]],
    logger_cls: Callable,
    should_log_inputs: bool,
    node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> GraphModule:
    """
    Creates a new GraphModule consisting of the graph of C, with the meaningful
    nodes of A shadowing the corresponding nodes of B.  For example,
    Graph A:
    a0 -> op0_fp32 -> a1 -> op1_fp32 -> a2
    Graph B:
    b0 -> op0_int8 -> b1 -> op1_int8 -> b2
    matched_node_pairs: {'op0': (op0_fp32, op0_int8), 'op1': (op1_fp32, op1_int8)}
    Graph C (A shadows B):
        / dequant0 -> op0_fp32 -> logger_a_0  / dequant_1 -> op1_fp32 -> logger_a_1
       /                                     /
    b0 -------------> op0_int8 -> logger_b_0 --------------> op1_int8 -> logger_b_1
    In a nutshell, this function does the following for each node pair:
    * copies the necessary attributes and modules from gm_a to gm_b,
      keeping names unique
    * adds a dtype cast op (dequant, quant, etc)
    * adds a copy of node_a in gm_b's graph
    * adds loggers to the outputs of node_a and node_b
    """
    if node_type_to_io_type_map is None:
        node_type_to_io_type_map = get_node_type_to_io_type_map()
    # graph_c is the graph created from copying the nodes of graph_b and inserting
    # the shadows with the nodes copied from graph_a
    graph_c = Graph()
    env_c: Dict[str, Any] = {}
    modules = dict(gm_b.named_modules())
    def load_arg(a):
        return map_arg(a, lambda node: env_c[node.name])
    start_node_b_to_matched_subgraph_a_and_name = {}
    end_node_b_to_matched_subgraph_a_and_name = {}
    for match_name, match in matched_subgraph_pairs.items():
        subgraph_a, subgraph_b = match
        ref_node_type_a = get_target_type_str(subgraph_a.base_op_node, gm_a)
        ref_node_type_b = get_target_type_str(subgraph_b.base_op_node, gm_b)
        start_node_b_to_matched_subgraph_a_and_name[subgraph_b.start_node] = \
            (subgraph_a, match_name, ref_node_type_a, ref_node_type_b)
        end_node_b_to_matched_subgraph_a_and_name[subgraph_b.end_node] = \
            (subgraph_a, match_name, ref_node_type_a, ref_node_type_b)
    for node_b in gm_b.graph.nodes:
        if node_b.op == 'output':
            graph_c.output(map_arg(node_b.args[0], load_arg))
            continue
        # calculate the flags to determine what to do with this node
        node_b_is_start_node = node_b in start_node_b_to_matched_subgraph_a_and_name
        node_b_is_end_node = node_b in end_node_b_to_matched_subgraph_a_and_name
        if (node_b_is_start_node or node_b_is_end_node):
            if node_b_is_start_node:
                subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \
                    start_node_b_to_matched_subgraph_a_and_name[node_b]
            else:
                assert node_b_is_end_node
                subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \
                    end_node_b_to_matched_subgraph_a_and_name[node_b]
            # For both start_node and end_node verify that we know how to do
            # the dtype cast. If we do not, skip.
            node_input_type_a, node_output_type_a = \
                get_node_first_input_and_output_type(
                    subgraph_a.start_node, gm_a, logger_cls,
                    node_type_to_io_type_map)
            node_input_type_b, node_output_type_b = \
                get_node_first_input_and_output_type(
                    node_b, gm_b, logger_cls,
                    node_type_to_io_type_map)
            node_io_types_known_a_and_b = (
                node_input_type_a != NodeInputOrOutputType.UNKNOWN and
                node_output_type_a != NodeInputOrOutputType.UNKNOWN and
                node_input_type_b != NodeInputOrOutputType.UNKNOWN and
                node_output_type_b != NodeInputOrOutputType.UNKNOWN
            )
            if not node_io_types_known_a_and_b:
                print(
                    f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
                    f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
                    ', unknown dtype cast')
                env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
                continue
            # If we are shadowing from fp32 to int8, we need to insert
            # quantize_per_tensor call with qparams from the previous node.
            # Only do this if we are able to infer these qparams from the graph.
            if (
                node_input_type_a == NodeInputOrOutputType.INT8 and
                node_input_type_b == NodeInputOrOutputType.FP32
            ):
                node_a_input_qparams = get_node_input_qparams(
                    subgraph_a.start_node, gm_a, node_type_to_io_type_map)
                if not node_a_input_qparams:
                    print(
                        f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
                        f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
                        ', unknown input qparams')
                    env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
                    continue
            fqn_base_a = _maybe_get_fqn(subgraph_a.base_op_node, gm_a)
            fqn_base_b = _maybe_get_fqn(subgraph_b.base_op_node, gm_b)
            if node_b_is_start_node:
                # if necessary, log the input of node_c
                if should_log_inputs:
                    if isinstance(node_b.args[0], Node):
                        prev_node_c = env_c[node_b.args[0].name]
                        env_c[prev_node_c.name] = _insert_logger_after_node(
                            prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_',
                            node_b.name, name_b, ref_name, ref_node_type_b,
                            NSSingleResultValuesType.NODE_INPUT.value,
                            index_within_arg=0, index_of_arg=0,
                            fqn=fqn_base_b)
                    elif isinstance(node_b.args[0], list):
                        # first, save the prev_node instances, because they
                        # will be overwritten in the env after the first logger
                        # is added
                        prev_node_c_list = [env_c[arg.name] for arg in node_b.args[0]]
                        for arg_idx, arg in enumerate(node_b.args[0]):
                            prev_node_c = prev_node_c_list[arg_idx]
                            env_c[prev_node_c.name] = _insert_logger_after_node(
                                prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_',
                                node_b.name, name_b, ref_name, ref_node_type_b,
                                NSSingleResultValuesType.NODE_INPUT.value,
                                index_within_arg=arg_idx, index_of_arg=0,
                                fqn=fqn_base_b)
                    else:
                        # logging of inputs which are not lists is not supported yet
                        raise AssertionError(f"type {type(node_b.args[0])} is not handled yet")
                # subgraph so far:
                #
                # (prev_node_c)+ -> (logger_c_input)?
            # Note: this if statement is always True, spelling it out to clarify code
            # intent.
            if node_b_is_start_node or node_b_is_end_node:
                # ensure env_c is populated with base node
                env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
                node_c = env_c[node_b.name]
                # after this point,
                #
                # node_a is the original node from graph_a, with parent module gm_a
                # node_b is the original node from graph_b, with parent module gm_b
                # node_c is the copy of node_b in graph_c
                #
                # subgraph so far:
                #
                # (prev_node_c)+ -> (logger_c_input)? -> node_start_c
            if node_b_is_start_node:
                # cast dtype from the dtype of node_c's input to the dtype of
                # node_a's input (dequant, etc)
                prev_node_c = node_c.args[0]
                if should_log_inputs:
                    # skip the input logger when inserting a dtype cast
                    if isinstance(prev_node_c, Node):
                        prev_node_c = prev_node_c.args[0]
                    elif isinstance(prev_node_c, list):
                        prev_node_c = [arg.args[0] for arg in prev_node_c]
                dtype_cast_node = _insert_dtype_cast_after_node(
                    subgraph_a.start_node, node_c, prev_node_c, gm_a, gm_b, graph_c,
                    node_b.name + '_dtype_cast_', logger_cls,
                    node_type_to_io_type_map)
                # note: not inserting to env_c because all nodes which use the dtype
                #   casts are copied from graph_a
                #
                # subgraph so far:
                #
                #           (dtype_cast_node)+
                #                  /
                # (prev_node_c)+ -> (logger_c_input)? -> node_start_c
                # if input logging is enabled, log the input to the subgraph
                if should_log_inputs:
                    # TODO: explain this
                    ref_node_name = ''
                    if isinstance(dtype_cast_node, Node):
                        dtype_cast_node = _insert_logger_after_node(
                            dtype_cast_node, gm_b, logger_cls, '_ns_logger_a_inp_',
                            ref_node_name, name_a, ref_name, ref_node_type_a,
                            NSSingleResultValuesType.NODE_INPUT.value,
                            index_within_arg=0, index_of_arg=0,
                            fqn=fqn_base_a)
                        input_logger: Union[Node, List[Node]] = dtype_cast_node
                    else:
                        assert isinstance(dtype_cast_node, list)
                        new_loggers = []
                        for dtype_cast_idx, dtype_cast_node_inner in enumerate(dtype_cast_node):
                            dtype_cast_logger = _insert_logger_after_node(
                                dtype_cast_node_inner, gm_b, logger_cls, '_ns_logger_a_inp_',
                                ref_node_name, name_a, ref_name, ref_node_type_a,
                                NSSingleResultValuesType.NODE_INPUT.value,
                                index_within_arg=dtype_cast_idx,
                                index_of_arg=0,
                                fqn=fqn_base_a)
                            new_loggers.append(dtype_cast_logger)
                        dtype_cast_node = new_loggers
                        input_logger = dtype_cast_node
                    # subgraph so far:
                    #
                    #       (dtype_cast_node)+ -> (logger_a_input)?
                    #                  /
                    # prev_node_c -> (logger_c_input)? -> node_start_c
                # hook up the new mod_a copy to be in the graph, receiving the
                # same inputs as mod_b does, with dtype cast to match a
                # Some ops, such as LSTMs, have two non-param inputs. If we have
                # such an op, pass the second param as well. Note: dtype casting
                # for the second param is not implemented yet, it can be added
                # later if there is a use case.
                node_c_second_non_param_arg = None
                num_non_param_args_node_a = get_number_of_non_param_args(subgraph_a.start_node, gm_a)
                if num_non_param_args_node_a == 2:
                    node_c_second_non_param_arg = node_c.args[1]
                node_a_shadows_c = _insert_copy_of_subgraph_a_after_input_node_c(
                    dtype_cast_node, node_c_second_non_param_arg,
                    subgraph_a, gm_a, gm_b, node_c.name + '_shadow_copy_')
                env_c[node_a_shadows_c.name] = node_a_shadows_c
                # subgraph so far:
                #
                #       dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy(args/kwargs not shown)
                #                  /
                # (prev_node_c)+ -> (logger_c_input)? -> node_start_c
                if should_log_inputs:
                    # When we created the input logger, we left the ref_node_name
                    # as an empty string, because the subgraph copy did not exist
                    # yet. Now that the subgraph copy exists, we modify this name
                    # to its true value.
                    # Note: the alternative to this is to create the input logger
                    # after creating the subgraph, which is slightly more
                    # complicated. This is the lesser of two evils.
                    # input_logger = env_c[dtype_cast_node.name]
                    # Find the first node in the subgraph
                    cur_node = node_a_shadows_c
                    while cur_node.args[0] != input_logger:
                        cur_node = cur_node.args[0]  # type: ignore[assignment]
                    if isinstance(input_logger, Node):
                        input_logger_mod = getattr(gm_b, input_logger.name)
                        input_logger_mod.ref_node_name = cur_node.name
                    else:
                        assert isinstance(input_logger, list)
                        for input_logger_inner in input_logger:
                            input_logger_mod = getattr(gm_b, input_logger_inner.name)
                            input_logger_mod.ref_node_name = cur_node.name
                # hook up a logger to the mod_a copy
                env_c[node_a_shadows_c.name] = _insert_logger_after_node(
                    env_c[node_a_shadows_c.name], gm_b, logger_cls, '_ns_logger_a_',
                    node_a_shadows_c.name, name_a, ref_name, ref_node_type_a,
                    NSSingleResultValuesType.NODE_OUTPUT.value,
                    index_within_arg=0, index_of_arg=0,
                    fqn=fqn_base_a)
                # subgraph so far:
                #
                #       dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a
                #                  /
                # (prev_node_c)+ -> (logger_c_input)? -> node_start_c
            if node_b_is_end_node:
                # hook up a logger to the mod_b copy
                env_c[node_b.name] = _insert_logger_after_node(
                    env_c[node_b.name], gm_b, logger_cls, '_ns_logger_b_',
                    node_b.name, name_b, ref_name, ref_node_type_b,
                    NSSingleResultValuesType.NODE_OUTPUT.value,
                    index_within_arg=0, index_of_arg=0,
                    fqn=fqn_base_b)
                # subgraph so far:
                #
                #       dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a
                #                  /
                # (prev_node_c+) -> (logger_c_input)? -> node_start_c -> ... -> node_end_c -> logger_c
                #
                # Note: node_start_c may be the same node as node_end_c, or they
                # may have nodes in between.
        else:
            env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
    gm_c = GraphModule(gm_b, graph_c)
    return gm_c | 
	[
  "def",
  "create_a_shadows_b",
  "(",
  "name_a",
  ":",
  "str",
  ",",
  "gm_a",
  ":",
  "GraphModule",
  ",",
  "name_b",
  ":",
  "str",
  ",",
  "gm_b",
  ":",
  "GraphModule",
  ",",
  "matched_subgraph_pairs",
  ":",
  "Dict",
  "[",
  "str",
  ",",
  "Tuple",
  "[",
  "NSSubgraph",
  ",",
  "NSSubgraph",
  "]",
  "]",
  ",",
  "logger_cls",
  ":",
  "Callable",
  ",",
  "should_log_inputs",
  ":",
  "bool",
  ",",
  "node_type_to_io_type_map",
  ":",
  "Optional",
  "[",
  "Dict",
  "[",
  "str",
  ",",
  "Set",
  "[",
  "NSNodeTargetType",
  "]",
  "]",
  "]",
  "=",
  "None",
  ",",
  ")",
  "->",
  "GraphModule",
  ":",
  "if",
  "node_type_to_io_type_map",
  "is",
  "None",
  ":",
  "node_type_to_io_type_map",
  "=",
  "get_node_type_to_io_type_map",
  "(",
  ")",
  "# graph_c is the graph created from copying the nodes of graph_b and inserting",
  "# the shadows with the nodes copied from graph_a",
  "graph_c",
  "=",
  "Graph",
  "(",
  ")",
  "env_c",
  ":",
  "Dict",
  "[",
  "str",
  ",",
  "Any",
  "]",
  "=",
  "{",
  "}",
  "modules",
  "=",
  "dict",
  "(",
  "gm_b",
  ".",
  "named_modules",
  "(",
  ")",
  ")",
  "def",
  "load_arg",
  "(",
  "a",
  ")",
  ":",
  "return",
  "map_arg",
  "(",
  "a",
  ",",
  "lambda",
  "node",
  ":",
  "env_c",
  "[",
  "node",
  ".",
  "name",
  "]",
  ")",
  "start_node_b_to_matched_subgraph_a_and_name",
  "=",
  "{",
  "}",
  "end_node_b_to_matched_subgraph_a_and_name",
  "=",
  "{",
  "}",
  "for",
  "match_name",
  ",",
  "match",
  "in",
  "matched_subgraph_pairs",
  ".",
  "items",
  "(",
  ")",
  ":",
  "subgraph_a",
  ",",
  "subgraph_b",
  "=",
  "match",
  "ref_node_type_a",
  "=",
  "get_target_type_str",
  "(",
  "subgraph_a",
  ".",
  "base_op_node",
  ",",
  "gm_a",
  ")",
  "ref_node_type_b",
  "=",
  "get_target_type_str",
  "(",
  "subgraph_b",
  ".",
  "base_op_node",
  ",",
  "gm_b",
  ")",
  "start_node_b_to_matched_subgraph_a_and_name",
  "[",
  "subgraph_b",
  ".",
  "start_node",
  "]",
  "=",
  "(",
  "subgraph_a",
  ",",
  "match_name",
  ",",
  "ref_node_type_a",
  ",",
  "ref_node_type_b",
  ")",
  "end_node_b_to_matched_subgraph_a_and_name",
  "[",
  "subgraph_b",
  ".",
  "end_node",
  "]",
  "=",
  "(",
  "subgraph_a",
  ",",
  "match_name",
  ",",
  "ref_node_type_a",
  ",",
  "ref_node_type_b",
  ")",
  "for",
  "node_b",
  "in",
  "gm_b",
  ".",
  "graph",
  ".",
  "nodes",
  ":",
  "if",
  "node_b",
  ".",
  "op",
  "==",
  "'output'",
  ":",
  "graph_c",
  ".",
  "output",
  "(",
  "map_arg",
  "(",
  "node_b",
  ".",
  "args",
  "[",
  "0",
  "]",
  ",",
  "load_arg",
  ")",
  ")",
  "continue",
  "# calculate the flags to determine what to do with this node",
  "node_b_is_start_node",
  "=",
  "node_b",
  "in",
  "start_node_b_to_matched_subgraph_a_and_name",
  "node_b_is_end_node",
  "=",
  "node_b",
  "in",
  "end_node_b_to_matched_subgraph_a_and_name",
  "if",
  "(",
  "node_b_is_start_node",
  "or",
  "node_b_is_end_node",
  ")",
  ":",
  "if",
  "node_b_is_start_node",
  ":",
  "subgraph_a",
  ",",
  "ref_name",
  ",",
  "ref_node_type_a",
  ",",
  "ref_node_type_b",
  "=",
  "start_node_b_to_matched_subgraph_a_and_name",
  "[",
  "node_b",
  "]",
  "else",
  ":",
  "assert",
  "node_b_is_end_node",
  "subgraph_a",
  ",",
  "ref_name",
  ",",
  "ref_node_type_a",
  ",",
  "ref_node_type_b",
  "=",
  "end_node_b_to_matched_subgraph_a_and_name",
  "[",
  "node_b",
  "]",
  "# For both start_node and end_node verify that we know how to do",
  "# the dtype cast. If we do not, skip.",
  "node_input_type_a",
  ",",
  "node_output_type_a",
  "=",
  "get_node_first_input_and_output_type",
  "(",
  "subgraph_a",
  ".",
  "start_node",
  ",",
  "gm_a",
  ",",
  "logger_cls",
  ",",
  "node_type_to_io_type_map",
  ")",
  "node_input_type_b",
  ",",
  "node_output_type_b",
  "=",
  "get_node_first_input_and_output_type",
  "(",
  "node_b",
  ",",
  "gm_b",
  ",",
  "logger_cls",
  ",",
  "node_type_to_io_type_map",
  ")",
  "node_io_types_known_a_and_b",
  "=",
  "(",
  "node_input_type_a",
  "!=",
  "NodeInputOrOutputType",
  ".",
  "UNKNOWN",
  "and",
  "node_output_type_a",
  "!=",
  "NodeInputOrOutputType",
  ".",
  "UNKNOWN",
  "and",
  "node_input_type_b",
  "!=",
  "NodeInputOrOutputType",
  ".",
  "UNKNOWN",
  "and",
  "node_output_type_b",
  "!=",
  "NodeInputOrOutputType",
  ".",
  "UNKNOWN",
  ")",
  "if",
  "not",
  "node_io_types_known_a_and_b",
  ":",
  "print",
  "(",
  "f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}'",
  "+",
  "f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}'",
  "+",
  "', unknown dtype cast'",
  ")",
  "env_c",
  "[",
  "node_b",
  ".",
  "name",
  "]",
  "=",
  "graph_c",
  ".",
  "node_copy",
  "(",
  "node_b",
  ",",
  "load_arg",
  ")",
  "continue",
  "# If we are shadowing from fp32 to int8, we need to insert",
  "# quantize_per_tensor call with qparams from the previous node.",
  "# Only do this if we are able to infer these qparams from the graph.",
  "if",
  "(",
  "node_input_type_a",
  "==",
  "NodeInputOrOutputType",
  ".",
  "INT8",
  "and",
  "node_input_type_b",
  "==",
  "NodeInputOrOutputType",
  ".",
  "FP32",
  ")",
  ":",
  "node_a_input_qparams",
  "=",
  "get_node_input_qparams",
  "(",
  "subgraph_a",
  ".",
  "start_node",
  ",",
  "gm_a",
  ",",
  "node_type_to_io_type_map",
  ")",
  "if",
  "not",
  "node_a_input_qparams",
  ":",
  "print",
  "(",
  "f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}'",
  "+",
  "f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}'",
  "+",
  "', unknown input qparams'",
  ")",
  "env_c",
  "[",
  "node_b",
  ".",
  "name",
  "]",
  "=",
  "graph_c",
  ".",
  "node_copy",
  "(",
  "node_b",
  ",",
  "load_arg",
  ")",
  "continue",
  "fqn_base_a",
  "=",
  "_maybe_get_fqn",
  "(",
  "subgraph_a",
  ".",
  "base_op_node",
  ",",
  "gm_a",
  ")",
  "fqn_base_b",
  "=",
  "_maybe_get_fqn",
  "(",
  "subgraph_b",
  ".",
  "base_op_node",
  ",",
  "gm_b",
  ")",
  "if",
  "node_b_is_start_node",
  ":",
  "# if necessary, log the input of node_c",
  "if",
  "should_log_inputs",
  ":",
  "if",
  "isinstance",
  "(",
  "node_b",
  ".",
  "args",
  "[",
  "0",
  "]",
  ",",
  "Node",
  ")",
  ":",
  "prev_node_c",
  "=",
  "env_c",
  "[",
  "node_b",
  ".",
  "args",
  "[",
  "0",
  "]",
  ".",
  "name",
  "]",
  "env_c",
  "[",
  "prev_node_c",
  ".",
  "name",
  "]",
  "=",
  "_insert_logger_after_node",
  "(",
  "prev_node_c",
  ",",
  "gm_b",
  ",",
  "logger_cls",
  ",",
  "'_ns_logger_b_inp_'",
  ",",
  "node_b",
  ".",
  "name",
  ",",
  "name_b",
  ",",
  "ref_name",
  ",",
  "ref_node_type_b",
  ",",
  "NSSingleResultValuesType",
  ".",
  "NODE_INPUT",
  ".",
  "value",
  ",",
  "index_within_arg",
  "=",
  "0",
  ",",
  "index_of_arg",
  "=",
  "0",
  ",",
  "fqn",
  "=",
  "fqn_base_b",
  ")",
  "elif",
  "isinstance",
  "(",
  "node_b",
  ".",
  "args",
  "[",
  "0",
  "]",
  ",",
  "list",
  ")",
  ":",
  "# first, save the prev_node instances, because they",
  "# will be overwritten in the env after the first logger",
  "# is added",
  "prev_node_c_list",
  "=",
  "[",
  "env_c",
  "[",
  "arg",
  ".",
  "name",
  "]",
  "for",
  "arg",
  "in",
  "node_b",
  ".",
  "args",
  "[",
  "0",
  "]",
  "]",
  "for",
  "arg_idx",
  ",",
  "arg",
  "in",
  "enumerate",
  "(",
  "node_b",
  ".",
  "args",
  "[",
  "0",
  "]",
  ")",
  ":",
  "prev_node_c",
  "=",
  "prev_node_c_list",
  "[",
  "arg_idx",
  "]",
  "env_c",
  "[",
  "prev_node_c",
  ".",
  "name",
  "]",
  "=",
  "_insert_logger_after_node",
  "(",
  "prev_node_c",
  ",",
  "gm_b",
  ",",
  "logger_cls",
  ",",
  "'_ns_logger_b_inp_'",
  ",",
  "node_b",
  ".",
  "name",
  ",",
  "name_b",
  ",",
  "ref_name",
  ",",
  "ref_node_type_b",
  ",",
  "NSSingleResultValuesType",
  ".",
  "NODE_INPUT",
  ".",
  "value",
  ",",
  "index_within_arg",
  "=",
  "arg_idx",
  ",",
  "index_of_arg",
  "=",
  "0",
  ",",
  "fqn",
  "=",
  "fqn_base_b",
  ")",
  "else",
  ":",
  "# logging of inputs which are not lists is not supported yet",
  "raise",
  "AssertionError",
  "(",
  "f\"type {type(node_b.args[0])} is not handled yet\"",
  ")",
  "# subgraph so far:",
  "#",
  "# (prev_node_c)+ -> (logger_c_input)?",
  "# Note: this if statement is always True, spelling it out to clarify code",
  "# intent.",
  "if",
  "node_b_is_start_node",
  "or",
  "node_b_is_end_node",
  ":",
  "# ensure env_c is populated with base node",
  "env_c",
  "[",
  "node_b",
  ".",
  "name",
  "]",
  "=",
  "graph_c",
  ".",
  "node_copy",
  "(",
  "node_b",
  ",",
  "load_arg",
  ")",
  "node_c",
  "=",
  "env_c",
  "[",
  "node_b",
  ".",
  "name",
  "]",
  "# after this point,",
  "#",
  "# node_a is the original node from graph_a, with parent module gm_a",
  "# node_b is the original node from graph_b, with parent module gm_b",
  "# node_c is the copy of node_b in graph_c",
  "#",
  "# subgraph so far:",
  "#",
  "# (prev_node_c)+ -> (logger_c_input)? -> node_start_c",
  "if",
  "node_b_is_start_node",
  ":",
  "# cast dtype from the dtype of node_c's input to the dtype of",
  "# node_a's input (dequant, etc)",
  "prev_node_c",
  "=",
  "node_c",
  ".",
  "args",
  "[",
  "0",
  "]",
  "if",
  "should_log_inputs",
  ":",
  "# skip the input logger when inserting a dtype cast",
  "if",
  "isinstance",
  "(",
  "prev_node_c",
  ",",
  "Node",
  ")",
  ":",
  "prev_node_c",
  "=",
  "prev_node_c",
  ".",
  "args",
  "[",
  "0",
  "]",
  "elif",
  "isinstance",
  "(",
  "prev_node_c",
  ",",
  "list",
  ")",
  ":",
  "prev_node_c",
  "=",
  "[",
  "arg",
  ".",
  "args",
  "[",
  "0",
  "]",
  "for",
  "arg",
  "in",
  "prev_node_c",
  "]",
  "dtype_cast_node",
  "=",
  "_insert_dtype_cast_after_node",
  "(",
  "subgraph_a",
  ".",
  "start_node",
  ",",
  "node_c",
  ",",
  "prev_node_c",
  ",",
  "gm_a",
  ",",
  "gm_b",
  ",",
  "graph_c",
  ",",
  "node_b",
  ".",
  "name",
  "+",
  "'_dtype_cast_'",
  ",",
  "logger_cls",
  ",",
  "node_type_to_io_type_map",
  ")",
  "# note: not inserting to env_c because all nodes which use the dtype",
  "#   casts are copied from graph_a",
  "#",
  "# subgraph so far:",
  "#",
  "#           (dtype_cast_node)+",
  "#                  /",
  "# (prev_node_c)+ -> (logger_c_input)? -> node_start_c",
  "# if input logging is enabled, log the input to the subgraph",
  "if",
  "should_log_inputs",
  ":",
  "# TODO: explain this",
  "ref_node_name",
  "=",
  "''",
  "if",
  "isinstance",
  "(",
  "dtype_cast_node",
  ",",
  "Node",
  ")",
  ":",
  "dtype_cast_node",
  "=",
  "_insert_logger_after_node",
  "(",
  "dtype_cast_node",
  ",",
  "gm_b",
  ",",
  "logger_cls",
  ",",
  "'_ns_logger_a_inp_'",
  ",",
  "ref_node_name",
  ",",
  "name_a",
  ",",
  "ref_name",
  ",",
  "ref_node_type_a",
  ",",
  "NSSingleResultValuesType",
  ".",
  "NODE_INPUT",
  ".",
  "value",
  ",",
  "index_within_arg",
  "=",
  "0",
  ",",
  "index_of_arg",
  "=",
  "0",
  ",",
  "fqn",
  "=",
  "fqn_base_a",
  ")",
  "input_logger",
  ":",
  "Union",
  "[",
  "Node",
  ",",
  "List",
  "[",
  "Node",
  "]",
  "]",
  "=",
  "dtype_cast_node",
  "else",
  ":",
  "assert",
  "isinstance",
  "(",
  "dtype_cast_node",
  ",",
  "list",
  ")",
  "new_loggers",
  "=",
  "[",
  "]",
  "for",
  "dtype_cast_idx",
  ",",
  "dtype_cast_node_inner",
  "in",
  "enumerate",
  "(",
  "dtype_cast_node",
  ")",
  ":",
  "dtype_cast_logger",
  "=",
  "_insert_logger_after_node",
  "(",
  "dtype_cast_node_inner",
  ",",
  "gm_b",
  ",",
  "logger_cls",
  ",",
  "'_ns_logger_a_inp_'",
  ",",
  "ref_node_name",
  ",",
  "name_a",
  ",",
  "ref_name",
  ",",
  "ref_node_type_a",
  ",",
  "NSSingleResultValuesType",
  ".",
  "NODE_INPUT",
  ".",
  "value",
  ",",
  "index_within_arg",
  "=",
  "dtype_cast_idx",
  ",",
  "index_of_arg",
  "=",
  "0",
  ",",
  "fqn",
  "=",
  "fqn_base_a",
  ")",
  "new_loggers",
  ".",
  "append",
  "(",
  "dtype_cast_logger",
  ")",
  "dtype_cast_node",
  "=",
  "new_loggers",
  "input_logger",
  "=",
  "dtype_cast_node",
  "# subgraph so far:",
  "#",
  "#       (dtype_cast_node)+ -> (logger_a_input)?",
  "#                  /",
  "# prev_node_c -> (logger_c_input)? -> node_start_c",
  "# hook up the new mod_a copy to be in the graph, receiving the",
  "# same inputs as mod_b does, with dtype cast to match a",
  "# Some ops, such as LSTMs, have two non-param inputs. If we have",
  "# such an op, pass the second param as well. Note: dtype casting",
  "# for the second param is not implemented yet, it can be added",
  "# later if there is a use case.",
  "node_c_second_non_param_arg",
  "=",
  "None",
  "num_non_param_args_node_a",
  "=",
  "get_number_of_non_param_args",
  "(",
  "subgraph_a",
  ".",
  "start_node",
  ",",
  "gm_a",
  ")",
  "if",
  "num_non_param_args_node_a",
  "==",
  "2",
  ":",
  "node_c_second_non_param_arg",
  "=",
  "node_c",
  ".",
  "args",
  "[",
  "1",
  "]",
  "node_a_shadows_c",
  "=",
  "_insert_copy_of_subgraph_a_after_input_node_c",
  "(",
  "dtype_cast_node",
  ",",
  "node_c_second_non_param_arg",
  ",",
  "subgraph_a",
  ",",
  "gm_a",
  ",",
  "gm_b",
  ",",
  "node_c",
  ".",
  "name",
  "+",
  "'_shadow_copy_'",
  ")",
  "env_c",
  "[",
  "node_a_shadows_c",
  ".",
  "name",
  "]",
  "=",
  "node_a_shadows_c",
  "# subgraph so far:",
  "#",
  "#       dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy(args/kwargs not shown)",
  "#                  /",
  "# (prev_node_c)+ -> (logger_c_input)? -> node_start_c",
  "if",
  "should_log_inputs",
  ":",
  "# When we created the input logger, we left the ref_node_name",
  "# as an empty string, because the subgraph copy did not exist",
  "# yet. Now that the subgraph copy exists, we modify this name",
  "# to its true value.",
  "# Note: the alternative to this is to create the input logger",
  "# after creating the subgraph, which is slightly more",
  "# complicated. This is the lesser of two evils.",
  "# input_logger = env_c[dtype_cast_node.name]",
  "# Find the first node in the subgraph",
  "cur_node",
  "=",
  "node_a_shadows_c",
  "while",
  "cur_node",
  ".",
  "args",
  "[",
  "0",
  "]",
  "!=",
  "input_logger",
  ":",
  "cur_node",
  "=",
  "cur_node",
  ".",
  "args",
  "[",
  "0",
  "]",
  "# type: ignore[assignment]",
  "if",
  "isinstance",
  "(",
  "input_logger",
  ",",
  "Node",
  ")",
  ":",
  "input_logger_mod",
  "=",
  "getattr",
  "(",
  "gm_b",
  ",",
  "input_logger",
  ".",
  "name",
  ")",
  "input_logger_mod",
  ".",
  "ref_node_name",
  "=",
  "cur_node",
  ".",
  "name",
  "else",
  ":",
  "assert",
  "isinstance",
  "(",
  "input_logger",
  ",",
  "list",
  ")",
  "for",
  "input_logger_inner",
  "in",
  "input_logger",
  ":",
  "input_logger_mod",
  "=",
  "getattr",
  "(",
  "gm_b",
  ",",
  "input_logger_inner",
  ".",
  "name",
  ")",
  "input_logger_mod",
  ".",
  "ref_node_name",
  "=",
  "cur_node",
  ".",
  "name",
  "# hook up a logger to the mod_a copy",
  "env_c",
  "[",
  "node_a_shadows_c",
  ".",
  "name",
  "]",
  "=",
  "_insert_logger_after_node",
  "(",
  "env_c",
  "[",
  "node_a_shadows_c",
  ".",
  "name",
  "]",
  ",",
  "gm_b",
  ",",
  "logger_cls",
  ",",
  "'_ns_logger_a_'",
  ",",
  "node_a_shadows_c",
  ".",
  "name",
  ",",
  "name_a",
  ",",
  "ref_name",
  ",",
  "ref_node_type_a",
  ",",
  "NSSingleResultValuesType",
  ".",
  "NODE_OUTPUT",
  ".",
  "value",
  ",",
  "index_within_arg",
  "=",
  "0",
  ",",
  "index_of_arg",
  "=",
  "0",
  ",",
  "fqn",
  "=",
  "fqn_base_a",
  ")",
  "# subgraph so far:",
  "#",
  "#       dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a",
  "#                  /",
  "# (prev_node_c)+ -> (logger_c_input)? -> node_start_c",
  "if",
  "node_b_is_end_node",
  ":",
  "# hook up a logger to the mod_b copy",
  "env_c",
  "[",
  "node_b",
  ".",
  "name",
  "]",
  "=",
  "_insert_logger_after_node",
  "(",
  "env_c",
  "[",
  "node_b",
  ".",
  "name",
  "]",
  ",",
  "gm_b",
  ",",
  "logger_cls",
  ",",
  "'_ns_logger_b_'",
  ",",
  "node_b",
  ".",
  "name",
  ",",
  "name_b",
  ",",
  "ref_name",
  ",",
  "ref_node_type_b",
  ",",
  "NSSingleResultValuesType",
  ".",
  "NODE_OUTPUT",
  ".",
  "value",
  ",",
  "index_within_arg",
  "=",
  "0",
  ",",
  "index_of_arg",
  "=",
  "0",
  ",",
  "fqn",
  "=",
  "fqn_base_b",
  ")",
  "# subgraph so far:",
  "#",
  "#       dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a",
  "#                  /",
  "# (prev_node_c+) -> (logger_c_input)? -> node_start_c -> ... -> node_end_c -> logger_c",
  "#",
  "# Note: node_start_c may be the same node as node_end_c, or they",
  "# may have nodes inbetween.",
  "else",
  ":",
  "env_c",
  "[",
  "node_b",
  ".",
  "name",
  "]",
  "=",
  "graph_c",
  ".",
  "node_copy",
  "(",
  "node_b",
  ",",
  "load_arg",
  ")",
  "gm_c",
  "=",
  "GraphModule",
  "(",
  "gm_b",
  ",",
  "graph_c",
  ")",
  "return",
  "gm_c"
] | 
	https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/ao/ns/fx/graph_passes.py#L514-L819 | |
| 
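create_a_shadows_b is an internal pass; at this sha the usual entry point is the Numeric Suite front end add_shadow_loggers, which delegates to it. A hedged end-to-end sketch follows; the prepare/convert details (dict-style qconfig, example_inputs) vary across PyTorch releases, so treat them as assumptions:

    import copy
    import torch
    import torch.nn as nn
    from torch.ao.quantization import get_default_qconfig
    from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx
    from torch.ao.ns._numeric_suite_fx import add_shadow_loggers, OutputLogger

    m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.ReLU()).eval()
    example = torch.randn(1, 1, 4, 4)

    mp = prepare_fx(m, {"": get_default_qconfig("fbgemm")},
                    example_inputs=(example,))
    mp(example)                          # calibrate observers
    mq = convert_fx(copy.deepcopy(mp))   # graph B: the int8 model

    # Graph C: each matched fp32 subgraph from mp shadows its int8
    # counterpart in mq, with OutputLogger capturing both value streams.
    shadowed = add_shadow_loggers("fp32", mp, "int8", mq, OutputLogger)
    shadowed(example)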
	aws/lumberyard | 
	f85344403c1c2e77ec8c75deb2c116e97b713217 | 
	dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/indexes/multi.py | 
	python | 
	MultiIndex._hashed_values | 
	(self) | 
	return hash_tuples(self) | 
	return a uint64 ndarray of my hashed values | 
	return a uint64 ndarray of my hashed values | 
	[
  "return",
  "a",
  "uint64",
  "ndarray",
  "of",
  "my",
  "hashed",
  "values"
] | 
	def _hashed_values(self):
        """ return a uint64 ndarray of my hashed values """
        return hash_tuples(self) | 
	[
  "def",
  "_hashed_values",
  "(",
  "self",
  ")",
  ":",
  "return",
  "hash_tuples",
  "(",
  "self",
  ")"
] | 
	https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/indexes/multi.py#L1408-L1410 | |
| 
	bigartm/bigartm | 
	47e37f982de87aa67bfd475ff1f39da696b181b3 | 
	utils/cpplint.py | 
	python | 
	FileInfo.NoExtension | 
	(self) | 
	return '/'.join(self.Split()[0:2]) | 
	File has no source file extension. | 
	File has no source file extension. | 
	[
  "File",
  "has",
  "no",
  "source",
  "file",
  "extension",
  "."
] | 
	def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2]) | 
	[
  "def",
  "NoExtension",
  "(",
  "self",
  ")",
  ":",
  "return",
  "'/'",
  ".",
  "join",
  "(",
  "self",
  ".",
  "Split",
  "(",
  ")",
  "[",
  "0",
  ":",
  "2",
  "]",
  ")"
] | 
	https://github.com/bigartm/bigartm/blob/47e37f982de87aa67bfd475ff1f39da696b181b3/utils/cpplint.py#L1055-L1057 | |
| 
	neoml-lib/neoml | 
	a0d370fba05269a1b2258cef126f77bbd2054a3e | 
	NeoML/Python/neoml/Dnn/Dnn.py | 
	python | 
	Dnn.initializer | 
	(self, new_initializer) | 
	Sets the initializer that will fill in the weight values 
        before training starts. | 
	Sets the initializer that will fill in the weight values 
        before training starts. | 
	[
  "Sets",
  "the",
  "initializer",
  "that",
  "will",
  "fill",
  "in",
  "the",
  "weight",
  "values",
  "before",
  "training",
  "starts",
  "."
] | 
	def initializer(self, new_initializer):
        """Sets the initializer that will fill in the weight values 
        before training starts.
        """
        self.set_initializer(new_initializer._internal) | 
	[
  "def",
  "initializer",
  "(",
  "self",
  ",",
  "new_initializer",
  ")",
  ":",
  "self",
  ".",
  "set_initializer",
  "(",
  "new_initializer",
  ".",
  "_internal",
  ")"
] | 
	https://github.com/neoml-lib/neoml/blob/a0d370fba05269a1b2258cef126f77bbd2054a3e/NeoML/Python/neoml/Dnn/Dnn.py#L106-L110 | ||
| 
	llvm/llvm-project | 
	ffa6262cb4e2a335d26416fad39a581b4f98c5f4 | 
	llvm/utils/lit/lit/LitConfig.py | 
	python | 
	LitConfig.maxIndividualTestTimeIsSupported | 
	(self) | 
	return lit.util.killProcessAndChildrenIsSupported() | 
	Returns a tuple (<supported>, <error message>)
            where
            `<supported>` is True if setting maxIndividualTestTime is supported
                on the current host, and False otherwise.
            `<error message>` is an empty string if `<supported>` is True,
                otherwise it contains a string describing why setting
                maxIndividualTestTime is not supported. | 
	Returns a tuple (<supported>, <error message>)
            where
            `<supported>` is True if setting maxIndividualTestTime is supported
                on the current host, and False otherwise.
            `<error message>` is an empty string if `<supported>` is True,
                otherwise it contains a string describing why setting
                maxIndividualTestTime is not supported. | 
	[
  "Returns",
  "a",
  "tuple",
  "(",
  "<supported",
  ">",
  "<error",
  "message",
  ">",
  ")",
  "where",
  "<supported",
  ">",
  "is",
  "True",
  "if",
  "setting",
  "maxIndividualTestTime",
  "is",
  "supported",
  "on",
  "the",
  "current",
  "host",
  "returns",
  "False",
  "otherwise",
  ".",
  "<error",
  "message",
  ">",
  "is",
  "an",
  "empty",
  "string",
  "if",
  "<supported",
  ">",
  "is",
  "True",
  "otherwise",
  "is",
  "contains",
  "a",
  "string",
  "describing",
  "why",
  "setting",
  "maxIndividualTestTime",
  "is",
  "not",
  "supported",
  "."
] | 
	def maxIndividualTestTimeIsSupported(self):
        """
            Returns a tuple (<supported>, <error message>)
            where
            `<supported>` is True if setting maxIndividualTestTime is supported
                on the current host, and False otherwise.
            `<error message>` is an empty string if `<supported>` is True,
                otherwise it contains a string describing why setting
                maxIndividualTestTime is not supported.
        """
        return lit.util.killProcessAndChildrenIsSupported() | 
	[
  "def",
  "maxIndividualTestTimeIsSupported",
  "(",
  "self",
  ")",
  ":",
  "return",
  "lit",
  ".",
  "util",
  ".",
  "killProcessAndChildrenIsSupported",
  "(",
  ")"
] | 
	https://github.com/llvm/llvm-project/blob/ffa6262cb4e2a335d26416fad39a581b4f98c5f4/llvm/utils/lit/lit/LitConfig.py#L79-L89 | |
| 
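A sketch of consuming the (supported, error message) pair from a lit config file; lit_config is the LitConfig instance lit injects into cfg scripts:

    supported, errormsg = lit_config.maxIndividualTestTimeIsSupported()
    if not supported:
        lit_config.fatal("per-test timeout unavailable: " + errormsg)
    lit_config.maxIndividualTestTime = 60   # seconds per test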
	miyosuda/TensorFlowAndroidMNIST | 
	7b5a4603d2780a8a2834575706e9001977524007 | 
	jni-build/jni/include/tensorflow/python/ops/control_flow_ops.py | 
	python | 
	CondContext.BuildCondBranch | 
	(self, fn) | 
	return original_r, result | 
	Add the subgraph defined by fn() to the graph. | 
	Add the subgraph defined by fn() to the graph. | 
	[
  "Add",
  "the",
  "subgraph",
  "defined",
  "by",
  "fn",
  "()",
  "to",
  "the",
  "graph",
  "."
] | 
	def BuildCondBranch(self, fn):
    """Add the subgraph defined by fn() to the graph."""
    r = fn()
    original_r = r
    result = []
    if r is not None:
      if not isinstance(r, list) and not isinstance(r, _basetuple):
        r = [r]
        original_r = [original_r]
      r = _convert_tensorarrays_to_flows(r)
      for v in r:
        real_v = v
        if isinstance(v, ops.Operation):
          # Use pivot as the proxy for this op.
          real_v = with_dependencies([v], self._pivot)
        elif v.name not in self._values:
          # Handle the special case of lambda: x
          self._values.add(v.name)
          if self._outer_context:
            real_v = self._outer_context.AddValue(v)
            self._values.add(real_v.name)
          real_v = _SwitchRefOrTensor(real_v, self._pred)[self._branch]
          self._external_values[v.name] = real_v
        else:
          external_v = self._external_values.get(v.name)
          if external_v is not None:
            real_v = external_v
        result.append(real_v)
    return original_r, result | 
	[
  "def",
  "BuildCondBranch",
  "(",
  "self",
  ",",
  "fn",
  ")",
  ":",
  "r",
  "=",
  "fn",
  "(",
  ")",
  "original_r",
  "=",
  "r",
  "result",
  "=",
  "[",
  "]",
  "if",
  "r",
  "is",
  "not",
  "None",
  ":",
  "if",
  "not",
  "isinstance",
  "(",
  "r",
  ",",
  "list",
  ")",
  "and",
  "not",
  "isinstance",
  "(",
  "r",
  ",",
  "_basetuple",
  ")",
  ":",
  "r",
  "=",
  "[",
  "r",
  "]",
  "original_r",
  "=",
  "[",
  "original_r",
  "]",
  "r",
  "=",
  "_convert_tensorarrays_to_flows",
  "(",
  "r",
  ")",
  "for",
  "v",
  "in",
  "r",
  ":",
  "real_v",
  "=",
  "v",
  "if",
  "isinstance",
  "(",
  "v",
  ",",
  "ops",
  ".",
  "Operation",
  ")",
  ":",
  "# Use pivot as the proxy for this op.",
  "real_v",
  "=",
  "with_dependencies",
  "(",
  "[",
  "v",
  "]",
  ",",
  "self",
  ".",
  "_pivot",
  ")",
  "elif",
  "v",
  ".",
  "name",
  "not",
  "in",
  "self",
  ".",
  "_values",
  ":",
  "# Handle the special case of lambda: x",
  "self",
  ".",
  "_values",
  ".",
  "add",
  "(",
  "v",
  ".",
  "name",
  ")",
  "if",
  "self",
  ".",
  "_outer_context",
  ":",
  "real_v",
  "=",
  "self",
  ".",
  "_outer_context",
  ".",
  "AddValue",
  "(",
  "v",
  ")",
  "self",
  ".",
  "_values",
  ".",
  "add",
  "(",
  "real_v",
  ".",
  "name",
  ")",
  "real_v",
  "=",
  "_SwitchRefOrTensor",
  "(",
  "real_v",
  ",",
  "self",
  ".",
  "_pred",
  ")",
  "[",
  "self",
  ".",
  "_branch",
  "]",
  "self",
  ".",
  "_external_values",
  "[",
  "v",
  ".",
  "name",
  "]",
  "=",
  "real_v",
  "else",
  ":",
  "external_v",
  "=",
  "self",
  ".",
  "_external_values",
  ".",
  "get",
  "(",
  "v",
  ".",
  "name",
  ")",
  "if",
  "external_v",
  "is",
  "not",
  "None",
  ":",
  "real_v",
  "=",
  "external_v",
  "result",
  ".",
  "append",
  "(",
  "real_v",
  ")",
  "return",
  "original_r",
  ",",
  "result"
] | 
	https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/ops/control_flow_ops.py#L1244-L1272 | |
| 
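For orientation, BuildCondBranch is the workhorse behind TF 1.x tf.cond: each branch callable below is an fn whose subgraph gets added to the enclosing CondContext, and `lambda: x` exercises the captured-value special case the method's comments mention:

    import tensorflow as tf  # TF 1.x API

    x = tf.constant(2.0)
    y = tf.cond(x > 0.0, lambda: x * 2.0, lambda: x)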
	Tencent/CMONGO | 
	c40380caa14e05509f46993aa8b8da966b09b0b5 | 
	buildscripts/packager.py | 
	python | 
	Distro.repo_os_version | 
	(self, build_os) | 
	Return an OS version suitable for package repo directory
        naming - e.g. 5, 6 or 7 for redhat/centos, "precise," "wheezy," etc.
        for Ubuntu/Debian, 11 for suse, "2013.03" for amazon | 
	Return an OS version suitable for package repo directory
        naming - e.g. 5, 6 or 7 for redhat/centos, "precise," "wheezy," etc.
        for Ubuntu/Debian, 11 for suse, "2013.03" for amazon | 
	[
  "Return",
  "an",
  "OS",
  "version",
  "suitable",
  "for",
  "package",
  "repo",
  "directory",
  "naming",
  "-",
  "e",
  ".",
  "g",
  ".",
  "5",
  "6",
  "or",
  "7",
  "for",
  "redhat",
  "/",
  "centos",
  "precise",
  "wheezy",
  "etc",
  ".",
  "for",
  "Ubuntu",
  "/",
  "Debian",
  "11",
  "for",
  "suse",
  "2013",
  ".",
  "03",
  "for",
  "amazon"
] | 
	def repo_os_version(self, build_os):
        """Return an OS version suitable for package repo directory
        naming - e.g. 5, 6 or 7 for redhat/centos, "precise," "wheezy," etc.
        for Ubuntu/Debian, 11 for suse, "2013.03" for amazon"""
        if self.n == 'suse':
            return re.sub(r'^suse(\d+)$', r'\1', build_os)
        if self.n == 'redhat':
            return re.sub(r'^rhel(\d).*$', r'\1', build_os)
        if self.n == 'amazon':
            return "2013.03"
        elif self.n == 'ubuntu':
            if build_os == 'ubuntu1204':
                return "precise"
            elif build_os == 'ubuntu1404':
                return "trusty"
            elif build_os == 'ubuntu1604':
                return "xenial"
            else:
                raise Exception("unsupported build_os: %s" % build_os)
        elif self.n == 'debian':
            if build_os == 'debian71':
                return 'wheezy'
            elif build_os == 'debian81':
                return 'jessie'
            else:
                raise Exception("unsupported build_os: %s" % build_os)
        else:
            raise Exception("unsupported distro: %s" % self.n) | 
	[
  "def",
  "repo_os_version",
  "(",
  "self",
  ",",
  "build_os",
  ")",
  ":",
  "if",
  "self",
  ".",
  "n",
  "==",
  "'suse'",
  ":",
  "return",
  "re",
  ".",
  "sub",
  "(",
  "r'^suse(\\d+)$'",
  ",",
  "r'\\1'",
  ",",
  "build_os",
  ")",
  "if",
  "self",
  ".",
  "n",
  "==",
  "'redhat'",
  ":",
  "return",
  "re",
  ".",
  "sub",
  "(",
  "r'^rhel(\\d).*$'",
  ",",
  "r'\\1'",
  ",",
  "build_os",
  ")",
  "if",
  "self",
  ".",
  "n",
  "==",
  "'amazon'",
  ":",
  "return",
  "\"2013.03\"",
  "elif",
  "self",
  ".",
  "n",
  "==",
  "'ubuntu'",
  ":",
  "if",
  "build_os",
  "==",
  "'ubuntu1204'",
  ":",
  "return",
  "\"precise\"",
  "elif",
  "build_os",
  "==",
  "'ubuntu1404'",
  ":",
  "return",
  "\"trusty\"",
  "elif",
  "build_os",
  "==",
  "'ubuntu1604'",
  ":",
  "return",
  "\"xenial\"",
  "else",
  ":",
  "raise",
  "Exception",
  "(",
  "\"unsupported build_os: %s\"",
  "%",
  "build_os",
  ")",
  "elif",
  "self",
  ".",
  "n",
  "==",
  "'debian'",
  ":",
  "if",
  "build_os",
  "==",
  "'debian71'",
  ":",
  "return",
  "'wheezy'",
  "elif",
  "build_os",
  "==",
  "'debian81'",
  ":",
  "return",
  "'jessie'",
  "else",
  ":",
  "raise",
  "Exception",
  "(",
  "\"unsupported build_os: %s\"",
  "%",
  "build_os",
  ")",
  "else",
  ":",
  "raise",
  "Exception",
  "(",
  "\"unsupported distro: %s\"",
  "%",
  "self",
  ".",
  "n",
  ")"
] | 
	https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/buildscripts/packager.py#L197-L224 | ||
| 
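A hedged walk-through of the mapping above; that Distro takes the distro name as its constructor argument is an assumption inferred from self.n:

    Distro("ubuntu").repo_os_version("ubuntu1404")   # -> "trusty"
    Distro("redhat").repo_os_version("rhel70")       # -> "7"
    Distro("debian").repo_os_version("debian81")     # -> "jessie"
    Distro("suse").repo_os_version("suse11")         # -> "11"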
	infinit/memo | 
	3a8394d0f647efe03ccb8bfe885a7279cb8be8a6 | 
	elle/drake/src/drake/__init__.py | 
	python | 
	BaseNode.drake_type | 
	(self) | 
	return '%s.%s' % (self.__module__, self.__name__) | 
	The qualified name of this type. | 
	The qualified name of this type. | 
	[
  "The",
  "qualified",
  "name",
  "of",
  "this",
  "type",
  "."
] | 
	def drake_type(self):
      """The qualified name of this type."""
      return '%s.%s' % (self.__module__, self.__name__) | 
	[
  "def",
  "drake_type",
  "(",
  "self",
  ")",
  ":",
  "return",
  "'%s.%s'",
  "%",
  "(",
  "self",
  ".",
  "__module__",
  ",",
  "self",
  ".",
  "__name__",
  ")"
] | 
	https://github.com/infinit/memo/blob/3a8394d0f647efe03ccb8bfe885a7279cb8be8a6/elle/drake/src/drake/__init__.py#L1391-L1393 | |
| 
	benoitsteiner/tensorflow-opencl | 
	cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | 
	tensorflow/contrib/distributions/python/ops/binomial.py | 
	python | 
	Binomial.probs | 
	(self) | 
	return self._probs | 
	Probability of drawing a `1`. | 
	Probability of drawing a `1`. | 
	[
  "Probability",
  "of",
  "drawing",
  "a",
  "1",
  "."
] | 
	def probs(self):
    """Probability of drawing a `1`."""
    return self._probs | 
	[
  "def",
  "probs",
  "(",
  "self",
  ")",
  ":",
  "return",
  "self",
  ".",
  "_probs"
] | 
	https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/distributions/python/ops/binomial.py#L198-L200 | |
| 
	Kitware/kwiver | 
	7ed70308905698b6e88d27ae3dc028c9b016ca0a | 
	python/kwiver/sprokit/processes/kwiver_process.py | 
	python | 
	KwiverProcess.config_value_using_trait | 
	(self, name) | 
	return self.config_value(ct.name) | 
	Get value from config using trait.
        An exception will be thrown if the config trait has not been registered
        with the process.
        :param name: Name of the configuration trait. | 
	Get value from config using trait.
        An exception will be thrown if the config trait has not been registered
        with the process. | 
	[
  "Get",
  "value",
  "from",
  "config",
  "using",
  "trait",
  ".",
  "An",
  "exception",
  "will",
  "be",
  "thrown",
  "if",
  "the",
  "config",
  "trait",
  "has",
  "not",
  "been",
  "registered",
  "with",
  "the",
  "process",
  "."
] | 
	def config_value_using_trait(self, name):
        """
        Get value from config using trait.
        An exception will be thrown if the config trait has not been registered
        with the process.
        :param name: Name of the configuration trait.
        """
        ct = self._config_trait_set.get(name, None)
        if ct is None:
            raise ValueError('config trait name \"%s\" not registered' % name)
        return self.config_value(ct.name) | 
	[
  "def",
  "config_value_using_trait",
  "(",
  "self",
  ",",
  "name",
  ")",
  ":",
  "ct",
  "=",
  "self",
  ".",
  "_config_trait_set",
  ".",
  "get",
  "(",
  "name",
  ",",
  "None",
  ")",
  "if",
  "ct",
  "is",
  "None",
  ":",
  "raise",
  "ValueError",
  "(",
  "'config trait name \\\"%s\\\" not registered'",
  "%",
  "name",
  ")",
  "return",
  "self",
  ".",
  "config_value",
  "(",
  "ct",
  ".",
  "name",
  ")"
] | 
	https://github.com/Kitware/kwiver/blob/7ed70308905698b6e88d27ae3dc028c9b016ca0a/python/kwiver/sprokit/processes/kwiver_process.py#L369-L382 | |
| 
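A sketch of the trait round trip in a derived process; the helper names (add_config_trait, declare_config_using_trait) follow this class's trait conventions, but treat the exact signatures as assumptions:

    from kwiver.sprokit.processes.kwiver_process import KwiverProcess

    class ThresholdProcess(KwiverProcess):
        def __init__(self, conf):
            KwiverProcess.__init__(self, conf)
            # Register the trait first: config_value_using_trait raises
            # ValueError for names that were never registered.
            self.add_config_trait("threshold", "threshold", "0.5",
                                  "Detection confidence threshold.")
            self.declare_config_using_trait("threshold")

        def _configure(self):
            self.thresh = float(self.config_value_using_trait("threshold"))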
	luliyucoordinate/Leetcode | 
	96afcdc54807d1d184e881a075d1dbf3371e31fb | 
	src/0019-Remove-Nth-Node-From-End-of-List/0019.py | 
	python | 
	Solution.removeNthFromEnd | 
	(self, head, n) | 
	return h.next | 
	:type head: ListNode
        :type n: int
        :rtype: ListNode | 
	:type head: ListNode
        :type n: int
        :rtype: ListNode | 
	[
  ":",
  "type",
  "head",
  ":",
  "ListNode",
  ":",
  "type",
  "n",
  ":",
  "int",
  ":",
  "rtype",
  ":",
  "ListNode"
] | 
	def removeNthFromEnd(self, head, n):
        """
        :type head: ListNode
        :type n: int
        :rtype: ListNode
        """
        h = ListNode(-1)
        h.next = head
        p, q = h, h
        for _ in range(n + 1):
            assert (q)
            q = q.next
        while q is not None:
            p = p.next
            q = q.next
        p.next = p.next.next
        return h.next | 
	[
  "def",
  "removeNthFromEnd",
  "(",
  "self",
  ",",
  "head",
  ",",
  "n",
  ")",
  ":",
  "h",
  "=",
  "ListNode",
  "(",
  "-",
  "1",
  ")",
  "h",
  ".",
  "next",
  "=",
  "head",
  "p",
  ",",
  "q",
  "=",
  "h",
  ",",
  "h",
  "for",
  "_",
  "in",
  "range",
  "(",
  "n",
  "+",
  "1",
  ")",
  ":",
  "assert",
  "(",
  "q",
  ")",
  "q",
  "=",
  "q",
  ".",
  "next",
  "while",
  "q",
  "!=",
  "None",
  ":",
  "p",
  "=",
  "p",
  ".",
  "next",
  "q",
  "=",
  "q",
  ".",
  "next",
  "p",
  ".",
  "next",
  "=",
  "p",
  ".",
  "next",
  ".",
  "next",
  "return",
  "h",
  ".",
  "next"
] | 
	https://github.com/luliyucoordinate/Leetcode/blob/96afcdc54807d1d184e881a075d1dbf3371e31fb/src/0019-Remove-Nth-Node-From-End-of-List/0019.py#L8-L27 | |
| 
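A worked example of the two-pointer gap the solution relies on: q is advanced n + 1 steps from the dummy head, so when q runs off the end, p sits just before the node to unlink. ListNode is redefined here to keep the sketch self-contained:

    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None

    head = None
    for v in (5, 4, 3, 2, 1):          # builds 1->2->3->4->5
        node = ListNode(v)
        node.next = head
        head = node

    head = Solution().removeNthFromEnd(head, 2)   # now 1->2->3->5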
	stulp/dmpbbo | 
	ca900e3b851d25faaf59ea296650370c70ed7d0f | 
	python/bbo/updaters.py | 
	python | 
	costsToWeights | 
	(costs, weighting_method, eliteness) | 
	return weights | 
	Convert costs to weights using different weighting methods.
    \param[in] costs A vector of costs.
    \param[in] weighting_method The weighting method ('PI-BB','CMA-ES','CEM')
    \param[in] eliteness The eliteness parameter (h in PI-BB, mu in CMA-ES)
    \return A vector of weights (they always sum to 1). | 
	Convert costs to weights using different weighting methods.
    \param[in] costs A vector of costs.
    \param[in] weighting_method The weighting method ('PI-BB','CMA-ES','CEM')
    \param[in] eliteness The eliteness parameter (h in PI-BB, mu in CMA-ES)
    \return A vector of weights (they always sum to 1). | 
	[
  "Convert",
  "costs",
  "to",
  "weights",
  "using",
  "different",
  "weighting",
  "methods",
  ".",
  "\\",
  "param",
  "[",
  "in",
  "]",
  "costs",
  "A",
  "vector",
  "of",
  "costs",
  ".",
  "\\",
  "param",
  "[",
  "in",
  "]",
  "weighting_method",
  "The",
  "weighting",
  "method",
  "(",
  "PI",
  "-",
  "BB",
  "CMA",
  "-",
  "ES",
  "CEM",
  ")",
  "\\",
  "param",
  "[",
  "in",
  "]",
  "eliteness",
  "The",
  "eliteness",
  "parameter",
  "(",
  "h",
  "in",
  "PI",
  "-",
  "BB",
  "mu",
  "in",
  "CMA",
  "-",
  "ES",
  ")",
  "\\",
  "return",
  "A",
  "vector",
  "of",
  "weights",
  "(",
  "they",
  "always",
  "sum",
  "to",
  "1",
  ")",
  "."
] | 
	def costsToWeights(costs, weighting_method, eliteness):
    """ Convert costs to weights using different weighting methods.
    \param[in] costs A vector of costs.
    \param[in] weighting_method The weighting method ('PI-BB','CMA-ES','CEM')
    \param[in] eliteness The eliteness parameter (h in PI-BB, mu in CMA-ES)
    \return A vector of weights (they always sum to 1).
    """
    
    # Costs can be a 2D array or a list of lists. In that case, the first
    # column is the sum of the other columns (which contain the different cost
    # components), so we should use only the first column.
    costs = np.asarray([np.atleast_1d(x)[0] for x in costs])
    #np.set_printoptions(precision=4, suppress=True)
    if weighting_method == 'PI-BB':
        # PI^2 style weighting: continuous, cost exponentiation
        h = eliteness # In PI^2, eliteness parameter is known as "h"
        costs_range = max(costs)-min(costs)
        if costs_range==0:
            weights = np.full(costs.shape,1.0)
        else:
            costs_norm = np.asarray([-h*(x-min(costs))/costs_range for x in costs])
            weights = np.exp(costs_norm)
    elif weighting_method=='CEM' or weighting_method=='CMA-ES':
        # CEM/CMA-ES style weights: rank-based, uses defaults
        mu = eliteness # In CMA-ES, eliteness parameter is known as "mu"
        indices = np.argsort(costs)
        weights = np.full(costs.size,0.0)
        if weighting_method=='CEM':
            # CEM
            weights[indices[0:mu]] = 1.0/mu
        else:
            # CMA-ES
            for ii in range(mu):
                weights[indices[ii]] = np.log(mu+0.5)-np.log(ii+1)
            
    else:
        print("WARNING: Unknown weighting method '", weighting_method, "'. Calling with PI-BB weighting."); 
        return costsToWeights(costs, 'PI-BB', eliteness);
  
    #// Relative standard deviation of total costs
    #double mean = weights.mean();
    #double std = sqrt((weights.array()-mean).pow(2).mean());
    #double rel_std = std/mean;
    #if (rel_std<1e-10)
    #{
    #    // Special case: all costs are the same
    #    // Set same weights for all.
    #    weights.fill(1);
    #}
    # Normalize weights
    weights = weights/sum(weights)
    
    return weights | 
	[
  "def",
  "costsToWeights",
  "(",
  "costs",
  ",",
  "weighting_method",
  ",",
  "eliteness",
  ")",
  ":",
  "# Costs can be a 2D array or a list of lists. In this case, the first",
  "# column is the sum of the other columns (which contain the different cost",
  "# components). In this  case, we should use only the first column.",
  "costs",
  "=",
  "np",
  ".",
  "asarray",
  "(",
  "[",
  "np",
  ".",
  "atleast_1d",
  "(",
  "x",
  ")",
  "[",
  "0",
  "]",
  "for",
  "x",
  "in",
  "costs",
  "]",
  ")",
  "#np.set_printoptions(precision=4, suppress=True)",
  "if",
  "weighting_method",
  "==",
  "'PI-BB'",
  ":",
  "# PI^2 style weighting: continuous, cost exponention",
  "h",
  "=",
  "eliteness",
  "# In PI^2, eliteness parameter is known as \"h\"",
  "costs_range",
  "=",
  "max",
  "(",
  "costs",
  ")",
  "-",
  "min",
  "(",
  "costs",
  ")",
  "if",
  "costs_range",
  "==",
  "0",
  ":",
  "weights",
  "=",
  "np",
  ".",
  "full",
  "(",
  "costs",
  ".",
  "shape",
  ",",
  "1.0",
  ")",
  "else",
  ":",
  "costs_norm",
  "=",
  "np",
  ".",
  "asarray",
  "(",
  "[",
  "-",
  "h",
  "*",
  "(",
  "x",
  "-",
  "min",
  "(",
  "costs",
  ")",
  ")",
  "/",
  "costs_range",
  "for",
  "x",
  "in",
  "costs",
  "]",
  ")",
  "weights",
  "=",
  "np",
  ".",
  "exp",
  "(",
  "costs_norm",
  ")",
  "elif",
  "weighting_method",
  "==",
  "'CEM'",
  "or",
  "weighting_method",
  "==",
  "'CMA-ES'",
  ":",
  "# CEM/CMA-ES style weights: rank-based, uses defaults",
  "mu",
  "=",
  "eliteness",
  "# In CMA-ES, eliteness parameter is known as \"mu\"",
  "indices",
  "=",
  "np",
  ".",
  "argsort",
  "(",
  "costs",
  ")",
  "weights",
  "=",
  "np",
  ".",
  "full",
  "(",
  "costs",
  ".",
  "size",
  ",",
  "0.0",
  ")",
  "if",
  "weighting_method",
  "==",
  "'CEM'",
  ":",
  "# CEM",
  "weights",
  "[",
  "indices",
  "[",
  "0",
  ":",
  "mu",
  "]",
  "]",
  "=",
  "1.0",
  "/",
  "mu",
  "else",
  ":",
  "# CMA-ES",
  "for",
  "ii",
  "in",
  "range",
  "(",
  "mu",
  ")",
  ":",
  "weights",
  "[",
  "indices",
  "[",
  "ii",
  "]",
  "]",
  "=",
  "np",
  ".",
  "log",
  "(",
  "mu",
  "+",
  "0.5",
  ")",
  "-",
  "np",
  ".",
  "log",
  "(",
  "ii",
  "+",
  "1",
  ")",
  "else",
  ":",
  "print",
  "(",
  "\"WARNING: Unknown weighting method '\"",
  ",",
  "weighting_method",
  ",",
  "\"'. Calling with PI-BB weighting.\"",
  ")",
  "return",
  "costsToWeights",
  "(",
  "costs",
  ",",
  "'PI-BB'",
  ",",
  "eliteness",
  ")",
  "#// Relative standard deviation of total costs",
  "#double mean = weights.mean();",
  "#double std = sqrt((weights.array()-mean).pow(2).mean());",
  "#double rel_std = std/mean;",
  "#if (rel_std<1e-10)",
  "#{",
  "#    // Special case: all costs are the same",
  "#    // Set same weights for all.",
  "#    weights.fill(1);",
  "#}",
  "# Normalize weights",
  "weights",
  "=",
  "weights",
  "/",
  "sum",
  "(",
  "weights",
  ")",
  "return",
  "weights"
] | 
	https://github.com/stulp/dmpbbo/blob/ca900e3b851d25faaf59ea296650370c70ed7d0f/python/bbo/updaters.py#L199-L254 | |
| 
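A small demonstration of the two weighting families above — a sketch assuming numpy as np and costsToWeights are in scope, as they are in updaters.py. PI-BB rewards low cost smoothly and exponentially; CEM simply puts weight 1/mu on the mu best samples.

import numpy as np

costs = np.array([3.0, 1.0, 4.0, 1.5, 9.0])

w_pi = costsToWeights(costs, 'PI-BB', eliteness=10)
print(w_pi.round(3))   # largest weight on the lowest cost, decaying exponentially

w_cem = costsToWeights(costs, 'CEM', eliteness=2)
print(w_cem.round(3))  # [0.  0.5 0.  0.5 0. ] -- the two cheapest samples share all weight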
	apple/turicreate | 
	cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | 
	src/external/coremltools_wrap/coremltools/coremltools/converters/mil/frontend/torch/ops.py | 
	python | 
	convert_block | 
	(context, block, inputs) | 
	return outputs | 
	Convert a block (sub-graph) to MIL. Conversion happens within a new
        context frame.
        Arguments:
            context: A TranscriptionContext object to pull node inputs and
                assign node outputs.
            block: An InternalTorchIRBlock object.
            inputs: List of Vars from the outer context that map to the block's
                expected inputs. The number of inputs provided must match the
                number expected by the block. | 
	Convert a block (sub-graph) to MIL. Conversion happens within a new
        context frame. | 
	[
  "Convert",
  "a",
  "block",
  "(",
  "sub",
  "-",
  "graph",
  ")",
  "to",
  "MIL",
  ".",
  "Conversion",
  "happens",
  "within",
  "a",
  "new",
  "context",
  "frame",
  "."
] | 
	def convert_block(context, block, inputs):
    """Convert a block (sub-graph) to MIL. Conversion happens within a new
        context frame.
        Arguments:
            context: A TranscriptionContext object to pull node inputs and
                assign node outputs.
            block: An InternalTorchIRBlock object.
            inputs: List of Vars from the outer context that map to the block's
                expected inputs. The number of inputs provided must match the
                number expected by the block.
    """
    assert len(block.inputs) == len(inputs)
    # Start a new context frame.
    context.push((block.inputs, inputs))
    # Add the block ops.
    convert_nodes(context, block)
    # Collect the block outputs.
    outputs = [context[outp] for outp in block.outputs]
    # Return to the previous context frame.
    context.pop()
    return outputs | 
	[
  "def",
  "convert_block",
  "(",
  "context",
  ",",
  "block",
  ",",
  "inputs",
  ")",
  ":",
  "assert",
  "len",
  "(",
  "block",
  ".",
  "inputs",
  ")",
  "==",
  "len",
  "(",
  "inputs",
  ")",
  "# Start a new context frame.",
  "context",
  ".",
  "push",
  "(",
  "(",
  "block",
  ".",
  "inputs",
  ",",
  "inputs",
  ")",
  ")",
  "# Add the block ops.",
  "convert_nodes",
  "(",
  "context",
  ",",
  "block",
  ")",
  "# Collect the block outputs.",
  "outputs",
  "=",
  "[",
  "context",
  "[",
  "outp",
  "]",
  "for",
  "outp",
  "in",
  "block",
  ".",
  "outputs",
  "]",
  "# Return to the previous context frame.",
  "context",
  ".",
  "pop",
  "(",
  ")",
  "return",
  "outputs"
] | 
	https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/coremltools/converters/mil/frontend/torch/ops.py#L62-L88 | |
| 
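convert_block leans on the context's frame stack: inputs are bound in a fresh frame, the block body resolves names against it, and popping restores the outer scope. A toy sketch of that pattern (ToyContext is a hypothetical stand-in, not the coremltools TranscriptionContext API):

class ToyContext:
    def __init__(self):
        self._frames = [{}]

    def push(self, bindings=None):
        # Enter a new scope, optionally pre-bound with (names, values).
        frame = {}
        if bindings is not None:
            names, values = bindings
            frame.update(zip(names, values))
        self._frames.append(frame)

    def pop(self):
        self._frames.pop()

    def __getitem__(self, name):
        # Resolve from the innermost frame outward.
        for frame in reversed(self._frames):
            if name in frame:
                return frame[name]
        raise KeyError(name)

ctx = ToyContext()
ctx.push((["x"], [1.0]))  # like context.push((block.inputs, inputs))
print(ctx["x"])           # 1.0
ctx.pop()                 # back to the outer frame; "x" is no longer visible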
	google/llvm-propeller | 
	45c226984fe8377ebfb2ad7713c680d652ba678d | 
	clang/bindings/python/clang/cindex.py | 
	python | 
	Type.get_ref_qualifier | 
	(self) | 
	return RefQualifierKind.from_id(
                conf.lib.clang_Type_getCXXRefQualifier(self)) | 
	Retrieve the ref-qualifier of the type. | 
	Retrieve the ref-qualifier of the type. | 
	[
  "Retrieve",
  "the",
  "ref",
  "-",
  "qualifier",
  "of",
  "the",
  "type",
  "."
] | 
	def get_ref_qualifier(self):
        """
        Retrieve the ref-qualifier of the type.
        """
        return RefQualifierKind.from_id(
                conf.lib.clang_Type_getCXXRefQualifier(self)) | 
	[
  "def",
  "get_ref_qualifier",
  "(",
  "self",
  ")",
  ":",
  "return",
  "RefQualifierKind",
  ".",
  "from_id",
  "(",
  "conf",
  ".",
  "lib",
  ".",
  "clang_Type_getCXXRefQualifier",
  "(",
  "self",
  ")",
  ")"
] | 
	https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/clang/bindings/python/clang/cindex.py#L2396-L2401 | |
| 
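A possible end-to-end use of get_ref_qualifier — a sketch assuming libclang is installed and discoverable by clang.cindex (Config.set_library_file may be needed on some systems). It parses an in-memory C++ snippet and prints the ref-qualifier of each method:

from clang.cindex import Index, CursorKind

source = '''
struct S {
    void plain();
    void lref() &;
    void rref() &&;
};
'''

tu = Index.create().parse('t.cpp', args=['-std=c++11'],
                          unsaved_files=[('t.cpp', source)])
for c in tu.cursor.walk_preorder():
    if c.kind == CursorKind.CXX_METHOD:
        # Expect NONE, LVALUE, RVALUE for plain/lref/rref respectively.
        print(c.spelling, c.type.get_ref_qualifier())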
	MegEngine/MegEngine | 
	ce9ad07a27ec909fb8db4dd67943d24ba98fb93a | 
	imperative/python/megengine/functional/math.py | 
	python | 
	dot | 
	(inp1: Tensor, inp2: Tensor) | 
	return result | 
	r"""Computes dot-product of two vectors ``inp1`` and ``inp2``.
    inputs must be 1-dimensional or scalar. A scalar input is automatically broadcasted.
    Refer to :func:`~.matmul` for more general usage.
    Args:
        inp1: first vector.
        inp2: second vector.
    Returns:
        output value.
    Examples:
        .. testcode::
            import numpy as np
            from megengine import tensor
            import megengine.functional as F
            data1 = tensor(np.arange(0, 6, dtype=np.float32))
            data2 = tensor(np.arange(0, 6, dtype=np.float32))
            out = F.dot(data1, data2)
            print(out.numpy())
        Outputs:
        .. testoutput::
            55. | 
	r"""Computes dot-product of two vectors ``inp1`` and ``inp2``.
    inputs must be 1-dimensional or scalar. A scalar input is automatically broadcasted.
    Refer to :func:`~.matmul` for more general usage. | 
	[
  "r",
  "Computes",
  "dot",
  "-",
  "product",
  "of",
  "two",
  "vectors",
  "inp1",
  "and",
  "inp2",
  ".",
  "inputs",
  "must",
  "be",
  "1",
  "-",
  "dimensional",
  "or",
  "scalar",
  ".",
  "A",
  "scalar",
  "input",
  "is",
  "automatically",
  "broadcasted",
  ".",
  "Refer",
  "to",
  ":",
  "func",
  ":",
  "~",
  ".",
  "matmul",
  "for",
  "more",
  "general",
  "usage",
  "."
] | 
	def dot(inp1: Tensor, inp2: Tensor) -> Tensor:
    r"""Computes dot-product of two vectors ``inp1`` and ``inp2``.
    inputs must be 1-dimensional or scalar. A scalar input is automatically broadcasted.
    Refer to :func:`~.matmul` for more general usage.
    Args:
        inp1: first vector.
        inp2: second vector.
    Returns:
        output value.
    Examples:
        .. testcode::
            import numpy as np
            from megengine import tensor
            import megengine.functional as F
            data1 = tensor(np.arange(0, 6, dtype=np.float32))
            data2 = tensor(np.arange(0, 6, dtype=np.float32))
            out = F.dot(data1, data2)
            print(out.numpy())
        Outputs:
        .. testoutput::
            55.
    """
    op = builtin.Dot()
    assert (
        inp1.ndim <= 1 and inp2.ndim <= 1
    ), "Input tensors for dot must be 1-dimensional or scalar"
    (result,) = apply(op, inp1, inp2)
    return result | 
	[
  "def",
  "dot",
  "(",
  "inp1",
  ":",
  "Tensor",
  ",",
  "inp2",
  ":",
  "Tensor",
  ")",
  "->",
  "Tensor",
  ":",
  "op",
  "=",
  "builtin",
  ".",
  "Dot",
  "(",
  ")",
  "assert",
  "(",
  "inp1",
  ".",
  "ndim",
  "<=",
  "1",
  "and",
  "inp2",
  ".",
  "ndim",
  "<=",
  "1",
  ")",
  ",",
  "\"Input tensors for dot must be 1-dimensional or scalar\"",
  "(",
  "result",
  ",",
  ")",
  "=",
  "apply",
  "(",
  "op",
  ",",
  "inp1",
  ",",
  "inp2",
  ")",
  "return",
  "result"
] | 
	https://github.com/MegEngine/MegEngine/blob/ce9ad07a27ec909fb8db4dd67943d24ba98fb93a/imperative/python/megengine/functional/math.py#L1116-L1152 | |
| 
	microsoft/DirectXShaderCompiler | 
	8348ff8d9e0287610ba05d3a828e10af981a1c05 | 
	tools/clang/bindings/python/clang/cindex.py | 
	python | 
	SourceLocation.offset | 
	(self) | 
	return self._get_instantiation()[3] | 
	Get the file offset represented by this source location. | 
	Get the file offset represented by this source location. | 
	[
  "Get",
  "the",
  "file",
  "offset",
  "represented",
  "by",
  "this",
  "source",
  "location",
  "."
] | 
	def offset(self):
        """Get the file offset represented by this source location."""
        return self._get_instantiation()[3] | 
	[
  "def",
  "offset",
  "(",
  "self",
  ")",
  ":",
  "return",
  "self",
  ".",
  "_get_instantiation",
  "(",
  ")",
  "[",
  "3",
  "]"
] | 
	https://github.com/microsoft/DirectXShaderCompiler/blob/8348ff8d9e0287610ba05d3a828e10af981a1c05/tools/clang/bindings/python/clang/cindex.py#L213-L215 | |
| 
	aws/lumberyard | 
	f85344403c1c2e77ec8c75deb2c116e97b713217 | 
	dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/command/build_py.py | 
	python | 
	build_py.build_package_data | 
	(self) | 
	Copy data files into build directory | 
	Copy data files into build directory | 
	[
  "Copy",
  "data",
  "files",
  "into",
  "build",
  "directory"
] | 
	def build_package_data(self):
        """Copy data files into build directory"""
        lastdir = None
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                self.mkpath(os.path.dirname(target))
                self.copy_file(os.path.join(src_dir, filename), target,
                               preserve_mode=False) | 
	[
  "def",
  "build_package_data",
  "(",
  "self",
  ")",
  ":",
  "lastdir",
  "=",
  "None",
  "for",
  "package",
  ",",
  "src_dir",
  ",",
  "build_dir",
  ",",
  "filenames",
  "in",
  "self",
  ".",
  "data_files",
  ":",
  "for",
  "filename",
  "in",
  "filenames",
  ":",
  "target",
  "=",
  "os",
  ".",
  "path",
  ".",
  "join",
  "(",
  "build_dir",
  ",",
  "filename",
  ")",
  "self",
  ".",
  "mkpath",
  "(",
  "os",
  ".",
  "path",
  ".",
  "dirname",
  "(",
  "target",
  ")",
  ")",
  "self",
  ".",
  "copy_file",
  "(",
  "os",
  ".",
  "path",
  ".",
  "join",
  "(",
  "src_dir",
  ",",
  "filename",
  ")",
  ",",
  "target",
  ",",
  "preserve_mode",
  "=",
  "False",
  ")"
] | 
	https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/command/build_py.py#L134-L142 | ||
| 
	google/brunsli | 
	e811197ab1ad8ddde3e3cf444548e42e2bdacf92 | 
	contrib/py/jxl_library_patches/jxl_utils.py | 
	python | 
	is_jpegxl_recompressed_jpeg_file | 
	(filename) | 
	Returns True iff the given filename is a genuine JPEG-XL file. | 
	Returns True iff the given filename is a genuine JPEG-XL file. | 
	[
  "Returns",
  "True",
  "iff",
  "the",
  "given",
  "filename",
  "is",
  "a",
  "genuine",
  "JPEG",
  "-",
  "XL",
  "file",
  "."
] | 
	def is_jpegxl_recompressed_jpeg_file(filename):
  """Returns True iff the given filename is a genuine JPEG-XL file."""
  try:
    with open(filename, 'rb') as h:
      header = h.read(len(JPEGXL_RECOMPRESSED_JPEG_HEADER))
      # Cf. https://arxiv.org/pdf/1908.03565.pdf, section 9.1,
      # on recompressed-JPEG header.
      return header == JPEGXL_RECOMPRESSED_JPEG_HEADER
  except:  # pylint:disable=bare-except
    # If anything failed, this means that we cannot establish that the file
    # has the expected header, so we return False.
    return False | 
	[
  "def",
  "is_jpegxl_recompressed_jpeg_file",
  "(",
  "filename",
  ")",
  ":",
  "try",
  ":",
  "with",
  "open",
  "(",
  "filename",
  ",",
  "'rb'",
  ")",
  "as",
  "h",
  ":",
  "header",
  "=",
  "h",
  ".",
  "read",
  "(",
  "len",
  "(",
  "JPEGXL_RECOMPRESSED_JPEG_HEADER",
  ")",
  ")",
  "# Cf. https://arxiv.org/pdf/1908.03565.pdf, section 9.1,",
  "# on recompressed-JPEG header.",
  "return",
  "header",
  "==",
  "JPEGXL_RECOMPRESSED_JPEG_HEADER",
  "except",
  ":",
  "# pylint:disable=bare-except",
  "# If anything failed, this means that we cannot establish that the file",
  "# has the expected header, so we return False.",
  "return",
  "False"
] | 
	https://github.com/google/brunsli/blob/e811197ab1ad8ddde3e3cf444548e42e2bdacf92/contrib/py/jxl_library_patches/jxl_utils.py#L44-L55 | ||
| 
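The function above is an instance of a magic-bytes check: read just enough of the file to compare against a known header, and treat any failure as "not this format". A generic sketch of the same pattern (EXPECTED_MAGIC is a placeholder; this row does not show the real JPEGXL_RECOMPRESSED_JPEG_HEADER value, and OSError is caught here rather than the original's deliberate bare except):

EXPECTED_MAGIC = b'\x00\x01'  # hypothetical header bytes

def has_magic(filename, magic=EXPECTED_MAGIC):
    try:
        with open(filename, 'rb') as h:
            # Read only as many bytes as the header is long.
            return h.read(len(magic)) == magic
    except OSError:
        return False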
	sdhash/sdhash | 
	b9eff63e4e5867e910f41fd69032bbb1c94a2a5e | 
	sdhash-ui/cherrypy/process/servers.py | 
	python | 
	client_host | 
	(server_host) | 
	return server_host | 
	Return the host on which a client can connect to the given listener. | 
	Return the host on which a client can connect to the given listener. | 
	[
  "Return",
  "the",
  "host",
  "on",
  "which",
  "a",
  "client",
  "can",
  "connect",
  "to",
  "the",
  "given",
  "listener",
  "."
] | 
	def client_host(server_host):
    """Return the host on which a client can connect to the given listener."""
    if server_host == '0.0.0.0':
        # 0.0.0.0 is INADDR_ANY, which should answer on localhost.
        return '127.0.0.1'
    if server_host in ('::', '::0', '::0.0.0.0'):
        # :: is IN6ADDR_ANY, which should answer on localhost.
        # ::0 and ::0.0.0.0 are non-canonical but common ways to write IN6ADDR_ANY.
        return '::1'
    return server_host | 
	[
  "def",
  "client_host",
  "(",
  "server_host",
  ")",
  ":",
  "if",
  "server_host",
  "==",
  "'0.0.0.0'",
  ":",
  "# 0.0.0.0 is INADDR_ANY, which should answer on localhost.",
  "return",
  "'127.0.0.1'",
  "if",
  "server_host",
  "in",
  "(",
  "'::'",
  ",",
  "'::0'",
  ",",
  "'::0.0.0.0'",
  ")",
  ":",
  "# :: is IN6ADDR_ANY, which should answer on localhost.",
  "# ::0 and ::0.0.0.0 are non-canonical but common ways to write IN6ADDR_ANY.",
  "return",
  "'::1'",
  "return",
  "server_host"
] | 
	https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/sdhash-ui/cherrypy/process/servers.py#L340-L349 | |
| 
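Expected behavior, for reference — the wildcard bind addresses map to the corresponding loopback, and anything else passes through unchanged:

print(client_host('0.0.0.0'))     # 127.0.0.1
print(client_host('::'))          # ::1
print(client_host('192.0.2.10'))  # 192.0.2.10 (returned as-is)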
	catboost/catboost | 
	167f64f237114a4d10b2b4ee42adb4569137debe | 
	contrib/python/scipy/py2/scipy/signal/waveforms.py | 
	python | 
	unit_impulse | 
	(shape, idx=None, dtype=float) | 
	return out | 
	Unit impulse signal (discrete delta function) or unit basis vector.
    Parameters
    ----------
    shape : int or tuple of int
        Number of samples in the output (1-D), or a tuple that represents the
        shape of the output (N-D).
    idx : None or int or tuple of int or 'mid', optional
        Index at which the value is 1.  If None, defaults to the 0th element.
        If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in
        all dimensions.  If an int, the impulse will be at `idx` in all
        dimensions.
    dtype : data-type, optional
        The desired data-type for the array, e.g., `numpy.int8`.  Default is
        `numpy.float64`.
    Returns
    -------
    y : ndarray
        Output array containing an impulse signal.
    Notes
    -----
    The 1D case is also known as the Kronecker delta.
    .. versionadded:: 0.19.0
    Examples
    --------
    An impulse at the 0th element (:math:`\\delta[n]`):
    >>> from scipy import signal
    >>> signal.unit_impulse(8)
    array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])
    Impulse offset by 2 samples (:math:`\\delta[n-2]`):
    >>> signal.unit_impulse(7, 2)
    array([ 0.,  0.,  1.,  0.,  0.,  0.,  0.])
    2-dimensional impulse, centered:
    >>> signal.unit_impulse((3, 3), 'mid')
    array([[ 0.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  0.]])
    Impulse at (2, 2), using broadcasting:
    >>> signal.unit_impulse((4, 4), 2)
    array([[ 0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.]])
    Plot the impulse response of a 4th-order Butterworth lowpass filter:
    >>> imp = signal.unit_impulse(100, 'mid')
    >>> b, a = signal.butter(4, 0.2)
    >>> response = signal.lfilter(b, a, imp)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-50, 50), imp)
    >>> plt.plot(np.arange(-50, 50), response)
    >>> plt.margins(0.1, 0.1)
    >>> plt.xlabel('Time [samples]')
    >>> plt.ylabel('Amplitude')
    >>> plt.grid(True)
    >>> plt.show() | 
	Unit impulse signal (discrete delta function) or unit basis vector. | 
	[
  "Unit",
  "impulse",
  "signal",
  "(",
  "discrete",
  "delta",
  "function",
  ")",
  "or",
  "unit",
  "basis",
  "vector",
  "."
] | 
	def unit_impulse(shape, idx=None, dtype=float):
    """
    Unit impulse signal (discrete delta function) or unit basis vector.
    Parameters
    ----------
    shape : int or tuple of int
        Number of samples in the output (1-D), or a tuple that represents the
        shape of the output (N-D).
    idx : None or int or tuple of int or 'mid', optional
        Index at which the value is 1.  If None, defaults to the 0th element.
        If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in
        all dimensions.  If an int, the impulse will be at `idx` in all
        dimensions.
    dtype : data-type, optional
        The desired data-type for the array, e.g., `numpy.int8`.  Default is
        `numpy.float64`.
    Returns
    -------
    y : ndarray
        Output array containing an impulse signal.
    Notes
    -----
    The 1D case is also known as the Kronecker delta.
    .. versionadded:: 0.19.0
    Examples
    --------
    An impulse at the 0th element (:math:`\\delta[n]`):
    >>> from scipy import signal
    >>> signal.unit_impulse(8)
    array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])
    Impulse offset by 2 samples (:math:`\\delta[n-2]`):
    >>> signal.unit_impulse(7, 2)
    array([ 0.,  0.,  1.,  0.,  0.,  0.,  0.])
    2-dimensional impulse, centered:
    >>> signal.unit_impulse((3, 3), 'mid')
    array([[ 0.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  0.]])
    Impulse at (2, 2), using broadcasting:
    >>> signal.unit_impulse((4, 4), 2)
    array([[ 0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.]])
    Plot the impulse response of a 4th-order Butterworth lowpass filter:
    >>> imp = signal.unit_impulse(100, 'mid')
    >>> b, a = signal.butter(4, 0.2)
    >>> response = signal.lfilter(b, a, imp)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-50, 50), imp)
    >>> plt.plot(np.arange(-50, 50), response)
    >>> plt.margins(0.1, 0.1)
    >>> plt.xlabel('Time [samples]')
    >>> plt.ylabel('Amplitude')
    >>> plt.grid(True)
    >>> plt.show()
    """
    out = zeros(shape, dtype)
    shape = np.atleast_1d(shape)
    if idx is None:
        idx = (0,) * len(shape)
    elif idx == 'mid':
        idx = tuple(shape // 2)
    elif not hasattr(idx, "__iter__"):
        idx = (idx,) * len(shape)
    out[idx] = 1
    return out | 
	[
  "def",
  "unit_impulse",
  "(",
  "shape",
  ",",
  "idx",
  "=",
  "None",
  ",",
  "dtype",
  "=",
  "float",
  ")",
  ":",
  "out",
  "=",
  "zeros",
  "(",
  "shape",
  ",",
  "dtype",
  ")",
  "shape",
  "=",
  "np",
  ".",
  "atleast_1d",
  "(",
  "shape",
  ")",
  "if",
  "idx",
  "is",
  "None",
  ":",
  "idx",
  "=",
  "(",
  "0",
  ",",
  ")",
  "*",
  "len",
  "(",
  "shape",
  ")",
  "elif",
  "idx",
  "==",
  "'mid'",
  ":",
  "idx",
  "=",
  "tuple",
  "(",
  "shape",
  "//",
  "2",
  ")",
  "elif",
  "not",
  "hasattr",
  "(",
  "idx",
  ",",
  "\"__iter__\"",
  ")",
  ":",
  "idx",
  "=",
  "(",
  "idx",
  ",",
  ")",
  "*",
  "len",
  "(",
  "shape",
  ")",
  "out",
  "[",
  "idx",
  "]",
  "=",
  "1",
  "return",
  "out"
] | 
	https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/signal/waveforms.py#L596-L681 | |
| 
	psnonis/FinBERT | 
	c0c555d833a14e2316a3701e59c0b5156f804b4e | 
	bert/optimization.py | 
	python | 
	AdamWeightDecayOptimizer._do_use_weight_decay | 
	(self, param_name) | 
	return True | 
	Whether to use L2 weight decay for `param_name`. | 
	Whether to use L2 weight decay for `param_name`. | 
	[
  "Whether",
  "to",
  "use",
  "L2",
  "weight",
  "decay",
  "for",
  "param_name",
  "."
] | 
	def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if not self.weight_decay_rate:
      return False
    if self.exclude_from_weight_decay:
      for r in self.exclude_from_weight_decay:
        if re.search(r, param_name) is not None:
          return False
    return True | 
	[
  "def",
  "_do_use_weight_decay",
  "(",
  "self",
  ",",
  "param_name",
  ")",
  ":",
  "if",
  "not",
  "self",
  ".",
  "weight_decay_rate",
  ":",
  "return",
  "False",
  "if",
  "self",
  ".",
  "exclude_from_weight_decay",
  ":",
  "for",
  "r",
  "in",
  "self",
  ".",
  "exclude_from_weight_decay",
  ":",
  "if",
  "re",
  ".",
  "search",
  "(",
  "r",
  ",",
  "param_name",
  ")",
  "is",
  "not",
  "None",
  ":",
  "return",
  "False",
  "return",
  "True"
] | 
	https://github.com/psnonis/FinBERT/blob/c0c555d833a14e2316a3701e59c0b5156f804b4e/bert/optimization.py#L159-L167 | |
| 
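The exclusion rule above is a regex match over parameter names. A standalone sketch of the same logic as a free function, using the exclusion list BERT's optimizer is typically configured with (the concrete list is an assumption; this row does not show it):

import re

def use_weight_decay(param_name, weight_decay_rate=0.01,
                     exclude=('LayerNorm', 'layer_norm', 'bias')):
    # Mirrors _do_use_weight_decay: no decay rate, or a matching
    # exclusion pattern, disables decay for this parameter.
    if not weight_decay_rate:
        return False
    return not any(re.search(r, param_name) for r in exclude)

print(use_weight_decay('bert/encoder/layer_0/attention/self/query/kernel'))     # True
print(use_weight_decay('bert/encoder/layer_0/attention/output/LayerNorm/beta')) # False
print(use_weight_decay('cls/predictions/output_bias'))                          # False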
	Xilinx/Vitis-AI | 
	fc74d404563d9951b57245443c73bef389f3657f | 
	tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/quantization/quant_strategy.py | 
	python | 
	QuantStrategyBase._get_default_quant_config | 
	(self,
                                quant_info_mgr,
                                lstm=False) | 
	return config | 
	1. unified activation bits
    2. mixed bits for lstm | 
	1. unified activation bits
    2. mixed bits for lstm | 
	[
  "1",
  ".",
  "unified",
  "activation",
  "bits",
  "2",
  ".",
  "mixed",
  "bits",
  "for",
  "lstm"
] | 
	def _get_default_quant_config(self,
                                quant_info_mgr,
                                lstm=False):
    """
    1. unified activation bits
    2. mixed bits for lstm
    
    """
    # import ipdb
    # ipdb.set_trace()
    config = {'param': {}, 'output': {}, 'input': {}}
    for node in quant_info_mgr.Nndctgraph.nodes:
      # print('---- Handling node %s type: %s' % (node.name, node.op.type))
      if quant_info_mgr.is_node_quantizable(node, lstm):
        # parameters
        for k in quant_info_mgr.quant_node_params(node).keys():
          p = quant_info_mgr.quant_node_params(node)[k]
          # for mix precision quantization
          bw = self._bits_act
          if (node.has_bound_params() and 
            (hasattr(node.op.ParamName, 'WEIGHTS') and k == node.op.ParamName.WEIGHTS or
             hasattr(node.op.ParamName, 'GAMMA') and k == node.op.ParamName.GAMMA)):
            bw = self._bits_weight
          config['param'][p.name] = [bw, None]
          # print('---- Add fix of param %s' % p.name)
        # output blobs
        end = quant_info_mgr.quant_output(node.name).name
        if end not in config['output']:
          config['output'][end] = [self._bits_act, None]
          # print('---- Add fix of output blob %s' % end)
        # input blobs (for mix precision quantization)
        if self._bits_weight != self._bits_act:
          if node.op.type in [NNDCT_OP.DENSE, NNDCT_OP.CONV2D]:
            config['input'][node.name] = [self._bits_weight, None]
            # print('---- Add fix of input blob %s' % end)
      elif (lstm and (node in quant_info_mgr.Nndctgraph.inputs)):
        # print('---- Handling input node %s' % (node.name))
        # this path is only for quantizing a whole graph without quant stub OP
        # for lstm, check the following node type
        if (node.in_quant_part or (any(
            (quant_info_mgr.is_node_quantizable(c, lstm) and
             c.op.type is not NNDCT_OP.QUANT_STUB)
            for c in quant_info_mgr.Nndctgraph.children(node.name)))):
          end = quant_info_mgr.quant_output(node.name).name
          if end not in config['output']:
            config['output'][end] = [self._bits_act, None]
            # print('---- Add fix of quant net input blob %s' % end)
    
    # check the input fix of all quantized ops 
    # import ipdb
    # ipdb.set_trace()
    if not lstm:
      for node in quant_info_mgr.Nndctgraph.nodes:
        if quant_info_mgr.is_node_quantizable(node, lstm):
          #print('---- Check input of node %s type: %s' % (node.name, node.op.type))
          if node.op.type not in [NNDCT_OP.INPUT, NNDCT_OP.QUANT_STUB, NNDCT_OP.CONCAT]:
            for p_n in quant_info_mgr.Nndctgraph.parents(node):
              # if not quant_info_mgr.op_unquantizable(p_n.op.type):
                end = quant_info_mgr.quant_output(p_n.name).name
                end_node = quant_info_mgr.Nndctgraph.node(end)
                out_is_tensor = True
                for tensor in end_node.out_tensors:
                  if tensor.shape is None:
                    out_is_tensor = False
                if end not in config['output'] and out_is_tensor:
                  config['output'][end] = [self._bits_act, None]
                  #print('---- Add fix of output blob %s type: %s' % (end, end_node.op.type))
                  
          elif node.op.type in [NNDCT_OP.INPUT]:
            cn_nodes = quant_info_mgr.Nndctgraph.children(node)
            if len(cn_nodes) == 1 and cn_nodes[0].op.is_custom_op:
              end = quant_info_mgr.quant_output(node.name).name
              if end in config['output']:
                del config['output'][end]
                node.in_quant_part = False
              
    return config | 
	[
  "def",
  "_get_default_quant_config",
  "(",
  "self",
  ",",
  "quant_info_mgr",
  ",",
  "lstm",
  "=",
  "False",
  ")",
  ":",
  "# import ipdb",
  "# ipdb.set_trace()",
  "config",
  "=",
  "{",
  "'param'",
  ":",
  "{",
  "}",
  ",",
  "'output'",
  ":",
  "{",
  "}",
  ",",
  "'input'",
  ":",
  "{",
  "}",
  "}",
  "for",
  "node",
  "in",
  "quant_info_mgr",
  ".",
  "Nndctgraph",
  ".",
  "nodes",
  ":",
  "# print('---- Handling node %s type: %s' % (node.name, node.op.type))",
  "if",
  "quant_info_mgr",
  ".",
  "is_node_quantizable",
  "(",
  "node",
  ",",
  "lstm",
  ")",
  ":",
  "# parameters",
  "for",
  "k",
  "in",
  "quant_info_mgr",
  ".",
  "quant_node_params",
  "(",
  "node",
  ")",
  ".",
  "keys",
  "(",
  ")",
  ":",
  "p",
  "=",
  "quant_info_mgr",
  ".",
  "quant_node_params",
  "(",
  "node",
  ")",
  "[",
  "k",
  "]",
  "# for mix precision quantization",
  "bw",
  "=",
  "self",
  ".",
  "_bits_act",
  "if",
  "(",
  "node",
  ".",
  "has_bound_params",
  "(",
  ")",
  "and",
  "(",
  "hasattr",
  "(",
  "node",
  ".",
  "op",
  ".",
  "ParamName",
  ",",
  "'WEIGHTS'",
  ")",
  "and",
  "k",
  "==",
  "node",
  ".",
  "op",
  ".",
  "ParamName",
  ".",
  "WEIGHTS",
  "or",
  "hasattr",
  "(",
  "node",
  ".",
  "op",
  ".",
  "ParamName",
  ",",
  "'GAMMA'",
  ")",
  "and",
  "k",
  "==",
  "node",
  ".",
  "op",
  ".",
  "ParamName",
  ".",
  "GAMMA",
  ")",
  ")",
  ":",
  "bw",
  "=",
  "self",
  ".",
  "_bits_weight",
  "config",
  "[",
  "'param'",
  "]",
  "[",
  "p",
  ".",
  "name",
  "]",
  "=",
  "[",
  "bw",
  ",",
  "None",
  "]",
  "# print('---- Add fix of param %s' % p.name)",
  "# output blobs",
  "end",
  "=",
  "quant_info_mgr",
  ".",
  "quant_output",
  "(",
  "node",
  ".",
  "name",
  ")",
  ".",
  "name",
  "if",
  "end",
  "not",
  "in",
  "config",
  "[",
  "'output'",
  "]",
  ":",
  "config",
  "[",
  "'output'",
  "]",
  "[",
  "end",
  "]",
  "=",
  "[",
  "self",
  ".",
  "_bits_act",
  ",",
  "None",
  "]",
  "# print('---- Add fix of output blob %s' % end)",
  "# input blobs (for mix precision quantization)",
  "if",
  "self",
  ".",
  "_bits_weight",
  "!=",
  "self",
  ".",
  "_bits_act",
  ":",
  "if",
  "node",
  ".",
  "op",
  ".",
  "type",
  "in",
  "[",
  "NNDCT_OP",
  ".",
  "DENSE",
  ",",
  "NNDCT_OP",
  ".",
  "CONV2D",
  "]",
  ":",
  "config",
  "[",
  "'input'",
  "]",
  "[",
  "node",
  ".",
  "name",
  "]",
  "=",
  "[",
  "self",
  ".",
  "_bits_weight",
  ",",
  "None",
  "]",
  "# print('---- Add fix of input blob %s' % end)",
  "elif",
  "(",
  "lstm",
  "and",
  "(",
  "node",
  "in",
  "quant_info_mgr",
  ".",
  "Nndctgraph",
  ".",
  "inputs",
  ")",
  ")",
  ":",
  "# print('---- Handling input node %s' % (node.name))",
  "# this path is only for quantizing a whole graph without quant stub OP",
  "# for lstm, check the following node type",
  "if",
  "(",
  "node",
  ".",
  "in_quant_part",
  "or",
  "(",
  "any",
  "(",
  "(",
  "quant_info_mgr",
  ".",
  "is_node_quantizable",
  "(",
  "c",
  ",",
  "lstm",
  ")",
  "and",
  "c",
  ".",
  "op",
  ".",
  "type",
  "is",
  "not",
  "NNDCT_OP",
  ".",
  "QUANT_STUB",
  ")",
  "for",
  "c",
  "in",
  "quant_info_mgr",
  ".",
  "Nndctgraph",
  ".",
  "children",
  "(",
  "node",
  ".",
  "name",
  ")",
  ")",
  ")",
  ")",
  ":",
  "end",
  "=",
  "quant_info_mgr",
  ".",
  "quant_output",
  "(",
  "node",
  ".",
  "name",
  ")",
  ".",
  "name",
  "if",
  "end",
  "not",
  "in",
  "config",
  "[",
  "'output'",
  "]",
  ":",
  "config",
  "[",
  "'output'",
  "]",
  "[",
  "end",
  "]",
  "=",
  "[",
  "self",
  ".",
  "_bits_act",
  ",",
  "None",
  "]",
  "# print('---- Add fix of quant net input blob %s' % end)",
  "# check the input fix of all quantized ops ",
  "# import ipdb",
  "# ipdb.set_trace()",
  "if",
  "not",
  "lstm",
  ":",
  "for",
  "node",
  "in",
  "quant_info_mgr",
  ".",
  "Nndctgraph",
  ".",
  "nodes",
  ":",
  "if",
  "quant_info_mgr",
  ".",
  "is_node_quantizable",
  "(",
  "node",
  ",",
  "lstm",
  ")",
  ":",
  "#print('---- Check input of node %s type: %s' % (node.name, node.op.type))",
  "if",
  "node",
  ".",
  "op",
  ".",
  "type",
  "not",
  "in",
  "[",
  "NNDCT_OP",
  ".",
  "INPUT",
  ",",
  "NNDCT_OP",
  ".",
  "QUANT_STUB",
  ",",
  "NNDCT_OP",
  ".",
  "CONCAT",
  "]",
  ":",
  "for",
  "p_n",
  "in",
  "quant_info_mgr",
  ".",
  "Nndctgraph",
  ".",
  "parents",
  "(",
  "node",
  ")",
  ":",
  "# if not quant_info_mgr.op_unquantizable(p_n.op.type):",
  "end",
  "=",
  "quant_info_mgr",
  ".",
  "quant_output",
  "(",
  "p_n",
  ".",
  "name",
  ")",
  ".",
  "name",
  "end_node",
  "=",
  "quant_info_mgr",
  ".",
  "Nndctgraph",
  ".",
  "node",
  "(",
  "end",
  ")",
  "out_is_tensor",
  "=",
  "True",
  "for",
  "tensor",
  "in",
  "end_node",
  ".",
  "out_tensors",
  ":",
  "if",
  "tensor",
  ".",
  "shape",
  "==",
  "None",
  ":",
  "out_is_tensor",
  "=",
  "False",
  "if",
  "end",
  "not",
  "in",
  "config",
  "[",
  "'output'",
  "]",
  "and",
  "out_is_tensor",
  ":",
  "config",
  "[",
  "'output'",
  "]",
  "[",
  "end",
  "]",
  "=",
  "[",
  "self",
  ".",
  "_bits_act",
  ",",
  "None",
  "]",
  "#print('---- Add fix of output blob %s type: %s' % (end, end_node.op.type))",
  "elif",
  "node",
  ".",
  "op",
  ".",
  "type",
  "in",
  "[",
  "NNDCT_OP",
  ".",
  "INPUT",
  "]",
  ":",
  "cn_nodes",
  "=",
  "quant_info_mgr",
  ".",
  "Nndctgraph",
  ".",
  "children",
  "(",
  "node",
  ")",
  "if",
  "len",
  "(",
  "cn_nodes",
  ")",
  "==",
  "1",
  "and",
  "cn_nodes",
  "[",
  "0",
  "]",
  ".",
  "op",
  ".",
  "is_custom_op",
  ":",
  "end",
  "=",
  "quant_info_mgr",
  ".",
  "quant_output",
  "(",
  "node",
  ".",
  "name",
  ")",
  ".",
  "name",
  "if",
  "end",
  "in",
  "config",
  "[",
  "'output'",
  "]",
  ":",
  "del",
  "config",
  "[",
  "'output'",
  "]",
  "[",
  "end",
  "]",
  "node",
  ".",
  "in_quant_part",
  "=",
  "False",
  "return",
  "config"
] | 
	https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/quantization/quant_strategy.py#L56-L132 | |
| 
	mantidproject/mantid | 
	03deeb89254ec4289edb8771e0188c2090a02f32 | 
	scripts/reduction_gui/reduction/scripter.py | 
	python | 
	BaseScriptElement.apply | 
	(self) | 
	return NotImplemented | 
	Method called to apply the reduction script element
            to a Mantid Reducer | 
	Method called to apply the reduction script element
            to a Mantid Reducer | 
	[
  "Method",
  "called",
  "to",
  "apply",
  "the",
  "reduction",
  "script",
  "element",
  "to",
  "a",
  "Mantid",
  "Reducer"
] | 
	def apply(self):
        """
            Method called to apply the reduction script element
            to a Mantid Reducer
        """
        return NotImplemented | 
	[
  "def",
  "apply",
  "(",
  "self",
  ")",
  ":",
  "return",
  "NotImplemented"
] | 
	https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/reduction_gui/reduction/scripter.py#L66-L71 | |
| 
	ycm-core/ycmd | 
	fc0fb7e5e15176cc5a2a30c80956335988c6b59a | 
	ycmd/completers/language_server/language_server_completer.py | 
	python | 
	LanguageServerCompleter.WorkspaceConfigurationResponse | 
	( self, request ) | 
	return None | 
	If the concrete completer wants to respond to workspace/configuration
       requests, it should override this method. | 
	If the concrete completer wants to respond to workspace/configuration
       requests, it should override this method. | 
	[
  "If",
  "the",
  "concrete",
  "completer",
  "wants",
  "to",
  "respond",
  "to",
  "workspace",
  "/",
  "configuration",
  "requests",
  "it",
  "should",
  "override",
  "this",
  "method",
  "."
] | 
	def WorkspaceConfigurationResponse( self, request ):
    """If the concrete completer wants to respond to workspace/configuration
       requests, it should override this method."""
    return None | 
	[
  "def",
  "WorkspaceConfigurationResponse",
  "(",
  "self",
  ",",
  "request",
  ")",
  ":",
  "return",
  "None"
] | 
	https://github.com/ycm-core/ycmd/blob/fc0fb7e5e15176cc5a2a30c80956335988c6b59a/ycmd/completers/language_server/language_server_completer.py#L1597-L1600 | |
| 
	echronos/echronos | 
	c996f1d2c8af6c6536205eb319c1bf1d4d84569c | 
	external_tools/pystache/renderer.py | 
	python | 
	Renderer._bytes_to_str | 
	(self, _bytes) | 
	return str(_bytes, self.string_encoding, self.decode_errors) | 
	Convert a byte string to str, using string_encoding and decode_errors. | 
	Convert a byte string to str, using string_encoding and decode_errors. | 
	[
  "Convert",
  "a",
  "byte",
  "string",
  "to",
  "str",
  "using",
  "string_encoding",
  "and",
  "decode_errors",
  "."
] | 
	def _bytes_to_str(self, _bytes):
        """Convert a byte string to str, using string_encoding and decode_errors.
        """
        assert type(_bytes) == bytes
        return str(_bytes, self.string_encoding, self.decode_errors) | 
	[
  "def",
  "_bytes_to_str",
  "(",
  "self",
  ",",
  "_bytes",
  ")",
  ":",
  "assert",
  "type",
  "(",
  "_bytes",
  ")",
  "==",
  "bytes",
  "return",
  "str",
  "(",
  "_bytes",
  ",",
  "self",
  ".",
  "string_encoding",
  ",",
  "self",
  ".",
  "decode_errors",
  ")"
] | 
	https://github.com/echronos/echronos/blob/c996f1d2c8af6c6536205eb319c1bf1d4d84569c/external_tools/pystache/renderer.py#L182-L187 | |
| 
	catboost/catboost | 
	167f64f237114a4d10b2b4ee42adb4569137debe | 
	contrib/python/pexpect/pexpect/screen.py | 
	python | 
	screen._decode | 
	(self, s) | 
	This converts from the external coding system (as passed to
        the constructor) to the internal one (unicode). | 
	This converts from the external coding system (as passed to
        the constructor) to the internal one (unicode). | 
	[
  "This",
  "converts",
  "from",
  "the",
  "external",
  "coding",
  "system",
  "(",
  "as",
  "passed",
  "to",
  "the",
  "constructor",
  ")",
  "to",
  "the",
  "internal",
  "one",
  "(",
  "unicode",
  ")",
  "."
] | 
	def _decode(self, s):
        '''This converts from the external coding system (as passed to
        the constructor) to the internal one (unicode). '''
        if self.decoder is not None:
            return self.decoder.decode(s)
        else:
            raise TypeError("This screen was constructed with encoding=None, "
                            "so it does not handle bytes.") | 
	[
  "def",
  "_decode",
  "(",
  "self",
  ",",
  "s",
  ")",
  ":",
  "if",
  "self",
  ".",
  "decoder",
  "is",
  "not",
  "None",
  ":",
  "return",
  "self",
  ".",
  "decoder",
  ".",
  "decode",
  "(",
  "s",
  ")",
  "else",
  ":",
  "raise",
  "TypeError",
  "(",
  "\"This screen was constructed with encoding=None, \"",
  "\"so it does not handle bytes.\"",
  ")"
] | 
	https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pexpect/pexpect/screen.py#L104-L111 | ||
| 
	trilinos/Trilinos | 
	6168be6dd51e35e1cd681e9c4b24433e709df140 | 
	packages/muelu/utils/analysis/tableau.py | 
	python | 
	tableau10 | 
	() | 
	return rgb2float(colors) | 
	Tableau 10' colors as RGB | 
	Tableau 10' colors as RGB | 
	[
  "Tableau",
  "10",
  "colors",
  "as",
  "RGB"
] | 
	def tableau10():
    """'Tableau 10' colors as RGB"""
    colors = [
        ( 31, 119, 180), (255, 127,  14), ( 44, 160,  44), (214,  39,  40),
        (148, 103, 189), (140,  86,  75), (227, 119, 194), (127, 127, 127),
        (188, 189,  34), ( 23, 190, 207)
    ]
    return rgb2float(colors) | 
	[
  "def",
  "tableau10",
  "(",
  ")",
  ":",
  "colors",
  "=",
  "[",
  "(",
  "31",
  ",",
  "119",
  ",",
  "180",
  ")",
  ",",
  "(",
  "255",
  ",",
  "127",
  ",",
  "14",
  ")",
  ",",
  "(",
  "44",
  ",",
  "160",
  ",",
  "44",
  ")",
  ",",
  "(",
  "214",
  ",",
  "39",
  ",",
  "40",
  ")",
  ",",
  "(",
  "148",
  ",",
  "103",
  ",",
  "189",
  ")",
  ",",
  "(",
  "140",
  ",",
  "86",
  ",",
  "75",
  ")",
  ",",
  "(",
  "227",
  ",",
  "119",
  ",",
  "194",
  ")",
  ",",
  "(",
  "127",
  ",",
  "127",
  ",",
  "127",
  ")",
  ",",
  "(",
  "188",
  ",",
  "189",
  ",",
  "34",
  ")",
  ",",
  "(",
  "23",
  ",",
  "190",
  ",",
  "207",
  ")",
  "]",
  "return",
  "rgb2float",
  "(",
  "colors",
  ")"
] | 
	https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/muelu/utils/analysis/tableau.py#L10-L17 | |
| 
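rgb2float is not shown in this row; a plausible one-liner consistent with the call above would scale 8-bit channel values into [0, 1] floats (this is an assumption about the helper, not its verified source):

def rgb2float(colors):
    # Hypothetical helper: 0-255 ints -> 0.0-1.0 floats per channel.
    return [tuple(c / 255.0 for c in rgb) for rgb in colors]

print(rgb2float([(31, 119, 180)])[0])  # roughly (0.122, 0.467, 0.706)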
	takemaru/graphillion | 
	51879f92bb96b53ef8f914ef37a05252ce383617 | 
	graphillion/graphset.py | 
	python | 
	GraphSet.paths | 
	(terminal1, terminal2, is_hamilton=False, graphset=None) | 
	return GraphSet.graphs(vertex_groups=[[terminal1, terminal2]],
                               degree_constraints=dc,
                               no_loop=True, graphset=graphset) | 
	Returns a GraphSet of paths.
        This method can be parallelized with OpenMP by specifying the
        environment variable `OMP_NUM_THREADS`:
          `$ OMP_NUM_THREADS=4 python your_graphillion_script.py`
        Examples:
          >>> GraphSet.paths(1, 6)
          GraphSet([[(1, 2), (2, 3), (3, 6)], [(1, 2), (2, 5), (5, 6)], [(1, 4), (4, 5 ...
        Args:
          terminal1 and terminal2: Both end vertices of a path.
          graphset: Optional.  A GraphSet object.  Paths to be stored
            are selected from this object.
        Returns:
          A new GraphSet object.
        See Also:
          graphs() | 
	Returns a GraphSet of paths. | 
	[
  "Returns",
  "a",
  "GraphSet",
  "of",
  "paths",
  "."
] | 
	def paths(terminal1, terminal2, is_hamilton=False, graphset=None):
        """Returns a GraphSet of paths.
        This method can be parallelized with OpenMP by specifying the
        environment variable `OMP_NUM_THREADS`:
          `$ OMP_NUM_THREADS=4 python your_graphillion_script.py`
        Examples:
          >>> GraphSet.paths(1, 6)
          GraphSet([[(1, 2), (2, 3), (3, 6)], [(1, 2), (2, 5), (5, 6)], [(1, 4), (4, 5 ...
        Args:
          terminal1 and terminal2: Both end vertices of a path.
          graphset: Optional.  A GraphSet object.  Paths to be stored
            are selected from this object.
        Returns:
          A new GraphSet object.
        See Also:
          graphs()
        """
        dc = {}
        for v in GraphSet._vertices:
            if v in (terminal1, terminal2):
                dc[v] = 1
            else:
                dc[v] = 2 if is_hamilton else range(0, 3, 2)
        return GraphSet.graphs(vertex_groups=[[terminal1, terminal2]],
                               degree_constraints=dc,
                               no_loop=True, graphset=graphset) | 
	[
  "def",
  "paths",
  "(",
  "terminal1",
  ",",
  "terminal2",
  ",",
  "is_hamilton",
  "=",
  "False",
  ",",
  "graphset",
  "=",
  "None",
  ")",
  ":",
  "dc",
  "=",
  "{",
  "}",
  "for",
  "v",
  "in",
  "GraphSet",
  ".",
  "_vertices",
  ":",
  "if",
  "v",
  "in",
  "(",
  "terminal1",
  ",",
  "terminal2",
  ")",
  ":",
  "dc",
  "[",
  "v",
  "]",
  "=",
  "1",
  "else",
  ":",
  "dc",
  "[",
  "v",
  "]",
  "=",
  "2",
  "if",
  "is_hamilton",
  "else",
  "range",
  "(",
  "0",
  ",",
  "3",
  ",",
  "2",
  ")",
  "return",
  "GraphSet",
  ".",
  "graphs",
  "(",
  "vertex_groups",
  "=",
  "[",
  "[",
  "terminal1",
  ",",
  "terminal2",
  "]",
  "]",
  ",",
  "degree_constraints",
  "=",
  "dc",
  ",",
  "no_loop",
  "=",
  "True",
  ",",
  "graphset",
  "=",
  "graphset",
  ")"
] | 
	https://github.com/takemaru/graphillion/blob/51879f92bb96b53ef8f914ef37a05252ce383617/graphillion/graphset.py#L1941-L1973 | |
| 
	aws/lumberyard | 
	f85344403c1c2e77ec8c75deb2c116e97b713217 | 
	dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/plistlib.py | 
	python | 
	_BinaryPlistParser._get_size | 
	(self, tokenL) | 
	return tokenL | 
	return the size of the next object. | 
	return the size of the next object. | 
	[
  "return",
  "the",
  "size",
  "of",
  "the",
  "next",
  "object",
  "."
] | 
	def _get_size(self, tokenL):
        """ return the size of the next object."""
        if tokenL == 0xF:
            m = self._fp.read(1)[0] & 0x3
            s = 1 << m
            f = '>' + _BINARY_FORMAT[s]
            return struct.unpack(f, self._fp.read(s))[0]
        return tokenL | 
	[
  "def",
  "_get_size",
  "(",
  "self",
  ",",
  "tokenL",
  ")",
  ":",
  "if",
  "tokenL",
  "==",
  "0xF",
  ":",
  "m",
  "=",
  "self",
  ".",
  "_fp",
  ".",
  "read",
  "(",
  "1",
  ")",
  "[",
  "0",
  "]",
  "&",
  "0x3",
  "s",
  "=",
  "1",
  "<<",
  "m",
  "f",
  "=",
  "'>'",
  "+",
  "_BINARY_FORMAT",
  "[",
  "s",
  "]",
  "return",
  "struct",
  ".",
  "unpack",
  "(",
  "f",
  ",",
  "self",
  ".",
  "_fp",
  ".",
  "read",
  "(",
  "s",
  ")",
  ")",
  "[",
  "0",
  "]",
  "return",
  "tokenL"
] | 
	https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/plistlib.py#L574-L582 | |
| 
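The size-token scheme above: values 0x0-0xE are stored inline in the marker's low nibble, while 0xF means "a 2**m-byte big-endian integer follows". A self-contained sketch of the same decoding (BINARY_FORMAT here is an assumed width-to-struct-code map, mirroring what plistlib's _BINARY_FORMAT would need to contain):

import io
import struct

BINARY_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'}  # assumed mapping

def read_size(fp, tokenL):
    if tokenL == 0xF:
        # Low 2 bits of the next byte give m; the size is a (1 << m)-byte int.
        m = fp.read(1)[0] & 0x3
        s = 1 << m
        return struct.unpack('>' + BINARY_FORMAT[s], fp.read(s))[0]
    return tokenL

print(read_size(io.BytesIO(), 0x7))                           # inline size: 7
print(read_size(io.BytesIO(bytes([0x11, 0x01, 0x00])), 0xF))  # 2-byte int: 256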
	catboost/catboost | 
	167f64f237114a4d10b2b4ee42adb4569137debe | 
	contrib/python/prompt-toolkit/py3/prompt_toolkit/input/win32_pipe.py | 
	python | 
	Win32PipeInput.send_bytes | 
	(self, data: bytes) | 
	Send bytes to the input. | 
	Send bytes to the input. | 
	[
  "Send",
  "bytes",
  "to",
  "the",
  "input",
  "."
] | 
	def send_bytes(self, data: bytes) -> None:
        "Send bytes to the input."
        self.send_text(data.decode("utf-8", "ignore")) | 
	[
  "def",
  "send_bytes",
  "(",
  "self",
  ",",
  "data",
  ":",
  "bytes",
  ")",
  "->",
  "None",
  ":",
  "self",
  ".",
  "send_text",
  "(",
  "data",
  ".",
  "decode",
  "(",
  "\"utf-8\"",
  ",",
  "\"ignore\"",
  ")",
  ")"
] | 
	https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py3/prompt_toolkit/input/win32_pipe.py#L108-L110 | ||
| 
	baidu-research/tensorflow-allreduce | 
	66d5b855e90b0949e9fa5cca5599fd729a70e874 | 
	tensorflow/contrib/tensor_forest/client/eval_metrics.py | 
	python | 
	get_metric | 
	(metric_name) | 
	return _EVAL_METRICS[metric_name] | 
	Given a metric name, return the corresponding metric function. | 
	Given a metric name, return the corresponding metric function. | 
	[
  "Given",
  "a",
  "metric",
  "name",
  "return",
  "the",
  "corresponding",
  "metric",
  "function",
  "."
] | 
	def get_metric(metric_name):
  """Given a metric name, return the corresponding metric function."""
  return _EVAL_METRICS[metric_name] | 
	[
  "def",
  "get_metric",
  "(",
  "metric_name",
  ")",
  ":",
  "return",
  "_EVAL_METRICS",
  "[",
  "metric_name",
  "]"
] | 
	https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/tensor_forest/client/eval_metrics.py#L154-L156 | |
| 
	pytorch/pytorch | 
	7176c92687d3cc847cc046bf002269c6949a21c2 | 
	torch/distributed/pipeline/sync/phony.py | 
	python | 
	get_phony | 
	(device: torch.device, *, requires_grad: bool) | 
	return phony | 
	Gets a phony. Phony is a tensor without space. It is useful to make an
    arbitrary dependency in an autograd graph because it doesn't require any
    gradient accumulation.
    .. note::
        Phonies for each device are cached. If an autograd function gets a phony
        internally, the phony must be detached to be returned. Otherwise, the
        autograd engine will mutate the cached phony in-place::
            class Phonify(torch.autograd.Function):
                @staticmethod
                def forward(ctx, input):
                    phony = get_phony(input.device, requires_grad=False)
                    return phony.detach()  # detach() is necessary. | 
	Gets a phony. Phony is a tensor without space. It is useful to make an
    arbitrary dependency in an autograd graph because it doesn't require any
    gradient accumulation. | 
	[
  "Gets",
  "a",
  "phony",
  ".",
  "Phony",
  "is",
  "tensor",
  "without",
  "space",
  ".",
  "It",
  "is",
  "useful",
  "to",
  "make",
  "arbitrary",
  "dependency",
  "in",
  "a",
  "autograd",
  "graph",
  "because",
  "it",
  "doesn",
  "t",
  "require",
  "any",
  "gradient",
  "accumulation",
  "."
] | 
	def get_phony(device: torch.device, *, requires_grad: bool) -> Tensor:
    """Gets a phony. Phony is tensor without space. It is useful to make
    arbitrary dependency in a autograd graph because it doesn't require any
    gradient accumulation.
    .. note::
        Phonies for each device are cached. If an autograd function gets a phony
        internally, the phony must be detached to be returned. Otherwise, the
        autograd engine will mutate the cached phony in-place::
            class Phonify(torch.autograd.Function):
                @staticmethod
                def forward(ctx, input):
                    phony = get_phony(input.device, requires_grad=False)
                    return phony.detach()  # detach() is necessary.
    """
    key = (device, requires_grad)
    try:
        phony = _phonies[key]
    except KeyError:
        with use_stream(default_stream(device)):
            phony = torch.empty(0, device=device, requires_grad=requires_grad)
        _phonies[key] = phony
    return phony | 
	[
  "def",
  "get_phony",
  "(",
  "device",
  ":",
  "torch",
  ".",
  "device",
  ",",
  "*",
  ",",
  "requires_grad",
  ":",
  "bool",
  ")",
  "->",
  "Tensor",
  ":",
  "key",
  "=",
  "(",
  "device",
  ",",
  "requires_grad",
  ")",
  "try",
  ":",
  "phony",
  "=",
  "_phonies",
  "[",
  "key",
  "]",
  "except",
  "KeyError",
  ":",
  "with",
  "use_stream",
  "(",
  "default_stream",
  "(",
  "device",
  ")",
  ")",
  ":",
  "phony",
  "=",
  "torch",
  ".",
  "empty",
  "(",
  "0",
  ",",
  "device",
  "=",
  "device",
  ",",
  "requires_grad",
  "=",
  "requires_grad",
  ")",
  "_phonies",
  "[",
  "key",
  "]",
  "=",
  "phony",
  "return",
  "phony"
] | 
	https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/pipeline/sync/phony.py#L21-L49 | |
| 
	wxWidgets/wxPython-Classic | 
	19571e1ae65f1ac445f5491474121998c97a1bf0 | 
	src/gtk/_core.py | 
	python | 
	Control_RemoveMnemonics | 
	(*args, **kwargs) | 
	return _core_.Control_RemoveMnemonics(*args, **kwargs) | 
	Control_RemoveMnemonics(String str) -> String
    removes the mnemonic characters | 
	Control_RemoveMnemonics(String str) -> String | 
	[
  "Control_RemoveMnemonics",
  "(",
  "String",
  "str",
  ")",
  "-",
  ">",
  "String"
] | 
	def Control_RemoveMnemonics(*args, **kwargs):
  """
    Control_RemoveMnemonics(String str) -> String
    removes the mnemonic characters
    """
  return _core_.Control_RemoveMnemonics(*args, **kwargs) | 
	[
  "def",
  "Control_RemoveMnemonics",
  "(",
  "*",
  "args",
  ",",
  "*",
  "*",
  "kwargs",
  ")",
  ":",
  "return",
  "_core_",
  ".",
  "Control_RemoveMnemonics",
  "(",
  "*",
  "args",
  ",",
  "*",
  "*",
  "kwargs",
  ")"
] | 
	https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L12781-L12787 | |
| 
	Xilinx/Vitis-AI | 
	fc74d404563d9951b57245443c73bef389f3657f | 
	tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/input.py | 
	python | 
	slice_input_producer | 
	(tensor_list, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None) | 
	Produces a slice of each `Tensor` in `tensor_list`.
  Implemented using a Queue -- a `QueueRunner` for the Queue
  is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  Args:
    tensor_list: A list of `Tensor` objects. Every `Tensor` in
      `tensor_list` must have the same size in the first dimension.
    num_epochs: An integer (optional). If specified, `slice_input_producer`
      produces each slice `num_epochs` times before generating
      an `OutOfRange` error. If not specified, `slice_input_producer` can cycle
      through the slices an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).
  Returns:
    A list of tensors, one for each element of `tensor_list`.  If the tensor
    in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding output
    tensor will have shape `[a, b, ..., z]`.
  Raises:
    ValueError: if `slice_input_producer` produces nothing from `tensor_list`.
  @compatibility(eager)
  Input pipelines based on Queues are not supported when eager execution is
  enabled. Please use the `tf.data` API to ingest data under eager execution.
  @end_compatibility | 
	Produces a slice of each `Tensor` in `tensor_list`. | 
	[
  "Produces",
  "a",
  "slice",
  "of",
  "each",
  "Tensor",
  "in",
  "tensor_list",
  "."
] | 
	def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None):
  """Produces a slice of each `Tensor` in `tensor_list`.
  Implemented using a Queue -- a `QueueRunner` for the Queue
  is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  Args:
    tensor_list: A list of `Tensor` objects. Every `Tensor` in
      `tensor_list` must have the same size in the first dimension.
    num_epochs: An integer (optional). If specified, `slice_input_producer`
      produces each slice `num_epochs` times before generating
      an `OutOfRange` error. If not specified, `slice_input_producer` can cycle
      through the slices an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).
  Returns:
    A list of tensors, one for each element of `tensor_list`.  If the tensor
    in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding output
    tensor will have shape `[a, b, ..., z]`.
  Raises:
    ValueError: if `slice_input_producer` produces nothing from `tensor_list`.
  @compatibility(eager)
  Input pipelines based on Queues are not supported when eager execution is
  enabled. Please use the `tf.data` API to ingest data under eager execution.
  @end_compatibility
  """
  with ops.name_scope(name, "input_producer", tensor_list):
    tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
    if not tensor_list:
      raise ValueError(
          "Expected at least one tensor in slice_input_producer().")
    range_size = array_ops.shape(tensor_list[0])[0]
    # TODO(josh11b): Add an assertion that the first dimension of
    # everything in TensorList matches. Maybe just check the inferred shapes?
    queue = range_input_producer(range_size, num_epochs=num_epochs,
                                 shuffle=shuffle, seed=seed, capacity=capacity,
                                 shared_name=shared_name)
    index = queue.dequeue()
    output = [array_ops.gather(t, index) for t in tensor_list]
    return output | 
	[
  "def",
  "slice_input_producer",
  "(",
  "tensor_list",
  ",",
  "num_epochs",
  "=",
  "None",
  ",",
  "shuffle",
  "=",
  "True",
  ",",
  "seed",
  "=",
  "None",
  ",",
  "capacity",
  "=",
  "32",
  ",",
  "shared_name",
  "=",
  "None",
  ",",
  "name",
  "=",
  "None",
  ")",
  ":",
  "with",
  "ops",
  ".",
  "name_scope",
  "(",
  "name",
  ",",
  "\"input_producer\"",
  ",",
  "tensor_list",
  ")",
  ":",
  "tensor_list",
  "=",
  "ops",
  ".",
  "convert_n_to_tensor_or_indexed_slices",
  "(",
  "tensor_list",
  ")",
  "if",
  "not",
  "tensor_list",
  ":",
  "raise",
  "ValueError",
  "(",
  "\"Expected at least one tensor in slice_input_producer().\"",
  ")",
  "range_size",
  "=",
  "array_ops",
  ".",
  "shape",
  "(",
  "tensor_list",
  "[",
  "0",
  "]",
  ")",
  "[",
  "0",
  "]",
  "# TODO(josh11b): Add an assertion that the first dimension of",
  "# everything in TensorList matches. Maybe just check the inferred shapes?",
  "queue",
  "=",
  "range_input_producer",
  "(",
  "range_size",
  ",",
  "num_epochs",
  "=",
  "num_epochs",
  ",",
  "shuffle",
  "=",
  "shuffle",
  ",",
  "seed",
  "=",
  "seed",
  ",",
  "capacity",
  "=",
  "capacity",
  ",",
  "shared_name",
  "=",
  "shared_name",
  ")",
  "index",
  "=",
  "queue",
  ".",
  "dequeue",
  "(",
  ")",
  "output",
  "=",
  "[",
  "array_ops",
  ".",
  "gather",
  "(",
  "t",
  ",",
  "index",
  ")",
  "for",
  "t",
  "in",
  "tensor_list",
  "]",
  "return",
  "output"
] | 
	https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/input.py#L328-L376 | ||
| 
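A minimal usage sketch for the `slice_input_producer` record above, assuming the TF 1.x graph-mode API; the tensors, values, and printed outputs are illustrative, not taken from the record:

import tensorflow as tf  # TF 1.x

images = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # shape [3, 2]
labels = tf.constant([0, 1, 0])                             # shape [3]
image, label = tf.train.slice_input_producer(
    [images, labels], num_epochs=1, shuffle=False)          # per-example slices

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # num_epochs keeps a local counter
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while True:
            print(sess.run([image, label]))     # [1. 2.] 0, then [3. 4.] 1, ...
    except tf.errors.OutOfRangeError:
        pass                                    # one epoch exhausted
    finally:
        coord.request_stop()
        coord.join(threads)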
	wxWidgets/wxPython-Classic | 
	19571e1ae65f1ac445f5491474121998c97a1bf0 | 
	src/osx_cocoa/_misc.py | 
	python | 
	DataObjectComposite.GetObject | 
	(*args, **kwargs) | 
	return _misc_.DataObjectComposite_GetObject(*args, **kwargs) | 
	GetObject(self, DataFormat format, wxDataObjectBase::Direction dir=Get) -> DataObjectSimple
        Returns the pointer to the object which supports this format or None.
        TODO: Fix this to use OOR and return the right object type. | 
	GetObject(self, DataFormat format, wxDataObjectBase::Direction dir=Get) -> DataObjectSimple | 
	[
  "GetObject",
  "(",
  "self",
  "DataFormat",
  "format",
  "wxDataObjectBase",
  "::",
  "Direction",
  "dir",
  "=",
  "Get",
  ")",
  "-",
  ">",
  "DataObjectSimple"
] | 
	def GetObject(*args, **kwargs):
        """
        GetObject(self, DataFormat format, wxDataObjectBase::Direction dir=Get) -> DataObjectSimple
        Returns the pointer to the object which supports this format or None.
        TODO: Fix this to use OOR and return the right object type.
        """
        return _misc_.DataObjectComposite_GetObject(*args, **kwargs) | 
	[
  "def",
  "GetObject",
  "(",
  "*",
  "args",
  ",",
  "*",
  "*",
  "kwargs",
  ")",
  ":",
  "return",
  "_misc_",
  ".",
  "DataObjectComposite_GetObject",
  "(",
  "*",
  "args",
  ",",
  "*",
  "*",
  "kwargs",
  ")"
] | 
	https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_misc.py#L5154-L5161 | |
| 
	baidu-research/tensorflow-allreduce | 
	66d5b855e90b0949e9fa5cca5599fd729a70e874 | 
	tensorflow/contrib/data/python/ops/dataset_ops.py | 
	python | 
	ZipDataset.__init__ | 
	(self, datasets) | 
	See `Dataset.zip()` for details. | 
	See `Dataset.zip()` for details. | 
	[
  "See",
  "Dataset",
  ".",
  "zip",
  "()",
  "for",
  "details",
  "."
] | 
	def __init__(self, datasets):
    """See `Dataset.zip()` for details."""
    super(ZipDataset, self).__init__()
    self._datasets = datasets | 
	[
  "def",
  "__init__",
  "(",
  "self",
  ",",
  "datasets",
  ")",
  ":",
  "super",
  "(",
  "ZipDataset",
  ",",
  "self",
  ")",
  ".",
  "__init__",
  "(",
  ")",
  "self",
  ".",
  "_datasets",
  "=",
  "datasets"
] | 
	https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/data/python/ops/dataset_ops.py#L1171-L1174 | ||
| 
	hfinkel/llvm-project-cxxjit | 
	91084ef018240bbb8e24235ff5cd8c355a9c1a1e | 
	clang/bindings/python/clang/cindex.py | 
	python | 
	Type.get_offset | 
	(self, fieldname) | 
	return conf.lib.clang_Type_getOffsetOf(self, fieldname) | 
	Retrieve the offset of a field in the record. | 
	Retrieve the offset of a field in the record. | 
	[
  "Retrieve",
  "the",
  "offset",
  "of",
  "a",
  "field",
  "in",
  "the",
  "record",
  "."
] | 
	def get_offset(self, fieldname):
        """
        Retrieve the offset of a field in the record.
        """
        return conf.lib.clang_Type_getOffsetOf(self, fieldname) | 
	[
  "def",
  "get_offset",
  "(",
  "self",
  ",",
  "fieldname",
  ")",
  ":",
  "return",
  "conf",
  ".",
  "lib",
  ".",
  "clang_Type_getOffsetOf",
  "(",
  "self",
  ",",
  "fieldname",
  ")"
] | 
	https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/clang/bindings/python/clang/cindex.py#L2384-L2388 | |
| 
	strasdat/Sophus | 
	36b08885e094fda63e92ad89d65be380c288265a | 
	sympy/sophus/complex.py | 
	python | 
	Complex.Da_a_mul_b | 
	(a, b) | 
	return sympy.Matrix([[b.real, -b.imag],
                             [b.imag, b.real]]) | 
	derivative of complex multiplication wrt left multiplier a | 
	derivative of complex multiplication wrt left multiplier a | 
	[
  "derivatice",
  "of",
  "complex",
  "muliplication",
  "wrt",
  "left",
  "multiplier",
  "a"
] | 
	def Da_a_mul_b(a, b):
        """ derivatice of complex muliplication wrt left multiplier a """
        return sympy.Matrix([[b.real, -b.imag],
                             [b.imag, b.real]]) | 
	[
  "def",
  "Da_a_mul_b",
  "(",
  "a",
  ",",
  "b",
  ")",
  ":",
  "return",
  "sympy",
  ".",
  "Matrix",
  "(",
  "[",
  "[",
  "b",
  ".",
  "real",
  ",",
  "-",
  "b",
  ".",
  "imag",
  "]",
  ",",
  "[",
  "b",
  ".",
  "imag",
  ",",
  "b",
  ".",
  "real",
  "]",
  "]",
  ")"
] | 
	https://github.com/strasdat/Sophus/blob/36b08885e094fda63e92ad89d65be380c288265a/sympy/sophus/complex.py#L72-L75 | |
| 
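A quick sympy check of the Jacobian claimed by `Da_a_mul_b` above; the symbol names are illustrative. Differentiating the real and imaginary parts of a*b with respect to a's components reproduces [[b.real, -b.imag], [b.imag, b.real]]:

import sympy

ar, ai, br, bi = sympy.symbols('ar ai br bi', real=True)
prod = (ar + sympy.I * ai) * (br + sympy.I * bi)
re, im = sympy.re(prod), sympy.im(prod)
J = sympy.Matrix([[sympy.diff(re, ar), sympy.diff(re, ai)],
                  [sympy.diff(im, ar), sympy.diff(im, ai)]])
print(J)  # Matrix([[br, -bi], [bi, br]])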
	BlzFans/wke | 
	b0fa21158312e40c5fbd84682d643022b6c34a93 | 
	cygwin/lib/python2.6/platform.py | 
	python | 
	linux_distribution | 
	(distname='', version='', id='',
                       supported_dists=_supported_dists,
                       full_distribution_name=1) | 
	return distname, version, id | 
	Tries to determine the name of the Linux OS distribution.
        The function first looks for a distribution release file in
        /etc and then reverts to _dist_try_harder() in case no
        suitable files are found.
        supported_dists may be given to define the set of Linux
        distributions to look for. It defaults to a list of currently
        supported Linux distributions identified by their release file
        name.
        If full_distribution_name is true (default), the full
        distribution read from the OS is returned. Otherwise the short
        name taken from supported_dists is used.
        Returns a tuple (distname,version,id) which default to the
        args given as parameters. | 
	Tries to determine the name of the Linux OS distribution. | 
	[
  "Tries",
  "to",
  "determine",
  "the",
  "name",
  "of",
  "the",
  "Linux",
  "OS",
  "distribution",
  "name",
  "."
] | 
	def linux_distribution(distname='', version='', id='',
                       supported_dists=_supported_dists,
                       full_distribution_name=1):
    """ Tries to determine the name of the Linux OS distribution name.
        The function first looks for a distribution release file in
        /etc and then reverts to _dist_try_harder() in case no
        suitable files are found.
        supported_dists may be given to define the set of Linux
        distributions to look for. It defaults to a list of currently
        supported Linux distributions identified by their release file
        name.
        If full_distribution_name is true (default), the full
        distribution read from the OS is returned. Otherwise the short
        name taken from supported_dists is used.
        Returns a tuple (distname,version,id) which default to the
        args given as parameters.
    """
    try:
        etc = os.listdir('/etc')
    except os.error:
        # Probably not a Unix system
        return distname,version,id
    etc.sort()
    for file in etc:
        m = _release_filename.match(file)
        if m is not None:
            _distname,dummy = m.groups()
            if _distname in supported_dists:
                distname = _distname
                break
    else:
        return _dist_try_harder(distname,version,id)
    # Read the first line
    f = open('/etc/'+file, 'r')
    firstline = f.readline()
    f.close()
    _distname, _version, _id = _parse_release_file(firstline)
    if _distname and full_distribution_name:
        distname = _distname
    if _version:
        version = _version
    if _id:
        id = _id
    return distname, version, id | 
	[
  "def",
  "linux_distribution",
  "(",
  "distname",
  "=",
  "''",
  ",",
  "version",
  "=",
  "''",
  ",",
  "id",
  "=",
  "''",
  ",",
  "supported_dists",
  "=",
  "_supported_dists",
  ",",
  "full_distribution_name",
  "=",
  "1",
  ")",
  ":",
  "try",
  ":",
  "etc",
  "=",
  "os",
  ".",
  "listdir",
  "(",
  "'/etc'",
  ")",
  "except",
  "os",
  ".",
  "error",
  ":",
  "# Probably not a Unix system",
  "return",
  "distname",
  ",",
  "version",
  ",",
  "id",
  "etc",
  ".",
  "sort",
  "(",
  ")",
  "for",
  "file",
  "in",
  "etc",
  ":",
  "m",
  "=",
  "_release_filename",
  ".",
  "match",
  "(",
  "file",
  ")",
  "if",
  "m",
  "is",
  "not",
  "None",
  ":",
  "_distname",
  ",",
  "dummy",
  "=",
  "m",
  ".",
  "groups",
  "(",
  ")",
  "if",
  "_distname",
  "in",
  "supported_dists",
  ":",
  "distname",
  "=",
  "_distname",
  "break",
  "else",
  ":",
  "return",
  "_dist_try_harder",
  "(",
  "distname",
  ",",
  "version",
  ",",
  "id",
  ")",
  "# Read the first line",
  "f",
  "=",
  "open",
  "(",
  "'/etc/'",
  "+",
  "file",
  ",",
  "'r'",
  ")",
  "firstline",
  "=",
  "f",
  ".",
  "readline",
  "(",
  ")",
  "f",
  ".",
  "close",
  "(",
  ")",
  "_distname",
  ",",
  "_version",
  ",",
  "_id",
  "=",
  "_parse_release_file",
  "(",
  "firstline",
  ")",
  "if",
  "_distname",
  "and",
  "full_distribution_name",
  ":",
  "distname",
  "=",
  "_distname",
  "if",
  "_version",
  ":",
  "version",
  "=",
  "_version",
  "if",
  "_id",
  ":",
  "id",
  "=",
  "_id",
  "return",
  "distname",
  ",",
  "version",
  ",",
  "id"
] | 
	https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/platform.py#L293-L345 | |
| 
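A hedged usage sketch for the `linux_distribution` record above (this stdlib API dates from Python 2.x and was removed in Python 3.8; the example output is illustrative):

import platform

# Returns the (distname, version, id) defaults when no /etc release file matches.
print(platform.linux_distribution())  # e.g. ('Ubuntu', '10.04', 'lucid')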
	apple/swift | 
	469f72fdae2ea828b3b6c0d7d62d7e4cf98c4893 | 
	utils/swift_build_support/swift_build_support/products/swiftsyntax.py | 
	python | 
	SwiftSyntax.product_source_name | 
	(cls) | 
	return "swift-syntax" | 
	product_source_name() -> str
        The name of the source code directory of this product. | 
	product_source_name() -> str | 
	[
  "product_source_name",
  "()",
  "-",
  ">",
  "str"
] | 
	def product_source_name(cls):
        """product_source_name() -> str
        The name of the source code directory of this product.
        """
        return "swift-syntax" | 
	[
  "def",
  "product_source_name",
  "(",
  "cls",
  ")",
  ":",
  "return",
  "\"swift-syntax\""
] | 
	https://github.com/apple/swift/blob/469f72fdae2ea828b3b6c0d7d62d7e4cf98c4893/utils/swift_build_support/swift_build_support/products/swiftsyntax.py#L33-L38 | |
| 
	krishauser/Klampt | 
	972cc83ea5befac3f653c1ba20f80155768ad519 | 
	Python/control-examples/system_id.py | 
	python | 
	LinearSystemID.fixC | 
	(self,i,value) | 
	Sets the i'th entry of the C vector to a fixed value | 
	Sets the i'th entry of the C vector to a fixed value | 
	[
  "Sets",
  "the",
  "i",
  "th",
  "entry",
  "of",
  "the",
  "C",
  "vector",
  "to",
  "a",
  "fixed",
  "value"
] | 
	def fixC(self,i,value):
        """Sets the i'th entry of the C vector to a fixed value"""
        if self.coeffPattern[2] == None:
            m,n=self.m,self.n
            self.coeffPattern[2] = [None]*m
        self.coeffPattern[2][i]=value
        self._updateEstimatorSize(i) | 
	[
  "def",
  "fixC",
  "(",
  "self",
  ",",
  "i",
  ",",
  "value",
  ")",
  ":",
  "if",
  "self",
  ".",
  "coeffPattern",
  "[",
  "2",
  "]",
  "==",
  "None",
  ":",
  "m",
  ",",
  "n",
  "=",
  "self",
  ".",
  "m",
  ",",
  "self",
  ".",
  "n",
  "self",
  ".",
  "coeffPattern",
  "[",
  "2",
  "]",
  "=",
  "[",
  "None",
  "]",
  "*",
  "m",
  "self",
  ".",
  "coeffPattern",
  "[",
  "2",
  "]",
  "[",
  "i",
  "]",
  "=",
  "value",
  "self",
  ".",
  "_updateEstimatorSize",
  "(",
  "i",
  ")"
] | 
	https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/control-examples/system_id.py#L39-L45 | ||
| 
	facebook/proxygen | 
	a9ca025af207787815cb01eee1971cd572c7a81e | 
	build/fbcode_builder/shell_quoting.py | 
	python | 
	ShellQuoted.__new__ | 
	(cls, s) | 
	return super(ShellQuoted, cls).__new__(
            cls, s.do_not_use_raw_str if isinstance(s, ShellQuoted) else s
        ) | 
	No need to nest ShellQuoted. | 
	No need to nest ShellQuoted. | 
	[
  "No",
  "need",
  "to",
  "nest",
  "ShellQuoted",
  "."
] | 
	def __new__(cls, s):
        "No need to nest ShellQuoted."
        return super(ShellQuoted, cls).__new__(
            cls, s.do_not_use_raw_str if isinstance(s, ShellQuoted) else s
        ) | 
	[
  "def",
  "__new__",
  "(",
  "cls",
  ",",
  "s",
  ")",
  ":",
  "return",
  "super",
  "(",
  "ShellQuoted",
  ",",
  "cls",
  ")",
  ".",
  "__new__",
  "(",
  "cls",
  ",",
  "s",
  ".",
  "do_not_use_raw_str",
  "if",
  "isinstance",
  "(",
  "s",
  ",",
  "ShellQuoted",
  ")",
  "else",
  "s",
  ")"
] | 
	https://github.com/facebook/proxygen/blob/a9ca025af207787815cb01eee1971cd572c7a81e/build/fbcode_builder/shell_quoting.py#L34-L38 | |
| 
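A minimal sketch of the idempotent-wrapper pattern shown in `ShellQuoted.__new__` above, using a hypothetical namedtuple subclass (`Quoted`/`raw` are stand-in names, not the real fbcode_builder API):

import collections

class Quoted(collections.namedtuple('Quoted', ['raw'])):
    def __new__(cls, s):
        # Unwrap before wrapping, so Quoted(Quoted(x)).raw is x, not Quoted(x).
        return super(Quoted, cls).__new__(cls, s.raw if isinstance(s, Quoted) else s)

q = Quoted(Quoted('echo hi'))
print(q.raw)  # 'echo hi' -- a plain str, no nested Quoted layer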
	Polidea/SiriusObfuscator | 
	b0e590d8130e97856afe578869b83a209e2b19be | 
	SymbolExtractorAndRenamer/lldb/third_party/Python/module/pexpect-2.4/screen.py | 
	python | 
	screen.cr | 
	(self) | 
	This moves the cursor to the beginning (col 1) of the current row. | 
	This moves the cursor to the beginning (col 1) of the current row. | 
	[
  "This",
  "moves",
  "the",
  "cursor",
  "to",
  "the",
  "beginning",
  "(",
  "col",
  "1",
  ")",
  "of",
  "the",
  "current",
  "row",
  "."
] | 
	def cr(self):
        """This moves the cursor to the beginning (col 1) of the current row.
        """
        self.cursor_home(self.cur_r, 1) | 
	[
  "def",
  "cr",
  "(",
  "self",
  ")",
  ":",
  "self",
  ".",
  "cursor_home",
  "(",
  "self",
  ".",
  "cur_r",
  ",",
  "1",
  ")"
] | 
	https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/third_party/Python/module/pexpect-2.4/screen.py#L101-L105 | ||
| 
	zju3dv/clean-pvnet | 
	5870c509e3cc205e1bb28910a7b1a9a3c8add9a8 | 
	lib/utils/meshrenderer/gl_utils/camera.py | 
	python | 
	Camera.setIntrinsic | 
	(self, I, W, H, near, far, scale=1.0, originIsInTopLeft=True) | 
	Args:
            I:                  3x3 intrinsic camera matrix from real camera (without any OpenGL stuff)
            W:                  Width of the camera image
            H:                  Height of the camera image
            near:               Near plane
            far:                Far plane
            originIsInTopLeft:  If True then the image origin is in top left
                                if False the image origin is in image center
        
            Source: http://ksimek.github.io/2013/06/03/calibrated_cameras_in_opengl/ | 
	Args:
            I:                  3x3 intrinsic camera matrix from real camera (without any OpenGL stuff)
            W:                  Width of the camera image
            H:                  Height of the camera image
            near:               Near plane
            far:                Far plane
            originIsInTopLeft:  If True then the image origin is in top left
                                if False the image origin is in image center
        
            Source: http://ksimek.github.io/2013/06/03/calibrated_cameras_in_opengl/ | 
	[
  "Args",
  ":",
  "I",
  ":",
  "3x3",
  "intrinsic",
  "camera",
  "matrix",
  "from",
  "real",
  "camera",
  "(",
  "without",
  "any",
  "OpenGL",
  "stuff",
  ")",
  "W",
  ":",
  "Width",
  "of",
  "the",
  "camera",
  "image",
  "H",
  ":",
  "Height",
  "of",
  "the",
  "camera",
  "image",
  "near",
  ":",
  "Near",
  "plane",
  "far",
  ":",
  "Far",
  "plane",
  "originIsInTopLeft",
  ":",
  "If",
  "True",
  "then",
  "the",
  "image",
  "origin",
  "is",
  "in",
  "top",
  "left",
  "if",
  "False",
  "the",
  "image",
  "origin",
  "is",
  "in",
  "image",
  "center",
  "Source",
  ":",
  "http",
  ":",
  "//",
  "ksimek",
  ".",
  "github",
  ".",
  "io",
  "/",
  "2013",
  "/",
  "06",
  "/",
  "03",
  "/",
  "calibrated_cameras_in_opengl",
  "/"
] | 
	def setIntrinsic(self, I, W, H, near, far, scale=1.0, originIsInTopLeft=True):
        '''
        Args:
            I:                  3x3 intrinsic camera matrix from real camera (without any OpenGL stuff)
            W:                  Width of the camera image
            H:                  Height of the camera image
            near:               Near plane
            far:                Far plane
            originIsInTopLeft:  If True then the image origin is in top left
                                if False the image origin is in image center
        
            Source: http://ksimek.github.io/2013/06/03/calibrated_cameras_in_opengl/
        '''
        Camera.__check_matrix__(I)
        A = near + far
        B = near * far
        persp = np.array( [ [ I[0,0]*scale, I[0,1]*scale, -I[0,2]*scale, 0 ],
                            [ 0         , I[1,1]*scale, -I[1,2]*scale, 0 ],
                            [ 0         , 0     , A      , B ],
                            [ 0         , 0     , -1     , 0 ] ] , dtype=np.float64)
        ortho = Camera.__glOrtho__(0, W, H, 0, near, far) if originIsInTopLeft else\
                    Camera.__glOrtho__(-W/2., W/2., -H/2., H/2., near, far)
        self.__T_proj_view[:] = np.dot( ortho, persp ).astype(np.float32)
        self.__T_view_proj[:] = np.linalg.inv(self.__T_proj_view)
        self.__T_proj_world[:] = np.dot(self.__T_proj_view, self.__T_view_world)
        self.dirty = True | 
	[
  "def",
  "setIntrinsic",
  "(",
  "self",
  ",",
  "I",
  ",",
  "W",
  ",",
  "H",
  ",",
  "near",
  ",",
  "far",
  ",",
  "scale",
  "=",
  "1.0",
  ",",
  "originIsInTopLeft",
  "=",
  "True",
  ")",
  ":",
  "Camera",
  ".",
  "__check_matrix__",
  "(",
  "I",
  ")",
  "A",
  "=",
  "near",
  "+",
  "far",
  "B",
  "=",
  "near",
  "*",
  "far",
  "persp",
  "=",
  "np",
  ".",
  "array",
  "(",
  "[",
  "[",
  "I",
  "[",
  "0",
  ",",
  "0",
  "]",
  "*",
  "scale",
  ",",
  "I",
  "[",
  "0",
  ",",
  "1",
  "]",
  "*",
  "scale",
  ",",
  "-",
  "I",
  "[",
  "0",
  ",",
  "2",
  "]",
  "*",
  "scale",
  ",",
  "0",
  "]",
  ",",
  "[",
  "0",
  ",",
  "I",
  "[",
  "1",
  ",",
  "1",
  "]",
  "*",
  "scale",
  ",",
  "-",
  "I",
  "[",
  "1",
  ",",
  "2",
  "]",
  "*",
  "scale",
  ",",
  "0",
  "]",
  ",",
  "[",
  "0",
  ",",
  "0",
  ",",
  "A",
  ",",
  "B",
  "]",
  ",",
  "[",
  "0",
  ",",
  "0",
  ",",
  "-",
  "1",
  ",",
  "0",
  "]",
  "]",
  ",",
  "dtype",
  "=",
  "np",
  ".",
  "float64",
  ")",
  "ortho",
  "=",
  "Camera",
  ".",
  "__glOrtho__",
  "(",
  "0",
  ",",
  "W",
  ",",
  "H",
  ",",
  "0",
  ",",
  "near",
  ",",
  "far",
  ")",
  "if",
  "originIsInTopLeft",
  "else",
  "Camera",
  ".",
  "__glOrtho__",
  "(",
  "-",
  "W",
  "/",
  "2.",
  ",",
  "W",
  "/",
  "2.",
  ",",
  "-",
  "H",
  "/",
  "2.",
  ",",
  "H",
  "/",
  "2.",
  ",",
  "near",
  ",",
  "far",
  ")",
  "self",
  ".",
  "__T_proj_view",
  "[",
  ":",
  "]",
  "=",
  "np",
  ".",
  "dot",
  "(",
  "ortho",
  ",",
  "persp",
  ")",
  ".",
  "astype",
  "(",
  "np",
  ".",
  "float32",
  ")",
  "self",
  ".",
  "__T_view_proj",
  "[",
  ":",
  "]",
  "=",
  "np",
  ".",
  "linalg",
  ".",
  "inv",
  "(",
  "self",
  ".",
  "__T_proj_view",
  ")",
  "self",
  ".",
  "__T_proj_world",
  "[",
  ":",
  "]",
  "=",
  "np",
  ".",
  "dot",
  "(",
  "self",
  ".",
  "__T_proj_view",
  ",",
  "self",
  ".",
  "__T_view_world",
  ")",
  "self",
  ".",
  "dirty",
  "=",
  "True"
] | 
	https://github.com/zju3dv/clean-pvnet/blob/5870c509e3cc205e1bb28910a7b1a9a3c8add9a8/lib/utils/meshrenderer/gl_utils/camera.py#L139-L166 | ||
| 
	wxWidgets/wxPython-Classic | 
	19571e1ae65f1ac445f5491474121998c97a1bf0 | 
	wx/py/pseudo.py | 
	python | 
	PseudoFile.__init__ | 
	(self) | 
	Create a file-like object. | 
	Create a file-like object. | 
	[
  "Create",
  "a",
  "file",
  "-",
  "like",
  "object",
  "."
] | 
	def __init__(self):
        """Create a file-like object."""
        pass | 
	[
  "def",
  "__init__",
  "(",
  "self",
  ")",
  ":",
  "pass"
] | 
	https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/py/pseudo.py#L46-L48 | ||
| 
	catboost/catboost | 
	167f64f237114a4d10b2b4ee42adb4569137debe | 
	contrib/python/ipython/py2/IPython/utils/text.py | 
	python | 
	strip_ansi | 
	(source) | 
	return re.sub(r'\033\[(\d|;)+?m', '', source) | 
	Remove ansi escape codes from text.
    
    Parameters
    ----------
    source : str
        Source to remove the ansi from | 
	Remove ansi escape codes from text.
    
    Parameters
    ----------
    source : str
        Source to remove the ansi from | 
	[
  "Remove",
  "ansi",
  "escape",
  "codes",
  "from",
  "text",
  ".",
  "Parameters",
  "----------",
  "source",
  ":",
  "str",
  "Source",
  "to",
  "remove",
  "the",
  "ansi",
  "from"
] | 
	def strip_ansi(source):
    """
    Remove ansi escape codes from text.
    
    Parameters
    ----------
    source : str
        Source to remove the ansi from
    """
    return re.sub(r'\033\[(\d|;)+?m', '', source) | 
	[
  "def",
  "strip_ansi",
  "(",
  "source",
  ")",
  ":",
  "return",
  "re",
  ".",
  "sub",
  "(",
  "r'\\033\\[(\\d|;)+?m'",
  ",",
  "''",
  ",",
  "source",
  ")"
] | 
	https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py2/IPython/utils/text.py#L479-L488 | |
| 
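A usage sketch for the `strip_ansi` record above; the regex targets SGR color/style sequences such as '\033[1;31m':

import re

def strip_ansi(source):
    # Same pattern as the record above.
    return re.sub(r'\033\[(\d|;)+?m', '', source)

print(strip_ansi('\033[1;31mred\033[0m and plain'))  # 'red and plain'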
	ApolloAuto/apollo | 
	463fb82f9e979d02dcb25044e60931293ab2dba0 | 
	modules/tools/sensor_calibration/extract_data.py | 
	python | 
	Extractor.generate_compressed_file | 
	(input_path,
                                 input_name,
                                 output_path,
                                 compressed_file='sensor_data') | 
	Compress data extraction directory as a single tar.gz archive | 
	Compress data extraction directory as a single tar.gz archive | 
	[
  "Compress",
  "data",
  "extraction",
  "directory",
  "as",
  "a",
  "single",
  "tar",
  ".",
  "gz",
  "archive"
] | 
	def generate_compressed_file(input_path,
                                 input_name,
                                 output_path,
                                 compressed_file='sensor_data'):
        """
        Compress data extraction directory as a single tar.gz archive
        """
        cwd_path = os.getcwd()
        os.chdir(input_path)
        shutil.make_archive(base_name=os.path.join(output_path,
                                                   compressed_file),
                            format='gztar',
                            root_dir=input_path,
                            base_dir=input_name)
        os.chdir(cwd_path) | 
	[
  "def",
  "generate_compressed_file",
  "(",
  "input_path",
  ",",
  "input_name",
  ",",
  "output_path",
  ",",
  "compressed_file",
  "=",
  "'sensor_data'",
  ")",
  ":",
  "cwd_path",
  "=",
  "os",
  ".",
  "getcwd",
  "(",
  ")",
  "os",
  ".",
  "chdir",
  "(",
  "input_path",
  ")",
  "shutil",
  ".",
  "make_archive",
  "(",
  "base_name",
  "=",
  "os",
  ".",
  "path",
  ".",
  "join",
  "(",
  "output_path",
  ",",
  "compressed_file",
  ")",
  ",",
  "format",
  "=",
  "'gztar'",
  ",",
  "root_dir",
  "=",
  "input_path",
  ",",
  "base_dir",
  "=",
  "input_name",
  ")",
  "os",
  ".",
  "chdir",
  "(",
  "cwd_path",
  ")"
] | 
	https://github.com/ApolloAuto/apollo/blob/463fb82f9e979d02dcb25044e60931293ab2dba0/modules/tools/sensor_calibration/extract_data.py#L323-L337 | ||
| 
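A self-contained sketch of the same `shutil.make_archive` call, with hypothetical paths (not taken from the record); assuming '/tmp/extract/run1' exists, it produces '/tmp/out/sensor_data.tar.gz' whose top-level entry is 'run1/':

import os
import shutil

input_path, input_name, output_path = '/tmp/extract', 'run1', '/tmp/out'
cwd = os.getcwd()
os.chdir(input_path)
try:
    shutil.make_archive(base_name=os.path.join(output_path, 'sensor_data'),
                        format='gztar',
                        root_dir=input_path,
                        base_dir=input_name)
finally:
    os.chdir(cwd)  # restore the working directory even if archiving fails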
	DmitryKoterov/dklab_realplexor | 
	01281d42fddcf7b9efe763b3ab50191c4429debc | 
	api/python/Dklab/realplexor.py | 
	python | 
	Dklab_Realplexor.send | 
	(self, idsAndCursors, data, showOnlyForIds=None) | 
	Send data to realplexor.
        Throw Dklab_Realplexor_Exception in case of error.
        idsAndCursors -- Target IDs in the form of dictionary(id1 => cursor1, id2 => cursor2, ...)
                                     or dictionary(id1, id2, id3, ...). If sending to a single ID,
                                     you may pass it as a plain string, not a dictionary.
        data -- Data to be sent (any format, e.g. nested dictionaries are OK).
        showOnlyForIds  -- Send this message to only those who also listen to any of these IDs.
                                     This parameter may be used to limit the visibility to a closed
                                     number of clients: give each client a unique ID and enumerate
                                     client IDs in $showOnlyForIds so as not to send messages to others. | 
	Send data to realplexor.
        Throw Dklab_Realplexor_Exception in case of error. | 
	[
  "Send",
  "data",
  "to",
  "realplexor",
  ".",
  "Throw",
  "Dklab_Realplexor_Exception",
  "in",
  "case",
  "of",
  "error",
  "."
] | 
	def send(self, idsAndCursors, data, showOnlyForIds=None):
        """
        Send data to realplexor.
        Throw Dklab_Realplexor_Exception in case of error.
        idsAndCursors -- Target IDs in the form of dictionary(id1 => cursor1, id2 => cursor2, ...)
                                     or dictionary(id1, id2, id3, ...). If sending to a single ID,
                                     you may pass it as a plain string, not a dictionary.
        data -- Data to be sent (any format, e.g. nested dictionaries are OK).
        showOnlyForIds  -- Send this message to only those who also listen to any of these IDs.
                                     This parameter may be used to limit the visibility to a closed
                                     number of clients: give each client a unique ID and enumerate
                                     client IDs in $showOnlyForIds so as not to send messages to others.
        """
        data = json.dumps(data)
        pairs = []
        for id in idsAndCursors:
            if type(id) == type(1):
                id = cursor # this is NOT cursor, but ID!
                cursor = None
            if re.search('^\w+$', id) is None:
                raise Dklab_Realplexor_Exception("Identifier must be alphanumeric, \"%s\" given" % id)
            try:
                cursor = idsAndCursors[id]
            except:
                cursor = None
            id = (self._namespace or '') + id
            if cursor is not None:
                try:
                    i = float(cursor)
                except ValueError:
                    raise Dklab_Realplexor_Exception("Cursor must be numeric, \"%s\" given" % cursor)
                pairs.append("%s:%s" % (cursor,id))
            else:
                pairs.append(id)
        if isinstance(showOnlyForIds, (list, tuple)):
            for id in showOnlyForIds:
                pairs.append("*" + (self._namespace or '') + id)
        self._send(",".join(pairs), data) | 
	[
  "def",
  "send",
  "(",
  "self",
  ",",
  "idsAndCursors",
  ",",
  "data",
  ",",
  "showOnlyForIds",
  "=",
  "None",
  ")",
  ":",
  "data",
  "=",
  "json",
  ".",
  "dumps",
  "(",
  "data",
  ")",
  "pairs",
  "=",
  "[",
  "]",
  "for",
  "id",
  "in",
  "idsAndCursors",
  ":",
  "if",
  "type",
  "(",
  "id",
  ")",
  "==",
  "type",
  "(",
  "1",
  ")",
  ":",
  "id",
  "=",
  "cursor",
  "# this is NOT cursor, but ID!",
  "cursor",
  "=",
  "None",
  "if",
  "re",
  ".",
  "search",
  "(",
  "'^\\w+$'",
  ",",
  "id",
  ")",
  "is",
  "None",
  ":",
  "raise",
  "Dklab_Realplexor_Exception",
  "(",
  "\"Identifier must be alphanumeric, \\\"%s\\\" given\"",
  "%",
  "id",
  ")",
  "try",
  ":",
  "cursor",
  "=",
  "idsAndCursors",
  "[",
  "id",
  "]",
  "except",
  ":",
  "cursor",
  "=",
  "None",
  "id",
  "=",
  "(",
  "self",
  ".",
  "_namespace",
  "or",
  "''",
  ")",
  "+",
  "id",
  "if",
  "cursor",
  "is",
  "not",
  "None",
  ":",
  "try",
  ":",
  "i",
  "=",
  "float",
  "(",
  "cursor",
  ")",
  "except",
  "ValueError",
  ":",
  "raise",
  "Dklab_Realplexor_Exception",
  "(",
  "\"Cursor must be numeric, \\\"%s\\\" given\"",
  "%",
  "cursor",
  ")",
  "pairs",
  ".",
  "append",
  "(",
  "\"%s:%s\"",
  "%",
  "(",
  "cursor",
  ",",
  "id",
  ")",
  ")",
  "else",
  ":",
  "pairs",
  ".",
  "append",
  "(",
  "id",
  ")",
  "if",
  "isinstance",
  "(",
  "showOnlyForIds",
  ",",
  "(",
  "list",
  ",",
  "tuple",
  ")",
  ")",
  ":",
  "for",
  "id",
  "in",
  "showOnlyForIds",
  ":",
  "pairs",
  ".",
  "append",
  "(",
  "\"*\"",
  "+",
  "(",
  "self",
  ".",
  "_namespace",
  "or",
  "''",
  ")",
  "+",
  "id",
  ")",
  "self",
  ".",
  "_send",
  "(",
  "\",\"",
  ".",
  "join",
  "(",
  "pairs",
  ")",
  ",",
  "data",
  ")"
] | 
	https://github.com/DmitryKoterov/dklab_realplexor/blob/01281d42fddcf7b9efe763b3ab50191c4429debc/api/python/Dklab/realplexor.py#L36-L74 | ||
| 
	catboost/catboost | 
	167f64f237114a4d10b2b4ee42adb4569137debe | 
	contrib/python/pandas/py2/pandas/core/dtypes/cast.py | 
	python | 
	construct_1d_ndarray_preserving_na | 
	(values, dtype=None, copy=False) | 
	return subarr | 
	Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
    Parameters
    ----------
    values : Sequence
    dtype : numpy.dtype, optional
    copy : bool, default False
        Note that copies may still be made with ``copy=False`` if casting
        is required.
    Returns
    -------
    arr : ndarray[dtype]
    Examples
    --------
    >>> np.array([1.0, 2.0, None], dtype='str')
    array(['1.0', '2.0', 'None'], dtype='<U4')
    >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str') | 
	Construct a new ndarray, coercing `values` to `dtype`, preserving NA. | 
	[
  "Construct",
  "a",
  "new",
  "ndarray",
  "coercing",
  "values",
  "to",
  "dtype",
  "preserving",
  "NA",
  "."
] | 
	def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
    """
    Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
    Parameters
    ----------
    values : Sequence
    dtype : numpy.dtype, optional
    copy : bool, default False
        Note that copies may still be made with ``copy=False`` if casting
        is required.
    Returns
    -------
    arr : ndarray[dtype]
    Examples
    --------
    >>> np.array([1.0, 2.0, None], dtype='str')
    array(['1.0', '2.0', 'None'], dtype='<U4')
    >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
    """
    subarr = np.array(values, dtype=dtype, copy=copy)
    if dtype is not None and dtype.kind in ("U", "S"):
        # GH-21083
        # We can't just return np.array(subarr, dtype='str') since
        # NumPy will convert the non-string objects into strings
        # Including NA values. So we have to go
        # string -> object -> update NA, which requires an
        # additional pass over the data.
        na_values = isna(values)
        subarr2 = subarr.astype(object)
        subarr2[na_values] = np.asarray(values, dtype=object)[na_values]
        subarr = subarr2
    return subarr | 
	[
  "def",
  "construct_1d_ndarray_preserving_na",
  "(",
  "values",
  ",",
  "dtype",
  "=",
  "None",
  ",",
  "copy",
  "=",
  "False",
  ")",
  ":",
  "subarr",
  "=",
  "np",
  ".",
  "array",
  "(",
  "values",
  ",",
  "dtype",
  "=",
  "dtype",
  ",",
  "copy",
  "=",
  "copy",
  ")",
  "if",
  "dtype",
  "is",
  "not",
  "None",
  "and",
  "dtype",
  ".",
  "kind",
  "in",
  "(",
  "\"U\"",
  ",",
  "\"S\"",
  ")",
  ":",
  "# GH-21083",
  "# We can't just return np.array(subarr, dtype='str') since",
  "# NumPy will convert the non-string objects into strings",
  "# Including NA values. Se we have to go",
  "# string -> object -> update NA, which requires an",
  "# additional pass over the data.",
  "na_values",
  "=",
  "isna",
  "(",
  "values",
  ")",
  "subarr2",
  "=",
  "subarr",
  ".",
  "astype",
  "(",
  "object",
  ")",
  "subarr2",
  "[",
  "na_values",
  "]",
  "=",
  "np",
  ".",
  "asarray",
  "(",
  "values",
  ",",
  "dtype",
  "=",
  "object",
  ")",
  "[",
  "na_values",
  "]",
  "subarr",
  "=",
  "subarr2",
  "return",
  "subarr"
] | 
	https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/dtypes/cast.py#L1218-L1257 | |
| 
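A worked contrast for the docstring example above, in plain numpy (the internal pandas helper itself is not imported here): a direct string cast turns None into the text 'None', which is exactly what the extra object-dtype pass avoids:

import numpy as np

values = [1.0, 2.0, None]
direct = np.array(values, dtype='str')
print(direct)               # ['1.0' '2.0' 'None'] -- the NA became the text 'None'
na_mask = np.array([v is None for v in values])
fixed = direct.astype(object)
fixed[na_mask] = np.asarray(values, dtype=object)[na_mask]
print(fixed)                # ['1.0' '2.0' None] -- the NA survives the cast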
	wxWidgets/wxPython-Classic | 
	19571e1ae65f1ac445f5491474121998c97a1bf0 | 
	wx/tools/Editra/src/extern/aui/framemanager.py | 
	python | 
	AuiManager.AddPane4 | 
	(self, window, pane_info, target) | 
	return True | 
	See comments on :meth:`AddPane`. | 
	See comments on :meth:`AddPane`. | 
	[
  "See",
  "comments",
  "on",
  ":",
  "meth",
  ":",
  "AddPane",
  "."
] | 
	def AddPane4(self, window, pane_info, target):
        """ See comments on :meth:`AddPane`. """
        if not self.AddPane(window, pane_info):
            return False
        paneInfo = self.GetPane(window)
        if not paneInfo.IsNotebookDockable():
            return self.AddPane1(window, pane_info)
        if not target.IsNotebookDockable() and not target.IsNotebookControl():
            return self.AddPane1(window, pane_info)
        if not target.HasNotebook():
            self.CreateNotebookBase(self._panes, target)
        # Add new item to notebook
        paneInfo.NotebookPage(target.notebook_id)
        # we also want to remove our captions sometimes
        self.RemoveAutoNBCaption(paneInfo)
        self.UpdateNotebook()
        return True | 
	[
  "def",
  "AddPane4",
  "(",
  "self",
  ",",
  "window",
  ",",
  "pane_info",
  ",",
  "target",
  ")",
  ":",
  "if",
  "not",
  "self",
  ".",
  "AddPane",
  "(",
  "window",
  ",",
  "pane_info",
  ")",
  ":",
  "return",
  "False",
  "paneInfo",
  "=",
  "self",
  ".",
  "GetPane",
  "(",
  "window",
  ")",
  "if",
  "not",
  "paneInfo",
  ".",
  "IsNotebookDockable",
  "(",
  ")",
  ":",
  "return",
  "self",
  ".",
  "AddPane1",
  "(",
  "window",
  ",",
  "pane_info",
  ")",
  "if",
  "not",
  "target",
  ".",
  "IsNotebookDockable",
  "(",
  ")",
  "and",
  "not",
  "target",
  ".",
  "IsNotebookControl",
  "(",
  ")",
  ":",
  "return",
  "self",
  ".",
  "AddPane1",
  "(",
  "window",
  ",",
  "pane_info",
  ")",
  "if",
  "not",
  "target",
  ".",
  "HasNotebook",
  "(",
  ")",
  ":",
  "self",
  ".",
  "CreateNotebookBase",
  "(",
  "self",
  ".",
  "_panes",
  ",",
  "target",
  ")",
  "# Add new item to notebook",
  "paneInfo",
  ".",
  "NotebookPage",
  "(",
  "target",
  ".",
  "notebook_id",
  ")",
  "# we also want to remove our captions sometimes",
  "self",
  ".",
  "RemoveAutoNBCaption",
  "(",
  "paneInfo",
  ")",
  "self",
  ".",
  "UpdateNotebook",
  "(",
  ")",
  "return",
  "True"
] | 
	https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/aui/framemanager.py#L4847-L4870 | |
| 
	LiquidPlayer/LiquidCore | 
	9405979363f2353ac9a71ad8ab59685dd7f919c9 | 
	deps/node-10.15.3/tools/gyp/pylib/gyp/xcode_emulation.py | 
	python | 
	XcodeSettings.GetInstallName | 
	(self) | 
	return install_name | 
	Return LD_DYLIB_INSTALL_NAME for this target. | 
	Return LD_DYLIB_INSTALL_NAME for this target. | 
	[
  "Return",
  "LD_DYLIB_INSTALL_NAME",
  "for",
  "this",
  "target",
  "."
] | 
	def GetInstallName(self):
    """Return LD_DYLIB_INSTALL_NAME for this target."""
    # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
    if (self.spec['type'] != 'shared_library' and
        (self.spec['type'] != 'loadable_module' or self._IsBundle())):
      return None
    default_install_name = \
        '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
    install_name = self.GetPerTargetSetting(
        'LD_DYLIB_INSTALL_NAME', default=default_install_name)
    # Hardcode support for the variables used in chromium for now, to
    # unblock people using the make build.
    if '$' in install_name:
      assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
          '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
          'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
          'yet in target \'%s\' (got \'%s\')' %
              (self.spec['target_name'], install_name))
      install_name = install_name.replace(
          '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
          self._StandardizePath(self.GetInstallNameBase()))
      if self._IsBundle():
        # These are only valid for bundles, hence the |if|.
        install_name = install_name.replace(
            '$(WRAPPER_NAME)', self.GetWrapperName())
        install_name = install_name.replace(
            '$(PRODUCT_NAME)', self.GetProductName())
      else:
        assert '$(WRAPPER_NAME)' not in install_name
        assert '$(PRODUCT_NAME)' not in install_name
      install_name = install_name.replace(
          '$(EXECUTABLE_PATH)', self.GetExecutablePath())
    return install_name | 
	[
  "def",
  "GetInstallName",
  "(",
  "self",
  ")",
  ":",
  "# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.",
  "if",
  "(",
  "self",
  ".",
  "spec",
  "[",
  "'type'",
  "]",
  "!=",
  "'shared_library'",
  "and",
  "(",
  "self",
  ".",
  "spec",
  "[",
  "'type'",
  "]",
  "!=",
  "'loadable_module'",
  "or",
  "self",
  ".",
  "_IsBundle",
  "(",
  ")",
  ")",
  ")",
  ":",
  "return",
  "None",
  "default_install_name",
  "=",
  "'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'",
  "install_name",
  "=",
  "self",
  ".",
  "GetPerTargetSetting",
  "(",
  "'LD_DYLIB_INSTALL_NAME'",
  ",",
  "default",
  "=",
  "default_install_name",
  ")",
  "# Hardcode support for the variables used in chromium for now, to",
  "# unblock people using the make build.",
  "if",
  "'$'",
  "in",
  "install_name",
  ":",
  "assert",
  "install_name",
  "in",
  "(",
  "'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'",
  "'$(WRAPPER_NAME)/$(PRODUCT_NAME)'",
  ",",
  "default_install_name",
  ")",
  ",",
  "(",
  "'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '",
  "'yet in target \\'%s\\' (got \\'%s\\')'",
  "%",
  "(",
  "self",
  ".",
  "spec",
  "[",
  "'target_name'",
  "]",
  ",",
  "install_name",
  ")",
  ")",
  "install_name",
  "=",
  "install_name",
  ".",
  "replace",
  "(",
  "'$(DYLIB_INSTALL_NAME_BASE:standardizepath)'",
  ",",
  "self",
  ".",
  "_StandardizePath",
  "(",
  "self",
  ".",
  "GetInstallNameBase",
  "(",
  ")",
  ")",
  ")",
  "if",
  "self",
  ".",
  "_IsBundle",
  "(",
  ")",
  ":",
  "# These are only valid for bundles, hence the |if|.",
  "install_name",
  "=",
  "install_name",
  ".",
  "replace",
  "(",
  "'$(WRAPPER_NAME)'",
  ",",
  "self",
  ".",
  "GetWrapperName",
  "(",
  ")",
  ")",
  "install_name",
  "=",
  "install_name",
  ".",
  "replace",
  "(",
  "'$(PRODUCT_NAME)'",
  ",",
  "self",
  ".",
  "GetProductName",
  "(",
  ")",
  ")",
  "else",
  ":",
  "assert",
  "'$(WRAPPER_NAME)'",
  "not",
  "in",
  "install_name",
  "assert",
  "'$(PRODUCT_NAME)'",
  "not",
  "in",
  "install_name",
  "install_name",
  "=",
  "install_name",
  ".",
  "replace",
  "(",
  "'$(EXECUTABLE_PATH)'",
  ",",
  "self",
  ".",
  "GetExecutablePath",
  "(",
  ")",
  ")",
  "return",
  "install_name"
] | 
	https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/tools/gyp/pylib/gyp/xcode_emulation.py#L779-L815 | |
| 
	wxWidgets/wxPython-Classic | 
	19571e1ae65f1ac445f5491474121998c97a1bf0 | 
	src/osx_cocoa/_core.py | 
	python | 
	MemoryFSHandler_AddFile | 
	(filename, dataItem, imgType=-1) | 
	Add 'file' to the memory filesystem.  The dataItem parameter can
    either be a `wx.Bitmap`, `wx.Image` or a string that can contain
    arbitrary data.  If a bitmap or image is used then the imgType
    parameter should specify what kind of image file it should be
    written as, wx.BITMAP_TYPE_PNG, etc. | 
	Add 'file' to the memory filesystem.  The dataItem parameter can
    either be a `wx.Bitmap`, `wx.Image` or a string that can contain
    arbitrary data.  If a bitmap or image is used then the imgType
    parameter should specify what kind of image file it should be
    written as, wx.BITMAP_TYPE_PNG, etc. | 
	[
  "Add",
  "file",
  "to",
  "the",
  "memory",
  "filesystem",
  ".",
  "The",
  "dataItem",
  "parameter",
  "can",
  "either",
  "be",
  "a",
  "wx",
  ".",
  "Bitmap",
  "wx",
  ".",
  "Image",
  "or",
  "a",
  "string",
  "that",
  "can",
  "contain",
  "arbitrary",
  "data",
  ".",
  "If",
  "a",
  "bitmap",
  "or",
  "image",
  "is",
  "used",
  "then",
  "the",
  "imgType",
  "parameter",
  "should",
  "specify",
  "what",
  "kind",
  "of",
  "image",
  "file",
  "it",
  "should",
  "be",
  "written",
  "as",
  "wx",
  ".",
  "BITMAP_TYPE_PNG",
  "etc",
  "."
] | 
	def MemoryFSHandler_AddFile(filename, dataItem, imgType=-1):
    """
    Add 'file' to the memory filesystem.  The dataItem parameter can
    either be a `wx.Bitmap`, `wx.Image` or a string that can contain
    arbitrary data.  If a bitmap or image is used then the imgType
    parameter should specify what kind of image file it should be
    written as, wx.BITMAP_TYPE_PNG, etc.
    """
    if isinstance(dataItem, wx.Image):
        __wxMemoryFSHandler_AddFile_wxImage(filename, dataItem, imgType)
    elif isinstance(dataItem, wx.Bitmap):
        __wxMemoryFSHandler_AddFile_wxBitmap(filename, dataItem, imgType)
    else:
        try:
            __wxMemoryFSHandler_AddFile_Data(filename, dataItem)
        except TypeError:
            raise TypeError, 'wx.Image, wx.Bitmap or buffer object expected' | 
	[
  "def",
  "MemoryFSHandler_AddFile",
  "(",
  "filename",
  ",",
  "dataItem",
  ",",
  "imgType",
  "=",
  "-",
  "1",
  ")",
  ":",
  "if",
  "isinstance",
  "(",
  "dataItem",
  ",",
  "wx",
  ".",
  "Image",
  ")",
  ":",
  "__wxMemoryFSHandler_AddFile_wxImage",
  "(",
  "filename",
  ",",
  "dataItem",
  ",",
  "imgType",
  ")",
  "elif",
  "isinstance",
  "(",
  "dataItem",
  ",",
  "wx",
  ".",
  "Bitmap",
  ")",
  ":",
  "__wxMemoryFSHandler_AddFile_wxBitmap",
  "(",
  "filename",
  ",",
  "dataItem",
  ",",
  "imgType",
  ")",
  "else",
  ":",
  "try",
  ":",
  "__wxMemoryFSHandler_AddFile_Data",
  "(",
  "filename",
  ",",
  "dataItem",
  ")",
  "except",
  "TypeError",
  ":",
  "raise",
  "TypeError",
  ",",
  "'wx.Image, wx.Bitmap or buffer object expected'"
] | 
	https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L2538-L2554 | ||
| 
	BlzFans/wke | 
	b0fa21158312e40c5fbd84682d643022b6c34a93 | 
	cygwin/lib/python2.6/imaplib.py | 
	python | 
	IMAP4.noop | 
	(self) | 
	return self._simple_command('NOOP') | 
	Send NOOP command.
        (typ, [data]) = <instance>.noop() | 
	Send NOOP command. | 
	[
  "Send",
  "NOOP",
  "command",
  "."
] | 
	def noop(self):
        """Send NOOP command.
        (typ, [data]) = <instance>.noop()
        """
        if __debug__:
            if self.debug >= 3:
                self._dump_ur(self.untagged_responses)
        return self._simple_command('NOOP') | 
	[
  "def",
  "noop",
  "(",
  "self",
  ")",
  ":",
  "if",
  "__debug__",
  ":",
  "if",
  "self",
  ".",
  "debug",
  ">=",
  "3",
  ":",
  "self",
  ".",
  "_dump_ur",
  "(",
  "self",
  ".",
  "untagged_responses",
  ")",
  "return",
  "self",
  ".",
  "_simple_command",
  "(",
  "'NOOP'",
  ")"
] | 
	https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/imaplib.py#L565-L573 | |
| 
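A usage sketch for the `noop` record above; the host and credentials are placeholders, not real endpoints:

import imaplib

conn = imaplib.IMAP4_SSL('imap.example.com')
conn.login('user', 'password')
typ, data = conn.noop()   # typ == 'OK'; also a cheap way to keep the session alive
conn.logout()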
	PX4/PX4-Autopilot | 
	0b9f60a0370be53d683352c63fd92db3d6586e18 | 
	platforms/nuttx/NuttX/tools/kconfiglib.py | 
	python | 
	Choice.referenced | 
	(self) | 
	return {item for node in self.nodes for item in node.referenced} | 
	See the class documentation. | 
	See the class documentation. | 
	[
  "See",
  "the",
  "class",
  "documentation",
  "."
] | 
	def referenced(self):
        """
        See the class documentation.
        """
        return {item for node in self.nodes for item in node.referenced} | 
	[
  "def",
  "referenced",
  "(",
  "self",
  ")",
  ":",
  "return",
  "{",
  "item",
  "for",
  "node",
  "in",
  "self",
  ".",
  "nodes",
  "for",
  "item",
  "in",
  "node",
  ".",
  "referenced",
  "}"
] | 
	https://github.com/PX4/PX4-Autopilot/blob/0b9f60a0370be53d683352c63fd92db3d6586e18/platforms/nuttx/NuttX/tools/kconfiglib.py#L4888-L4892 | |
| 
	gimli-org/gimli | 
	17aa2160de9b15ababd9ef99e89b1bc3277bbb23 | 
	pygimli/physics/petro/resistivity.py | 
	python | 
	transInvArchiePhi | 
	(rFluid=20, m=2) | 
	return pg.trans.TransPower(-1/m, rFluid) | 
	Inverse Archie transformation function porosity(resistivity).
    # rFluid/rho = phi^m  ==> phi = (rFluid/rho)^(1/m) = (rho/rFluid)^(-1/m)
    See
    ---
    :py:mod:`pygimli.physics.petro.transFwdArchiePhi` | 
	Inverse Archie transformation function porosity(resistivity). | 
	[
  "Inverse",
  "Archie",
  "transformation",
  "function",
  "porosity",
  "(",
  "resistivity",
  ")",
  "."
] | 
	def transInvArchiePhi(rFluid=20, m=2):  # phi(rho)
    """Inverse Archie transformation function porosity(resistivity).
    # rFluid/rho = phi^m  ==> phi = (rFluid/rho)^(1/m) = (rho/rFluid)^(-1/m)
    See
    ---
    :py:mod:`pygimli.physics.petro.transFwdArchiePhi`
    """
    return pg.trans.TransPower(-1/m, rFluid) | 
	[
  "def",
  "transInvArchiePhi",
  "(",
  "rFluid",
  "=",
  "20",
  ",",
  "m",
  "=",
  "2",
  ")",
  ":",
  "# phi(rho)",
  "return",
  "pg",
  ".",
  "trans",
  ".",
  "TransPower",
  "(",
  "-",
  "1",
  "/",
  "m",
  ",",
  "rFluid",
  ")"
] | 
	https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/physics/petro/resistivity.py#L152-L160 | |
| 
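A numeric round trip of the Archie relation from the record's comment, in plain Python (no pygimli needed):

rFluid, m = 20.0, 2.0
phi = 0.3
rho = rFluid / phi ** m                  # forward Archie: rho = rFluid * phi**(-m)
phi_back = (rho / rFluid) ** (-1.0 / m)  # the inverse transform from the record
print(rho, phi_back)                     # 222.22..., 0.3 -- round trip recovers phi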
	PaddlePaddle/PaddleOCR | 
	b756bf5f8c90142e0d89d3db0163965c686b6ffe | 
	ppstructure/table/tablepyxl/style.py | 
	python | 
	Element.get_dimension | 
	(self, dimension_key) | 
	return dimension | 
	Extracts the dimension from the style dict of the Element and returns it as a float. | 
	Extracts the dimension from the style dict of the Element and returns it as a float. | 
	[
  "Extracts",
  "the",
  "dimension",
  "from",
  "the",
  "style",
  "dict",
  "of",
  "the",
  "Element",
  "and",
  "returns",
  "it",
  "as",
  "a",
  "float",
  "."
] | 
	def get_dimension(self, dimension_key):
        """
        Extracts the dimension from the style dict of the Element and returns it as a float.
        """
        dimension = self.style_dict.get(dimension_key)
        if dimension:
            if dimension[-2:] in ['px', 'em', 'pt', 'in', 'cm']:
                dimension = dimension[:-2]
            dimension = float(dimension)
        return dimension | 
	[
  "def",
  "get_dimension",
  "(",
  "self",
  ",",
  "dimension_key",
  ")",
  ":",
  "dimension",
  "=",
  "self",
  ".",
  "style_dict",
  ".",
  "get",
  "(",
  "dimension_key",
  ")",
  "if",
  "dimension",
  ":",
  "if",
  "dimension",
  "[",
  "-",
  "2",
  ":",
  "]",
  "in",
  "[",
  "'px'",
  ",",
  "'em'",
  ",",
  "'pt'",
  ",",
  "'in'",
  ",",
  "'cm'",
  "]",
  ":",
  "dimension",
  "=",
  "dimension",
  "[",
  ":",
  "-",
  "2",
  "]",
  "dimension",
  "=",
  "float",
  "(",
  "dimension",
  ")",
  "return",
  "dimension"
] | 
	https://github.com/PaddlePaddle/PaddleOCR/blob/b756bf5f8c90142e0d89d3db0163965c686b6ffe/ppstructure/table/tablepyxl/style.py#L164-L173 | |
| 
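A usage sketch for the `get_dimension` record above, with a hypothetical style dict as parsed from CSS such as 'width: 12px':

style_dict = {'width': '12px', 'height': '2em'}

def get_dimension(dimension_key):
    # Standalone restatement of the method above.
    dimension = style_dict.get(dimension_key)
    if dimension:
        if dimension[-2:] in ['px', 'em', 'pt', 'in', 'cm']:
            dimension = dimension[:-2]  # strip the unit suffix
        dimension = float(dimension)
    return dimension

print(get_dimension('width'), get_dimension('height'))  # 12.0 2.0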
	catboost/catboost | 
	167f64f237114a4d10b2b4ee42adb4569137debe | 
	contrib/tools/python/src/Lib/sets.py | 
	python | 
	Set.discard | 
	(self, element) | 
	Remove an element from a set if it is a member.
        If the element is not a member, do nothing. | 
	Remove an element from a set if it is a member. | 
	[
  "Remove",
  "an",
  "element",
  "from",
  "a",
  "set",
  "if",
  "it",
  "is",
  "a",
  "member",
  "."
] | 
	def discard(self, element):
        """Remove an element from a set if it is a member.
        If the element is not a member, do nothing.
        """
        try:
            self.remove(element)
        except KeyError:
            pass | 
	[
  "def",
  "discard",
  "(",
  "self",
  ",",
  "element",
  ")",
  ":",
  "try",
  ":",
  "self",
  ".",
  "remove",
  "(",
  "element",
  ")",
  "except",
  "KeyError",
  ":",
  "pass"
] | 
	https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/sets.py#L525-L533 | ||
| 
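Usage sketch for the `discard` record above; the Python 2 `sets` module is long deprecated, and the built-in `set` shown here offers the same semantics:

s = set([1, 2, 3])
s.discard(2)      # removes 2
s.discard(99)     # not a member: silently does nothing
# s.remove(99) would raise KeyError; discard() is the quiet variant
print(s)          # {1, 3}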
	wlanjie/AndroidFFmpeg | 
	7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | 
	tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/logging/__init__.py | 
	python | 
	basicConfig | 
	(**kwargs) | 
	Do basic configuration for the logging system.
    This function does nothing if the root logger already has handlers
    configured. It is a convenience method intended for use by simple scripts
    to do one-shot configuration of the logging package.
    The default behaviour is to create a StreamHandler which writes to
    sys.stderr, set a formatter using the BASIC_FORMAT format string, and
    add the handler to the root logger.
    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.
    filename  Specifies that a FileHandler be created, using the specified
              filename, rather than a StreamHandler.
    filemode  Specifies the mode to open the file, if filename is specified
              (if filemode is unspecified, it defaults to 'a').
    format    Use the specified format string for the handler.
    datefmt   Use the specified date/time format.
    level     Set the root logger level to the specified level.
    stream    Use the specified stream to initialize the StreamHandler. Note
              that this argument is incompatible with 'filename' - if both
              are present, 'stream' is ignored.
    Note that you could specify a stream created using open(filename, mode)
    rather than passing the filename and mode in. However, it should be
    remembered that StreamHandler does not close its stream (since it may be
    using sys.stdout or sys.stderr), whereas FileHandler closes its stream
    when the handler is closed. | 
	Do basic configuration for the logging system. | 
	[
  "Do",
  "basic",
  "configuration",
  "for",
  "the",
  "logging",
  "system",
  "."
] | 
	def basicConfig(**kwargs):
    """
    Do basic configuration for the logging system.
    This function does nothing if the root logger already has handlers
    configured. It is a convenience method intended for use by simple scripts
    to do one-shot configuration of the logging package.
    The default behaviour is to create a StreamHandler which writes to
    sys.stderr, set a formatter using the BASIC_FORMAT format string, and
    add the handler to the root logger.
    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.
    filename  Specifies that a FileHandler be created, using the specified
              filename, rather than a StreamHandler.
    filemode  Specifies the mode to open the file, if filename is specified
              (if filemode is unspecified, it defaults to 'a').
    format    Use the specified format string for the handler.
    datefmt   Use the specified date/time format.
    level     Set the root logger level to the specified level.
    stream    Use the specified stream to initialize the StreamHandler. Note
              that this argument is incompatible with 'filename' - if both
              are present, 'stream' is ignored.
    Note that you could specify a stream created using open(filename, mode)
    rather than passing the filename and mode in. However, it should be
    remembered that StreamHandler does not close its stream (since it may be
    using sys.stdout or sys.stderr), whereas FileHandler closes its stream
    when the handler is closed.
    """
    # Add thread safety in case someone mistakenly calls
    # basicConfig() from multiple threads
    _acquireLock()
    try:
        if len(root.handlers) == 0:
            filename = kwargs.get("filename")
            if filename:
                mode = kwargs.get("filemode", 'a')
                hdlr = FileHandler(filename, mode)
            else:
                stream = kwargs.get("stream")
                hdlr = StreamHandler(stream)
            fs = kwargs.get("format", BASIC_FORMAT)
            dfs = kwargs.get("datefmt", None)
            fmt = Formatter(fs, dfs)
            hdlr.setFormatter(fmt)
            root.addHandler(hdlr)
            level = kwargs.get("level")
            if level is not None:
                root.setLevel(level)
    finally:
        _releaseLock() | 
	[
  "def",
  "basicConfig",
  "(",
  "*",
  "*",
  "kwargs",
  ")",
  ":",
  "# Add thread safety in case someone mistakenly calls",
  "# basicConfig() from multiple threads",
  "_acquireLock",
  "(",
  ")",
  "try",
  ":",
  "if",
  "len",
  "(",
  "root",
  ".",
  "handlers",
  ")",
  "==",
  "0",
  ":",
  "filename",
  "=",
  "kwargs",
  ".",
  "get",
  "(",
  "\"filename\"",
  ")",
  "if",
  "filename",
  ":",
  "mode",
  "=",
  "kwargs",
  ".",
  "get",
  "(",
  "\"filemode\"",
  ",",
  "'a'",
  ")",
  "hdlr",
  "=",
  "FileHandler",
  "(",
  "filename",
  ",",
  "mode",
  ")",
  "else",
  ":",
  "stream",
  "=",
  "kwargs",
  ".",
  "get",
  "(",
  "\"stream\"",
  ")",
  "hdlr",
  "=",
  "StreamHandler",
  "(",
  "stream",
  ")",
  "fs",
  "=",
  "kwargs",
  ".",
  "get",
  "(",
  "\"format\"",
  ",",
  "BASIC_FORMAT",
  ")",
  "dfs",
  "=",
  "kwargs",
  ".",
  "get",
  "(",
  "\"datefmt\"",
  ",",
  "None",
  ")",
  "fmt",
  "=",
  "Formatter",
  "(",
  "fs",
  ",",
  "dfs",
  ")",
  "hdlr",
  ".",
  "setFormatter",
  "(",
  "fmt",
  ")",
  "root",
  ".",
  "addHandler",
  "(",
  "hdlr",
  ")",
  "level",
  "=",
  "kwargs",
  ".",
  "get",
  "(",
  "\"level\"",
  ")",
  "if",
  "level",
  "is",
  "not",
  "None",
  ":",
  "root",
  ".",
  "setLevel",
  "(",
  "level",
  ")",
  "finally",
  ":",
  "_releaseLock",
  "(",
  ")"
] | 
	https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/logging/__init__.py#L1489-L1542 | ||
| 
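Typical one-shot configuration matching the docstring above (the filename is illustrative):

import logging

# basicConfig is a no-op if the root logger already has handlers.
logging.basicConfig(filename='app.log', filemode='w',
                    format='%(asctime)s %(levelname)s %(message)s',
                    level=logging.DEBUG)
logging.debug('goes to app.log, not stderr')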
	taichi-dev/taichi | 
	973c04d6ba40f34e9e3bd5a28ae0ee0802f136a6 | 
	python/taichi/lang/util.py | 
	python | 
	has_pytorch | 
	() | 
	return _has_pytorch | 
	Whether pytorch is present in the current Python environment.
    Returns:
        bool: True if pytorch is present else False. | 
	Whether pytorch is present in the current Python environment. | 
	[
  "Whether",
  "has",
  "pytorch",
  "in",
  "the",
  "current",
  "Python",
  "environment",
  "."
] | 
	def has_pytorch():
    """Whether has pytorch in the current Python environment.
    Returns:
        bool: True if has pytorch else False.
    """
    return _has_pytorch | 
	[
  "def",
  "has_pytorch",
  "(",
  ")",
  ":",
  "return",
  "_has_pytorch"
] | 
	https://github.com/taichi-dev/taichi/blob/973c04d6ba40f34e9e3bd5a28ae0ee0802f136a6/python/taichi/lang/util.py#L23-L30 | |
| 
	kamyu104/LeetCode-Solutions | 
	77605708a927ea3b85aee5a479db733938c7c211 | 
	Python/maximize-distance-to-closest-person.py | 
	python | 
	Solution.maxDistToClosest | 
	(self, seats) | 
	return max(result, len(seats)-1-prev) | 
	:type seats: List[int]
        :rtype: int | 
	:type seats: List[int]
        :rtype: int | 
	[
  ":",
  "type",
  "seats",
  ":",
  "List",
  "[",
  "int",
  "]",
  ":",
  "rtype",
  ":",
  "int"
] | 
	def maxDistToClosest(self, seats):
        """
        :type seats: List[int]
        :rtype: int
        """
        prev, result = -1, 1
        for i in xrange(len(seats)):
            if seats[i]:
                if prev < 0:
                    result = i
                else:
                    result = max(result, (i-prev)//2)
                prev = i
        return max(result, len(seats)-1-prev) | 
	[
  "def",
  "maxDistToClosest",
  "(",
  "self",
  ",",
  "seats",
  ")",
  ":",
  "prev",
  ",",
  "result",
  "=",
  "-",
  "1",
  ",",
  "1",
  "for",
  "i",
  "in",
  "xrange",
  "(",
  "len",
  "(",
  "seats",
  ")",
  ")",
  ":",
  "if",
  "seats",
  "[",
  "i",
  "]",
  ":",
  "if",
  "prev",
  "<",
  "0",
  ":",
  "result",
  "=",
  "i",
  "else",
  ":",
  "result",
  "=",
  "max",
  "(",
  "result",
  ",",
  "(",
  "i",
  "-",
  "prev",
  ")",
  "//",
  "2",
  ")",
  "prev",
  "=",
  "i",
  "return",
  "max",
  "(",
  "result",
  ",",
  "len",
  "(",
  "seats",
  ")",
  "-",
  "1",
  "-",
  "prev",
  ")"
] | 
	https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/maximize-distance-to-closest-person.py#L6-L19 | |
| 
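A quick worked check of the algorithm above (note the body is Python 2 code, using `xrange`): the answer is the larger of the best interior gap, halved, and the distance to the row's far end.

```python
s = Solution()
# Best seat is index 2: gap between occupied seats 0 and 4, halved.
assert s.maxDistToClosest([1, 0, 0, 0, 1, 0, 1]) == 2
# Sitting at the far end beats any interior gap here.
assert s.maxDistToClosest([1, 0, 0, 0]) == 3
```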
	thalium/icebox | 
	99d147d5b9269222225443ce171b4fd46d8985d4 | 
	third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py | 
	python | 
	catalog.resolve | 
	(self, pubID, sysID) | 
	return ret | 
	Do a complete resolution lookup of an External Identifier | 
	Do a complete resolution lookup of an External Identifier | 
	[
  "Do",
  "a",
  "complete",
  "resolution",
  "lookup",
  "of",
  "an",
  "External",
  "Identifier"
] | 
	def resolve(self, pubID, sysID):
        """Do a complete resolution lookup of an External Identifier """
        ret = libxml2mod.xmlACatalogResolve(self._o, pubID, sysID)
        return ret | 
	[
  "def",
  "resolve",
  "(",
  "self",
  ",",
  "pubID",
  ",",
  "sysID",
  ")",
  ":",
  "ret",
  "=",
  "libxml2mod",
  ".",
  "xmlACatalogResolve",
  "(",
  "self",
  ".",
  "_o",
  ",",
  "pubID",
  ",",
  "sysID",
  ")",
  "return",
  "ret"
] | 
	https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py#L4902-L4905 | |
| 
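A hedged sketch of `resolve()` via the libxml2 Python bindings: the catalog path and identifiers are placeholders, and `loadACatalog` is assumed as the usual way to obtain a catalog object.

```python
import libxml2

# Placeholder catalog path and identifiers.
cat = libxml2.loadACatalog("/etc/xml/catalog")
uri = cat.resolve(
    "-//W3C//DTD XHTML 1.0 Strict//EN",
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd")
print(uri)  # local resource the catalog maps the identifiers to, or None
```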
	wxWidgets/wxPython-Classic | 
	19571e1ae65f1ac445f5491474121998c97a1bf0 | 
	src/osx_cocoa/_gdi.py | 
	python | 
	PseudoDC.SetPalette | 
	(*args, **kwargs) | 
	return _gdi_.PseudoDC_SetPalette(*args, **kwargs) | 
	SetPalette(self, Palette palette)
        If this is a window DC or memory DC, assigns the given palette to the
        window or bitmap associated with the DC. If the argument is
        ``wx.NullPalette``, the current palette is selected out of the device
        context, and the original palette restored. | 
	SetPalette(self, Palette palette) | 
	[
  "SetPalette",
  "(",
  "self",
  "Palette",
  "palette",
  ")"
] | 
	def SetPalette(*args, **kwargs):
        """
        SetPalette(self, Palette palette)
        If this is a window DC or memory DC, assigns the given palette to the
        window or bitmap associated with the DC. If the argument is
        ``wx.NullPalette``, the current palette is selected out of the device
        context, and the original palette restored.
        """
        return _gdi_.PseudoDC_SetPalette(*args, **kwargs) | 
	[
  "def",
  "SetPalette",
  "(",
  "*",
  "args",
  ",",
  "*",
  "*",
  "kwargs",
  ")",
  ":",
  "return",
  "_gdi_",
  ".",
  "PseudoDC_SetPalette",
  "(",
  "*",
  "args",
  ",",
  "*",
  "*",
  "kwargs",
  ")"
] | 
	https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_gdi.py#L8266-L8275 | |
| 
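A brief sketch of the `SetPalette` call on a `wx.PseudoDC`; per the docstring, passing ``wx.NullPalette`` selects the current palette out and restores the original.

```python
import wx

app = wx.App(False)   # a wx.App must exist before creating GDI objects
pdc = wx.PseudoDC()
pdc.SetPalette(wx.NullPalette)  # deselect any palette, restore original
```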
	apple/turicreate | 
	cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | 
	deps/src/libxml2-2.9.1/python/libxml2.py | 
	python | 
	xmlTextReader.QuoteChar | 
	(self) | 
	return ret | 
	The quotation mark character used to enclose the value of
           an attribute. | 
	The quotation mark character used to enclose the value of
           an attribute. | 
	[
  "The",
  "quotation",
  "mark",
  "character",
  "used",
  "to",
  "enclose",
  "the",
  "value",
  "of",
  "an",
  "attribute",
  "."
] | 
	def QuoteChar(self):
        """The quotation mark character used to enclose the value of
           an attribute. """
        ret = libxml2mod.xmlTextReaderQuoteChar(self._o)
        return ret | 
	[
  "def",
  "QuoteChar",
  "(",
  "self",
  ")",
  ":",
  "ret",
  "=",
  "libxml2mod",
  ".",
  "xmlTextReaderQuoteChar",
  "(",
  "self",
  ".",
  "_o",
  ")",
  "return",
  "ret"
] | 
	https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L6816-L6820 | |
| 
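A small sketch with the libxml2 text reader: after positioning on an element, `QuoteChar()` reports (as an int) the quote character used around attribute values; `readerForDoc` is the usual in-memory constructor.

```python
import libxml2

reader = libxml2.readerForDoc('<a x="1"/>', "mem.xml", None, 0)
reader.Read()                   # move onto the <a> element
print(chr(reader.QuoteChar()))  # '"' for double-quoted attributes
reader.Close()
```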
	wxWidgets/wxPython-Classic | 
	19571e1ae65f1ac445f5491474121998c97a1bf0 | 
	wx/tools/Editra/src/ed_msg.py | 
	python | 
	RegisterCallback | 
	(callback, msgtype) | 
	Register a callback method for the given message type
    @param callback: callable
    @param msgtype: message type | 
	Register a callback method for the given message type
    @param callback: callable
    @param msgtype: message type | 
	[
  "Register",
  "a",
  "callback",
  "method",
  "for",
  "the",
  "given",
  "message",
  "type",
  "@param",
  "callback",
  ":",
  "callable",
  "@param",
  "msgtype",
  ":",
  "message",
  "type"
] | 
	def RegisterCallback(callback, msgtype):
    """Register a callback method for the given message type
    @param callback: callable
    @param msgtype: message type
    """
    if isinstance(msgtype, tuple):
        mtype = '.'.join(msgtype)
    else:
        mtype = msgtype
    if mtype not in _CALLBACK_REGISTRY:
        _CALLBACK_REGISTRY[mtype] = list()
    if callback not in _CALLBACK_REGISTRY[mtype]:
        _CALLBACK_REGISTRY[mtype].append(callback) | 
	[
  "def",
  "RegisterCallback",
  "(",
  "callback",
  ",",
  "msgtype",
  ")",
  ":",
  "if",
  "isinstance",
  "(",
  "msgtype",
  ",",
  "tuple",
  ")",
  ":",
  "mtype",
  "=",
  "'.'",
  ".",
  "join",
  "(",
  "msgtype",
  ")",
  "else",
  ":",
  "mtype",
  "=",
  "msgtype",
  "if",
  "mtype",
  "not",
  "in",
  "_CALLBACK_REGISTRY",
  ":",
  "_CALLBACK_REGISTRY",
  "[",
  "mtype",
  "]",
  "=",
  "list",
  "(",
  ")",
  "if",
  "callback",
  "not",
  "in",
  "_CALLBACK_REGISTRY",
  "[",
  "mtype",
  "]",
  ":",
  "_CALLBACK_REGISTRY",
  "[",
  "mtype",
  "]",
  ".",
  "append",
  "(",
  "callback",
  ")"
] | 
	https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_msg.py#L429-L444 | ||
| 
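A hedged sketch of registering a callback with Editra's `ed_msg` module; the `EDMSG_FILE_SAVE` constant is an assumption for illustration (Editra defines its message types as tuples, which this function joins with '.').

```python
import ed_msg  # Editra's message system module

def on_file_saved(msg):
    print("file saved:", msg)

# Any tuple message type is joined with '.' to form the registry key;
# duplicate registrations of the same callback are ignored.
ed_msg.RegisterCallback(on_file_saved, ed_msg.EDMSG_FILE_SAVE)
```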
	aws/lumberyard | 
	f85344403c1c2e77ec8c75deb2c116e97b713217 | 
	dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/botocore/utils.py | 
	python | 
	deep_merge | 
	(base, extra) | 
Deeply merge two dictionaries, overriding existing keys in the base.
    :param base: The base dictionary which will be merged into.
    :param extra: The dictionary to merge into the base. Keys from this
        dictionary will take precedence. | 
Deeply merge two dictionaries, overriding existing keys in the base. | 
	[
  "Deeply",
  "two",
  "dictionaries",
  "overriding",
  "existing",
  "keys",
  "in",
  "the",
  "base",
  "."
] | 
	def deep_merge(base, extra):
    """Deeply two dictionaries, overriding existing keys in the base.
    :param base: The base dictionary which will be merged into.
    :param extra: The dictionary to merge into the base. Keys from this
        dictionary will take precedence.
    """
    for key in extra:
        # If the key represents a dict on both given dicts, merge the sub-dicts
        if key in base and isinstance(base[key], dict)\
                and isinstance(extra[key], dict):
            deep_merge(base[key], extra[key])
            continue
        # Otherwise, set the key on the base to be the value of the extra.
        base[key] = extra[key] | 
	[
  "def",
  "deep_merge",
  "(",
  "base",
  ",",
  "extra",
  ")",
  ":",
  "for",
  "key",
  "in",
  "extra",
  ":",
  "# If the key represents a dict on both given dicts, merge the sub-dicts",
  "if",
  "key",
  "in",
  "base",
  "and",
  "isinstance",
  "(",
  "base",
  "[",
  "key",
  "]",
  ",",
  "dict",
  ")",
  "and",
  "isinstance",
  "(",
  "extra",
  "[",
  "key",
  "]",
  ",",
  "dict",
  ")",
  ":",
  "deep_merge",
  "(",
  "base",
  "[",
  "key",
  "]",
  ",",
  "extra",
  "[",
  "key",
  "]",
  ")",
  "continue",
  "# Otherwise, set the key on the base to be the value of the extra.",
  "base",
  "[",
  "key",
  "]",
  "=",
  "extra",
  "[",
  "key",
  "]"
] | 
	https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/botocore/utils.py#L1113-L1128 | ||
| 
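A quick worked example of the in-place merge: nested dicts are merged recursively, and any other key from `extra` simply overrides `base`.

```python
base = {"a": 1, "b": {"x": 1, "y": 2}}
extra = {"b": {"y": 20, "z": 30}, "c": 3}
deep_merge(base, extra)
# base is mutated in place:
assert base == {"a": 1, "b": {"x": 1, "y": 20, "z": 30}, "c": 3}
```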
	bh107/bohrium | 
	5b83e7117285fefc7779ed0e9acb0f8e74c7e068 | 
	bridge/npbackend/bohrium/reorganization.py | 
	python | 
	cond_scatter | 
	(ary, indexes, values, mask) | 
cond_scatter(ary, indexes, values, mask)
    Scatter 'values' into 'ary' selected by 'indexes' where 'mask' is true.
    The values of 'indexes' are absolute indexes into a flattened 'ary'.
    The shapes of 'indexes', 'values', and 'mask' must be equal.
    Parameters
    ----------
    ary  : array_like
        The target array to write the values to.
    indexes : array_like, interpreted as integers
        Array or list of indexes that will be written to in 'ary'
    values : array_like
        Values to write into 'ary'
    mask : array_like, interpreted as booleans
        A mask that specifies which indexes and values to include and exclude | 
cond_scatter(ary, indexes, values, mask) | 
	[
  "scatter",
  "(",
  "ary",
  "indexes",
  "values",
  "mask",
  ")"
] | 
	def cond_scatter(ary, indexes, values, mask):
    """
    cond_scatter(ary, indexes, values, mask)
    Scatter 'values' into 'ary' selected by 'indexes' where 'mask' is true.
    The values of 'indexes' are absolute indexes into a flattened 'ary'.
    The shapes of 'indexes', 'values', and 'mask' must be equal.
    Parameters
    ----------
    ary  : array_like
        The target array to write the values to.
    indexes : array_like, interpreted as integers
        Array or list of indexes that will be written to in 'ary'
    values : array_like
        Values to write into 'ary'
    mask : array_like, interpreted as booleans
        A mask that specifies which indexes and values to include and exclude
    """
    from . import _bh
    indexes = array_manipulation.flatten(array_create.array(indexes, dtype=numpy.uint64), always_copy=False)
    values = array_manipulation.flatten(array_create.array(values, dtype=ary.dtype), always_copy=False)
    mask = array_manipulation.flatten(array_create.array(mask, dtype=numpy.bool), always_copy=False)
    assert (indexes.shape == values.shape and values.shape == mask.shape)
    if ary.size == 0 or indexes.size == 0:
        return
    # In order to ensure a contiguous array, we do the scatter on a flatten copy
    flat = array_manipulation.flatten(ary, always_copy=True)
    _bh.ufunc(_info.op['cond_scatter']['id'], (flat, values, indexes, mask))
    ary[...] = flat.reshape(ary.shape) | 
	[
  "def",
  "cond_scatter",
  "(",
  "ary",
  ",",
  "indexes",
  ",",
  "values",
  ",",
  "mask",
  ")",
  ":",
  "from",
  ".",
  "import",
  "_bh",
  "indexes",
  "=",
  "array_manipulation",
  ".",
  "flatten",
  "(",
  "array_create",
  ".",
  "array",
  "(",
  "indexes",
  ",",
  "dtype",
  "=",
  "numpy",
  ".",
  "uint64",
  ")",
  ",",
  "always_copy",
  "=",
  "False",
  ")",
  "values",
  "=",
  "array_manipulation",
  ".",
  "flatten",
  "(",
  "array_create",
  ".",
  "array",
  "(",
  "values",
  ",",
  "dtype",
  "=",
  "ary",
  ".",
  "dtype",
  ")",
  ",",
  "always_copy",
  "=",
  "False",
  ")",
  "mask",
  "=",
  "array_manipulation",
  ".",
  "flatten",
  "(",
  "array_create",
  ".",
  "array",
  "(",
  "mask",
  ",",
  "dtype",
  "=",
  "numpy",
  ".",
  "bool",
  ")",
  ",",
  "always_copy",
  "=",
  "False",
  ")",
  "assert",
  "(",
  "indexes",
  ".",
  "shape",
  "==",
  "values",
  ".",
  "shape",
  "and",
  "values",
  ".",
  "shape",
  "==",
  "mask",
  ".",
  "shape",
  ")",
  "if",
  "ary",
  ".",
  "size",
  "==",
  "0",
  "or",
  "indexes",
  ".",
  "size",
  "==",
  "0",
  ":",
  "return",
  "# In order to ensure a contiguous array, we do the scatter on a flatten copy",
  "flat",
  "=",
  "array_manipulation",
  ".",
  "flatten",
  "(",
  "ary",
  ",",
  "always_copy",
  "=",
  "True",
  ")",
  "_bh",
  ".",
  "ufunc",
  "(",
  "_info",
  ".",
  "op",
  "[",
  "'cond_scatter'",
  "]",
  "[",
  "'id'",
  "]",
  ",",
  "(",
  "flat",
  ",",
  "values",
  ",",
  "indexes",
  ",",
  "mask",
  ")",
  ")",
  "ary",
  "[",
  "...",
  "]",
  "=",
  "flat",
  ".",
  "reshape",
  "(",
  "ary",
  ".",
  "shape",
  ")"
] | 
	https://github.com/bh107/bohrium/blob/5b83e7117285fefc7779ed0e9acb0f8e74c7e068/bridge/npbackend/bohrium/reorganization.py#L393-L426 | ||
| 
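A hedged usage sketch, assuming `cond_scatter` is re-exported at the `bohrium` package level like its sibling functions; only positions whose mask entry is true are written. (Note that `numpy.bool` in the body above was removed in NumPy 1.24; modern code would use `bool` or `numpy.bool_`.)

```python
import bohrium as bh

ary = bh.zeros(6)
indexes = bh.array([0, 2, 4])
values = bh.array([10.0, 20.0, 30.0])
mask = bh.array([True, False, True])
bh.cond_scatter(ary, indexes, values, mask)
# Only masked writes land: [10., 0., 0., 0., 30., 0.]
```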
	benoitsteiner/tensorflow-opencl | 
	cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | 
	tensorflow/tools/docs/doc_generator_visitor.py | 
	python | 
	DocGeneratorVisitor.__call__ | 
	(self, parent_name, parent, children) | 
	Visitor interface, see `tensorflow/tools/common:traverse` for details.
    This method is called for each symbol found in a traversal using
    `tensorflow/tools/common:traverse`. It should not be called directly in
    user code.
    Args:
      parent_name: The fully qualified name of a symbol found during traversal.
      parent: The Python object referenced by `parent_name`.
      children: A list of `(name, py_object)` pairs enumerating, in alphabetical
        order, the children (as determined by `tf_inspect.getmembers`) of
          `parent`. `name` is the local name of `py_object` in `parent`.
    Raises:
      RuntimeError: If this visitor is called with a `parent` that is not a
        class or module. | 
	Visitor interface, see `tensorflow/tools/common:traverse` for details. | 
	[
  "Visitor",
  "interface",
  "see",
  "tensorflow",
  "/",
  "tools",
  "/",
  "common",
  ":",
  "traverse",
  "for",
  "details",
  "."
] | 
	def __call__(self, parent_name, parent, children):
    """Visitor interface, see `tensorflow/tools/common:traverse` for details.
    This method is called for each symbol found in a traversal using
    `tensorflow/tools/common:traverse`. It should not be called directly in
    user code.
    Args:
      parent_name: The fully qualified name of a symbol found during traversal.
      parent: The Python object referenced by `parent_name`.
      children: A list of `(name, py_object)` pairs enumerating, in alphabetical
        order, the children (as determined by `tf_inspect.getmembers`) of
          `parent`. `name` is the local name of `py_object` in `parent`.
    Raises:
      RuntimeError: If this visitor is called with a `parent` that is not a
        class or module.
    """
    parent_name = self._add_prefix(parent_name)
    self._index[parent_name] = parent
    self._tree[parent_name] = []
    if not (tf_inspect.ismodule(parent) or tf_inspect.isclass(parent)):
      raise RuntimeError('Unexpected type in visitor -- %s: %r' % (parent_name,
                                                                   parent))
    for i, (name, child) in enumerate(list(children)):
      # Don't document __metaclass__
      if name in ['__metaclass__']:
        del children[i]
        continue
      full_name = '.'.join([parent_name, name]) if parent_name else name
      self._index[full_name] = child
      self._tree[parent_name].append(name) | 
	[
  "def",
  "__call__",
  "(",
  "self",
  ",",
  "parent_name",
  ",",
  "parent",
  ",",
  "children",
  ")",
  ":",
  "parent_name",
  "=",
  "self",
  ".",
  "_add_prefix",
  "(",
  "parent_name",
  ")",
  "self",
  ".",
  "_index",
  "[",
  "parent_name",
  "]",
  "=",
  "parent",
  "self",
  ".",
  "_tree",
  "[",
  "parent_name",
  "]",
  "=",
  "[",
  "]",
  "if",
  "not",
  "(",
  "tf_inspect",
  ".",
  "ismodule",
  "(",
  "parent",
  ")",
  "or",
  "tf_inspect",
  ".",
  "isclass",
  "(",
  "parent",
  ")",
  ")",
  ":",
  "raise",
  "RuntimeError",
  "(",
  "'Unexpected type in visitor -- %s: %r'",
  "%",
  "(",
  "parent_name",
  ",",
  "parent",
  ")",
  ")",
  "for",
  "i",
  ",",
  "(",
  "name",
  ",",
  "child",
  ")",
  "in",
  "enumerate",
  "(",
  "list",
  "(",
  "children",
  ")",
  ")",
  ":",
  "# Don't document __metaclass__",
  "if",
  "name",
  "in",
  "[",
  "'__metaclass__'",
  "]",
  ":",
  "del",
  "children",
  "[",
  "i",
  "]",
  "continue",
  "full_name",
  "=",
  "'.'",
  ".",
  "join",
  "(",
  "[",
  "parent_name",
  ",",
  "name",
  "]",
  ")",
  "if",
  "parent_name",
  "else",
  "name",
  "self",
  ".",
  "_index",
  "[",
  "full_name",
  "]",
  "=",
  "child",
  "self",
  ".",
  "_tree",
  "[",
  "parent_name",
  "]",
  ".",
  "append",
  "(",
  "name",
  ")"
] | 
	https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/tools/docs/doc_generator_visitor.py#L125-L159 | ||
| 
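A hedged sketch of driving the visitor by hand with a toy module, mirroring one step of what `tensorflow/tools/common:traverse` would do; the no-argument construction is an assumption of this sketch.

```python
import types

mod = types.ModuleType("toy")
mod.answer = 42

visitor = DocGeneratorVisitor()  # construction assumed for the sketch
visitor("toy", mod, [("answer", mod.answer)])
# The visitor now indexes 'toy' and 'toy.answer' and records the child
# name 'answer' under the 'toy' entry of its tree.
```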
	aws/lumberyard | 
	f85344403c1c2e77ec8c75deb2c116e97b713217 | 
	dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/cgi.py | 
	python | 
	MiniFieldStorage.__init__ | 
	(self, name, value) | 
	Constructor from field name and value. | 
	Constructor from field name and value. | 
	[
  "Constructor",
  "from",
  "field",
  "name",
  "and",
  "value",
  "."
] | 
	def __init__(self, name, value):
        """Constructor from field name and value."""
        self.name = name
        self.value = value | 
	[
  "def",
  "__init__",
  "(",
  "self",
  ",",
  "name",
  ",",
  "value",
  ")",
  ":",
  "self",
  ".",
  "name",
  "=",
  "name",
  "self",
  ".",
  "value",
  "=",
  "value"
] | 
	https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/cgi.py#L282-L285 | ||
| 
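The constructor simply records the name/value pair; a one-line check:

```python
from cgi import MiniFieldStorage

field = MiniFieldStorage("q", "hello")
print(field.name, field.value)  # q hello
```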
	aws/lumberyard | 
	f85344403c1c2e77ec8c75deb2c116e97b713217 | 
	dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py | 
	python | 
	Entry.scan_mark | 
	(self, x) | 
	Remember the current X, Y coordinates. | 
	Remember the current X, Y coordinates. | 
	[
  "Remember",
  "the",
  "current",
  "X",
  "Y",
  "coordinates",
  "."
] | 
	def scan_mark(self, x):
        """Remember the current X, Y coordinates."""
        self.tk.call(self._w, 'scan', 'mark', x) | 
	[
  "def",
  "scan_mark",
  "(",
  "self",
  ",",
  "x",
  ")",
  ":",
  "self",
  ".",
  "tk",
  ".",
  "call",
  "(",
  "self",
  ".",
  "_w",
  ",",
  "'scan'",
  ",",
  "'mark'",
  ",",
  "x",
  ")"
] | 
	https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py#L2693-L2695 | ||
| 
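A short sketch of the usual pairing: `scan_mark` on button press records the anchor, and `scan_dragto` on motion scrolls relative to it (middle-button drag-to-scroll).

```python
import tkinter as tk

root = tk.Tk()
entry = tk.Entry(root, width=20)
entry.insert(0, "a long line of text to scroll horizontally")
entry.pack()
entry.bind("<ButtonPress-2>", lambda e: entry.scan_mark(e.x))
entry.bind("<B2-Motion>", lambda e: entry.scan_dragto(e.x))
root.mainloop()
```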
	wxWidgets/wxPython-Classic | 
	19571e1ae65f1ac445f5491474121998c97a1bf0 | 
	src/osx_carbon/_gdi.py | 
	python | 
	PseudoDC.SetFont | 
	(*args, **kwargs) | 
	return _gdi_.PseudoDC_SetFont(*args, **kwargs) | 
	SetFont(self, Font font)
        Sets the current font for the DC. It must be a valid font, in
        particular you should not pass ``wx.NullFont`` to this method. | 
	SetFont(self, Font font) | 
	[
  "SetFont",
  "(",
  "self",
  "Font",
  "font",
  ")"
] | 
	def SetFont(*args, **kwargs):
        """
        SetFont(self, Font font)
        Sets the current font for the DC. It must be a valid font, in
        particular you should not pass ``wx.NullFont`` to this method.
        """
        return _gdi_.PseudoDC_SetFont(*args, **kwargs) | 
	[
  "def",
  "SetFont",
  "(",
  "*",
  "args",
  ",",
  "*",
  "*",
  "kwargs",
  ")",
  ":",
  "return",
  "_gdi_",
  ".",
  "PseudoDC_SetFont",
  "(",
  "*",
  "args",
  ",",
  "*",
  "*",
  "kwargs",
  ")"
] | 
	https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L8216-L8223 | |
| 
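A minimal sketch honoring the docstring's constraint that the font must be valid (never ``wx.NullFont``); the font parameters are illustrative.

```python
import wx

app = wx.App(False)
pdc = wx.PseudoDC()
font = wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL,
               wx.FONTWEIGHT_NORMAL)
pdc.SetFont(font)  # a valid font is required; wx.NullFont is not allowed
```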
	wxWidgets/wxPython-Classic | 
	19571e1ae65f1ac445f5491474121998c97a1bf0 | 
	src/msw/_gdi.py | 
	python | 
	GraphicsContext.CreateLinearGradientBrush | 
	(*args) | 
	return _gdi_.GraphicsContext_CreateLinearGradientBrush(*args) | 
	CreateLinearGradientBrush(self, Double x1, Double y1, Double x2, Double y2, Colour c1, 
            Colour c2) -> GraphicsBrush
        CreateLinearGradientBrush(self, Double x1, Double y1, Double x2, Double y2, GraphicsGradientStops stops) -> GraphicsBrush
        Creates a native brush, having a linear gradient, starting at (x1,y1)
        to (x2,y2) with the given boundary colors or the specified stops. | 
	CreateLinearGradientBrush(self, Double x1, Double y1, Double x2, Double y2, Colour c1, 
            Colour c2) -> GraphicsBrush
        CreateLinearGradientBrush(self, Double x1, Double y1, Double x2, Double y2, GraphicsGradientStops stops) -> GraphicsBrush | 
	[
  "CreateLinearGradientBrush",
  "(",
  "self",
  "Double",
  "x1",
  "Double",
  "y1",
  "Double",
  "x2",
  "Double",
  "y2",
  "Colour",
  "c1",
  "Colour",
  "c2",
  ")",
  "-",
  ">",
  "GraphicsBrush",
  "CreateLinearGradientBrush",
  "(",
  "self",
  "Double",
  "x1",
  "Double",
  "y1",
  "Double",
  "x2",
  "Double",
  "y2",
  "GraphicsGradientStops",
  "stops",
  ")",
  "-",
  ">",
  "GraphicsBrush"
] | 
	def CreateLinearGradientBrush(*args):
        """
        CreateLinearGradientBrush(self, Double x1, Double y1, Double x2, Double y2, Colour c1, 
            Colour c2) -> GraphicsBrush
        CreateLinearGradientBrush(self, Double x1, Double y1, Double x2, Double y2, GraphicsGradientStops stops) -> GraphicsBrush
        Creates a native brush, having a linear gradient, starting at (x1,y1)
        to (x2,y2) with the given boundary colors or the specified stops.
        """
        return _gdi_.GraphicsContext_CreateLinearGradientBrush(*args) | 
	[
  "def",
  "CreateLinearGradientBrush",
  "(",
  "*",
  "args",
  ")",
  ":",
  "return",
  "_gdi_",
  ".",
  "GraphicsContext_CreateLinearGradientBrush",
  "(",
  "*",
  "args",
  ")"
] | 
	https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_gdi.py#L6255-L6264 | |
| 
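A hedged paint-handler sketch for the two-colour overload: the brush fades from white at (0, 0) to black at (0, 100); the widget and geometry are illustrative.

```python
import wx

def on_paint(event):
    panel = event.GetEventObject()
    gc = wx.GraphicsContext.Create(wx.PaintDC(panel))
    # Linear gradient between the two boundary colours.
    brush = gc.CreateLinearGradientBrush(0, 0, 0, 100, wx.WHITE, wx.BLACK)
    gc.SetBrush(brush)
    gc.DrawRectangle(0, 0, 100, 100)
```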
	miyosuda/TensorFlowAndroidDemo | 
	35903e0221aa5f109ea2dbef27f20b52e317f42d | 
	jni-build/jni/include/tensorflow/contrib/losses/python/losses/loss_ops.py | 
	python | 
	get_losses | 
	(scope=None) | 
	return ops.get_collection(ops.GraphKeys.LOSSES, scope) | 
	Gets the list of loss variables.
  Args:
    scope: an optional scope for filtering the losses to return.
  Returns:
    a list of loss variables. | 
	Gets the list of loss variables. | 
	[
  "Gets",
  "the",
  "list",
  "of",
  "loss",
  "variables",
  "."
] | 
	def get_losses(scope=None):
  """Gets the list of loss variables.
  Args:
    scope: an optional scope for filtering the losses to return.
  Returns:
    a list of loss variables.
  """
  return ops.get_collection(ops.GraphKeys.LOSSES, scope) | 
	[
  "def",
  "get_losses",
  "(",
  "scope",
  "=",
  "None",
  ")",
  ":",
  "return",
  "ops",
  ".",
  "get_collection",
  "(",
  "ops",
  ".",
  "GraphKeys",
  ".",
  "LOSSES",
  ",",
  "scope",
  ")"
] | 
	https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/losses/python/losses/loss_ops.py#L204-L213 | 
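A TF1-era sketch: losses added to the `LOSSES` graph collection come back from `get_losses`, optionally filtered by scope. The scope name and import path follow the row above; both are illustrative.

```python
import tensorflow as tf  # TF1-era API
from tensorflow.contrib.losses.python.losses import loss_ops

with tf.name_scope("tower_0"):
    loss = tf.constant(1.0, name="my_loss")
    tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

print(loss_ops.get_losses(scope="tower_0"))  # [<tf.Tensor 'tower_0/my_loss:0' ...>]
```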