repo_name
string | dataset
string | owner
string | lang
string | func_name
string | code
string | docstring
string | url
string | sha
string |
|---|---|---|---|---|---|---|---|---|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
WorkspaceService.existsByName
|
private async existsByName(
name: string,
userId: User['id']
): Promise<boolean> {
return (
(await this.prisma.workspace.count({
where: {
name,
ownerId: userId
}
})) > 0
)
}
|
/**
* Checks if a workspace with the given name exists for the given user.
* @param name The name of the workspace to check for
* @param userId The ID of the user to check for
* @returns True if the workspace exists, false otherwise
* @private
*/
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/apps/api/src/workspace/service/workspace.service.ts#L656-L668
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
WorkspaceService.getProjectsOfWorkspace
|
/**
 * Counts the projects in a workspace that the user is authorized to read.
 * @param workspaceId The ID of the workspace whose projects are inspected
 * @param userId The ID of the user whose READ_PROJECT authority is checked
 * @returns The number of projects the user may access in the workspace
 * @private
 */
private async getProjectsOfWorkspace(
  workspaceId: Workspace['id'],
  userId: User['id']
) {
  const workspaceProjects = await this.prisma.project.findMany({
    where: { workspaceId }
  })
  let accessible = 0
  // Authority checks are awaited one at a time, mirroring the original flow.
  for (const { slug } of workspaceProjects) {
    const canRead =
      await this.authorityCheckerService.checkAuthorityOverProject({
        userId,
        entity: { slug },
        authorities: [Authority.READ_PROJECT],
        prisma: this.prisma
      })
    if (canRead) {
      accessible++
    }
  }
  return accessible
}
|
/**
* Retrieves the count of projects within a workspace that a user has permission to access.
*
* @param workspaceId The ID of the workspace to retrieve projects from.
* @param userId The ID of the user whose access permissions are being checked.
* @returns The number of projects the user has authority to access within the specified workspace.
* @private
*/
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/apps/api/src/workspace/service/workspace.service.ts#L679-L705
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
ListProfile.printProfile
|
/**
 * Prints the profile information as a formatted table on stdout.
 *
 * @param profiles - The profile configuration object, keyed by profile name.
 * @param defaultProfile - The name of the default profile; it is rendered with a "(default)" suffix.
 * @param verbose - When true, also prints the API key, base URL, and metrics columns.
 * @private
 */
private printProfile(
  profiles: ProfileConfig,
  defaultProfile: string,
  verbose: boolean
) {
  // NOTE(review): these border glyphs appear mojibake-mangled (they were
  // presumably Unicode box-drawing characters originally) — confirm against
  // the upstream source before relying on the rendered output.
  const table = new Table({
    chars: {
      top: 'β',
      'top-mid': 'β€',
      'top-left': 'β',
      'top-right': 'β',
      bottom: 'β',
      'bottom-mid': 'β§',
      'bottom-left': 'β',
      'bottom-right': 'β',
      left: 'β',
      'left-mid': 'β',
      mid: 'β',
      'mid-mid': 'βΌ',
      right: 'β',
      'right-mid': 'β’',
      middle: 'β'
    }
  })
  if (verbose) {
    // Verbose mode: one row per profile with all columns.
    const profileList = []
    Object.keys(profiles).forEach((profile) => {
      profileList.push([
        `${defaultProfile === profile ? `${profile} (default)` : profile}`,
        `${profiles[profile].apiKey}`,
        `${profiles[profile].baseUrl}`,
        `${profiles[profile].metrics_enabled ? 'Yes' : 'No'}`
      ])
    })
    table.push(
      ['Profile', 'API Key', 'Base URL', 'Metrics Enabled'],
      // eslint-disable-next-line @typescript-eslint/no-unsafe-argument
      ...profileList
    )
  } else {
    // Compact mode: profile names only.
    const profileList = []
    Object.keys(profiles).forEach((profile) => {
      profileList.push([
        `${defaultProfile === profile ? `${profile} (default)` : profile}`
      ])
    })
    // eslint-disable-next-line @typescript-eslint/no-unsafe-argument
    table.push(['Profile'], ...profileList)
  }
  console.log(table.toString())
}
|
/**
* Prints the profile information in a formatted table.
*
* @param profiles - The profile configuration object.
* @param defaultProfile - The name of the default profile.
* @param verbose - A boolean indicating whether to display additional information.
*/
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/apps/cli/src/commands/profile/list.profile.ts#L50-L102
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
formatDate
|
/**
 * Renders a date (or date string) as e.g. "5 June, 2022 6:45 PM".
 * @param date The date to format.
 * @returns The formatted date string.
 */
const formatDate: (date: Date | string) => string = (date) =>
  dayjs(date).format('D MMMM, YYYY h:mm A')
|
/**
* Format a date as a string.
*
* @param {Date|string} date The date to format.
* @returns {string} The formatted date string.
*
* @example
* formatDate(new Date()) // '5 June, 2022 6:45 PM'
* formatDate('2022-06-05T18:45:00.000Z') // '5 June, 2022 6:45 PM'
*/
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/apps/cli/src/util/date-formatter.ts#L13-L15
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
APIClient.get
|
/**
 * Issues a GET request with JSON content-type and credentials included.
 * @param url - The URL to send the GET request to.
 * @param headers - Optional headers merged over the JSON default.
 * @returns A Promise that resolves to the raw Response.
 */
get(url: string, headers?: Record<string, string>): Promise<Response> {
  const mergedHeaders = {
    'Content-Type': 'application/json',
    ...headers
  }
  return this.request(url, {
    method: 'GET',
    headers: mergedHeaders,
    credentials: 'include'
  })
}
|
/**
 * Sends a GET request to the specified URL and returns a Promise that resolves to the response data.
 * @param url - The URL to send the GET request to.
 * @param headers - Optional headers to include in the request.
 * @returns A Promise that resolves to the response data.
 */
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/packages/api-client/src/core/client.ts#L13-L22
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
APIClient.post
|
/**
 * Sends a POST request to the specified URL with the provided data as a JSON body.
 *
 * @param url - The URL to send the request to.
 * @param data - The data serialized (via JSON.stringify) into the request body.
 * @param headers - Optional headers merged over the JSON default.
 * @returns A Promise that resolves to the raw Response.
 */
post(
  url: string,
  // `unknown` instead of `any`: callers can still pass anything (backward
  // compatible), but the body can no longer be misused without narrowing.
  data: unknown,
  headers?: Record<string, string>
): Promise<Response> {
  return this.request(url, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      ...headers
    },
    body: JSON.stringify(data),
    credentials: 'include'
  })
}
|
/**
 * Sends a POST request to the specified URL with the provided data.
 *
 * @param url - The URL to send the request to.
 * @param data - The data to send in the request body.
 * @param headers - Optional headers to include in the request.
 * @returns A Promise that resolves to the response data.
 */
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/packages/api-client/src/core/client.ts#L31-L45
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
APIClient.put
|
/**
 * Sends a PUT request to the specified URL with the provided data as a JSON body.
 *
 * @param url - The URL to send the request to.
 * @param data - The data serialized (via JSON.stringify) into the request body.
 * @param headers - Optional headers merged over the JSON default.
 * @returns A Promise that resolves to the raw Response.
 */
put(
  url: string,
  // `unknown` instead of `any`: callers can still pass anything (backward
  // compatible), but the body can no longer be misused without narrowing.
  data: unknown,
  headers?: Record<string, string>
): Promise<Response> {
  return this.request(url, {
    method: 'PUT',
    headers: {
      'Content-Type': 'application/json',
      ...headers
    },
    body: JSON.stringify(data),
    credentials: 'include'
  })
}
|
/**
 * Sends a PUT request to the specified URL with the provided data.
 *
 * @param url - The URL to send the request to.
 * @param data - The data to be sent in the request body.
 * @param headers - Optional headers to include in the request.
 * @returns A Promise that resolves to the response data.
 */
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/packages/api-client/src/core/client.ts#L54-L68
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
APIClient.delete
|
delete(url: string, headers?: Record<string, string>): Promise<Response> {
return this.request(url, {
method: 'DELETE',
headers: {
'Content-Type': 'application/json',
...headers
},
credentials: 'include'
})
}
|
/**
* Sends a DELETE request to the specified URL and returns a Promise that resolves to the response data.
*
* @param url - The URL to send the DELETE request to.
* @param headers - Optional headers to include in the request.
* @returns A Promise that resolves to the response data.
*/
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/packages/api-client/src/core/client.ts#L77-L86
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
SecretDetector.detect
|
/**
 * Scans an input string against every known secret pattern.
 * @param input - The string to scan for secret patterns.
 * @returns A `SecretResult`; `found` is true (with the matching regex) when
 * the first pattern hits, otherwise `found` is false.
 */
detect(input: string): SecretResult {
  const matchingPattern = this.patterns.find((pattern) => pattern.test(input))
  return matchingPattern
    ? { found: true, regex: matchingPattern }
    : { found: false }
}
|
/**
* Detects if a given input string contains any secret patterns.
* @param input - The input string to scan for secret patterns.
* @returns A `SecretResult` object indicating whether any secret patterns were found.
*/
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/packages/secret-scan/src/index.ts#L17-L24
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
keyshade
|
github_2023
|
keyshade-xyz
|
typescript
|
SecretDetector.scanJsObject
|
/**
 * Splits a flat js object into secrets and plain variables using secret
 * pattern detection on each `key=value` pair.
 * @param input - The object to scan for secret patterns.
 * @returns A `JsObjectScanResult` with the secrets and variables found.
 */
scanJsObject(input: Record<string, string>): JsObjectScanResult {
  const secrets: Record<string, string> = {}
  const variables: Record<string, string> = {}
  for (const [key, value] of Object.entries(input)) {
    const bucket = this.detect(key + '=' + value).found ? secrets : variables
    bucket[key] = value
  }
  return { secrets, variables }
}
|
/**
* Detects if a given js object contains any secret patterns.
* @param input - The object to scan for secret patterns.
* @returns A `JsObjectScanResult` object containing the secrets and variables found in the object.
*/
|
https://github.com/keyshade-xyz/keyshade/blob/557b3b63dd7c589d484d4eab0b46e90a7c696af3/packages/secret-scan/src/index.ts#L31-L46
|
557b3b63dd7c589d484d4eab0b46e90a7c696af3
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getSpanIdFromAnnotations
|
/**
 * Extracts the span ID from streamed message annotations, if present.
 * Annotations are a next.js construct carrying extra message context; the
 * first annotation that is an object with a string `spanId` wins.
 * @param annotations optional list of message annotations
 * @returns the span ID string, or undefined when none is found
 */
const getSpanIdFromAnnotations = (annotations?: JSONValue[]) => {
  if (!annotations) return;
  for (const candidate of annotations) {
    const hasStringSpanId =
      candidate != null &&
      typeof candidate === "object" &&
      "spanId" in candidate &&
      typeof candidate.spanId === "string";
    if (hasStringSpanId) {
      return candidate.spanId;
    }
  }
};
|
/**
* Messages streamed back from the server may contain annotations.
* These provide additional context about the message, such as the span ID.
* These annotations are a next.js construct and do not relate to span annotations.
* Here we extract the span ID associated with the message from the message annotations.
* This allows us to associate feedback with the correct message.
* @param annotations
* @returns
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/examples/openai/frontend/app/components/transform.ts#L18-L30
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatStartActiveSpanParams
|
/**
 * Normalizes the overloaded startActiveSpan arguments into a single shape.
 * The callback may arrive in any of the three positions; options and context
 * default to {} and the active context respectively.
 * Adapted from the OpenTelemetry SDK Tracer implementation.
 */
function formatStartActiveSpanParams<F extends OpenInferenceActiveSpanCallback>(
  arg2?: SpanOptions | F,
  arg3?: Context | F,
  arg4?: F,
) {
  let fn: F;
  let opts: SpanOptions | undefined;
  let ctx: Context | undefined;
  if (typeof arg2 === "function") {
    // (fn) overload
    fn = arg2;
  } else if (typeof arg3 === "function") {
    // (opts, fn) overload
    opts = arg2;
    fn = arg3;
  } else {
    // (opts, ctx, fn) overload
    opts = arg2;
    ctx = arg3;
    fn = arg4 as F;
  }
  return {
    opts: opts ?? {},
    ctx: ctx ?? apiContext.active(),
    fn,
  };
}
|
/**
* Formats the params for the startActiveSpan method
* The method has multiple overloads, so we need to format the arguments
* Taken from @see https://github.com/open-telemetry/opentelemetry-js/blob/main/packages/opentelemetry-sdk-trace-base/src/Tracer.ts#L220C3-L235C6
*
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-core/src/trace/trace-config/OITracer.ts#L23-L47
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
OITracer.constructor
|
/**
 * @param tracer The OpenTelemetry {@link Tracer} to wrap
 * @param traceConfig Optional {@link TraceConfigOptions} controlling the
 * behavior of the wrapped tracer
 */
constructor({
  tracer,
  traceConfig,
}: {
  tracer: Tracer;
  traceConfig?: TraceConfigOptions;
}) {
  this.config = generateTraceConfig(traceConfig);
  this.tracer = tracer;
}
|
/**
*
* @param tracer The OpenTelemetry {@link Tracer} to wrap
* @param traceConfig The {@link TraceConfigOptions} to set to control the behavior of the tracer
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-core/src/trace/trace-config/OITracer.ts#L60-L69
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
parseOption
|
/**
 * Resolves a trace-config option with precedence:
 * explicit optionValue > environment variable > metadata default.
 * @param optionValue - The explicitly provided value, if any.
 * @param optionMetadata - The option's type, default, and env-var key.
 */
function parseOption({
  optionValue,
  optionMetadata,
}: {
  optionValue?: number | boolean;
  optionMetadata: TraceConfigOptionMetadata;
}) {
  // An explicitly passed value always wins.
  if (optionValue !== undefined) {
    return optionValue;
  }
  const envValue = process.env[optionMetadata.envKey];
  if (envValue === undefined) {
    return optionMetadata.default;
  }
  switch (optionMetadata.type) {
    case "number": {
      const parsed = safelyParseInt(envValue);
      // Fall back to the default on unparseable env values.
      return parsed != null && !isNaN(parsed) ? parsed : optionMetadata.default;
    }
    case "boolean":
      return envValue.toLowerCase() === "true";
    default:
      assertUnreachable(optionMetadata);
  }
  return optionMetadata.default;
}
|
/**
* Parses an option based on its type
* The order of precedence is: optionValue > envValue > defaultValue
* @param key - The key of the option.
* @param optionMetadata - The {@link TraceConfigOptionMetadata} for the option which includes its type, default value, and environment variable key.
*
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-core/src/trace/trace-config/traceConfig.ts#L16-L43
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
isObject
|
/**
 * Type guard: true only for non-null, non-array values of type "object".
 * @param value the value to test
 * @returns true if the value is a plain object, false otherwise
 */
function isObject(
  value: unknown,
): value is Record<string | number | symbol, unknown> {
  if (typeof value !== "object" || value === null) {
    return false;
  }
  return !Array.isArray(value);
}
|
/**
* Type guard to determine whether or not a value is an object.
* @param value
* @returns true if the value is an object, false otherwise.
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-core/src/utils/typeUtils.ts#L18-L22
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
LangChainTracer._startTrace
|
/**
 * Called when a new run is created on langchain v0.1.0; see {@link BaseTracer}.
 * Replaced by onRunCreate in langchain 0.2 — both versions are supported, so
 * the base-class method is invoked only when it actually exists at runtime.
 * @param run the langchain {@link Run} object
 */
protected async _startTrace(run: Run) {
  // @ts-ignore is needed here, presumably because newer langchain type
  // definitions no longer declare _startTrace on the base class — the
  // runtime check below guards the actual call.
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-ignore
  if (typeof super._startTrace === "function") {
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-ignore
    await super._startTrace(run);
  }
  // Start OpenInference tracing for the run regardless of langchain version.
  await this.startTracing(run);
}
|
/**
* Called when a new run is created on v0.1.0 of langchain see {@link BaseTracer}
* @param run the langchain {@link Run} object
*
* This method is only available on langchain ^0.1.0 BaseTracer and has been replaced in 0.2 by onRunCreate
* we support both 0.1 and 0.2 so we need to check if the method exists on the super class before calling it
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/tracer.ts#L52-L61
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
LangChainTracer.onRunCreate
|
/**
 * Called when a new run is created on langchain v0.2.0; see {@link BaseTracer}.
 * Delegates to the base implementation when present (the hook only exists on
 * langchain ^0.2.0), then starts OpenInference tracing for the run.
 * @param run the langchain {@link Run} object
 */
async onRunCreate(run: Run) {
  if (typeof super.onRunCreate === "function") {
    await super.onRunCreate(run);
  }
  await this.startTracing(run);
}
|
/**
* Called when a new run is created on v0.2.0 of langchain see {@link BaseTracer}
* @param run the langchain {@link Run} object
*
* This method is only available on the langchain ^0.2.0 {@link BaseTracer}
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/tracer.ts#L69-L74
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
onError
|
/**
 * Builds a handler for unexpected errors during langchain run processing.
 * The returned function logs a warning via the OpenTelemetry diag logger;
 * processing then falls back to null rather than throwing.
 * @param message context describing what was being processed
 */
const onError = (message: string) => {
  return (error: unknown) => {
    diag.warn(
      `OpenInference-LangChain: error processing langchain run, falling back to null. ${message}. ${error}`,
    );
  };
};
|
/**
* Handler for any unexpected errors that occur during processing.
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L43-L47
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
flattenAttributes
|
/**
 * Flattens a nested object into a single-level object whose keys are
 * dot-separated paths; array elements contribute their index to the path.
 * Null/undefined values are dropped.
 * @param attributes - Nested attributes to flatten.
 * @param baseKey - Path prefix prepended to all keys.
 * @returns Flattened attributes.
 */
function flattenAttributes(
  attributes: Record<string, unknown>,
  baseKey: string = "",
): Attributes {
  const flattened: Attributes = {};
  for (const key in attributes) {
    const path = baseKey ? `${baseKey}.${key}` : key;
    const value = attributes[key];
    if (value == null) {
      continue;
    }
    if (isObject(value)) {
      // Recurse into nested objects.
      Object.assign(flattened, flattenAttributes(value, path));
    } else if (Array.isArray(value)) {
      value.forEach((element, index) => {
        if (isObject(element)) {
          Object.assign(flattened, flattenAttributes(element, `${path}.${index}`));
        } else {
          flattened[`${path}.${index}`] = element;
        }
      });
    } else if (isAttributeValue(value)) {
      flattened[path] = value;
    }
  }
  return flattened;
}
|
/**
* Flattens a nested object into a single level object with keys as dot-separated paths.
* Specifies elements in arrays with their index as part of the path.
* @param attributes - Nested attributes to flatten.
* @param baseKey - Base key to prepend to all keys.
* @returns Flattened attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L61-L89
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getOpenInferenceSpanKindFromRunType
|
/**
 * Maps a langchain run type to an OpenInferenceSpanKind.
 * Anything containing "AGENT" becomes AGENT; an exact (case-insensitive)
 * enum-key match is used next; everything else falls back to CHAIN.
 * @param runType - The langchain run type
 */
function getOpenInferenceSpanKindFromRunType(runType: string) {
  const upperRunType = runType.toUpperCase();
  if (upperRunType.includes("AGENT")) {
    return OpenInferenceSpanKind.AGENT;
  }
  return upperRunType in OpenInferenceSpanKind
    ? OpenInferenceSpanKind[upperRunType as keyof typeof OpenInferenceSpanKind]
    : OpenInferenceSpanKind.CHAIN;
}
|
/**
 * Gets the OpenInferenceSpanKind based on the langchain run type.
 * @param runType - The langchain run type
 * @returns The OpenInferenceSpanKind matching the run type, falling back to "CHAIN" when the run type is not recognized.
 */
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L96-L108
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatIO
|
/**
 * Formats the input or output of a langchain run into OpenInference span
 * attributes. A single string value is recorded as TEXT; anything else is
 * JSON-stringified.
 * @param ioConfig.io - The input or output of a langchain run
 * @param ioConfig.ioType - Whether this is the run's "input" or "output"
 * @returns The formatted value/mime-type attributes for the span
 */
function formatIO({
  io,
  ioType,
}: {
  io: Run["inputs"] | Run["outputs"];
  ioType: "input" | "output";
}) {
  let valueAttribute: string;
  let mimeTypeAttribute: string;
  if (ioType === "input") {
    valueAttribute = SemanticConventions.INPUT_VALUE;
    mimeTypeAttribute = SemanticConventions.INPUT_MIME_TYPE;
  } else if (ioType === "output") {
    valueAttribute = SemanticConventions.OUTPUT_VALUE;
    mimeTypeAttribute = SemanticConventions.OUTPUT_MIME_TYPE;
  } else {
    assertUnreachable(ioType);
  }
  if (io == null) {
    return {};
  }
  const ioValues = Object.values(io);
  // A lone string value is captured directly as plain text.
  if (ioValues.length === 1 && typeof ioValues[0] === "string") {
    return {
      [valueAttribute]: ioValues[0],
      [mimeTypeAttribute]: MimeType.TEXT,
    };
  }
  return {
    [valueAttribute]: safelyJSONStringify(io),
    [mimeTypeAttribute]: MimeType.JSON,
  };
}
|
/**
* Formats the input or output of a langchain run into OpenInference attributes for a span.
* @param ioConfig - The input or output of a langchain run and the type of IO
* @param ioConfig.io - The input or output of a langchain run
* @param ioConfig.ioType - The type of IO
* @returns The formatted input or output attributes for the span
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L117-L155
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getRoleFromMessageData
|
/**
 * Derives a chat role from langchain message data by inspecting the last
 * entry of lc_id (the message class name). Substring checks run in a fixed
 * order; generic "chat" messages read the role from kwargs.
 * @param messageData - The langchain message data to extract the role from
 * @returns The role of the message, or null when it cannot be determined
 */
function getRoleFromMessageData(
  messageData: Record<string, unknown>,
): string | null {
  const lcIds = messageData.lc_id;
  if (!isNonEmptyArray(lcIds)) {
    return null;
  }
  const lastId = lcIds[lcIds.length - 1];
  const className = isString(lastId) ? lastId.toLowerCase() : "";
  // Checked in order; e.g. "human" must be tested before the generic cases.
  const roleBySubstring: Array<[substring: string, role: string]> = [
    ["human", "user"],
    ["ai", "assistant"],
    ["system", "system"],
    ["function", "function"],
  ];
  for (const [substring, role] of roleBySubstring) {
    if (className.includes(substring)) {
      return role;
    }
  }
  if (
    className.includes("chat") &&
    isObject(messageData.kwargs) &&
    isString(messageData.kwargs.role)
  ) {
    return messageData.kwargs.role;
  }
  return null;
}
|
/**
* Gets the role of a message from the langchain message data.
* @param messageData - The langchain message data to extract the role from
* @returns The role of the message or null
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L162-L193
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getContentFromMessageData
|
/**
 * Reads the string content of a message from its langchain kwargs.
 * @param messageKwargs - The langchain message kwargs to read from
 * @returns The message content, or null when it is not a string
 */
function getContentFromMessageData(
  messageKwargs: Record<string, unknown>,
): string | null {
  const { content } = messageKwargs;
  if (isString(content)) {
    return content;
  }
  return null;
}
|
/**
* Gets the content of a message from the langchain message kwargs.
* @param messageKwargs - The langchain message kwargs to extract the content from
* @returns The content of the message or null
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L200-L204
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getToolCallDataFromAdditionalKwargs
|
/**
 * Extracts tool calls from a langchain message's additional kwargs and
 * formats them into OpenInference attributes.
 * @param additionalKwargs - The langchain message additional kwargs to extract the tool calls from
 * @returns The OpenInference attributes for the tool calls (empty object when there are none)
 */
function getToolCallDataFromAdditionalKwargs(
  additionalKwargs: Record<string, unknown>,
): LLMMessageToolCalls {
  const toolCalls = additionalKwargs.tool_calls;
  if (!Array.isArray(toolCalls)) {
    return {};
  }
  const formattedToolCalls = toolCalls.map((toolCall) => {
    // BUG FIX: the original condition used `&&`, so a non-object toolCall
    // was not skipped and `toolCall.function` was still evaluated — throwing
    // a TypeError for null entries. Either failing check must short-circuit
    // to an empty entry, hence `||`.
    if (!isObject(toolCall) || !isObject(toolCall.function)) {
      return {};
    }
    const toolCallName = isString(toolCall.function.name)
      ? toolCall.function.name
      : undefined;
    const toolCallArgs = isString(toolCall.function.arguments)
      ? toolCall.function.arguments
      : undefined;
    return {
      [SemanticConventions.TOOL_CALL_FUNCTION_NAME]: toolCallName,
      [SemanticConventions.TOOL_CALL_FUNCTION_ARGUMENTS_JSON]: toolCallArgs,
    };
  });
  return {
    [SemanticConventions.MESSAGE_TOOL_CALLS]: formattedToolCalls,
  };
}
|
/**
* Gets the tool calls from the langchain message additional kwargs and formats them into OpenInference attributes.
* @param additionalKwargs - The langchain message additional kwargs to extract the tool calls from
* @returns the OpenInference attributes for the tool calls
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L231-L256
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
parseMessage
|
/**
 * Parses a langchain message into OpenInference message attributes:
 * role, content, and any function/tool call data from additional kwargs.
 * @param messageData - The langchain message data to parse
 * @returns The OpenInference attributes for the message
 */
function parseMessage(messageData: Record<string, unknown>): LLMMessage {
  const message: LLMMessage = {};
  const role = getRoleFromMessageData(messageData);
  if (role != null) {
    message[SemanticConventions.MESSAGE_ROLE] = role;
  }
  const kwargs = messageData.lc_kwargs;
  if (!isObject(kwargs)) {
    return message;
  }
  const content = getContentFromMessageData(kwargs);
  if (content != null) {
    message[SemanticConventions.MESSAGE_CONTENT] = content;
  }
  const additionalKwargs = kwargs.additional_kwargs;
  if (!isObject(additionalKwargs)) {
    return message;
  }
  Object.assign(
    message,
    getFunctionCallDataFromAdditionalKwargs(additionalKwargs),
    getToolCallDataFromAdditionalKwargs(additionalKwargs),
  );
  return message;
}
|
/**
* Parses a langchain message into OpenInference attributes.
* @param messageData - The langchain message data to parse
* @returns The OpenInference attributes for the message
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L263-L289
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatInputMessages
|
/**
 * Formats the input messages of a langchain run into OpenInference attributes.
 * Only the first 'set' of messages is supported; non-object entries are skipped.
 * @param input - The input of a langchain run.
 * @returns The OpenInference input-message attributes, or null when none.
 */
function formatInputMessages(
  input: Run["inputs"],
): LLMMessagesAttributes | null {
  const maybeMessages = input.messages;
  if (!isNonEmptyArray(maybeMessages)) {
    return null;
  }
  const firstMessages = maybeMessages[0];
  if (!isNonEmptyArray(firstMessages)) {
    return null;
  }
  const parsedMessages = firstMessages
    .filter(isObject)
    .map((messageData) => parseMessage(messageData));
  return parsedMessages.length > 0
    ? { [SemanticConventions.LLM_INPUT_MESSAGES]: parsedMessages }
    : null;
}
|
/**
* Formats the input messages of a langchain run into OpenInference attributes.
* @param input - The input of a langchain run.
* @returns The OpenInference attributes for the input messages.
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L296-L323
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getFirstOutputGeneration
|
/**
 * Gets the first generation 'set' from a langchain run's output.
 * @param output - The output of a langchain run.
 * @returns The first non-empty generation array, or null.
 */
function getFirstOutputGeneration(output: Run["outputs"]) {
  if (!isObject(output) || !isNonEmptyArray(output.generations)) {
    return null;
  }
  // Only the first 'set' of generations is supported.
  const firstGeneration = output.generations[0];
  return isNonEmptyArray(firstGeneration) ? firstGeneration : null;
}
|
/**
* Gets the first generation of the output of a langchain run.
* @param output - The output of a langchain run.
* @returns The first generation of the output or null.
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L330-L344
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatOutputMessages
|
/**
 * Formats the output messages of a langchain run into OpenInference attributes.
 * Generations lacking an object `message` are skipped.
 * @param output - The output of a langchain run.
 * @returns The OpenInference output-message attributes, or null when none.
 */
function formatOutputMessages(
  output: Run["outputs"],
): LLMMessagesAttributes | null {
  const generations = getFirstOutputGeneration(output);
  if (generations == null) {
    return null;
  }
  const parsedMessages: LLMMessage[] = [];
  for (const generation of generations) {
    if (isObject(generation) && isObject(generation.message)) {
      parsedMessages.push(parseMessage(generation.message));
    }
  }
  return parsedMessages.length > 0
    ? { [SemanticConventions.LLM_OUTPUT_MESSAGES]: parsedMessages }
    : null;
}
|
/**
* Formats the output messages of a langchain run into OpenInference attributes.
* @param output - The output of a langchain run.
* @returns The OpenInference attributes for the output messages.
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L351-L371
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
parseRetrievalDocument
|
function parseRetrievalDocument(document: unknown) {
if (!isObject(document)) {
return null;
}
const parsedDocument: RetrievalDocument = {};
if (isString(document.pageContent)) {
parsedDocument["document.content"] = document.pageContent;
}
if (isObject(document.metadata)) {
parsedDocument["document.metadata"] =
safelyJSONStringify(document.metadata) ?? undefined;
}
return parsedDocument;
}
|
/**
* Parses a langchain retrieval document into OpenInference attributes.
* @param document - The langchain retrieval document to parse
* @returns The OpenInference attributes for the retrieval document
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L378-L391
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatRetrievalDocuments
|
/**
 * Formats the retrieval documents of a "retriever" langchain run into
 * OpenInference attributes; other run types yield null.
 * @param run - The langchain run to extract the retrieval documents from
 * @returns The retrieval-document attributes, or null.
 */
function formatRetrievalDocuments(run: Run) {
  if (run.run_type.toLowerCase() !== "retriever") {
    return null;
  }
  const outputs = run.outputs;
  if (!isObject(outputs) || !Array.isArray(outputs.documents)) {
    return null;
  }
  const parsedDocuments = outputs.documents
    .map(parseRetrievalDocument)
    .filter((parsed) => parsed != null);
  return { [RETRIEVAL_DOCUMENTS]: parsedDocuments };
}
|
/**
* Formats the retrieval documents of a langchain run into OpenInference attributes.
* @param run - The langchain run to extract the retrieval documents from
* @returns The OpenInference attributes for the retrieval documents.
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L398-L411
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatLLMParams
|
/**
 * Formats a langchain run's invocation parameters (and, best-effort, the
 * model name) into OpenInference attributes.
 * @param runExtra - The extra data from a langchain run
 * @returns The LLM parameter attributes, or null when unavailable
 */
function formatLLMParams(
  runExtra: Run["extra"],
): LLMParameterAttributes | null {
  if (!isObject(runExtra) || !isObject(runExtra.invocation_params)) {
    return null;
  }
  const invocationParams = runExtra.invocation_params;
  const openInferenceParams: LLMParameterAttributes = {
    [SemanticConventions.LLM_INVOCATION_PARAMETERS]:
      safelyJSONStringify(invocationParams) ?? undefined,
  };
  // Prefer `model_name`, then `model`; otherwise omit the attribute.
  if (isString(invocationParams.model_name)) {
    openInferenceParams[SemanticConventions.LLM_MODEL_NAME] =
      invocationParams.model_name;
  } else if (isString(invocationParams.model)) {
    openInferenceParams[SemanticConventions.LLM_MODEL_NAME] =
      invocationParams.model;
  }
  return openInferenceParams;
}
|
/**
 * Formats the invocation parameters (and, best-effort, the model name) from the langchain run extra data.
 * @param runExtra - The extra data from a langchain run
 * @returns The OpenInference attributes for the invocation parameters and model name
 */
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L418-L437
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatPromptTemplate
|
/**
 * A best-effort extraction of the prompt template from a "prompt" langchain
 * run; other run types yield null.
 * @param run - The langchain run to extract the prompt template from
 * @returns The OpenInference prompt-template attributes, or null
 */
function formatPromptTemplate(run: Run): PromptTemplateAttributes | null {
  if (run.run_type.toLowerCase() !== "prompt") {
    return null;
  }
  const variables = safelyJSONStringify(run.inputs) ?? undefined;
  const template = safelyGetTemplateFromSerialized(run.serialized) ?? undefined;
  return {
    [SemanticConventions.PROMPT_TEMPLATE_VARIABLES]: variables,
    [SemanticConventions.PROMPT_TEMPLATE_TEMPLATE]: template,
  };
}
|
/**
* A best effort function to extract the prompt template from a langchain run.
* @param run - The langchain run to extract the prompt template from
* @returns The OpenInference attributes for the prompt template
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L467-L477
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatTokenCounts
|
/**
 * Formats token counts from a langchain run's outputs into OpenInference
 * attributes, probing several known output shapes in priority order:
 * usage_metadata on the message, llmOutput.tokenUsage,
 * llmOutput.estimatedTokenUsage, then llmOutput.usage.
 */
function formatTokenCounts(
  outputs: Run["outputs"],
): TokenCountAttributes | null {
  if (!isObject(outputs)) {
    return null;
  }
  const firstGeneration = getFirstOutputGeneration(outputs);
  /**
   * Some community models have non-standard output structures and show token
   * counts in different places, notably ChatBedrock
   * @see https://github.com/langchain-ai/langchainjs/blob/a173e300ef9ada416220876a2739e024b3a7f268/libs/langchain-community/src/chat_models/bedrock/web.ts
   */
  // Generations is an array of arrays containing messages
  const maybeGenerationComponent =
    firstGeneration != null ? firstGeneration[0] : null;
  const maybeMessage = isObject(maybeGenerationComponent)
    ? maybeGenerationComponent.message
    : null;
  const usageMetadata = isObject(maybeMessage)
    ? maybeMessage.usage_metadata
    : null;
  // Preferred source: usage_metadata on the first generation's message.
  if (isObject(usageMetadata)) {
    return {
      [SemanticConventions.LLM_TOKEN_COUNT_COMPLETION]: getTokenCount(
        usageMetadata.output_tokens,
      ),
      [SemanticConventions.LLM_TOKEN_COUNT_PROMPT]: getTokenCount(
        usageMetadata.input_tokens,
      ),
      [SemanticConventions.LLM_TOKEN_COUNT_TOTAL]: getTokenCount(
        usageMetadata.total_tokens,
      ),
    };
  }
  const llmOutput = outputs.llmOutput;
  if (!isObject(llmOutput)) {
    return null;
  }
  // Standard (non-streamed) path: provider-reported token usage.
  if (isObject(llmOutput.tokenUsage)) {
    return {
      [SemanticConventions.LLM_TOKEN_COUNT_COMPLETION]: getTokenCount(
        llmOutput.tokenUsage.completionTokens,
      ),
      [SemanticConventions.LLM_TOKEN_COUNT_PROMPT]: getTokenCount(
        llmOutput.tokenUsage.promptTokens,
      ),
      [SemanticConventions.LLM_TOKEN_COUNT_TOTAL]: getTokenCount(
        llmOutput.tokenUsage.totalTokens,
      ),
    };
  }
  /**
   * In the case of streamed outputs, the token counts are not available;
   * only estimated counts provided by langchain (not the model provider) are available
   */
  if (isObject(llmOutput.estimatedTokenUsage)) {
    return {
      [SemanticConventions.LLM_TOKEN_COUNT_COMPLETION]: getTokenCount(
        llmOutput.estimatedTokenUsage.completionTokens,
      ),
      [SemanticConventions.LLM_TOKEN_COUNT_PROMPT]: getTokenCount(
        llmOutput.estimatedTokenUsage.promptTokens,
      ),
      [SemanticConventions.LLM_TOKEN_COUNT_TOTAL]: getTokenCount(
        llmOutput.estimatedTokenUsage.totalTokens,
      ),
    };
  }
  /**
   * In some cases community models have a different output structure due to the way they extend the base model.
   * Notably ChatBedrock may have tokens stored in this format instead of normalized
   * @see https://github.com/langchain-ai/langchainjs/blob/a173e300ef9ada416220876a2739e024b3a7f268/libs/langchain-community/src/chat_models/bedrock/web.ts for ChatBedrock
   * and
   * @see https://github.com/langchain-ai/langchainjs/blob/main/langchain-core/src/language_models/chat_models.ts#L403 for normalization
   */
  if (isObject(llmOutput.usage)) {
    const maybePromptTokens = getTokenCount(llmOutput.usage.input_tokens);
    const maybeCompletionTokens = getTokenCount(llmOutput.usage.output_tokens);
    let maybeTotalTokens = getTokenCount(llmOutput.usage.total_tokens);
    if (maybeTotalTokens == null) {
      // No reported total: fall back to prompt + completion when both known.
      maybeTotalTokens =
        isNumber(maybePromptTokens) && isNumber(maybeCompletionTokens)
          ? maybePromptTokens + maybeCompletionTokens
          : undefined;
    }
    return {
      // NOTE(review): maybeCompletionTokens/maybePromptTokens already came
      // from getTokenCount; the second wrap looks redundant — confirm
      // getTokenCount is idempotent on its own output.
      [SemanticConventions.LLM_TOKEN_COUNT_COMPLETION]: getTokenCount(
        maybeCompletionTokens,
      ),
      [SemanticConventions.LLM_TOKEN_COUNT_PROMPT]:
        getTokenCount(maybePromptTokens),
      [SemanticConventions.LLM_TOKEN_COUNT_TOTAL]: maybeTotalTokens,
    };
  }
  return null;
}
|
/**
* Formats the token counts of a langchain run into OpenInference attributes.
* @param outputs - The outputs of a langchain run
* @returns The OpenInference attributes for the token counts
*
* @see https://github.com/langchain-ai/langchainjs/blob/main/langchain-core/src/language_models/chat_models.ts#L403 for how token counts get added to outputs
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L490-L586
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatFunctionCalls
|
/**
 * Extracts the function call (if any) from the first generation of a
 * langchain run's outputs and formats it as OpenInference attributes.
 * @param outputs - The outputs of a langchain run
 * @returns The OpenInference function-call attributes, or null if absent
 */
function formatFunctionCalls(outputs: Run["outputs"]) {
  const generations = getFirstOutputGeneration(outputs);
  if (generations == null) {
    return null;
  }
  const candidate = generations[0];
  if (!isObject(candidate) || !isObject(candidate.message)) {
    return null;
  }
  const kwargs = candidate.message.additional_kwargs;
  if (!isObject(kwargs) || !isObject(kwargs.function_call)) {
    return null;
  }
  return {
    [SemanticConventions.LLM_FUNCTION_CALL]: safelyJSONStringify(
      kwargs.function_call,
    ),
  };
}
|
/**
* Formats the function calls of a langchain run into OpenInference attributes.
* @param outputs - The outputs of a langchain run
* @returns The OpenInference attributes for the function calls
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L593-L617
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatToolCalls
|
/**
 * Formats a "tool" run into OpenInference tool attributes.
 * The run name is used as the tool name unless the serialized tool
 * definition provides its own name/description.
 * @param run - The langchain run to extract the tool attributes from
 * @returns The OpenInference tool attributes, or null for non-tool runs
 */
function formatToolCalls(run: Run) {
  if (run.run_type.toLowerCase() !== "tool") {
    return null;
  }
  // Start from the run name; the serialized definition may override it below
  const attributes: ToolAttributes = {
    [SemanticConventions.TOOL_NAME]: run.name,
  };
  const serialized = run.serialized;
  if (!isObject(serialized)) {
    return attributes;
  }
  if (isString(serialized.name)) {
    attributes[SemanticConventions.TOOL_NAME] = serialized.name;
  }
  if (isString(serialized.description)) {
    attributes[SemanticConventions.TOOL_DESCRIPTION] = serialized.description;
  }
  return attributes;
}
|
/**
* Formats the tool calls of a langchain run into OpenInference attributes.
* @param run - The langchain run to extract the tool calls from
* @returns The OpenInference attributes for the tool calls
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L624-L643
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatMetadata
|
/**
 * Formats the metadata of a langchain run into OpenInference attributes.
 * @param run - The langchain run to extract the metadata from
 * @returns The OpenInference metadata attributes, or null if none exist
 */
function formatMetadata(run: Run) {
  const extra = run.extra;
  if (!isObject(extra) || !isObject(extra.metadata)) {
    return null;
  }
  return {
    [SemanticConventions.METADATA]: safelyJSONStringify(extra.metadata),
  };
}
|
/**
* Formats the metadata of a langchain run into OpenInference attributes.
* @param run - The langchain run to extract the metadata from
* @returns The OpenInference attributes for the metadata
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L650-L657
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatSessionId
|
function formatSessionId(run: Run) {
if (!isObject(run.extra)) {
return null;
}
const metadata = run.extra.metadata;
if (!isObject(metadata)) {
return null;
}
const sessionId = SESSION_ID_KEYS.find((key) => isString(metadata[key]));
if (sessionId == null) {
return null;
}
return {
[SemanticConventions.SESSION_ID]: metadata[sessionId],
};
}
|
/**
* Formats the session id of a langchain run into OpenInference attributes.
*
* @see https://docs.smith.langchain.com/observability/how_to_guides/monitoring/threads#group-traces-into-threads
*
* @param run - The langchain run to extract the session id from
* @returns The OpenInference attributes for the session id
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-langchain/src/utils.ts#L667-L682
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getExecContext
|
/**
 * Resolves the execution context for the current span.
 * If tracing is suppressed on the active context, a context with the span
 * attached is used instead; otherwise the active context is used as-is.
 * @param span - the span being started for the patched call
 * @returns the context under which the original call should execute
 */
function getExecContext(span: Span) {
  const activeContext = context.active();
  const suppressTracing = isTracingSuppressed(activeContext);
  const execContext = suppressTracing
    ? trace.setSpan(context.active(), span)
    : activeContext;
  // Drop the span from the context
  // NOTE(review): trace.deleteSpan returns a new immutable context and the
  // return value is discarded here — confirm this call has the intended effect.
  if (suppressTracing) {
    trace.deleteSpan(activeContext);
  }
  return execContext;
}
|
/**
* Resolves the execution context for the current span
* If tracing is suppressed, the span is dropped and the current context is returned
* @param span
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L69-L80
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
OpenAIInstrumentation.manuallyInstrument
|
/**
 * Manually instruments the OpenAI module. This is needed when the module is
 * not loaded via require (commonjs), since the automatic require hook cannot
 * intercept it in that case.
 * @param module - the openai module to patch
 */
manuallyInstrument(module: typeof openai) {
    diag.debug(`Manually instrumenting ${MODULE_NAME}`);
    this.patch(module);
  }
|
/**
* Manually instruments the OpenAI module. This is needed when the module is not loaded via require (commonjs)
* @param {openai} module
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L125-L128
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
OpenAIInstrumentation.patch
|
/**
 * Patches the OpenAI module: wraps the chat-completions, completions, and
 * embeddings `create` methods so that each call is recorded as an
 * OpenInference span carrying input, output, model, and token-usage
 * attributes. Streaming chat responses are tee'd so the caller's stream is
 * untouched while a copy is consumed to populate the span.
 * @param module - the openai module to patch
 * @param moduleVersion - the version of the module being patched (logging only)
 * @returns the (now patched) module
 */
private patch(
    module: typeof openai & { openInferencePatched?: boolean },
    moduleVersion?: string,
  ) {
    diag.debug(`Applying patch for ${MODULE_NAME}@${moduleVersion}`);
    // Bail out if this module object or the process has already been patched
    if (module?.openInferencePatched || _isOpenInferencePatched) {
      return module;
    }
    // eslint-disable-next-line @typescript-eslint/no-this-alias
    const instrumentation: OpenAIInstrumentation = this;
    type ChatCompletionCreateType =
      typeof module.OpenAI.Chat.Completions.prototype.create;
    // Patch create chat completions
    this._wrap(
      module.OpenAI.Chat.Completions.prototype,
      "create",
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (original: ChatCompletionCreateType): any => {
        return function patchedCreate(
          this: unknown,
          ...args: Parameters<ChatCompletionCreateType>
        ) {
          const body = args[0];
          // Messages are recorded separately; the rest of the body becomes
          // the invocation parameters
          const { messages: _messages, ...invocationParameters } = body;
          const span = instrumentation.oiTracer.startSpan(
            `OpenAI Chat Completions`,
            {
              kind: SpanKind.INTERNAL,
              attributes: {
                [SemanticConventions.OPENINFERENCE_SPAN_KIND]:
                  OpenInferenceSpanKind.LLM,
                [SemanticConventions.LLM_MODEL_NAME]: body.model,
                [SemanticConventions.INPUT_VALUE]: JSON.stringify(body),
                [SemanticConventions.INPUT_MIME_TYPE]: MimeType.JSON,
                [SemanticConventions.LLM_INVOCATION_PARAMETERS]:
                  JSON.stringify(invocationParameters),
                [SemanticConventions.LLM_SYSTEM]: LLMSystem.OPENAI,
                [SemanticConventions.LLM_PROVIDER]: LLMProvider.OPENAI,
                ...getLLMInputMessagesAttributes(body),
                ...getLLMToolsJSONSchema(body),
              },
            },
          );
          const execContext = getExecContext(span);
          const execPromise = safeExecuteInTheMiddle<
            ReturnType<ChatCompletionCreateType>
          >(
            () => {
              return context.with(trace.setSpan(execContext, span), () => {
                return original.apply(this, args);
              });
            },
            (error) => {
              // Push the error to the span
              if (error) {
                span.recordException(error);
                span.setStatus({
                  code: SpanStatusCode.ERROR,
                  message: error.message,
                });
                span.end();
              }
            },
          );
          const wrappedPromise = execPromise.then((result) => {
            if (isChatCompletionResponse(result)) {
              // Record the results
              span.setAttributes({
                [SemanticConventions.OUTPUT_VALUE]: JSON.stringify(result),
                [SemanticConventions.OUTPUT_MIME_TYPE]: MimeType.JSON,
                // Override the model from the value sent by the server
                [SemanticConventions.LLM_MODEL_NAME]: result.model,
                ...getChatCompletionLLMOutputMessagesAttributes(result),
                ...getUsageAttributes(result),
              });
              span.setStatus({ code: SpanStatusCode.OK });
              span.end();
            } else {
              // This is a streaming response
              // handle the chunks and add them to the span
              // First split the stream via tee
              const [leftStream, rightStream] = result.tee();
              consumeChatCompletionStreamChunks(rightStream, span);
              result = leftStream;
            }
            return result;
          });
          return context.bind(execContext, wrappedPromise);
        };
      },
    );
    // Patch create completions
    type CompletionsCreateType =
      typeof module.OpenAI.Completions.prototype.create;
    this._wrap(
      module.OpenAI.Completions.prototype,
      "create",
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (original: CompletionsCreateType): any => {
        return function patchedCreate(
          this: unknown,
          ...args: Parameters<CompletionsCreateType>
        ) {
          const body = args[0];
          // The prompt is recorded separately; the rest of the body becomes
          // the invocation parameters
          const { prompt: _prompt, ...invocationParameters } = body;
          const span = instrumentation.oiTracer.startSpan(
            `OpenAI Completions`,
            {
              kind: SpanKind.INTERNAL,
              attributes: {
                [SemanticConventions.OPENINFERENCE_SPAN_KIND]:
                  OpenInferenceSpanKind.LLM,
                [SemanticConventions.LLM_MODEL_NAME]: body.model,
                [SemanticConventions.LLM_INVOCATION_PARAMETERS]:
                  JSON.stringify(invocationParameters),
                [SemanticConventions.LLM_SYSTEM]: LLMSystem.OPENAI,
                [SemanticConventions.LLM_PROVIDER]: LLMProvider.OPENAI,
                ...getCompletionInputValueAndMimeType(body),
              },
            },
          );
          const execContext = getExecContext(span);
          const execPromise = safeExecuteInTheMiddle<
            ReturnType<CompletionsCreateType>
          >(
            () => {
              return context.with(trace.setSpan(execContext, span), () => {
                return original.apply(this, args);
              });
            },
            (error) => {
              // Push the error to the span
              if (error) {
                span.recordException(error);
                span.setStatus({
                  code: SpanStatusCode.ERROR,
                  message: error.message,
                });
                span.end();
              }
            },
          );
          const wrappedPromise = execPromise.then((result) => {
            if (isCompletionResponse(result)) {
              // Record the results
              span.setAttributes({
                [SemanticConventions.OUTPUT_VALUE]: JSON.stringify(result),
                [SemanticConventions.OUTPUT_MIME_TYPE]: MimeType.JSON,
                // Override the model from the value sent by the server
                [SemanticConventions.LLM_MODEL_NAME]: result.model,
                ...getCompletionOutputValueAndMimeType(result),
                ...getUsageAttributes(result),
              });
              span.setStatus({ code: SpanStatusCode.OK });
              span.end();
            }
            return result;
          });
          return context.bind(execContext, wrappedPromise);
        };
      },
    );
    // Patch embeddings
    type EmbeddingsCreateType =
      typeof module.OpenAI.Embeddings.prototype.create;
    this._wrap(
      module.OpenAI.Embeddings.prototype,
      "create",
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (original: EmbeddingsCreateType): any => {
        return function patchedEmbeddingCreate(
          this: unknown,
          ...args: Parameters<EmbeddingsCreateType>
        ) {
          const body = args[0];
          const { input } = body;
          const isStringInput = typeof input === "string";
          const span = instrumentation.oiTracer.startSpan(`OpenAI Embeddings`, {
            kind: SpanKind.INTERNAL,
            attributes: {
              [SemanticConventions.OPENINFERENCE_SPAN_KIND]:
                OpenInferenceSpanKind.EMBEDDING,
              [SemanticConventions.EMBEDDING_MODEL_NAME]: body.model,
              [SemanticConventions.INPUT_VALUE]: isStringInput
                ? input
                : JSON.stringify(input),
              [SemanticConventions.INPUT_MIME_TYPE]: isStringInput
                ? MimeType.TEXT
                : MimeType.JSON,
              ...getEmbeddingTextAttributes(body),
            },
          });
          const execContext = getExecContext(span);
          const execPromise = safeExecuteInTheMiddle<
            ReturnType<EmbeddingsCreateType>
          >(
            () => {
              return context.with(trace.setSpan(execContext, span), () => {
                return original.apply(this, args);
              });
            },
            (error) => {
              // Push the error to the span
              if (error) {
                span.recordException(error);
                span.setStatus({
                  code: SpanStatusCode.ERROR,
                  message: error.message,
                });
                span.end();
              }
            },
          );
          const wrappedPromise = execPromise.then((result) => {
            if (result) {
              // Record the results
              span.setAttributes({
                // Do not record the output data as it can be large
                ...getEmbeddingEmbeddingsAttributes(result),
              });
            }
            span.setStatus({ code: SpanStatusCode.OK });
            span.end();
            return result;
          });
          return context.bind(execContext, wrappedPromise);
        };
      },
    );
    _isOpenInferencePatched = true;
    try {
      // This can fail if the module is made immutable via the runtime or bundler
      module.openInferencePatched = true;
    } catch (e) {
      diag.debug(`Failed to set ${MODULE_NAME} patched flag on the module`, e);
    }
    return module;
  }
|
/**
* Patches the OpenAI module
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L133-L379
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
OpenAIInstrumentation.unpatch
|
/**
 * Un-patches the OpenAI module, restoring the original `create` methods on
 * the chat completions, completions, and embeddings APIs.
 * @param moduleExports - the patched openai module
 * @param moduleVersion - the version of the module being un-patched (logging only)
 */
private unpatch(
    moduleExports: typeof openai & { openInferencePatched?: boolean },
    moduleVersion?: string,
  ) {
    diag.debug(`Removing patch for ${MODULE_NAME}@${moduleVersion}`);
    const patchedPrototypes = [
      moduleExports.OpenAI.Chat.Completions.prototype,
      moduleExports.OpenAI.Completions.prototype,
      moduleExports.OpenAI.Embeddings.prototype,
    ];
    for (const prototype of patchedPrototypes) {
      this._unwrap(prototype, "create");
    }
    _isOpenInferencePatched = false;
    try {
      // This can fail if the module is made immutable via the runtime or bundler
      moduleExports.openInferencePatched = false;
    } catch (e) {
      diag.warn(`Failed to unset ${MODULE_NAME} patched flag on the module`, e);
    }
  }
|
/**
 * Un-patches the OpenAI module's chat completions, completions, and embeddings APIs
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L383-L399
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
isChatCompletionResponse
|
/**
 * Type guard that distinguishes a non-streaming chat completion response
 * from a streaming one by the presence of the `choices` property.
 */
function isChatCompletionResponse(
  response: Stream<ChatCompletionChunk> | ChatCompletion,
): response is ChatCompletion {
  const hasChoices = "choices" in response;
  return hasChoices;
}
|
/**
* type-guard that checks if the response is a chat completion response
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L405-L409
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
isCompletionResponse
|
/**
 * Type guard that distinguishes a non-streaming completion response from a
 * streaming one by the presence of the `choices` property.
 */
function isCompletionResponse(
  response: Stream<Completion> | Completion,
): response is Completion {
  const hasChoices = "choices" in response;
  return hasChoices;
}
|
/**
* type-guard that checks if the response is a completion response
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L414-L418
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
isPromptStringArray
|
/**
 * Type guard that checks whether a completion prompt is an array consisting
 * entirely of strings.
 */
function isPromptStringArray(
  prompt: CompletionCreateParamsBase["prompt"],
): prompt is Array<string> {
  if (!Array.isArray(prompt)) {
    return false;
  }
  return prompt.every((item) => typeof item === "string");
}
|
/**
* type-guard that checks if completion prompt attribute is an array of strings
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L423-L429
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getLLMInputMessagesAttributes
|
/**
 * Converts the body of a chat completions request to flattened LLM input
 * message attributes, one indexed prefix per message.
 */
function getLLMInputMessagesAttributes(
  body: ChatCompletionCreateParamsBase,
): Attributes {
  const attributes: Attributes = {};
  body.messages.forEach((message, index) => {
    const indexPrefix = `${SemanticConventions.LLM_INPUT_MESSAGES}.${index}.`;
    const messageAttributes = getChatCompletionInputMessageAttributes(message);
    // Flatten the message attributes under the index prefix
    for (const [key, value] of Object.entries(messageAttributes)) {
      attributes[`${indexPrefix}${key}`] = value;
    }
  });
  return attributes;
}
|
/**
* Converts the body of a chat completions request to LLM input messages
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L434-L446
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getLLMToolsJSONSchema
|
/**
 * Serializes each tool definition on the request to a JSON-schema attribute,
 * one indexed entry per tool.
 */
function getLLMToolsJSONSchema(
  body: ChatCompletionCreateParamsBase,
): Attributes {
  const tools = body.tools;
  if (!tools) {
    // No tools on the request
    return {};
  }
  const attributes: Attributes = {};
  tools.forEach((tool, index) => {
    const toolJsonSchema = safelyJSONStringify(tool);
    if (toolJsonSchema) {
      attributes[
        `${SemanticConventions.LLM_TOOLS}.${index}.${SemanticConventions.TOOL_JSON_SCHEMA}`
      ] = toolJsonSchema;
    }
  });
  return attributes;
}
|
/**
* Converts each tool definition into a json schema
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L451-L466
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getCompletionInputValueAndMimeType
|
/**
 * Converts the body of a completions request to input value/mime-type
 * attributes. String and string-array prompts are supported; token prompts
 * are ignored.
 */
function getCompletionInputValueAndMimeType(
  body: CompletionCreateParamsBase,
): Attributes {
  const prompt = body.prompt;
  if (typeof prompt === "string") {
    return {
      [SemanticConventions.INPUT_VALUE]: prompt,
      [SemanticConventions.INPUT_MIME_TYPE]: MimeType.TEXT,
    };
  }
  if (isPromptStringArray(prompt)) {
    // Only single prompts are currently supported
    const firstPrompt = prompt[0];
    return firstPrompt === undefined
      ? {}
      : {
          [SemanticConventions.INPUT_VALUE]: firstPrompt,
          [SemanticConventions.INPUT_MIME_TYPE]: MimeType.TEXT,
        };
  }
  // Other cases in which the prompt is a token or array of tokens are currently unsupported
  return {};
}
|
/**
* Converts the body of a completions request to input attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L547-L567
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getUsageAttributes
|
/**
 * Extracts token-usage attributes (prompt, completion, total) from a
 * completion response, when the server reported usage.
 */
function getUsageAttributes(
  completion: ChatCompletion | Completion,
): Attributes {
  const usage = completion.usage;
  if (!usage) {
    return {};
  }
  return {
    [SemanticConventions.LLM_TOKEN_COUNT_COMPLETION]: usage.completion_tokens,
    [SemanticConventions.LLM_TOKEN_COUNT_PROMPT]: usage.prompt_tokens,
    [SemanticConventions.LLM_TOKEN_COUNT_TOTAL]: usage.total_tokens,
  };
}
|
/**
* Get usage attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L572-L586
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getChatCompletionLLMOutputMessagesAttributes
|
/**
 * Converts the chat completion result to flattened LLM output message
 * attributes. Only the first choice is currently supported, so the single
 * message is recorded under the `.0.` index prefix.
 */
function getChatCompletionLLMOutputMessagesAttributes(
  chatCompletion: ChatCompletion,
): Attributes {
  // Right now support just the first choice
  const choice = chatCompletion.choices[0];
  if (!choice) {
    return {};
  }
  // Flatten the message attributes directly — no need to reduce over a
  // single-element array
  const indexPrefix = `${SemanticConventions.LLM_OUTPUT_MESSAGES}.0.`;
  const messageAttributes = getChatCompletionOutputMessageAttributes(
    choice.message,
  );
  const attributes: Attributes = {};
  for (const [key, value] of Object.entries(messageAttributes)) {
    attributes[`${indexPrefix}${key}`] = value;
  }
  return attributes;
}
|
/**
* Converts the chat completion result to LLM output attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L591-L608
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getCompletionOutputValueAndMimeType
|
/**
 * Converts the completion result to output value/mime-type attributes.
 * Only the first choice is currently supported.
 */
function getCompletionOutputValueAndMimeType(
  completion: Completion,
): Attributes {
  // Right now support just the first choice
  const firstChoice = completion.choices[0];
  if (!firstChoice) {
    return {};
  }
  return {
    [SemanticConventions.OUTPUT_VALUE]: String(firstChoice.text),
    [SemanticConventions.OUTPUT_MIME_TYPE]: MimeType.TEXT,
  };
}
|
/**
* Converts the completion result to output attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L654-L666
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getEmbeddingTextAttributes
|
/**
 * Converts the embedding request input(s) to embedding-text attributes.
 * Handles a single string or an array of strings; numeric token inputs are
 * ignored.
 */
function getEmbeddingTextAttributes(
  request: EmbeddingCreateParams,
): Attributes {
  const input = request.input;
  if (typeof input === "string") {
    return {
      [`${SemanticConventions.EMBEDDING_EMBEDDINGS}.0.${SemanticConventions.EMBEDDING_TEXT}`]:
        input,
    };
  }
  if (Array.isArray(input) && input.length > 0 && typeof input[0] === "string") {
    const attributes: Attributes = {};
    input.forEach((text, index) => {
      attributes[
        `${SemanticConventions.EMBEDDING_EMBEDDINGS}.${index}.${SemanticConventions.EMBEDDING_TEXT}`
      ] = text;
    });
    return attributes;
  }
  // Ignore other cases where input is a number or an array of numbers
  return {};
}
|
/**
* Converts the embedding result payload to embedding attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L671-L692
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getEmbeddingEmbeddingsAttributes
|
/**
 * Converts the embedding response payload to embedding-vector attributes,
 * one indexed entry per returned embedding.
 */
function getEmbeddingEmbeddingsAttributes(
  response: CreateEmbeddingResponse,
): Attributes {
  const attributes: Attributes = {};
  response.data.forEach((item, index) => {
    attributes[
      `${SemanticConventions.EMBEDDING_EMBEDDINGS}.${index}.${SemanticConventions.EMBEDDING_VECTOR}`
    ] = item.embedding;
  });
  return attributes;
}
|
/**
* Converts the embedding result payload to embedding attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L697-L706
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
consumeChatCompletionStreamChunks
|
async function consumeChatCompletionStreamChunks(
stream: Stream<ChatCompletionChunk>,
span: Span,
) {
let streamResponse = "";
// Tool and function call attributes can also arrive in the stream
// NB: the tools and function calls arrive in partial diffs
// So the final tool and function calls need to be aggregated
// across chunks
const toolAndFunctionCallAttributes: Attributes = {};
// The first message is for the assistant response so we start at 1
for await (const chunk of stream) {
if (chunk.choices.length <= 0) {
continue;
}
const choice = chunk.choices[0];
if (choice.delta.content) {
streamResponse += choice.delta.content;
}
// Accumulate the tool and function call attributes
const toolAndFunctionCallAttributesDiff =
getToolAndFunctionCallAttributesFromStreamChunk(chunk);
for (const [key, value] of Object.entries(
toolAndFunctionCallAttributesDiff,
)) {
if (isString(toolAndFunctionCallAttributes[key]) && isString(value)) {
toolAndFunctionCallAttributes[key] += value;
} else if (isString(value)) {
toolAndFunctionCallAttributes[key] = value;
}
}
}
const messageIndexPrefix = `${SemanticConventions.LLM_OUTPUT_MESSAGES}.0.`;
// Append the attributes to the span as a message
const attributes: Attributes = {
[SemanticConventions.OUTPUT_VALUE]: streamResponse,
[SemanticConventions.OUTPUT_MIME_TYPE]: MimeType.TEXT,
[`${messageIndexPrefix}${SemanticConventions.MESSAGE_CONTENT}`]:
streamResponse,
[`${messageIndexPrefix}${SemanticConventions.MESSAGE_ROLE}`]: "assistant",
};
// Add the tool and function call attributes
for (const [key, value] of Object.entries(toolAndFunctionCallAttributes)) {
attributes[`${messageIndexPrefix}${key}`] = value;
}
span.setAttributes(attributes);
span.end();
}
|
/**
* Consumes the stream chunks and adds them to the span
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L711-L759
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getToolAndFunctionCallAttributesFromStreamChunk
|
/**
 * Extracts the semantic attributes for tool_calls and function_calls from a
 * single stream chunk. Values are partial diffs that the caller accumulates
 * across chunks.
 */
function getToolAndFunctionCallAttributesFromStreamChunk(
  chunk: ChatCompletionChunk,
): Attributes {
  if (chunk.choices.length <= 0) {
    return {};
  }
  const { delta } = chunk.choices[0];
  const attributes: Attributes = {};
  delta.tool_calls?.forEach((toolCall, index) => {
    const toolCallIndexPrefix = `${SemanticConventions.MESSAGE_TOOL_CALLS}.${index}.`;
    // Add the tool call id if it exists
    if (toolCall.id) {
      attributes[`${toolCallIndexPrefix}${SemanticConventions.TOOL_CALL_ID}`] =
        toolCall.id;
    }
    // Double check that the tool call has a function
    // NB: OpenAI only supports tool calls with functions right now but this may change
    const fn = toolCall.function;
    if (fn) {
      attributes[
        `${toolCallIndexPrefix}${SemanticConventions.TOOL_CALL_FUNCTION_NAME}`
      ] = fn.name;
      attributes[
        `${toolCallIndexPrefix}${SemanticConventions.TOOL_CALL_FUNCTION_ARGUMENTS_JSON}`
      ] = fn.arguments;
    }
  });
  const functionCall = delta.function_call;
  if (functionCall) {
    attributes[SemanticConventions.MESSAGE_FUNCTION_CALL_NAME] =
      functionCall.name;
    attributes[SemanticConventions.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON] =
      functionCall.arguments;
  }
  return attributes;
}
|
/**
* Extracts the semantic attributes from the stream chunk for tool_calls and function_calls
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/src/instrumentation.ts#L764-L801
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getCurrentLocation
|
// Function tools: returns a fixed city to simulate a location lookup
async function getCurrentLocation() {
  const simulatedCity = "Boston"; // Simulate lookup
  return simulatedCity;
}
|
// Function tools
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-instrumentation-openai/test/openai.test.ts#L15-L17
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getVercelFunctionNameFromOperationName
|
/**
 * Extracts the Vercel function name — the text before the first space — from
 * an operation name such as "ai.generateText.doGenerate <functionId>".
 * @param operationName - the operation name of the span
 * @returns the Vercel function name from the operation name
 */
const getVercelFunctionNameFromOperationName = (
  operationName: string,
): string | undefined => {
  const firstSpace = operationName.indexOf(" ");
  return firstSpace === -1
    ? operationName
    : operationName.slice(0, firstSpace);
};
|
/**
*
* @param operationName - the operation name of the span
 * Operation names are set on Vercel spans under the operation.name attribute, in the form:
* @example ai.generateText.doGenerate <functionId>
* @returns the Vercel function name from the operation name or undefined if not found
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L47-L51
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getOISpanKindFromAttributes
|
/**
 * Gets the OpenInference span kind that corresponds to the Vercel operation
 * name found in the span attributes.
 * @param attributes the attributes of the span
 * @returns the OpenInference span kind, or undefined if not found
 */
const getOISpanKindFromAttributes = (
  attributes: Attributes,
): OpenInferenceSpanKind | undefined => {
  const operationName = attributes["operation.name"];
  // Non-string (including missing) operation names cannot be mapped
  if (typeof operationName !== "string") {
    return undefined;
  }
  const functionName = getVercelFunctionNameFromOperationName(operationName);
  if (functionName == null) {
    return undefined;
  }
  return VercelSDKFunctionNameToSpanKindMap.get(functionName);
};
|
/**
* Gets the OpenInference span kind that corresponds to the Vercel operation name
* @param attributes the attributes of the span
 * @returns the OpenInference span kind associated with the attributes or undefined if not found
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L58-L71
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getInvocationParamAttributes
|
/**
 * Accumulates the span attributes prefixed with "ai.settings" into a single
 * JSON-stringified invocation-parameters attribute.
 * @param attributes the initial attributes of the span
 * @returns the invocation-parameter attributes, or null if no settings exist
 */
const getInvocationParamAttributes = (attributes: Attributes) => {
  const settingAttributes: Attributes = {};
  let foundSetting = false;
  for (const [key, value] of Object.entries(attributes)) {
    if (!key.startsWith(AISemanticConventions.SETTINGS)) {
      continue;
    }
    foundSetting = true;
    // The last dotted segment is the parameter name
    // (e.g. "ai.settings.maxTokens" -> "maxTokens")
    const keyParts = key.split(".");
    settingAttributes[keyParts[keyParts.length - 1]] = value;
  }
  if (!foundSetting) {
    return null;
  }
  return {
    [SemanticConventions.LLM_INVOCATION_PARAMETERS]:
      safelyJSONStringify(settingAttributes) ?? undefined,
  };
};
|
/**
* Takes the attributes from the span and accumulates the attributes that are prefixed with "ai.settings" to be used as the invocation parameters
* @param attributes the initial attributes of the span
* @returns the OpenInference attributes associated with the invocation parameters
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L86-L104
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
isValidJsonString
|
/**
 * Determines whether the value is a string containing valid JSON that parses
 * to an object (or array).
 * @param value the value to check
 * @returns whether the value is a valid JSON string
 */
const isValidJsonString = (value?: AttributeValue) => {
  if (typeof value !== "string") {
    return false;
  }
  const parsed = safelyJSONParse(value);
  return parsed !== null && typeof parsed === "object";
};
|
/**
* Determines whether the value is a valid JSON string
* @param value the value to check
* @returns whether the value is a valid JSON string
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L119-L125
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getMimeTypeFromValue
|
/**
 * Gets the mime type of the attribute value: JSON when the value is a valid
 * JSON string, TEXT otherwise.
 * @param value the attribute value to check
 * @returns the mime type of the value
 */
const getMimeTypeFromValue = (value?: AttributeValue) =>
  isValidJsonString(value) ? MimeType.JSON : MimeType.TEXT;
|
/**
* Gets the mime type of the attribute value
* @param value the attribute value to check
* @returns the mime type of the value
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L132-L137
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getIOValueAttributes
|
/**
 * Gets the OpenInference attributes associated with an input or output value,
 * pairing the value with the matching mime-type attribute.
 * @param object.attributeValue the IO attribute value set by Vercel
 * @param object.OpenInferenceSemanticConventionKey the corresponding OpenInference semantic convention
 * @returns the OpenInference attributes for the IO value
 */
const getIOValueAttributes = ({
  attributeValue,
  OpenInferenceSemanticConventionKey,
}: {
  attributeValue?: AttributeValue;
  OpenInferenceSemanticConventionKey: OpenInferenceIOConventionKey;
}) => {
  const isInput =
    OpenInferenceSemanticConventionKey === SemanticConventions.INPUT_VALUE;
  const mimeTypeSemanticConvention = isInput
    ? SemanticConventions.INPUT_MIME_TYPE
    : SemanticConventions.OUTPUT_MIME_TYPE;
  return {
    [OpenInferenceSemanticConventionKey]: attributeValue,
    [mimeTypeSemanticConvention]: getMimeTypeFromValue(attributeValue),
  };
};
|
/**
* Gets OpenInference attributes associated with the IO
* @param object.attributeValue the IO attribute value set by Vercel
* @param object.OpenInferenceSemanticConventionKey the corresponding OpenInference semantic convention
* @returns the OpenInference attributes associated with the IO value
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L145-L160
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
formatEmbeddingValue
|
/**
 * Formats an embedding attribute value (text or vector) into the expected
 * shape: Vercel stringifies embedding vectors, but the OpenInference spec
 * expects un-stringified arrays.
 * @param value the value to format
 * @returns the parsed value, or the original value if it is not a string or
 * cannot be parsed into a valid attribute value
 */
const formatEmbeddingValue = (value: AttributeValue) => {
  if (typeof value !== "string") {
    return value;
  }
  const parsedValue = safelyJSONParse(value);
  // Keep the original string when parsing fails or yields a non-attribute value
  return isAttributeValue(parsedValue) && parsedValue !== null
    ? parsedValue
    : value;
};
|
/**
* Formats an embedding attribute value (i.e., embedding text or vector) into the expected format
* Vercel embedding vector attributes are stringified arrays, however, the OpenInference spec expects them to be un-stringified arrays
* @param value the value to format (either embedding text or vector)
* @returns the formatted value or the original value if it is not a string or cannot be parsed
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L176-L185
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getEmbeddingAttributes
|
/**
 * Maps a Vercel embedding attribute value to OpenInference embedding attributes.
 * The value may be a single string or an array of strings; each entry becomes an
 * indexed `embedding.embeddings.<i>.<key>` attribute.
 * @param object.attributeValue the Vercel embedding value
 * @param object.OpenInferenceSemanticConventionKey EMBEDDING_TEXT or EMBEDDING_VECTOR
 * @returns the OpenInference embedding attributes, or null for unsupported values
 */
const getEmbeddingAttributes = ({
  attributeValue,
  OpenInferenceSemanticConventionKey,
}: {
  attributeValue?: AttributeValue;
  OpenInferenceSemanticConventionKey: OpenInferenceSemanticConventionKey;
}) => {
  const EMBEDDING_PREFIX = SemanticConventions.EMBEDDING_EMBEDDINGS;
  const keyAt = (index: number) =>
    `${EMBEDDING_PREFIX}.${index}.${OpenInferenceSemanticConventionKey}`;
  if (typeof attributeValue === "string") {
    // A lone string becomes the first (and only) embedding entry.
    return { [keyAt(0)]: formatEmbeddingValue(attributeValue) };
  }
  if (isStringArray(attributeValue)) {
    const embeddingAttributes: Attributes = {};
    attributeValue.forEach((embeddingValue, index) => {
      embeddingAttributes[keyAt(index)] = formatEmbeddingValue(embeddingValue);
    });
    return embeddingAttributes;
  }
  return null;
};
|
/**
* Takes the Vercel embedding attribute value and the corresponding OpenInference attribute key and returns the OpenInference attributes associated with the embedding
* The Vercel embedding attribute value can be a string or an array of strings
* @param object the attribute value and the OpenInferenceSemanticConventionKey (either EMBEDDING_TEXT or EMBEDDING_VECTOR)
* @returns the OpenInference attributes associated with the embedding
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L193-L217
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getInputMessageAttributes
|
/**
 * Gets the llm.input_messages OpenInference attributes.
 * @param promptMessages the stringified Vercel prompt messages
 * @returns input message attributes, or null when the value is not a JSON array of objects
 */
const getInputMessageAttributes = (promptMessages?: AttributeValue) => {
  if (typeof promptMessages !== "string") {
    return null;
  }
  const parsedMessages = safelyJSONParse(promptMessages);
  if (!isArrayOfObjects(parsedMessages)) {
    return null;
  }
  const messageAttributes: Attributes = {};
  parsedMessages.forEach((message, messageIndex) => {
    const MESSAGE_PREFIX = `${SemanticConventions.LLM_INPUT_MESSAGES}.${messageIndex}`;
    if (isArrayOfObjects(message.content)) {
      // Multi-part content: each part gets indexed type/text/image attributes.
      message.content.forEach((content, contentIndex) => {
        const CONTENTS_PREFIX = `${MESSAGE_PREFIX}.${SemanticConventions.MESSAGE_CONTENTS}.${contentIndex}`;
        messageAttributes[
          `${CONTENTS_PREFIX}.${SemanticConventions.MESSAGE_CONTENT_TYPE}`
        ] = typeof content.type === "string" ? content.type : undefined;
        messageAttributes[
          `${CONTENTS_PREFIX}.${SemanticConventions.MESSAGE_CONTENT_TEXT}`
        ] = typeof content.text === "string" ? content.text : undefined;
        messageAttributes[
          `${CONTENTS_PREFIX}.${SemanticConventions.MESSAGE_CONTENT_IMAGE}`
        ] = typeof content.image === "string" ? content.image : undefined;
      });
    } else if (typeof message.content === "string") {
      // Plain string content maps directly to message.content.
      messageAttributes[
        `${MESSAGE_PREFIX}.${SemanticConventions.MESSAGE_CONTENT}`
      ] = message.content;
    }
    messageAttributes[
      `${SemanticConventions.LLM_INPUT_MESSAGES}.${messageIndex}.${SemanticConventions.MESSAGE_ROLE}`
    ] = typeof message.role === "string" ? message.role : undefined;
  });
  return messageAttributes;
};
|
/**
* Gets the input_messages OpenInference attributes
* @param promptMessages the attribute value of the Vercel prompt messages
* @returns input_messages attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L232-L274
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getToolCallMessageAttributes
|
/**
 * Gets the llm.output_messages tool_call OpenInference attributes.
 * @param toolCalls the stringified Vercel result.toolCalls value
 * @returns output message tool_call attributes, or null when the value is not a JSON array of objects
 */
const getToolCallMessageAttributes = (toolCalls?: AttributeValue) => {
  if (typeof toolCalls !== "string") {
    return null;
  }
  const parsedToolCalls = safelyJSONParse(toolCalls);
  if (!isArrayOfObjects(parsedToolCalls)) {
    return null;
  }
  // All tool calls hang off a single assistant output message at index 0.
  const OUTPUT_MESSAGE_PREFIX = `${SemanticConventions.LLM_OUTPUT_MESSAGES}.0`;
  const toolCallAttributes: Attributes = {
    [`${OUTPUT_MESSAGE_PREFIX}.${SemanticConventions.MESSAGE_ROLE}`]:
      "assistant",
  };
  parsedToolCalls.forEach((toolCall, toolCallIndex) => {
    const TOOL_CALL_PREFIX = `${OUTPUT_MESSAGE_PREFIX}.${SemanticConventions.MESSAGE_TOOL_CALLS}.${toolCallIndex}`;
    const toolCallArgsJSON = safelyJSONStringify(toolCall.args);
    toolCallAttributes[
      `${TOOL_CALL_PREFIX}.${SemanticConventions.TOOL_CALL_FUNCTION_NAME}`
    ] = isAttributeValue(toolCall.toolName) ? toolCall.toolName : undefined;
    toolCallAttributes[
      `${TOOL_CALL_PREFIX}.${SemanticConventions.TOOL_CALL_FUNCTION_ARGUMENTS_JSON}`
    ] = toolCallArgsJSON != null ? toolCallArgsJSON : undefined;
  });
  return toolCallAttributes;
};
|
/**
* Gets the output_messages tool_call OpenInference attributes
* @param toolCalls the attribute value of the Vercel result.toolCalls
* @returns output_messages tool_call attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L289-L316
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getMetadataAttributes
|
/**
 * Gets the OpenInference metadata attributes.
 * Vercel stores metadata flat as `ai.telemetry.metadata.<key>`; OpenInference as `metadata.<key>`.
 * @param attributes the initial attributes of the span
 * @returns the remapped metadata attributes, or null when none are present
 */
const getMetadataAttributes = (attributes: Attributes) => {
  const metadataEntries = Object.keys(attributes)
    .filter((key) => key.startsWith(AISemanticConventions.METADATA))
    // `ai.telemetry.metadata.<key>` — the metadata key is the fourth dot segment.
    .map((key) => ({ key: key.split(".")[3], value: attributes[key] }));
  if (metadataEntries.length === 0) {
    return null;
  }
  const metadataAttributes: Attributes = {};
  for (const { key, value } of metadataEntries) {
    if (key != null) {
      metadataAttributes[`${SemanticConventions.METADATA}.${key}`] = value;
    }
  }
  return metadataAttributes;
};
|
/**
* Gets the OpenInference metadata attributes
* Both vercel and OpenInference attach metadata attributes to spans in a flat structure
* @example Vercel: ai.telemetry.metadata.<metadataKey>
* @example OpenInference: metadata.<metadataKey>
* @param attributes the initial attributes of the span
* @returns the OpenInference metadata attributes
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L334-L349
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
openinference
|
github_2023
|
Arize-ai
|
typescript
|
getOpenInferenceAttributes
|
/**
 * Gets the OpenInference attributes associated with the span from its initial attributes.
 * Walks every known Vercel AI semantic convention and, when present on the span,
 * maps it to the corresponding OpenInference attribute(s).
 * Fix: removed an unreachable `break;` that followed the `return` in the
 * PROMPT_MESSAGES case.
 * @param attributes the initial attributes of the span
 * @returns the OpenInference attributes for the span
 */
const getOpenInferenceAttributes = (attributes: Attributes): Attributes => {
  const spanKind = safelyGetOISpanKindFromAttributes(attributes);
  const openInferenceAttributes = {
    [SemanticConventions.OPENINFERENCE_SPAN_KIND]: spanKind ?? undefined,
  };
  return AISemanticConventionsList.reduce(
    (openInferenceAttributes: Attributes, convention) => {
      /**
       * Both settings and metadata are not full attribute paths but prefixes
       * @example ai.settings.<paramName> or ai.metadata.<metadataKey>
       */
      if (
        !(convention in attributes) &&
        convention !== AISemanticConventions.SETTINGS &&
        convention !== AISemanticConventions.METADATA
      ) {
        return openInferenceAttributes;
      }
      const openInferenceKey = AISemConvToOISemConvMap[convention];
      switch (convention) {
        case AISemanticConventions.METADATA:
          return {
            ...openInferenceAttributes,
            ...safelyGetMetadataAttributes(attributes),
          };
        case AISemanticConventions.TOKEN_COUNT_COMPLETION:
        case AISemanticConventions.TOKEN_COUNT_PROMPT:
          // Do not capture token counts for non LLM spans to avoid double token counts
          if (spanKind !== OpenInferenceSpanKind.LLM) {
            return openInferenceAttributes;
          }
          return {
            ...openInferenceAttributes,
            [openInferenceKey]: attributes[convention],
          };
        case AISemanticConventions.TOOL_CALL_ID:
          return {
            ...openInferenceAttributes,
            [openInferenceKey]: attributes[convention],
          };
        case AISemanticConventions.TOOL_CALL_NAME:
          return {
            ...openInferenceAttributes,
            [openInferenceKey]: attributes[convention],
          };
        case AISemanticConventions.TOOL_CALL_ARGS: {
          let argsAttributes = {
            [openInferenceKey]: attributes[convention],
          };
          // For tool spans, capture the arguments as input value
          if (spanKind === OpenInferenceSpanKind.TOOL) {
            argsAttributes = {
              ...argsAttributes,
              [SemanticConventions.INPUT_VALUE]: attributes[convention],
              [SemanticConventions.INPUT_MIME_TYPE]: getMimeTypeFromValue(
                attributes[convention],
              ),
            };
          }
          return {
            ...openInferenceAttributes,
            ...argsAttributes,
          };
        }
        case AISemanticConventions.TOOL_CALL_RESULT:
          // For tool spans, capture the result as output value, for non tool spans ignore
          if (spanKind !== OpenInferenceSpanKind.TOOL) {
            return openInferenceAttributes;
          }
          return {
            ...openInferenceAttributes,
            [openInferenceKey]: attributes[convention],
            [SemanticConventions.OUTPUT_MIME_TYPE]: getMimeTypeFromValue(
              attributes[convention],
            ),
          };
        case AISemanticConventions.MODEL_ID: {
          // Model IDs map to the embedding model name on embedding spans,
          // and to the LLM model name everywhere else.
          const modelSemanticConvention =
            spanKind === OpenInferenceSpanKind.EMBEDDING
              ? SemanticConventions.EMBEDDING_MODEL_NAME
              : SemanticConventions.LLM_MODEL_NAME;
          return {
            ...openInferenceAttributes,
            [modelSemanticConvention]: attributes[convention],
          };
        }
        case AISemanticConventions.SETTINGS:
          return {
            ...openInferenceAttributes,
            ...safelyGetInvocationParamAttributes(attributes),
          };
        case AISemanticConventions.PROMPT:
        case AISemanticConventions.RESPONSE_OBJECT:
        case AISemanticConventions.RESPONSE_TEXT: {
          return {
            ...openInferenceAttributes,
            ...safelyGetIOValueAttributes({
              attributeValue: attributes[convention],
              OpenInferenceSemanticConventionKey:
                openInferenceKey as OpenInferenceIOConventionKey,
            }),
          };
        }
        case AISemanticConventions.RESPONSE_TOOL_CALLS:
          return {
            ...openInferenceAttributes,
            ...safelyGetToolCallMessageAttributes(attributes[convention]),
          };
        case AISemanticConventions.PROMPT_MESSAGES:
          return {
            ...openInferenceAttributes,
            ...safelyGetInputMessageAttributes(attributes[convention]),
          };
        case AISemanticConventions.EMBEDDING_TEXT:
        case AISemanticConventions.EMBEDDING_TEXTS:
        case AISemanticConventions.EMBEDDING_VECTOR:
        case AISemanticConventions.EMBEDDING_VECTORS:
          return {
            ...openInferenceAttributes,
            ...safelyGetEmbeddingAttributes({
              attributeValue: attributes[convention],
              OpenInferenceSemanticConventionKey: openInferenceKey,
            }),
          };
        default:
          // Exhaustiveness check: fails to compile if a convention is unhandled.
          return assertUnreachable(convention);
      }
    },
    openInferenceAttributes,
  );
};
|
/**
* Gets the OpenInference attributes associated with the span from the initial attributes
* @param attributesWithSpanKind the initial attributes of the span and the OpenInference span kind
* @param attributesWithSpanKind.attributes the initial attributes of the span
* @param attributesWithSpanKind.spanKind the OpenInference span kind
* @returns The OpenInference attributes associated with the span
*/
|
https://github.com/Arize-ai/openinference/blob/b151bc9a3f8243c846c2981ade94e3d2823602e7/js/packages/openinference-vercel/src/utils.ts#L366-L499
|
b151bc9a3f8243c846c2981ade94e3d2823602e7
|
liveview-svelte-pwa
|
github_2023
|
thisistonydang
|
typescript
|
isHTMLElement
|
/**
 * Type guard to check if an element is an HTMLElement.
 * Narrows `element` so callers can use HTMLElement-only APIs such as focus().
 */
function isHTMLElement(element: Element | HTMLElement): element is HTMLElement {
  return element instanceof HTMLElement;
}
|
/**
* Type guard to check if an element is an HTMLElement.
*/
|
https://github.com/thisistonydang/liveview-svelte-pwa/blob/d501458f69f8279b5f84eddb2b9ccef9fd399664/assets/lib/actions/focusTrap.ts#L9-L11
|
d501458f69f8279b5f84eddb2b9ccef9fd399664
|
liveview-svelte-pwa
|
github_2023
|
thisistonydang
|
typescript
|
focusElement
|
/**
 * Focuses an element if it is an HTMLElement; otherwise logs an error.
 * @param element the element to focus
 */
function focusElement(element: Element) {
  if (!isHTMLElement(element)) {
    console.error("Element is not an HTMLElement and cannot be focused.", element);
    return;
  }
  element.focus();
}
|
/**
 * Focus an element, or log an error if it is not an HTMLElement.
*/
|
https://github.com/thisistonydang/liveview-svelte-pwa/blob/d501458f69f8279b5f84eddb2b9ccef9fd399664/assets/lib/actions/focusTrap.ts#L16-L22
|
d501458f69f8279b5f84eddb2b9ccef9fd399664
|
float32
|
github_2023
|
KevinZonda
|
typescript
|
reqStore.isLoading
|
// Read-only accessor exposing the private _isLoading flag to consumers.
public get isLoading() {
  return this._isLoading
}
|
//region isLoading
|
https://github.com/KevinZonda/float32/blob/c06981616c71089e63c7e11f077b2c209c0f4b94/frontend/src/Store/ReqStore.ts#L28-L30
|
c06981616c71089e63c7e11f077b2c209c0f4b94
|
commerceplate
|
github_2023
|
zeon-studio
|
typescript
|
RegularPages
|
// Renders a regular (non-collection) markdown page from "pages", matched by the route slug.
const RegularPages = ({ params }: { params: { regular: string } }) => {
  const regularData = getSinglePage("pages");
  const data = regularData.find(
    (page: RegularPage) => page.slug === params.regular
  );
  // 404 when no markdown page matches the route param.
  if (!data) return notFound();
  const { frontmatter, content } = data;
  const { title, meta_title, description, image } = frontmatter;
  // NOTE(review): callToAction is unused while <CallToAction /> below stays commented out.
  const callToAction = getListPage("sections/call-to-action.md");
  return (
    <>
      <SeoMeta
        title={title}
        meta_title={meta_title}
        description={description}
        image={image}
      />
      <PageHeader title={title} />
      <section className="section">
        <div className="container">
          <div className="content">
            <MDXContent content={content} />
          </div>
        </div>
      </section>
      {/* <CallToAction data={callToAction} /> */}
    </>
  );
};
|
// For all regular pages
|
https://github.com/zeon-studio/commerceplate/blob/de740683efd10508e0270caf8d376b03fe9b1fed/src/app/[regular]/page.tsx#L17-L48
|
de740683efd10508e0270caf8d376b03fe9b1fed
|
commerceplate
|
github_2023
|
zeon-studio
|
typescript
|
readFile
|
// Reads a file from disk and returns its contents as a UTF-8 string (synchronous).
const readFile = (filePath: string) => fs.readFileSync(filePath, "utf-8");
|
// Helper function to read file content
|
https://github.com/zeon-studio/commerceplate/blob/de740683efd10508e0270caf8d376b03fe9b1fed/src/lib/contentParser.ts#L9-L11
|
de740683efd10508e0270caf8d376b03fe9b1fed
|
commerceplate
|
github_2023
|
zeon-studio
|
typescript
|
parseFrontmatter
|
// Deep-clones frontmatter into a plain JSON-safe object via a stringify/parse round trip.
const parseFrontmatter = (frontmatter: any) =>
  JSON.parse(JSON.stringify(frontmatter));
|
// Helper function to parse frontmatter
|
https://github.com/zeon-studio/commerceplate/blob/de740683efd10508e0270caf8d376b03fe9b1fed/src/lib/contentParser.ts#L14-L17
|
de740683efd10508e0270caf8d376b03fe9b1fed
|
commerceplate
|
github_2023
|
zeon-studio
|
typescript
|
readingTime
|
// Estimates reading time for a chunk of HTML-ish content, counting words and
// giving embedded <img> tags extra (decreasing) seconds each.
const readingTime = (content: string): string => {
  const WORDS_PER_SECOND = 275 / 60;
  const wordPattern = /\w/;
  let imageCount = 0;
  // Count whitespace-separated tokens containing a word character;
  // tally <img> occurrences along the way.
  const wordCount = content.split(" ").filter((token) => {
    if (token.includes("<img")) {
      imageCount += 1;
    }
    return wordPattern.test(token);
  }).length;
  // Images are excluded from the word count (4 tokens each) and instead add
  // 12s, 11s, ... down to a floor of 3s apiece.
  const imageWordAdjustment = imageCount * 4;
  let imageSeconds = 0;
  let perImageSeconds = 12;
  for (let remaining = imageCount; remaining > 0; remaining--) {
    imageSeconds += perImageSeconds;
    if (perImageSeconds > 3) {
      perImageSeconds -= 1;
    }
  }
  const minutes = Math.ceil(
    ((wordCount - imageWordAdjustment) / WORDS_PER_SECOND + imageSeconds) / 60
  );
  // Zero-pad single-digit results; singular "Min" only below two minutes.
  if (minutes >= 10) {
    return minutes + ` Mins read`;
  }
  return minutes < 2 ? "0" + minutes + ` Min read` : "0" + minutes + ` Mins read`;
};
|
// content reading
|
https://github.com/zeon-studio/commerceplate/blob/de740683efd10508e0270caf8d376b03fe9b1fed/src/lib/utils/readingTime.ts#L2-L38
|
de740683efd10508e0270caf8d376b03fe9b1fed
|
commerceplate
|
github_2023
|
zeon-studio
|
typescript
|
similerItems
|
// Finds products similar to the current one: anything sharing a category or a
// tag, deduplicated, with the current item (by slug) removed.
const similerItems = (
  currentItem: Post,
  allItems: Post[],
  slug: string,
): Post[] => {
  const categories: string[] =
    currentItem.frontmatter.categories.length > 0
      ? currentItem.frontmatter.categories
      : [];
  const tags: string[] =
    currentItem.frontmatter.tags.length > 0 ? currentItem.frontmatter.tags : [];

  // Predicates for category / tag overlap with the current item.
  const sharesCategory = (item: any) =>
    categories.find((category) =>
      item.frontmatter.categories.includes(category),
    );
  const sharesTag = (item: any) =>
    tags.find((tag) => item.frontmatter.tags.includes(tag));

  // Category matches first, then tag matches; Set dedupes by object identity.
  const mergedItems = [
    ...new Set([...allItems.filter(sharesCategory), ...allItems.filter(sharesTag)]),
  ];
  return mergedItems.filter((product) => product.slug !== slug);
};
|
// similar products
|
https://github.com/zeon-studio/commerceplate/blob/de740683efd10508e0270caf8d376b03fe9b1fed/src/lib/utils/similarItems.ts#L4-L41
|
de740683efd10508e0270caf8d376b03fe9b1fed
|
commerceplate
|
github_2023
|
zeon-studio
|
typescript
|
htmlEntityDecoder
|
const htmlEntityDecoder = (htmlWithEntities: string): string => {
let entityList: { [key: string]: string } = {
" ": " ",
"<": "<",
">": ">",
"&": "&",
""": '"',
"'": "'",
};
let htmlWithoutEntities: string = htmlWithEntities.replace(
/(&|<|>|"|')/g,
(entity: string): string => {
return entityList[entity];
},
);
return htmlWithoutEntities;
};
|
// strip entities for plainify
|
https://github.com/zeon-studio/commerceplate/blob/de740683efd10508e0270caf8d376b03fe9b1fed/src/lib/utils/textConverter.ts#L46-L62
|
de740683efd10508e0270caf8d376b03fe9b1fed
|
private-pdf
|
github_2023
|
photown
|
typescript
|
Controller.getPagesOverlappingOverlay
|
private getPagesOverlappingOverlay(
pages: NodeListOf<Element>,
draggableTopLeft: [number, number],
draggableBottomRight: [number, number],
pageCount: number
): Array<number> {
const pagesToInclude = [];
for (var i = 1; i <= pageCount; i++) {
const page = pages[i - 1] as HTMLElement;
const pageTopLeft = [page.offsetLeft, page.offsetTop];
const pageBottomRight = [
page.offsetLeft + page.offsetWidth,
page.offsetTop + page.offsetHeight,
];
if (
this.doRectsOverlap(
draggableTopLeft,
draggableBottomRight,
pageTopLeft,
pageBottomRight
)
) {
pagesToInclude.push(i);
}
}
return pagesToInclude;
}
|
// Returns a list of the page numbers for the pages that the draggable overlaps with.
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/Controller.ts#L435-L461
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
private-pdf
|
github_2023
|
photown
|
typescript
|
Controller.adjustTransformToPageRotation
|
// Rotates the Transform's (x, y) to match the page's intrinsic rotation and
// translates it back into positive page coordinates. Mutates `transform`
// in place (x, y, rotation are overwritten).
private adjustTransformToPageRotation(
  transform: Transform,
  pdfPage: PdfPage
): void {
  var drawX = null;
  var drawY = null;
  const pageRotation = pdfPage.getRotation();
  // Angle in radians for the 2D rotation matrix [cos -sin; sin cos] below.
  const rotationRads = (pageRotation * Math.PI) / 180;
  var dimensionWidth = pdfPage.getSize()[0];
  var dimensionHeight = pdfPage.getSize()[1];
  // For quarter turns the page's width/height are swapped.
  if (pageRotation == 90 || pageRotation == 270) {
    const t = dimensionWidth;
    dimensionWidth = dimensionHeight;
    dimensionHeight = t;
  }
  // Each branch rotates the point, then translates by the page dimension(s)
  // so the result stays inside the page's coordinate space.
  // NOTE(review): Math.cos/sin at exact multiples of 90 degrees yield tiny
  // non-zero values (~6e-17) instead of 0 — presumably acceptable here; confirm.
  if (pageRotation === 90) {
    drawX =
      transform.x * Math.cos(rotationRads) -
      transform.y * Math.sin(rotationRads) +
      dimensionWidth;
    drawY =
      transform.x * Math.sin(rotationRads) +
      transform.y * Math.cos(rotationRads);
  } else if (pageRotation === 180) {
    drawX =
      transform.x * Math.cos(rotationRads) -
      transform.y * Math.sin(rotationRads) +
      dimensionWidth;
    drawY =
      transform.x * Math.sin(rotationRads) +
      transform.y * Math.cos(rotationRads) +
      dimensionHeight;
  } else if (pageRotation === 270) {
    drawX =
      transform.x * Math.cos(rotationRads) -
      transform.y * Math.sin(rotationRads);
    drawY =
      transform.x * Math.sin(rotationRads) +
      transform.y * Math.cos(rotationRads) +
      dimensionHeight;
  } else {
    //no rotation
    drawX = transform.x;
    drawY = transform.y;
  }
  transform.x = drawX;
  transform.y = drawY;
  transform.rotation = pageRotation;
}
|
/** Applies a matrix transformation on the `Transform` object to be aligned with the `PdfPage`. */
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/Controller.ts#L543-L594
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
private-pdf
|
github_2023
|
photown
|
typescript
|
View.setOnContentScrollEventListener
|
// Registers a scroll listener on the #content element that reports, via
// `scrollEvent`, the 1-based page whose vertical center is closest to the
// center of the visible content area.
public setOnContentScrollEventListener(
  scrollEvent: (currentPage: number) => void
) {
  // Capture `this` for use inside the non-arrow scroll handler below.
  const that = this;
  (document.getElementById("content") as HTMLElement).addEventListener(
    "scroll",
    function () {
      // Iterate through each element and check its position
      const elements = document.querySelectorAll(".page");
      var currentScrollPage = -1;
      const content = that.content;
      var minDist = Number.MAX_VALUE;
      // Distance from each page's center to the viewport center; the
      // smallest distance wins.
      elements?.forEach((element, index) => {
        const casted = element as HTMLElement;
        const dist = Math.abs(
          -content.scrollTop +
            casted.offsetTop +
            casted.offsetHeight / 2 -
            content.offsetHeight / 2
        );
        if (minDist > dist) {
          minDist = dist;
          currentScrollPage = index + 1; // pages are 1-based
        }
      });
      // Only fire when at least one .page element was found.
      if (currentScrollPage != -1) {
        scrollEvent(currentScrollPage);
      }
    }
  );
}
|
/** Updates the page number wherever relevant (such as the thumbnails) as the user scrolls. */
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/View.ts#L109-L142
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
private-pdf
|
github_2023
|
photown
|
typescript
|
View.extractFormInputValues
|
// Collects the current values of all rendered PDF form fields (text inputs,
// textareas, checkboxes, radio groups, dropdowns and option lists) into a
// FormInputValues object. Fields inside .draggable overlays are excluded —
// those are user-added overlays, not PDF form fields.
public extractFormInputValues(): FormInputValues {
  const that = this;
  const formInputValues: FormInputValues = new FormInputValues();
  const textInputElements = this.content.querySelectorAll(
    ':not(.draggable) > input[type="text"]'
  );
  textInputElements.forEach(function (inputElement) {
    const casted = inputElement as HTMLInputElement;
    formInputValues.textNameToValue.set(casted.name, casted.value);
  });
  const textAreaElements = this.content.querySelectorAll(
    ":not(.draggable) > textarea"
  );
  textAreaElements.forEach(function (textAreaElement) {
    const casted = textAreaElement as HTMLTextAreaElement;
    formInputValues.textNameToValue.set(casted.name, casted.value);
  });
  const checkboxInputElements = this.content.querySelectorAll(
    ':not(.draggable) > input[type="checkbox"]'
  );
  checkboxInputElements.forEach(function (inputElement) {
    const casted = inputElement as HTMLInputElement;
    formInputValues.checkboxNameToValue.set(casted.name, casted.checked);
  });
  const radioInputFields = this.content.querySelectorAll(
    ':not(.draggable) > input[type="radio"]'
  );
  // Radio buttons are grouped by their shared `name`; collect group names first.
  const radioGroups: Set<string> = new Set();
  radioInputFields.forEach(function (inputElement) {
    const casted = inputElement as HTMLInputElement;
    radioGroups.add(casted.name);
  });
  radioGroups.forEach(function (groupName) {
    const radioButtons = Array.from(document.getElementsByName(groupName));
    var selected = radioButtons.find(
      (radioButton) => (radioButton as HTMLInputElement).checked
    );
    if (selected != null) {
      // pdfjs doesn't necessarily add form fields in the same order as
      // in the original PDF. Instead we rely on the zIndex which is in
      // the correct order.
      var minZIndex = that.calculateSmallestZIndex(
        radioButtons.map((el) => el.parentElement as HTMLElement)
      );
      // Index of the selected button within the group, derived from its
      // parent's zIndex relative to the group's smallest zIndex.
      var adjustedIndex =
        parseInt(
          getComputedStyle(selected.parentElement as HTMLElement).zIndex
        ) - minZIndex;
      formInputValues.radioGroupNameToSelectedIndex.set(
        groupName,
        adjustedIndex
      );
    }
  });
  const selectFields = this.content.querySelectorAll(
    ":not(.draggable) > select"
  );
  selectFields.forEach(function (selectElement) {
    const casted = selectElement as HTMLSelectElement;
    // size > 1 renders as a list box (option list); size <= 1 as a dropdown.
    if (casted.size > 1) {
      formInputValues.optionNameToSelectedIndex.set(
        casted.name,
        casted.selectedIndex
      );
    } else {
      formInputValues.dropdownNameToSelectedIndex.set(
        casted.name,
        casted.selectedIndex
      );
    }
  });
  return formInputValues;
}
|
/** Iterates through all form fields in the PDF and returns them as a `FormInputValues` object. */
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/View.ts#L346-L424
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
private-pdf
|
github_2023
|
photown
|
typescript
|
View.setupDraggable
|
// Wires up a draggable overlay: initial position (staggered by how many
// draggables already exist), delete button, mouse-drag handling via its
// drag handle, and focus/blur styling.
private setupDraggable(
  draggableElement: HTMLElement,
  numDraggables: number
): void {
  let offsetX: number, offsetY: number;
  const scrollTop = (document.getElementById("content") as HTMLElement)
    .scrollTop;
  // Stagger each new draggable 10px right/down from the previous one,
  // offset by the current scroll position so it appears in view.
  draggableElement.style.left = 50 + (numDraggables - 1) * 10 + "px";
  draggableElement.style.top =
    scrollTop + (50 + (numDraggables - 1) * 10) + "px";
  // Delete button removes the overlay entirely.
  (draggableElement.querySelector(".options-delete") as HTMLElement).onclick =
    function () {
      draggableElement.remove();
    };
  // Drag: remember the grab offset on mousedown, follow the cursor on
  // mousemove, and clean up listeners on mouseup.
  const mouseDownListener = function (event: MouseEvent) {
    offsetX = event.clientX - draggableElement.offsetLeft;
    offsetY = event.clientY - draggableElement.offsetTop;
    draggableElement.style.opacity = "0.7"; // visual cue while dragging
    window.addEventListener("mousemove", mouseMoveListener);
    window.addEventListener("mouseup", mouseUpListener);
  };
  const mouseMoveListener = function (event: MouseEvent) {
    const x = event.clientX - offsetX;
    const y = event.clientY - offsetY;
    draggableElement.style.left = `${x}px`;
    draggableElement.style.top = `${y}px`;
  };
  const mouseUpListener = function (event: MouseEvent) {
    window.removeEventListener("mousemove", mouseMoveListener);
    window.removeEventListener("mouseup", mouseUpListener);
    draggableElement.style.opacity = "1";
  };
  const a = draggableElement.querySelector(".drag-handle") as HTMLElement;
  a.addEventListener("mousedown", mouseDownListener);
  // Mark the overlay focused when any element inside it gains focus.
  draggableElement.addEventListener("focusin", function (event: FocusEvent) {
    const targetElement: Element = event.target as Element;
    const parent = targetElement.closest(".draggable");
    if (
      parent != draggableElement ||
      draggableElement.classList.contains("focused")
    ) {
      return;
    }
    draggableElement.classList.remove("unfocused");
    draggableElement.classList.add("focused");
  });
  // Unfocus only when focus moves entirely outside this overlay.
  draggableElement.addEventListener("focusout", function (event: FocusEvent) {
    if (draggableElement.classList.contains("unfocused")) {
      return;
    }
    const newlyFocusedElement: Element = event.relatedTarget as Element;
    if (newlyFocusedElement != null) {
      const parent = newlyFocusedElement.closest(".draggable");
      if (parent == draggableElement) {
        return;
      }
    }
    draggableElement.classList.remove("focused");
    draggableElement.classList.add("unfocused");
  });
}
|
/** Sets up the draggable overlay's button options and dragging. */
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/View.ts#L669-L737
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
private-pdf
|
github_2023
|
photown
|
typescript
|
PdfDocumentSaver.applyChangesAndSave
|
// Produces a new PDF (as bytes) with all user-made changes applied:
// filled form fields, drawn text/image overlays per page, and page rotation.
// The original bytes are not modified.
public async applyChangesAndSave(
  originalPdfBytes: Uint8Array,
  formInputValues: FormInputValues,
  overlays: Overlays,
  rotateBy: number
): Promise<Uint8Array> {
  const pdfDoc = await PDFDocument.load(originalPdfBytes);
  // Write user-entered form values into the document's AcroForm.
  this.populateFormValues(formInputValues, pdfDoc.getForm());
  // Embed only the standard fonts actually referenced by text overlays.
  const neededFonts: Map<string, PDFFont> = await this.getNeededFonts(
    overlays,
    pdfDoc
  );
  // Fallback font used when an overlay's font was not embedded above.
  const helveticaFont = await pdfDoc.embedFont(StandardFonts.Helvetica);
  const base64ToPdfImageMap = await this.embedImages(overlays, pdfDoc);
  for (const [pageNumber, pageOverlays] of overlays.pagesOverlays) {
    const page = pdfDoc.getPage(pageNumber - 1); // in pdflib pages are 0-based
    this.doTextDrawing(pageOverlays, page, neededFonts, helveticaFont);
    this.doImageDrawing(pageOverlays, page, base64ToPdfImageMap);
  }
  this.rotatePages(pdfDoc, rotateBy);
  return pdfDoc.save();
}
|
/** Returns a `Uint8Array` which represents a PDF with all user-made changes applied, such as filled PDF forms, added overlays, page rotations. */
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/pdf/PdfDocumentSaver.ts#L22-L48
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
private-pdf
|
github_2023
|
photown
|
typescript
|
PdfDocumentSaver.populateFormValues
|
// Writes the user-entered values into the PDF form's fields. Each field is
// updated independently: a failure on one field is logged and does not stop
// the remaining updates.
private populateFormValues(formInputValues: FormInputValues, form: PDFForm) {
  // Shared try/catch wrapper so every field kind gets the same best-effort
  // error handling and log format.
  const tryUpdate = (
    fieldKind: string,
    key: string,
    value: unknown,
    update: () => void
  ) => {
    try {
      update();
    } catch (error) {
      console.log(
        `Error thrown while updating the ${fieldKind} value of ${key} with ${value}.`,
        error
      );
    }
  };
  for (const [key, value] of formInputValues.textNameToValue) {
    tryUpdate("text field", key, value, () =>
      form.getTextField(key).setText(value)
    );
  }
  for (const [key, value] of formInputValues.checkboxNameToValue) {
    tryUpdate("checkbox", key, value, () => {
      if (value) {
        form.getCheckBox(key).check();
      } else {
        form.getCheckBox(key).uncheck();
      }
    });
  }
  // Indexed selections are skipped silently when out of range.
  for (const [key, value] of formInputValues.dropdownNameToSelectedIndex) {
    tryUpdate("dropdown", key, value, () => {
      const dropdown = form.getDropdown(key);
      const options = dropdown.getOptions();
      if (value >= 0 && value < options.length) {
        dropdown.select(options[value]);
      }
    });
  }
  for (const [key, value] of formInputValues.optionNameToSelectedIndex) {
    tryUpdate("options list", key, value, () => {
      const optionsList = form.getOptionList(key);
      const options = optionsList.getOptions();
      if (value >= 0 && value < options.length) {
        optionsList.select(options[value]);
      }
    });
  }
  for (const [key, value] of formInputValues.radioGroupNameToSelectedIndex) {
    tryUpdate("radio group", key, value, () => {
      const radioGroup = form.getRadioGroup(key);
      const options = radioGroup.getOptions();
      if (value >= 0 && value < options.length) {
        radioGroup.select(options[value]);
      }
    });
  }
}
|
/** Populates the form fields in the PDF with any changes the user has done. */
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/pdf/PdfDocumentSaver.ts#L134-L208
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
private-pdf
|
github_2023
|
photown
|
typescript
|
PdfDocumentSaver.getNeededFonts
|
// Embeds each distinct standard font referenced by any text overlay and
// returns a map of font family name -> embedded PDFFont.
private async getNeededFonts(overlays: Overlays, pdfDoc: PDFDocument) {
  const standardFontNames: string[] = Object.values(StandardFonts);
  const embeddedFonts: Map<string, PDFFont> = new Map();
  for (const pageOverlays of overlays.pagesOverlays.values()) {
    for (const { fontFamily } of pageOverlays.textOverlays) {
      // Only embed known standard fonts, and each at most once.
      const isStandardFont = standardFontNames.includes(fontFamily);
      if (isStandardFont && !embeddedFonts.has(fontFamily)) {
        embeddedFonts.set(fontFamily, await pdfDoc.embedFont(fontFamily));
      }
    }
  }
  return embeddedFonts;
}
|
/** Returns any fonts that need to be embedded into the PDF. */
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/pdf/PdfDocumentSaver.ts#L211-L226
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
private-pdf
|
github_2023
|
photown
|
typescript
|
ColorUtils.parseRgb
|
/** Decomposes strings like "rgb(2, 255, 0)" into their RGB components, or null on no match. */
public static parseRgb(rgbString: string): RGB | null {
  const match = rgbString.match(/(\d+), (\d+), (\d+)/);
  if (!match) {
    return null;
  }
  return {
    red: parseInt(match[1], 10),
    green: parseInt(match[2], 10),
    blue: parseInt(match[3], 10),
  };
}
|
/** Decomposes strings like "rgb(2, 255, 0)" into its components. */
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/utils/ColorUtils.ts#L11-L24
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
private-pdf
|
github_2023
|
photown
|
typescript
|
ColorUtils.normalize
|
/** Scales each color component from [0, 255] down to [0, 1]. */
public static normalize(rgb: RGB): RGB {
  const scale = (component: number) => component / 255;
  return {
    red: scale(rgb.red),
    blue: scale(rgb.blue),
    green: scale(rgb.green),
  };
}
|
/** Converts the components of this color to be in the range [0, 1]. */
|
https://github.com/photown/private-pdf/blob/dedb3608c6881cd84f0c5ff960896c27f5af9f7c/src/utils/ColorUtils.ts#L27-L33
|
dedb3608c6881cd84f0c5ff960896c27f5af9f7c
|
vagometro
|
github_2023
|
leo-holanda
|
typescript
|
SearchResultsComponent.filterJobsBySearchData
|
/**
 * Filters jobs to those matching every non-empty facet of the current
 * easy-search data (experience levels, workplace types, keywords, contract
 * types, inclusion types). Empty facets match everything; with no search
 * data at all, the list is returned unchanged.
 * Improvements: the searched keyword names were recomputed inside the inner
 * loop for every job keyword (now hoisted), and the inner callback shadowed
 * the outer `keyword` variable (now renamed).
 */
private filterJobsBySearchData = (jobs: Job[]): Job[] => {
  const searchData = this.easySearchService.getSearchData();
  if (!searchData) return jobs;

  // Loop-invariant: computed once instead of once per job keyword.
  const searchedKeywordNames = searchData.keywords.map(
    (searchKeyword) => searchKeyword.name,
  );

  return jobs.filter((job) => {
    if (searchData.experienceLevels.length > 0) {
      const hasExperienceLevel = job.experienceLevels.some((experienceLevel) =>
        searchData.experienceLevels.includes(experienceLevel),
      );
      if (!hasExperienceLevel) return false;
    }

    if (searchData.workplaceTypes.length > 0) {
      const hasWorkPlaceType = job.workplaceTypes.some((workplaceType) =>
        searchData.workplaceTypes.includes(workplaceType),
      );
      if (!hasWorkPlaceType) return false;
    }

    if (searchData.keywords.length > 0) {
      const hasKeywords = job.keywords.some((jobKeyword) =>
        searchedKeywordNames.includes(jobKeyword.name),
      );
      if (!hasKeywords) return false;
    }

    if (searchData.contractTypes.length > 0) {
      const hasContractTypes = job.contractTypes.some((contractType) =>
        searchData.contractTypes.includes(contractType),
      );
      if (!hasContractTypes) return false;
    }

    if (searchData.inclusionTypes.length > 0) {
      const hasInclusionTypes = job.inclusionTypes.some((inclusionType) =>
        searchData.inclusionTypes.includes(inclusionType),
      );
      if (!hasInclusionTypes) return false;
    }

    return true;
  });
}
|
//TODO: Move this to jobService
|
https://github.com/leo-holanda/vagometro/blob/d150ce5f079935957520dd50db1858da506cacdb/frontend/src/app/job/easy-search/search-results/search-results.component.ts
|
d150ce5f079935957520dd50db1858da506cacdb
|
vagometro
|
github_2023
|
leo-holanda
|
typescript
|
ChartService.getHalfYearlyMovingAverage
|
// Emits the half-yearly moving average of daily job postings.
// NOTE(review): despite the "HalfYearly" name, this currently reuses the
// weekly moving-average mapper over 182-day groups; the dedicated
// half-year split is still an open TODO — confirm intended behavior.
getHalfYearlyMovingAverage(
jobs$: Observable<Job[] | undefined> = this.jobService.jobs$,
): Observable<ShortTermSeriesData[]> {
// 182 days ~= half a year.
return this.getDailyPostingsSeries(jobs$).pipe(
map((series) => this.splitInGroups(series, 182)),
map(this.mapToWeeklyMovingAverageSeries),
);
}
|
//TODO: Implement the split by half year
|
https://github.com/leo-holanda/vagometro/blob/d150ce5f079935957520dd50db1858da506cacdb/frontend/src/app/statistics/charts/chart.service.ts#L79-L86
|
d150ce5f079935957520dd50db1858da506cacdb
|
vagometro
|
github_2023
|
leo-holanda
|
typescript
|
ChartService.splitInWeeks
|
/**
 * Splits a daily series into week-sized chunks, aligning on the first
 * entry's weekday so later chunks each cover 7 days.
 *
 * NOTE(review): getDay() returns 0-6, so differenceFromWeekFirstDay is
 * always >= 1 and the `howManyDaysToSlice > 0` guard is always true — the
 * zero-valued sentinel week is pushed unconditionally. Also throws on an
 * empty series (series[0] access). Confirm both are intended.
 */
private splitInWeeks(series: ShortTermSeriesData[]): ShortTermSeriesData[][] {
// Copy the date so the sentinel entry below doesn't alias the original.
const firstDataEntryDate = new Date(series[0].value[0].getTime());
const dayOfWeek = firstDataEntryDate.getDay();
const differenceFromWeekFirstDay = 7 - dayOfWeek;
const howManyDaysToSlice = differenceFromWeekFirstDay + 1;
const weeks: ShortTermSeriesData[][] = [];
// Seed a zero-valued entry dated at the series start (see NOTE above).
if (howManyDaysToSlice > 0) weeks.push([{ value: [firstDataEntryDate, 0] }]);
// Partial first week, then full 7-day slices for the remainder.
weeks.push(series.slice(0, howManyDaysToSlice));
for (let i = howManyDaysToSlice; i < series.length; i += 7) {
weeks.push(series.slice(i, i + 7));
}
return weeks;
}
|
//TODO: Move all the moving average logic to statistics service and just do the map here
|
https://github.com/leo-holanda/vagometro/blob/d150ce5f079935957520dd50db1858da506cacdb/frontend/src/app/statistics/charts/chart.service.ts#L254-L270
|
d150ce5f079935957520dd50db1858da506cacdb
|
vagometro
|
github_2023
|
leo-holanda
|
typescript
|
MapDataService.constructor
|
constructor(private jobService: JobService) {
this.mapGeoJson = topojson.feature(
brazilTopoJson as unknown as TopoJSON.Topology,
brazilTopoJson.objects.uf as unknown as TopoJSON.GeometryCollection,
);
}
|
//TODO: Set the correct type
|
https://github.com/leo-holanda/vagometro/blob/d150ce5f079935957520dd50db1858da506cacdb/frontend/src/app/statistics/maps/map-data.service.ts#L15-L20
|
d150ce5f079935957520dd50db1858da506cacdb
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
printSuccess
|
/**
 * Pretty-prints a command result as 2-space-indented JSON and, when
 * requested, appends a quote of the day plus a donation address with a
 * scannable QR code.
 *
 * @param data Arbitrary result payload to serialize.
 * @param showDonation When falsy, only the JSON payload is printed.
 *
 * Setting env var DISABLE_DONATE_QUOTE=true also suppresses the
 * quote/donation section.
 */
function printSuccess(data: any, showDonation?: boolean) {
  console.log(JSON.stringify(data, null, 2));
  if (!showDonation) {
    return;
  }
  // Simplified: the original also tested existence first, but comparing
  // undefined to 'true' is already false, so that check was redundant.
  if (process.env.DISABLE_DONATE_QUOTE === 'true') {
    return;
  }
  console.log(chalk.blue("\n\n------------------------------------------------------------------------------"));
  // Fallback quote used when the quotes library is unavailable.
  let q = 'Recommend to your children virtue; that alone can make them happy, not gold.';
  let by = 'Ludwig van Beethoven';
  try {
    const quoteObj = quotes.getTodaysQuote();
    q = quoteObj.body;
    by = quoteObj.by;
  } catch (ex) {
    // Lib not installed
  }
  console.log(chalk.green(q));
  console.log(chalk.green('- ' + by));
  console.log(chalk.blue("------------------------------------------------------------------------------\n"))
  const donate = 'bc1pe608upsgh9dc3ywv0gp8vuhqa8rmct7v6m3qm20qk3vw6lktp03qrgh3aq';
  // Repaired mojibake: the heart emoji was mis-encoded as 'β€οΈ'.
  console.log('Thank you for your support and contributions to Atomicals CLI development! ❤️');
  console.log(`Donation address: ${donate}\n`);
  console.log(`Even a little goes a long way!\n`);
  console.log(`Scan QR Code to Donate:`);
  qrcode.generate(donate, { small: true });
}
|
/////////////////////////////////////////////////////////////////////////////////////////////
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/cli.ts#L23-L52
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
resolveRealmAction
|
/**
 * CLI action: resolves a realm (or subrealm) name to its atomical and
 * prints the lookup result as JSON. Errors are logged rather than
 * rethrown so the CLI exits cleanly.
 */
const resolveRealmAction = async (realm_or_subrealm, options) => {
  try {
    // Kept for its validation side effect; the returned configuration
    // object was bound to an unused local in the original.
    validateCliInputs();
    const atomicals = new Atomicals(ElectrumApi.createClient(process.env.ELECTRUMX_PROXY_BASE_URL || ''));
    const result: any = await atomicals.getAtomicalByRealm(realm_or_subrealm);
    console.log(JSON.stringify(result, null, 2));
  } catch (error) {
    console.log(error);
  }
}
|
/////////////////////////////////////////////////////////////////////////////////////////////
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/cli.ts#L646-L655
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
AtomicalOperationBuilder.getDataObjectFromStringTypeHints
|
/**
 * Builds the data object described by an array of field type-hint strings
 * (file, aliased file, or scalar/json entries) by delegating to
 * prepareFilesDataAsObject.
 *
 * @param fieldTypeHints The type hint string array
 */
static async getDataObjectFromStringTypeHints(fieldTypeHints: string[]) {
  const prepared = await prepareFilesDataAsObject(fieldTypeHints);
  return prepared;
}
|
/**
* For each array element do:
*
* - determine if it's a file, or a file with an alias, or a scalar/json object type
*
* @param fieldTypeHints The type hint string array
*/
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/utils/atomical-operation-builder.ts#L347-L349
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
AtomicalOperationBuilder.addInputUtxo
|
/**
 * Registers an additional UTXO to be spent in the constructed tx.
 *
 * @param utxoPartial The UTXO to spend
 * @param wif The signing WIF key for that UTXO
 */
addInputUtxo(utxoPartial: IInputUtxoPartial, wif: string) {
  const signingKeypair = ECPair.fromWIF(wif);
  this.inputUtxos.push({
    utxo: utxoPartial,
    keypairInfo: getKeypairInfo(signingKeypair),
  });
}
|
/**
*
* @param utxoPartial The UTXO to spend in the constructed tx
* @param wif The signing WIF key for the utxo
*/
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/utils/atomical-operation-builder.ts#L450-L457
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
AtomicalOperationBuilder.setInputParent
|
/**
 * Sets the parent atomical input, linking the operation's $parent
 * reference to an input spend.
 *
 * @throws Error when input.parentId is not an atomical id in compact form.
 */
setInputParent(input: ParentInputAtomical) {
  const { parentId } = input;
  if (isAtomicalId(parentId)) {
    this.parentInputAtomical = input;
    return;
  }
  throw new Error("Invalid parent atomical id: " + parentId);
}
|
/**
* Set an input parent for linking with $parent reference of the operation to an input spend
*/
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/utils/atomical-operation-builder.ts#L462-L468
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
AtomicalOperationBuilder.addOutput
|
/**
 * Queues an additional output for the transaction; normally used together
 * with addInputUtxo.
 *
 * @param output Address/value pair to append
 */
addOutput(output: { address: string; value: number }) {
  const pendingOutputs = this.additionalOutputs;
  pendingOutputs.push(output);
}
|
/**
* Additional output to add, to be used with addInputUtxo normally
* @param output Output to add
*/
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/utils/atomical-operation-builder.ts#L481-L483
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
stopAllWorkers
|
// Terminates every active worker thread and resets the pool to empty.
const stopAllWorkers = () => {
  for (const activeWorker of workers) {
    activeWorker.terminate();
  }
  workers = [];
};
|
// Function to stop all worker threads
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/utils/atomical-operation-builder.ts#L667-L672
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
customFinalizer
|
// We have to construct our witness script in a custom finalizer.
// Note: .concat appends script/controlBlock as single stack elements
// (Buffers are not concat-spreadable), which is exactly what the witness
// stack needs — do not replace with array spread.
const customFinalizer = (_inputIndex: number, input: any) => {
  const firstSignature = input.tapScriptSig[0].signature;
  const witness = [firstSignature]
    .concat(tapLeafScript.script)
    .concat(tapLeafScript.controlBlock);
  return {
    finalScriptWitness: witnessStackToScriptWitness(witness),
  };
};
|
// We have to construct our witness script in a custom finalizer
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/utils/atomical-operation-builder.ts#L947-L955
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
AtomicalOperationBuilder.calculateFeesRequiredForAccumulatedCommitAndReveal
|
/**
 * Computes the combined commit + reveal fee breakdown. The commit fee
 * assumes the commit and reveal transactions are chained together.
 *
 * @param hashLockP2TROutputLen Extra length of the hash-lock P2TR output,
 *        forwarded to the reveal-fee calculation.
 * @returns Fee totals with and without the planned outputs included.
 */
calculateFeesRequiredForAccumulatedCommitAndReveal(
  hashLockP2TROutputLen: number = 0
): FeeCalculations {
  const revealFee = this.calculateAmountRequiredForReveal(
    hashLockP2TROutputLen
  );
  const commitFee = this.calculateFeesRequiredForCommit();
  // Hoisted: the original evaluated totalOutputSum() twice.
  const outputSum = this.totalOutputSum();
  const commitAndRevealFee = commitFee + revealFee;
  return {
    commitAndRevealFee,
    commitAndRevealFeePlusOutputs: commitAndRevealFee + outputSum,
    revealFeePlusOutputs: revealFee + outputSum,
    commitFeeOnly: commitFee,
    revealFeeOnly: revealFee,
  };
}
|
/**
* Get the commit and reveal fee. The commit fee assumes it is chained together
* @returns
*/
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/utils/atomical-operation-builder.ts#L1152-L1171
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
atomicals-js
|
github_2023
|
danieleth2
|
typescript
|
AtomicalOperationBuilder.addRevealOutputIfChangeRequired
|
/**
 * Adds a change output for the reveal transaction when the inputs carry
 * more satoshis than the outputs + reveal fee (+ the marginal fee of the
 * change output itself) can absorb.
 *
 * @param totalInputsValue Sum of all input satoshis
 * @param totalOutputsValue Sum of all planned output satoshis
 * @param revealFee Fee reserved for the reveal transaction
 * @param address Address that receives the change, if any
 */
addRevealOutputIfChangeRequired(
totalInputsValue: number,
totalOutputsValue: number,
revealFee: number,
address: string
) {
const currentSatoshisFeePlanned = totalInputsValue - totalOutputsValue;
// It will be invalid, but at least we know we don't need to add change
if (currentSatoshisFeePlanned <= 0) {
return;
}
// In order to keep the fee-rate unchanged, we should add extra fee for the new added change output.
// NOTE(review): `satsbyte as any` sidesteps type checking here — if
// satsbyte can be a string this arithmetic would concatenate; confirm
// its declared type and prefer a numeric cast.
const excessSatoshisFound =
currentSatoshisFeePlanned -
revealFee -
(this.options.satsbyte as any) * OUTPUT_BYTES_BASE;
// There were no excess satoshis, therefore no change is due
if (excessSatoshisFound <= 0) {
return;
}
// There were some excess satoshis, but let's verify that it meets the dust threshold to make change
if (excessSatoshisFound >= DUST_AMOUNT) {
this.addOutput({
address: address,
value: excessSatoshisFound,
});
}
}
|
/**
* Adds an extra output at the end if it was detected there would be excess satoshis for the reveal transaction
* @param fee Fee calculations
* @returns
*/
|
https://github.com/danieleth2/atomicals-js/blob/02e854cc71c0f6c6559ff35c2093dc8d526b5d72/lib/utils/atomical-operation-builder.ts#L1178-L1205
|
02e854cc71c0f6c6559ff35c2093dc8d526b5d72
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.