diff --git a/application/bun.lockb b/application/bun.lockb
index 160304d..6b3c354 100644
Binary files a/application/bun.lockb and b/application/bun.lockb differ
diff --git a/application/src/api/realtime/index.ts b/application/src/api/realtime/index.ts
index e114a68..0c62e43 100644
--- a/application/src/api/realtime/index.ts
+++ b/application/src/api/realtime/index.ts
@@ -169,6 +169,117 @@ export default async function handler(req) {
description: "Signal message sent successfully (simulated)"
}
};
+ }
+ // Handle WeChat Work (WeCom) notifications
+ else if (type === "wecom") {
+ const { webhookUrl, message } = body;
+
+ if (!webhookUrl || !message) {
+ console.error("Missing required parameters for WeChat Work notification", {
+ hasWebhookUrl: !!webhookUrl,
+ hasMessage: !!message
+ });
+
+ return {
+ status: 400,
+ json: {
+ ok: false,
+ error_code: 400,
+ description: "Missing required WeChat Work parameters"
+ }
+ };
+ }
+
+ try {
+ console.log("Attempting to call WeChat Work webhook API");
+ console.log("Calling WeChat Work webhook URL: [REDACTED]");
+
+ // Parse the message to get the JSON payload
+ let messagePayload;
+ try {
+ messagePayload = JSON.parse(message);
+ } catch (e) {
+ console.error("Error parsing WeChat Work message payload:", e);
+ return {
+ status: 400,
+ json: {
+ ok: false,
+ error_code: 400,
+ description: "Invalid WeChat Work message format"
+ }
+ };
+ }
+
+ const response = await fetch(webhookUrl, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(messagePayload),
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text();
+ console.error(`WeChat Work API error (${response.status}):`, errorText);
+
+ try {
+ // Try to parse error as JSON if possible
+ const errorJson = JSON.parse(errorText);
+ return {
+ status: response.status,
+ json: errorJson
+ };
+ } catch (e) {
+ // If parsing fails, return the raw error
+ return {
+ status: response.status,
+ json: {
+ ok: false,
+ error_code: response.status,
+ description: `WeChat Work API error: ${errorText}`
+ }
+ };
+ }
+ }
+
+ const result = await response.json();
+ console.log("WeChat Work API response:", JSON.stringify(result, null, 2));
+
+ // WeChat Work API returns errcode: 0 for success
+ if (result.errcode !== 0) {
+ console.error("WeChat Work API error:", result);
+ return {
+ status: 400,
+ json: {
+ ok: false,
+ error_code: result.errcode,
+ description: result.errmsg || "Unknown WeChat Work API error"
+ }
+ };
+ }
+
+ console.log("Successfully sent message to WeChat Work!");
+ return {
+ status: 200,
+ json: {
+ ok: true,
+ result: result,
+ description: "Message sent successfully to WeChat Work"
+ }
+ };
+ } catch (error) {
+ console.error("Error calling WeChat Work API:", error);
+
+ // Return detailed error information
+ return {
+ status: 500,
+ json: {
+ ok: false,
+ error_code: 500,
+ description: `Error sending WeChat Work message: ${error instanceof Error ? error.message : "Unknown error"}`
+ }
+ };
+ }
} else {
// Return error for unsupported notification type
console.error("Unsupported notification type:", type);
diff --git a/application/src/components/dashboard/Header.tsx b/application/src/components/dashboard/Header.tsx
index c937d2e..deeded4 100644
--- a/application/src/components/dashboard/Header.tsx
+++ b/application/src/components/dashboard/Header.tsx
@@ -132,6 +132,9 @@ export const Header = ({
setLanguage("ja")} className={language === "ja" ? "bg-accent" : ""}>
{t("japanese")}
+ setLanguage("zh-CN")} className={language === "zh-CN" ? "bg-accent" : ""}>
+ {t("simplifiedChinese")}
+
diff --git a/application/src/components/settings/notification-settings/NotificationChannelDialog.tsx b/application/src/components/settings/notification-settings/NotificationChannelDialog.tsx
index bf84cae..ad820f5 100644
--- a/application/src/components/settings/notification-settings/NotificationChannelDialog.tsx
+++ b/application/src/components/settings/notification-settings/NotificationChannelDialog.tsx
@@ -1,5 +1,5 @@
-import React, { useEffect } from "react";
+import React, { useEffect, useState } from "react";
import {
Dialog,
DialogContent,
@@ -24,7 +24,9 @@ import {
FormMessage,
} from "@/components/ui/form";
import { Switch } from "@/components/ui/switch";
-import { Loader2 } from "lucide-react";
+import { Loader2, MessageSquare } from "lucide-react";
+import TestWecomDialog from "./TestWecomDialog";
+import { useLanguage } from "@/contexts/LanguageContext";
interface NotificationChannelDialogProps {
open: boolean;
@@ -34,7 +36,7 @@ interface NotificationChannelDialogProps {
const baseSchema = z.object({
notify_name: z.string().min(1, "Name is required"),
- notification_type: z.enum(["telegram", "discord", "slack", "signal", "email"]),
+ notification_type: z.enum(["telegram", "discord", "slack", "signal", "email", "wecom"]),
enabled: z.boolean().default(true),
service_id: z.string().default("global"), // Assuming global for now, could be linked to specific services
template_id: z.string().optional(),
@@ -66,12 +68,18 @@ const emailSchema = baseSchema.extend({
// Email specific fields could be added here
});
+const wecomSchema = baseSchema.extend({
+ notification_type: z.literal("wecom"),
+ wecom_webhook_url: z.string().url("Must be a valid URL"),
+});
+
const formSchema = z.discriminatedUnion("notification_type", [
telegramSchema,
discordSchema,
slackSchema,
signalSchema,
- emailSchema
+ emailSchema,
+ wecomSchema
]);
type FormValues = z.infer<typeof formSchema>;
@@ -96,6 +104,8 @@ export const NotificationChannelDialog = ({
const { watch, reset } = form;
const notificationType = watch("notification_type");
const [isSubmitting, setIsSubmitting] = React.useState(false);
+ const [showTestWecomDialog, setShowTestWecomDialog] = useState(false);
+ const { language } = useLanguage();
useEffect(() => {
if (editingConfig) {
@@ -224,6 +234,14 @@ export const NotificationChannelDialog = ({
Email
+
+
+
+
+
+ {language === "zh-CN" ? "企业微信" : "Wecom"}
+
+
@@ -311,6 +329,38 @@ export const NotificationChannelDialog = ({
/>
)}
+ {notificationType === "wecom" && (
+ <>
+ (
+
+ Webhook URL
+
+
+
+
+ {language === "zh-CN" ? "在企业微信群聊中添加机器人,获取Webhook URL" : "Add a bot in Wecom group chat to get the Webhook URL"}
+
+
+
+ )}
+ />
+ {isEditing && editingConfig && editingConfig.wecom_webhook_url && (
+
+ )}
+ >
+ )}
+
+
+ {/* 测试企业微信通知对话框 */}
+
);
};
diff --git a/application/src/components/settings/notification-settings/NotificationChannelList.tsx b/application/src/components/settings/notification-settings/NotificationChannelList.tsx
index 539805f..4710f71 100644
--- a/application/src/components/settings/notification-settings/NotificationChannelList.tsx
+++ b/application/src/components/settings/notification-settings/NotificationChannelList.tsx
@@ -13,6 +13,7 @@ import { Switch } from "@/components/ui/switch";
import { Button } from "@/components/ui/button";
import { Badge } from "@/components/ui/badge";
import { alertConfigService } from "@/services/alertConfigService";
+import { useLanguage } from "@/contexts/LanguageContext";
interface NotificationChannelListProps {
channels: AlertConfiguration[];
@@ -25,6 +26,7 @@ export const NotificationChannelList = ({
onEdit,
onDelete
}: NotificationChannelListProps) => {
+ const { language } = useLanguage();
const toggleEnabled = async (config: AlertConfiguration) => {
if (!config.id) return;
@@ -43,6 +45,7 @@ export const NotificationChannelList = ({
case "slack": return "Slack";
case "signal": return "Signal";
case "email": return "Email";
+ case "wecom": return language === "zh-CN" ? "企业微信" : "Wecom";
default: return type;
}
};
diff --git a/application/src/components/settings/notification-settings/NotificationSettings.tsx b/application/src/components/settings/notification-settings/NotificationSettings.tsx
index 22d7949..5e1a47c 100644
--- a/application/src/components/settings/notification-settings/NotificationSettings.tsx
+++ b/application/src/components/settings/notification-settings/NotificationSettings.tsx
@@ -7,6 +7,7 @@ import { Plus, Loader2 } from "lucide-react";
import { AlertConfiguration, alertConfigService } from "@/services/alertConfigService";
import { NotificationChannelDialog } from "./NotificationChannelDialog";
import { NotificationChannelList } from "./NotificationChannelList";
+import { useLanguage } from "@/contexts/LanguageContext";
const NotificationSettings = () => {
const [isLoading, setIsLoading] = useState(true);
@@ -14,6 +15,7 @@ const NotificationSettings = () => {
const [dialogOpen, setDialogOpen] = useState(false);
const [currentTab, setCurrentTab] = useState("all");
 const [editingConfig, setEditingConfig] = useState<AlertConfiguration | null>(null);
+ const { language } = useLanguage();
const fetchAlertConfigurations = async () => {
setIsLoading(true);
@@ -87,6 +89,7 @@ const NotificationSettings = () => {
SlackSignalEmail
+ {language === "zh-CN" ? "企业微信" : "Wecom"}
diff --git a/application/src/components/settings/notification-settings/TestWecomDialog.tsx b/application/src/components/settings/notification-settings/TestWecomDialog.tsx
new file mode 100644
index 0000000..bbe8d94
--- /dev/null
+++ b/application/src/components/settings/notification-settings/TestWecomDialog.tsx
@@ -0,0 +1,190 @@
+import React, { useState } from 'react';
+import { Dialog, DialogContent, DialogHeader, DialogTitle, DialogFooter } from "@/components/ui/dialog";
+import { Button } from "@/components/ui/button";
+import { Input } from "@/components/ui/input";
+import { Label } from "@/components/ui/label";
+import { X, AlertCircle, CheckCircle, Loader2, MessageSquare } from "lucide-react";
+import { toast } from "@/hooks/use-toast";
+import { Alert, AlertDescription } from "@/components/ui/alert";
+import { AlertConfiguration } from "@/services/alertConfigService";
+import { testSendWecomMessage } from "@/services/notification/wecomService";
+import { useLanguage } from "@/contexts/LanguageContext";
+
+interface TestWecomDialogProps {
+ open: boolean;
+ onOpenChange: (open: boolean) => void;
+ config: AlertConfiguration | null;
+}
+
+const TestWecomDialog: React.FC<TestWecomDialogProps> = ({
+ open,
+ onOpenChange,
+ config
+}) => {
+ const { language } = useLanguage();
+ const [serviceName, setServiceName] = useState(language === "zh-CN" ? '测试服务' : 'Test Service');
+ const [isTesting, setIsTesting] = useState(false);
+ const [lastResult, setLastResult] = useState<{ success: boolean; message: string } | null>(null);
+
+ const handleSend = async () => {
+ if (!config) {
+ toast({
+ title: language === "zh-CN" ? "配置错误" : "Configuration Error",
+ description: language === "zh-CN" ? "无法获取企业微信配置信息" : "Unable to get Wecom configuration",
+ variant: "destructive",
+ });
+ return;
+ }
+
+ if (!config.wecom_webhook_url) {
+ toast({
+ title: language === "zh-CN" ? "配置错误" : "Configuration Error",
+ description: language === "zh-CN" ? "请先设置企业微信 Webhook URL" : "Please set Wecom Webhook URL first",
+ variant: "destructive",
+ });
+ return;
+ }
+
+ try {
+ setLastResult(null);
+ setIsTesting(true);
+
+ console.log(language === "zh-CN" ? '发送企业微信测试消息:' : 'Sending Wecom test message:', {
+ serviceName,
+ webhookUrl: config.wecom_webhook_url ? (language === "zh-CN" ? "[已隐藏]" : "[hidden]") : undefined
+ });
+
+ const result = await testSendWecomMessage(config, serviceName);
+
+ if (result) {
+ setLastResult({
+ success: true,
+ message: language === "zh-CN" ? `测试消息已成功发送到企业微信` : `Test message has been successfully sent to Wecom`
+ });
+
+ toast({
+ title: language === "zh-CN" ? "发送成功" : "Send Success",
+ description: language === "zh-CN" ? `测试消息已成功发送到企业微信` : `Test message has been successfully sent to Wecom`,
+ variant: "default",
+ });
+ } else {
+ setLastResult({
+ success: false,
+ message: language === "zh-CN" ? "发送测试消息失败,请检查配置和网络连接" : "Failed to send test message, please check configuration and network connection"
+ });
+
+ toast({
+ title: language === "zh-CN" ? "发送失败" : "Send Failed",
+ description: language === "zh-CN" ? "发送测试消息失败,请检查配置和网络连接" : "Failed to send test message, please check configuration and network connection",
+ variant: "destructive",
+ });
+ }
+ } catch (error) {
+ console.error(language === "zh-CN" ? '发送企业微信测试消息出错:' : 'Error sending Wecom test message:', error);
+ const errorMessage = error instanceof Error ? error.message : (language === "zh-CN" ? "发送测试消息失败" : "Failed to send test message");
+
+ setLastResult({
+ success: false,
+ message: errorMessage
+ });
+
+ toast({
+ title: language === "zh-CN" ? "错误" : "Error",
+ description: errorMessage,
+ variant: "destructive",
+ });
+ } finally {
+ setIsTesting(false);
+ }
+ };
+
+ const handleClose = () => {
+ onOpenChange(false);
+ // 重置表单但保留上次结果以供参考
+ setServiceName(language === "zh-CN" ? '测试服务' : 'Test Service');
+ // 不立即重置lastResult,以便用户查看结果
+ setTimeout(() => setLastResult(null), 300);
+ };
+
+ return (
+
+ );
+};
+
+export default TestWecomDialog;
\ No newline at end of file
diff --git a/application/src/services/alertConfigService.ts b/application/src/services/alertConfigService.ts
index 51c07e9..0cf7357 100644
--- a/application/src/services/alertConfigService.ts
+++ b/application/src/services/alertConfigService.ts
@@ -7,7 +7,7 @@ export interface AlertConfiguration {
collectionId?: string;
collectionName?: string;
service_id: string;
- notification_type: "telegram" | "discord" | "signal" | "slack" | "email";
+ notification_type: "telegram" | "discord" | "signal" | "slack" | "email" | "wecom";
telegram_chat_id?: string;
discord_webhook_url?: string;
signal_number?: string;
@@ -15,6 +15,7 @@ export interface AlertConfiguration {
bot_token?: string;
template_id?: string;
slack_webhook_url?: string;
+ wecom_webhook_url?: string;
enabled: boolean;
created?: string;
updated?: string;
diff --git a/application/src/services/notification/wecomService.ts b/application/src/services/notification/wecomService.ts
new file mode 100644
index 0000000..3a6fcce
--- /dev/null
+++ b/application/src/services/notification/wecomService.ts
@@ -0,0 +1,229 @@
+import { toast } from "@/hooks/use-toast";
+import { AlertConfiguration } from "../alertConfigService";
+import api from "@/api";
+// import { useLanguage } from "@/contexts/LanguageContext"; // unused: React hooks cannot be called in a plain service module
+
+// 获取当前语言环境
+let currentLanguage = "en";
+try {
+ // 尝试从localStorage获取语言设置
+ const storedLanguage = typeof window !== 'undefined' ? localStorage.getItem('language') : null;
+ currentLanguage = storedLanguage || "en";
+} catch (e) {
+ console.error("Error getting language from localStorage:", e);
+}
+
+/**
+ * Send a notification via WeCom
+ */
+export async function sendWecomNotification(
+ config: AlertConfiguration,
+ message: string,
+ status: string
+): Promise<boolean> {
+ try {
+ console.log("====== WECOM NOTIFICATION ATTEMPT ======");
+ console.log("Config:", JSON.stringify({
+ ...config,
+ notify_name: config.notify_name,
+ wecom_webhook_url: config.wecom_webhook_url ? "[REDACTED]" : undefined,
+ enabled: config.enabled
+ }, null, 2));
+
+ // Use provided webhook URL
+ const webhookUrl = config.wecom_webhook_url;
+
+ if (!webhookUrl) {
+ console.error("Missing WeChat Work configuration - Webhook URL:", webhookUrl);
+ const isZhCN = currentLanguage === "zh-CN";
+ toast({
+ title: isZhCN ? "配置错误" : "Configuration Error",
+ description: isZhCN ? "缺少企业微信 Webhook URL" : "Missing Wecom webhook URL",
+ variant: "destructive"
+ });
+ return false;
+ }
+
+ console.log("Sending WeChat Work notification to webhook URL");
+ console.log("Message content:", message);
+
+ // Format message for WeChat Work Markdown format
+ const formattedMessage = formatWecomMarkdownMessage(message, status);
+
+ // Prepare payload for the API call
+ const payload = {
+ type: "wecom",
+ webhookUrl: webhookUrl,
+ message: formattedMessage
+ };
+
+ console.log("Prepared payload for notification:", {
+ ...payload,
+ webhookUrl: "[REDACTED]"
+ });
+
+ try {
+ // Call our client-side API handler
+ console.log("Sending request to /api/realtime endpoint");
+ const response = await api.handleRequest('/api/realtime', 'POST', payload);
+
+ console.log("API response status:", response.status);
+ console.log("API response data:", JSON.stringify(response.json, null, 2));
+
+ // Check if response is ok
+ if (response.status !== 200) {
+ console.error("Error response from notification API:", response.status);
+ const isZhCN = currentLanguage === "zh-CN";
+ toast({
+ title: isZhCN ? "通知失败" : "Notification Failed",
+ description: isZhCN
+ ? `服务器返回错误 ${response.status}: ${response.json?.description || "未知错误"}`
+ : `Server returned error ${response.status}: ${response.json?.description || "Unknown error"}`,
+ variant: "destructive"
+ });
+ return false;
+ }
+
+ const responseData = response.json;
+
+ if (responseData && responseData.ok === false) {
+ console.error("Error sending notification:", responseData);
+ const isZhCN = currentLanguage === "zh-CN";
+ toast({
+ title: isZhCN ? "通知失败" : "Notification Failed",
+ description: responseData.description || (isZhCN ? "发送通知失败" : "Failed to send notification"),
+ variant: "destructive"
+ });
+ return false;
+ }
+
+ console.log("Notification sent successfully");
+ const isZhCN = currentLanguage === "zh-CN";
+ toast({
+ title: isZhCN ? "通知已发送" : "Notification Sent",
+ description: isZhCN ? "企业微信通知已成功发送" : "Wecom notification sent successfully"
+ });
+ return true;
+ } catch (error) {
+ console.error("Error calling notification API:", error);
+ const isZhCN = currentLanguage === "zh-CN";
+ toast({
+ title: isZhCN ? "API 错误" : "API Error",
+ description: isZhCN
+ ? `与通知服务通信失败: ${error instanceof Error ? error.message : "网络错误"}`
+ : `Failed to communicate with notification service: ${error instanceof Error ? error.message : "Network error"}`,
+ variant: "destructive"
+ });
+ return false;
+ }
+ } catch (error) {
+ console.error("Error in sendWecomNotification:", error);
+ const isZhCN = currentLanguage === "zh-CN";
+ toast({
+ title: isZhCN ? "通知错误" : "Notification Error",
+ description: isZhCN
+ ? `发送企业微信通知时出错: ${error instanceof Error ? error.message : "未知错误"}`
+ : `Error sending Wecom notification: ${error instanceof Error ? error.message : "Unknown error"}`,
+ variant: "destructive"
+ });
+ return false;
+ }
+}
+
+/**
+ * Format a message for WeChat Work Markdown format
+ *
+ * @param message The original message
+ * @param status The service status (up, down, warning, etc.)
+ * @returns Formatted markdown message for WeChat Work
+ */
+function formatWecomMarkdownMessage(message: string, status: string): string {
+  // NOTE(review): the original `.replace(/\n/g, '\n')` was a no-op; WeCom markdown honors "\n" as-is.
+  let formattedMessage = message;
+
+ // Extract service name and other details for better formatting
+ const serviceNameMatch = message.match(/Service ([^\s]+) is/);
+ const serviceName = serviceNameMatch ? serviceNameMatch[1] : "Unknown";
+
+ // Extract response time if available
+ const responseTimeMatch = message.match(/Response time: ([\d.]+)ms/);
+ const responseTime = responseTimeMatch ? responseTimeMatch[1] + "ms" : "N/A";
+
+ // Extract URL if available
+ const urlMatch = message.match(/URL: ([^\s\n]+)/);
+ const url = urlMatch ? urlMatch[1] : "N/A";
+
+ // Format the message with markdown based on language
+ const isZhCN = currentLanguage === "zh-CN";
+
+ const markdownMessage = {
+ msgtype: "markdown",
+ markdown: {
+ content: `## ${isZhCN ? "服务状态通知" : "Service Status Notification"}\n\n` +
+ `**${isZhCN ? "服务名称" : "Service Name"}**: ${serviceName}\n` +
+ `**${isZhCN ? "当前状态" : "Current Status"}**: ${status.toUpperCase()}\n` +
+ `**${isZhCN ? "响应时间" : "Response Time"}**: **${responseTime}**\n` +
+ `**URL**: **${url}**\n` +
+ `**${isZhCN ? "详细信息" : "Details"}**: ${formattedMessage}\n` +
+ `**${isZhCN ? "通知时间" : "Notification Time"}**: **${new Date().toLocaleString()}**`
+ }
+ };
+
+ return JSON.stringify(markdownMessage);
+}
+
+/**
+ * Get color code based on status
+ * 根据用户要求,状态信息根据事件级别用红、绿、蓝三种颜色表示
+ */
+function getStatusColor(status: string): string {
+ const statusLower = status.toLowerCase();
+
+ if (statusLower === "up" || statusLower === "resolved" || statusLower === "ok" || statusLower === "operational") {
+ return "green"; // 绿色表示正常状态
+ } else if (statusLower === "down" || statusLower === "error" || statusLower === "critical") {
+ return "red"; // 红色表示错误或严重问题
+ } else if (statusLower === "warning" || statusLower === "degraded" || statusLower === "maintenance" || statusLower === "paused") {
+ return "blue"; // 蓝色表示警告或降级状态
+ } else {
+ return "gray"; // 灰色表示其他状态
+ }
+}
+
+/**
+ * Test sending a WeChat Work notification
+ * This function is used for testing the WeChat Work notification configuration
+ *
+ * @param config The alert configuration containing WeChat Work webhook URL
+ * @param serviceName The name of the service to include in the test message
+ * @returns Promise resolving to true if successful, false otherwise
+ */
+export async function testSendWecomMessage(
+ config: AlertConfiguration,
+ serviceName: string = "Test Service"
+): Promise<boolean> {
+ try {
+ console.log("====== TEST WECOM NOTIFICATION ======");
+ console.log("Sending test notification to WeChat Work");
+
+ // Create a test message based on language
+ const isZhCN = currentLanguage === "zh-CN";
+ const testMessage = isZhCN
+ ? `🧪 这是一条测试消息\nService ${serviceName} is UP\nResponse time: 123ms\nURL: https://example.com\n\n此消息仅用于测试企业微信通知配置。`
+ : `🧪 This is a test message\nService ${serviceName} is UP\nResponse time: 123ms\nURL: https://example.com\n\nThis message is only for testing Wecom notification configuration.`;
+
+ // Send the notification with "up" status for testing
+ return await sendWecomNotification(config, testMessage, "up");
+ } catch (error) {
+ console.error("Error in testSendWecomMessage:", error);
+ const isZhCN = currentLanguage === "zh-CN";
+ toast({
+ title: isZhCN ? "测试通知失败" : "Test Notification Failed",
+ description: isZhCN
+ ? `发送企业微信测试通知时出错: ${error instanceof Error ? error.message : "未知错误"}`
+ : `Error sending Wecom test notification: ${error instanceof Error ? error.message : "Unknown error"}`,
+ variant: "destructive"
+ });
+ return false;
+ }
+}
\ No newline at end of file
diff --git a/application/src/services/notificationService.ts b/application/src/services/notificationService.ts
index 84ac207..24593fc 100644
--- a/application/src/services/notificationService.ts
+++ b/application/src/services/notificationService.ts
@@ -1,7 +1,8 @@
import { Service } from "@/types/service.types";
import { processTemplate, generateDefaultMessage } from "./notification/templateProcessor";
-import { sendTelegramNotification, testSendTelegramMessage } from "./notification/telegramService";
+import { sendTelegramNotification } from "./notification/telegramService";
+import { sendWecomNotification, testSendWecomMessage } from "./notification/wecomService";
import { pb } from "@/lib/pocketbase";
import { templateService } from "./templateService";
import { AlertConfiguration } from "./alertConfigService";
@@ -116,6 +117,7 @@ export const notificationService = {
bot_token: alertConfigRecord.bot_token,
template_id: alertConfigRecord.template_id,
slack_webhook_url: alertConfigRecord.slack_webhook_url,
+ wecom_webhook_url: alertConfigRecord.wecom_webhook_url,
enabled: alertConfigRecord.enabled,
created: alertConfigRecord.created,
updated: alertConfigRecord.updated
@@ -153,6 +155,8 @@ export const notificationService = {
if (notificationType === 'telegram') {
return await sendTelegramNotification(alertConfig, message);
+ } else if (notificationType === 'wecom') {
+ return await sendWecomNotification(alertConfig, message, status);
}
// For other types like discord, slack, etc. (not implemented yet)
diff --git a/application/src/translations/de/common.ts b/application/src/translations/de/common.ts
index 0e69d07..5d0359f 100644
--- a/application/src/translations/de/common.ts
+++ b/application/src/translations/de/common.ts
@@ -8,6 +8,8 @@ export const commonTranslations: CommonTranslations = {
english: "Englisch",
khmer: "Khmer",
german: "Deutsch",
+ japanese: "Japanisch",
+ simplifiedChinese: "Vereinfachtes Chinesisch",
goodMorning: "Guten Morgen",
goodAfternoon: "Guten Nachmittag",
goodEvening: "Guten Abend",
diff --git a/application/src/translations/en/common.ts b/application/src/translations/en/common.ts
index 3bdbb1a..0bbd26d 100644
--- a/application/src/translations/en/common.ts
+++ b/application/src/translations/en/common.ts
@@ -8,6 +8,8 @@ export const commonTranslations: CommonTranslations = {
english: "English",
khmer: "Khmer",
german: "Deutsch",
+ japanese: "Japanese",
+ simplifiedChinese: "Simplified Chinese",
goodMorning: "Good morning",
goodAfternoon: "Good afternoon",
goodEvening: "Good evening",
diff --git a/application/src/translations/index.ts b/application/src/translations/index.ts
index 8ae73b8..8070efd 100644
--- a/application/src/translations/index.ts
+++ b/application/src/translations/index.ts
@@ -2,14 +2,16 @@ import enTranslations from './en';
import kmTranslations from './km';
import deTranslations from './de';
import jaTranslations from './ja';
+import zhCNTranslations from './zh-CN';
-export type Language = "en" | "km" | "de" | "ja";
+export type Language = "en" | "km" | "de" | "ja" | "zh-CN";
export const translations = {
en: enTranslations,
km: kmTranslations,
de: deTranslations,
ja: jaTranslations,
+ "zh-CN": zhCNTranslations,
};
// Type for accessing translations by module and key
diff --git a/application/src/translations/ja/common.ts b/application/src/translations/ja/common.ts
index dedc536..cbd7fa2 100644
--- a/application/src/translations/ja/common.ts
+++ b/application/src/translations/ja/common.ts
@@ -8,6 +8,7 @@ export const commonTranslations: CommonTranslations = {
khmer: "Khmer",
german: "Deutsch",
japanese: "日本語",
+ simplifiedChinese: "簡体字中国語",
goodMorning: "おはようございます",
goodAfternoon: "こんにちは",
goodEvening: "こんばんは",
diff --git a/application/src/translations/km/common.ts b/application/src/translations/km/common.ts
index 84ed44c..3013eed 100644
--- a/application/src/translations/km/common.ts
+++ b/application/src/translations/km/common.ts
@@ -8,6 +8,8 @@ export const commonTranslations: CommonTranslations = {
english: "អង់គ្លេស",
khmer: "ខ្មែរ",
german: "Deutsch",
+ japanese: "ជប៉ុន",
+ simplifiedChinese: "ចិនសាមញ្ញ",
goodMorning: "អរុណសួស្តី",
goodAfternoon: "ទិវាសួស្តី",
goodEvening: "សាយណ្ហសួស្តី",
diff --git a/application/src/translations/types/common.ts b/application/src/translations/types/common.ts
index fc16c53..275651b 100644
--- a/application/src/translations/types/common.ts
+++ b/application/src/translations/types/common.ts
@@ -5,6 +5,9 @@ export interface CommonTranslations {
language: string;
english: string;
khmer: string;
+ german: string;
+ japanese: string;
+ simplifiedChinese: string;
goodMorning: string;
goodAfternoon: string;
goodEvening: string;
diff --git a/application/src/translations/zh-CN/about.ts b/application/src/translations/zh-CN/about.ts
new file mode 100644
index 0000000..cbd58db
--- /dev/null
+++ b/application/src/translations/zh-CN/about.ts
@@ -0,0 +1,17 @@
+import { AboutTranslations } from '../types/about';
+
+export const aboutTranslations: AboutTranslations = {
+ aboutCheckcle: "关于Checkcle",
+ systemDescription: "Checkcle是一个开源监控系统,提供服务器和服务健康状况的实时洞察、事件管理和运营透明度。以MIT许可证发布。",
+ systemVersion: "系统版本",
+ license: "许可证",
+ mitLicense: "MIT许可证",
+ links: "链接",
+ viewOnGithub: "在GitHub上查看",
+ viewDocumentation: "查看文档",
+ followOnX: "在X上关注",
+ joinDiscord: "加入Discord",
+ quickActions: "快速操作",
+ quickActionsDescription: "快速访问常见的监控操作和功能。选择以下操作开始使用。",
+ quickTips: "快速提示",
+};
\ No newline at end of file
diff --git a/application/src/translations/zh-CN/common.ts b/application/src/translations/zh-CN/common.ts
new file mode 100644
index 0000000..ac8e6fa
--- /dev/null
+++ b/application/src/translations/zh-CN/common.ts
@@ -0,0 +1,30 @@
+import { CommonTranslations } from '../types/common';
+
+export const commonTranslations: CommonTranslations = {
+ welcome: "欢迎",
+ logout: "退出登录",
+ language: "语言",
+ english: "英语",
+ khmer: "高棉语",
+ german: "德语",
+ japanese: "日语",
+ simplifiedChinese: "简体中文",
+ goodMorning: "早上好",
+ goodAfternoon: "下午好",
+ goodEvening: "晚上好",
+ profile: "个人资料",
+ settings: "设置",
+ documentation: "文档",
+ notifications: "通知",
+ close: "关闭",
+ cancel: "取消",
+ view: "查看",
+ edit: "编辑",
+ delete: "删除",
+ status: "状态",
+ time: "时间",
+ title: "标题",
+ description: "描述",
+ success: "成功",
+ error: "错误",
+};
\ No newline at end of file
diff --git a/application/src/translations/zh-CN/incident.ts b/application/src/translations/zh-CN/incident.ts
new file mode 100644
index 0000000..c914552
--- /dev/null
+++ b/application/src/translations/zh-CN/incident.ts
@@ -0,0 +1,54 @@
+import { IncidentTranslations } from '../types/incident';
+
+export const incidentTranslations: IncidentTranslations = {
+ incidentManagement: '事件管理',
+ incidentsManagementDesc: '跟踪和管理服务事件及其解决方案',
+ unresolvedIncidents: '未解决',
+ resolvedIncidents: '已解决',
+ activeIncidents: '活跃事件',
+ criticalIssues: '严重问题',
+ avgResolutionTime: '平均解决时间',
+ noIncidents: '没有活跃事件',
+ createIncident: '创建事件',
+ investigating: '调查中',
+ identified: '已确认',
+ monitoring: '监控中',
+ resolved: '已解决',
+ scheduleIncidentManagement: '计划与事件管理',
+ incidentName: '事件名称',
+ incidentStatus: '事件状态',
+ highPriority: '高优先级',
+ configurationSettings: '配置设置',
+ incidentCreatedSuccess: '事件创建成功',
+ basicInfo: '基本信息',
+ serviceId: '服务ID',
+ assignedTo: '分配给',
+ unassigned: '未分配',
+ timeline: '时间线',
+ incidentTime: '事件时间',
+ resolutionTime: '解决时间',
+ systems: '系统',
+ noSystems: '没有受影响的系统',
+ impactAnalysis: '影响分析',
+ rootCause: '根本原因',
+ resolutionSteps: '解决步骤',
+ lessonsLearned: '经验教训',
+ resolutionDetails: '解决详情',
+ assignment: '分配',
+ download: '下载',
+ downloadPdf: '下载PDF',
+ print: '打印',
+ confidentialNote: '本文档为机密文件,仅供内部使用。',
+ generatedOn: '生成于',
+ enterResolutionSteps: '输入解决事件的步骤',
+ enterLessonsLearned: '输入从此事件中学到的经验教训',
+ editIncident: '编辑事件',
+ editIncidentDesc: '更新此事件的详情',
+ updating: '更新中...',
+ update: '更新',
+ create: '创建',
+ creating: '创建中...',
+ configuration: '配置',
+ failedToUpdateStatus: '更新状态失败',
+ inProgress: '进行中',
+};
\ No newline at end of file
diff --git a/application/src/translations/zh-CN/index.ts b/application/src/translations/zh-CN/index.ts
new file mode 100644
index 0000000..98885a1
--- /dev/null
+++ b/application/src/translations/zh-CN/index.ts
@@ -0,0 +1,24 @@
+import { Translations } from '../types';
+import { commonTranslations } from './common';
+import { menuTranslations } from './menu';
+import { loginTranslations } from './login';
+import { aboutTranslations } from './about';
+import { servicesTranslations } from './services';
+import { maintenanceTranslations } from './maintenance';
+import { incidentTranslations } from './incident';
+import { sslTranslations } from './ssl';
+import { settingsTranslations } from './settings';
+
+const zhCNTranslations: Translations = {
+ common: commonTranslations,
+ menu: menuTranslations,
+ login: loginTranslations,
+ about: aboutTranslations,
+ services: servicesTranslations,
+ maintenance: maintenanceTranslations,
+ incident: incidentTranslations,
+ ssl: sslTranslations,
+ settings: settingsTranslations
+};
+
+export default zhCNTranslations;
\ No newline at end of file
diff --git a/application/src/translations/zh-CN/login.ts b/application/src/translations/zh-CN/login.ts
new file mode 100644
index 0000000..ae5f566
--- /dev/null
+++ b/application/src/translations/zh-CN/login.ts
@@ -0,0 +1,22 @@
+import { LoginTranslations } from '../types/login';
+
+export const loginTranslations: LoginTranslations = {
+ signInToYourAccount: "登录您的账户",
+ dontHaveAccount: "没有账户?",
+ createOne: "创建一个",
+ signInWithGoogle: "使用Google登录",
+ orContinueWith: "或",
+ email: "邮箱",
+ password: "密码",
+ forgot: "忘记密码?",
+ signIn: "登录",
+ signingIn: "登录中...",
+ loginSuccessful: "登录成功",
+ loginSuccessMessage: "您已成功登录。",
+ loginFailed: "登录失败",
+ authenticationFailed: "认证失败",
+ bySigningIn: "登录即表示您同意我们的",
+ termsAndConditions: "条款和条件",
+ and: "和",
+ privacyPolicy: "隐私政策",
+};
\ No newline at end of file
diff --git a/application/src/translations/zh-CN/maintenance.ts b/application/src/translations/zh-CN/maintenance.ts
new file mode 100644
index 0000000..053a60e
--- /dev/null
+++ b/application/src/translations/zh-CN/maintenance.ts
@@ -0,0 +1,67 @@
+import { MaintenanceTranslations } from '../types/maintenance';
+
+export const maintenanceTranslations: MaintenanceTranslations = {
+ scheduledMaintenance: '计划维护',
+ scheduledMaintenanceDesc: '查看和管理系统和服务的计划维护窗口',
+ upcomingMaintenance: '即将进行',
+ ongoingMaintenance: '正在进行',
+ completedMaintenance: '已完成',
+ createMaintenanceWindow: '创建维护',
+ totalScheduledHours: '计划总时长',
+ maintenanceName: '维护名称',
+ maintenanceStatus: '状态',
+ scheduledStart: '计划开始',
+ scheduledEnd: '计划结束',
+ affectedServices: '受影响的服务',
+ impact: '影响',
+ minor: '轻微',
+ major: '重大',
+ critical: '严重',
+ none: '无',
+ actions: '操作',
+ scheduled: '已计划',
+ inprogress: '进行中',
+ completed: '已完成',
+ cancelled: '已取消',
+ markAsInProgress: '标记为进行中',
+ markAsCompleted: '标记为已完成',
+ markAsCancelled: '标记为已取消',
+ confirmDelete: '确认删除',
+ deleteMaintenanceConfirmation: '您确定要删除此维护窗口吗?',
+ thisActionCannotBeUndone: '此操作无法撤销。',
+ maintenanceDeleted: '维护已删除',
+ maintenanceDeletedDesc: '维护窗口已成功删除。',
+ errorDeletingMaintenance: '删除维护窗口时出错。',
+ statusUpdated: '状态已更新',
+ maintenanceStatusUpdated: '维护状态已成功更新。',
+ errorUpdatingMaintenanceStatus: '更新维护状态时出错。',
+ createMaintenance: '创建维护',
+ createMaintenanceDesc: '为您的服务安排新的维护窗口',
+ enterTitle: '输入维护标题',
+ enterDescription: '输入维护的详细描述',
+ startTime: '开始时间',
+ endTime: '结束时间',
+ selectDate: '选择日期',
+ enterAffectedServices: '输入受影响的服务',
+ separateServicesWithComma: '多个服务用逗号分隔',
+ priority: '优先级',
+ selectPriority: '选择优先级',
+ selectStatus: '选择状态',
+ selectImpact: '选择影响',
+ notifySubscribers: '通知订阅者',
+ notifySubscribersDesc: '当此维护开始时向所有订阅者发送通知',
+ maintenanceCreated: '维护已创建',
+ maintenanceCreatedDesc: '维护窗口已成功安排。',
+ errorCreatingMaintenance: '创建维护窗口时出错。',
+ errorFetchingMaintenanceData: '获取维护数据时出错。',
+ low: '低',
+ medium: '中',
+ high: '高',
+ created: '已创建',
+ lastUpdated: '最后更新',
+ subscribersWillBeNotified: '维护开始时将通知订阅者',
+ noNotifications: '不会发送通知',
+ noScheduledMaintenance: '无计划维护',
+ noMaintenanceWindows: '此期间没有维护窗口。点击"创建维护"按钮创建一个。',
+ maintenanceCreatedSuccess: '维护窗口创建成功',
+};
\ No newline at end of file
diff --git a/application/src/translations/zh-CN/menu.ts b/application/src/translations/zh-CN/menu.ts
new file mode 100644
index 0000000..b0c9459
--- /dev/null
+++ b/application/src/translations/zh-CN/menu.ts
@@ -0,0 +1,20 @@
+import { MenuTranslations } from '../types/menu';
+
+export const menuTranslations: MenuTranslations = {
+ uptimeMonitoring: "运行时间监控",
+ instanceMonitoring: "实例监控",
+ sslDomain: "SSL与域名",
+ scheduleIncident: "计划与事件",
+ operationalPage: "运营页面",
+ reports: "报告",
+ regionalMonitoring: "区域监控",
+ settingPanel: "设置面板",
+ generalSettings: "常规设置",
+ userManagement: "用户管理",
+ notificationSettings: "通知设置",
+ alertsTemplates: "警报模板",
+ rolesManagement: "角色管理",
+ dataRetention: "数据保留",
+ backupSettings: "备份设置",
+ aboutSystem: "关于系统",
+};
\ No newline at end of file
diff --git a/application/src/translations/zh-CN/services.ts b/application/src/translations/zh-CN/services.ts
new file mode 100644
index 0000000..df4a940
--- /dev/null
+++ b/application/src/translations/zh-CN/services.ts
@@ -0,0 +1,11 @@
+import { ServicesTranslations } from '../types/services';
+
+export const servicesTranslations: ServicesTranslations = {
+ serviceName: "服务名称",
+ serviceType: "服务类型",
+ serviceStatus: "服务状态",
+ responseTime: "响应时间",
+ uptime: "运行时间",
+ lastChecked: "最后检查",
+ noServices: "没有符合您筛选条件的服务。",
+};
\ No newline at end of file
diff --git a/application/src/translations/zh-CN/settings.ts b/application/src/translations/zh-CN/settings.ts
new file mode 100644
index 0000000..6f85b8b
--- /dev/null
+++ b/application/src/translations/zh-CN/settings.ts
@@ -0,0 +1,51 @@
+import { SettingsTranslations } from '../types/settings';
+
+export const settingsTranslations: SettingsTranslations = {
+ // 标签页
+ systemSettings: "系统设置",
+ mailSettings: "邮件设置",
+
+ // 系统设置
+ appName: "应用名称",
+ appURL: "应用URL",
+ senderName: "发件人名称",
+ senderEmail: "发件人邮箱地址",
+ hideControls: "隐藏控件",
+
+ // 邮件设置
+ smtpSettings: "SMTP配置",
+ smtpEnabled: "启用SMTP",
+ smtpHost: "SMTP主机",
+ smtpPort: "SMTP端口",
+ smtpUsername: "SMTP用户名",
+ smtpPassword: "SMTP密码",
+ smtpAuthMethod: "认证方式",
+ enableTLS: "启用TLS",
+ localName: "本地名称",
+
+ // 测试邮件
+ testEmail: "测试邮件",
+ sendTestEmail: "发送测试邮件",
+ emailTemplate: "邮件模板",
+ verification: "验证",
+ passwordReset: "密码重置",
+ confirmEmailChange: "确认邮箱变更",
+ otp: "一次性密码",
+ loginAlert: "登录提醒",
+ authCollection: "认证集合",
+ selectCollection: "选择集合",
+ toEmailAddress: "收件人邮箱地址",
+ enterEmailAddress: "输入邮箱地址",
+ sending: "发送中...",
+
+ // 操作和状态
+ save: "保存更改",
+ saving: "保存中...",
+ settingsUpdated: "设置更新成功",
+ errorSavingSettings: "保存设置时出错",
+ errorFetchingSettings: "加载设置时出错",
+ testConnection: "测试连接",
+ testingConnection: "测试连接中...",
+ connectionSuccess: "连接成功",
+ connectionFailed: "连接失败"
+};
\ No newline at end of file
diff --git a/application/src/translations/zh-CN/ssl.ts b/application/src/translations/zh-CN/ssl.ts
new file mode 100644
index 0000000..693638e
--- /dev/null
+++ b/application/src/translations/zh-CN/ssl.ts
@@ -0,0 +1,106 @@
+import { SSLTranslations } from '../types/ssl';
+
+export const sslTranslations: SSLTranslations = {
+ // 页面和部分标题
+ sslDomainManagement: "SSL与域名管理",
+ monitorSSLCertificates: "监控和管理您域名的SSL证书",
+ addSSLCertificate: "添加SSL证书",
+ editSSLCertificate: "编辑SSL证书",
+ deleteSSLCertificate: "删除SSL证书",
+ sslCertificateDetails: "SSL证书详情",
+ detailedInfo: "详细信息",
+
+ // 状态相关
+ valid: "有效",
+ expiringSoon: "即将过期",
+ expired: "已过期",
+ pending: "待处理",
+
+ // 统计和卡片
+ validCertificates: "有效证书",
+ expiringSoonCertificates: "即将过期",
+ expiredCertificates: "已过期证书",
+
+ // 表单字段
+ domain: "域名",
+ domainName: "域名",
+ domainCannotChange: "创建后域名无法更改",
+ warningThreshold: "警告阈值",
+ warningThresholdDays: "警告阈值(天)",
+ expiryThreshold: "过期阈值",
+ expiryThresholdDays: "过期阈值(天)",
+ notificationChannel: "通知渠道",
+ chooseChannel: "选择通知渠道",
+ whereToSend: "发送通知的位置",
+ daysBeforeExpiration: "过期前多少天接收警告",
+ daysBeforeCritical: "过期前多少天接收严重警报",
+ getNotifiedExpiration: "在证书即将过期时获得通知",
+ getNotifiedCritical: "在证书临近过期时获得严重警报",
+
+ // 表格标题和字段
+ issuer: "颁发者",
+ expirationDate: "过期日期",
+ daysLeft: "剩余天数",
+ status: "状态",
+ lastNotified: "最后通知",
+ actions: "操作",
+ validFrom: "有效期自",
+ validUntil: "有效期至",
+ validityDays: "有效天数",
+ organization: "组织",
+ commonName: "通用名称",
+ serialNumber: "序列号",
+ algorithm: "算法",
+ subjectAltNames: "主题备用名称",
+
+ // 按钮和操作
+ addDomain: "添加域名",
+ refreshAll: "刷新全部",
+ cancel: "取消",
+ addCertificate: "添加证书",
+ check: "检查",
+ view: "查看",
+ edit: "编辑",
+ delete: "删除",
+ close: "关闭",
+ saveChanges: "保存更改",
+ updating: "更新中",
+
+ // 详情视图中的部分
+ basicInformation: "基本信息",
+ validity: "有效性",
+ issuerInfo: "颁发者信息",
+ technicalDetails: "技术详情",
+ monitoringConfig: "监控配置",
+ recordInfo: "记录信息",
+
+ // 通知和消息
+ sslCertificateAdded: "SSL证书添加成功",
+ sslCertificateUpdated: "SSL证书更新成功",
+ sslCertificateDeleted: "SSL证书删除成功",
+ sslCertificateRefreshed: "{domain}的SSL证书刷新成功",
+ allCertificatesRefreshed: "所有{count}个证书刷新成功",
+ someCertificatesFailed: "{success}个证书刷新成功,{failed}个失败",
+ failedToAddCertificate: "添加SSL证书失败",
+ failedToLoadCertificates: "加载SSL证书失败",
+ failedToUpdateCertificate: "更新SSL证书失败",
+ failedToDeleteCertificate: "删除SSL证书失败",
+ failedToCheckCertificate: "检查SSL证书失败",
+ noCertificatesToRefresh: "没有证书需要刷新",
+ startingRefreshAll: "开始刷新{count}个证书",
+ checkingSSLCertificate: "正在检查SSL证书...",
+ deleteConfirmation: "您确定要删除以下域名的证书吗",
+ deleteWarning: "此操作无法撤销。这将永久删除证书。",
+
+ // 其他
+ unknown: "未知",
+ never: "从不",
+ none: "无",
+ loadingChannels: "加载渠道中...",
+ noChannelsFound: "未找到通知渠道",
+ noSSLCertificates: "未找到SSL证书",
+ created: "创建时间",
+ lastUpdated: "最后更新",
+ lastNotification: "最后通知",
+ collectionId: "集合ID"
+};
\ No newline at end of file
diff --git a/server/pb_data/auxiliary.db-shm b/server/pb_data/auxiliary.db-shm
index 9a867ac..6917fb8 100644
Binary files a/server/pb_data/auxiliary.db-shm and b/server/pb_data/auxiliary.db-shm differ
diff --git a/server/pb_data/auxiliary.db-wal b/server/pb_data/auxiliary.db-wal
index fe1b1ff..66382d5 100644
Binary files a/server/pb_data/auxiliary.db-wal and b/server/pb_data/auxiliary.db-wal differ
diff --git a/server/pb_data/data.db b/server/pb_data/data.db
index 8047a3c..dc77f56 100644
Binary files a/server/pb_data/data.db and b/server/pb_data/data.db differ
diff --git a/server/pb_data/types.d.ts b/server/pb_data/types.d.ts
index 2503831..eba10cc 100644
--- a/server/pb_data/types.d.ts
+++ b/server/pb_data/types.d.ts
@@ -1,4 +1,4 @@
-// 1746911627
+// 1752913830
// GENERATED CODE - DO NOT MODIFY BY HAND
// -------------------------------------------------------------------
@@ -170,14 +170,46 @@ declare function readerToString(reader: any, maxBytes?: number): string;
* // io.Reader
* const ex1 = toString(e.request.body)
*
- * // slice of bytes ("hello")
- * const ex2 = toString([104 101 108 108 111])
+ * // slice of bytes
+ * const ex2 = toString([104 101 108 108 111]) // "hello"
+ *
+ * // null
+ * const ex3 = toString(null) // ""
* ```
*
* @group PocketBase
*/
declare function toString(val: any, maxBytes?: number): string;
+/**
+ * toBytes converts the specified value into a bytes slice.
+ *
+ * Support optional second maxBytes argument to limit the max read bytes
+ * when the value is a io.Reader (default to 32MB).
+ *
+ * Types that don't have Go slice representation (bool, objects, etc.)
+ * are serialized to UTF8 string and its bytes slice is returned.
+ *
+ * Example:
+ *
+ * ```js
+ * // io.Reader
+ * const ex1 = toBytes(e.request.body)
+ *
+ * // string
+ * const ex2 = toBytes("hello") // [104 101 108 108 111]
+ *
+ * // object (the same as the string '{"test":1}')
+ * const ex3 = toBytes({"test":1}) // [123 34 116 101 115 116 34 58 49 125]
+ *
+ * // null
+ * const ex4 = toBytes(null) // []
+ * ```
+ *
+ * @group PocketBase
+ */
+declare function toBytes(val: any, maxBytes?: number): Array;
+
/**
* sleep pauses the current goroutine for at least the specified user duration (in ms).
* A zero or negative duration returns immediately.
@@ -1770,8 +1802,8 @@ namespace os {
* than ReadFrom. This is used to permit ReadFrom to call io.Copy
* without leading to a recursive call to ReadFrom.
*/
- type _snLnyFf = noReadFrom&File
- interface fileWithoutReadFrom extends _snLnyFf {
+ type _sYrUOuJ = noReadFrom&File
+ interface fileWithoutReadFrom extends _sYrUOuJ {
}
interface File {
/**
@@ -1815,8 +1847,8 @@ namespace os {
* than WriteTo. This is used to permit WriteTo to call io.Copy
* without leading to a recursive call to WriteTo.
*/
- type _syNvbJl = noWriteTo&File
- interface fileWithoutWriteTo extends _syNvbJl {
+ type _swvQQIH = noWriteTo&File
+ interface fileWithoutWriteTo extends _swvQQIH {
}
interface File {
/**
@@ -2460,8 +2492,8 @@ namespace os {
*
* The methods of File are safe for concurrent use.
*/
- type _sDzXOPD = file
- interface File extends _sDzXOPD {
+ type _swXMOjN = file
+ interface File extends _swXMOjN {
}
/**
* A FileInfo describes a file and is returned by [Stat] and [Lstat].
@@ -2853,130 +2885,378 @@ namespace filepath {
}
}
-/**
- * Package validation provides configurable and extensible rules for validating data of various types.
- */
-namespace ozzo_validation {
- /**
- * Error interface represents an validation error
- */
- interface Error {
- [key:string]: any;
- error(): string
- code(): string
- message(): string
- setMessage(_arg0: string): Error
- params(): _TygojaDict
- setParams(_arg0: _TygojaDict): Error
- }
-}
-
-/**
- * Package dbx provides a set of DB-agnostic and easy-to-use query building methods for relational databases.
- */
-namespace dbx {
- /**
- * Builder supports building SQL statements in a DB-agnostic way.
- * Builder mainly provides two sets of query building methods: those building SELECT statements
- * and those manipulating DB data or schema (e.g. INSERT statements, CREATE TABLE statements).
- */
- interface Builder {
- [key:string]: any;
+namespace security {
+ interface s256Challenge {
/**
- * NewQuery creates a new Query object with the given SQL statement.
- * The SQL statement may contain parameter placeholders which can be bound with actual parameter
- * values before the statement is executed.
+ * S256Challenge creates base64 encoded sha256 challenge string derived from code.
+ * The padding of the result base64 string is stripped per [RFC 7636].
+ *
+ * [RFC 7636]: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2
*/
- newQuery(_arg0: string): (Query)
+ (code: string): string
+ }
+ interface md5 {
/**
- * Select returns a new SelectQuery object that can be used to build a SELECT statement.
- * The parameters to this method should be the list column names to be selected.
- * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ * MD5 creates md5 hash from the provided plain text.
*/
- select(..._arg0: string[]): (SelectQuery)
+ (text: string): string
+ }
+ interface sha256 {
/**
- * ModelQuery returns a new ModelQuery object that can be used to perform model insertion, update, and deletion.
- * The parameter to this method should be a pointer to the model struct that needs to be inserted, updated, or deleted.
+ * SHA256 creates sha256 hash as defined in FIPS 180-4 from the provided text.
*/
- model(_arg0: {
- }): (ModelQuery)
+ (text: string): string
+ }
+ interface sha512 {
/**
- * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ * SHA512 creates sha512 hash as defined in FIPS 180-4 from the provided text.
*/
- generatePlaceholder(_arg0: number): string
+ (text: string): string
+ }
+ interface hs256 {
/**
- * Quote quotes a string so that it can be embedded in a SQL statement as a string value.
+ * HS256 creates a HMAC hash with sha256 digest algorithm.
*/
- quote(_arg0: string): string
+ (text: string, secret: string): string
+ }
+ interface hs512 {
/**
- * QuoteSimpleTableName quotes a simple table name.
- * A simple table name does not contain any schema prefix.
+ * HS512 creates a HMAC hash with sha512 digest algorithm.
*/
- quoteSimpleTableName(_arg0: string): string
+ (text: string, secret: string): string
+ }
+ interface equal {
/**
- * QuoteSimpleColumnName quotes a simple column name.
- * A simple column name does not contain any table prefix.
+ * Equal compares two hash strings for equality without leaking timing information.
*/
- quoteSimpleColumnName(_arg0: string): string
+ (hash1: string, hash2: string): boolean
+ }
+ // @ts-ignore
+ import crand = rand
+ interface encrypt {
/**
- * QueryBuilder returns the query builder supporting the current DB.
+ * Encrypt encrypts "data" with the specified "key" (must be valid 32 char AES key).
+ *
+ * This method uses AES-256-GCM block cypher mode.
*/
- queryBuilder(): QueryBuilder
+ (data: string|Array, key: string): string
+ }
+ interface decrypt {
/**
- * Insert creates a Query that represents an INSERT SQL statement.
- * The keys of cols are the column names, while the values of cols are the corresponding column
- * values to be inserted.
+ * Decrypt decrypts encrypted text with key (must be valid 32 chars AES key).
+ *
+ * This method uses AES-256-GCM block cypher mode.
*/
- insert(table: string, cols: Params): (Query)
+ (cipherText: string, key: string): string|Array
+ }
+ interface parseUnverifiedJWT {
/**
- * Upsert creates a Query that represents an UPSERT SQL statement.
- * Upsert inserts a row into the table if the primary key or unique index is not found.
- * Otherwise it will update the row with the new values.
- * The keys of cols are the column names, while the values of cols are the corresponding column
- * values to be inserted.
+ * ParseUnverifiedJWT parses JWT and returns its claims
+ * but DOES NOT verify the signature.
+ *
+ * It verifies only the exp, iat and nbf claims.
*/
- upsert(table: string, cols: Params, ...constraints: string[]): (Query)
+ (token: string): jwt.MapClaims
+ }
+ interface parseJWT {
/**
- * Update creates a Query that represents an UPDATE SQL statement.
- * The keys of cols are the column names, while the values of cols are the corresponding new column
- * values. If the "where" expression is nil, the UPDATE SQL statement will have no WHERE clause
- * (be careful in this case as the SQL statement will update ALL rows in the table).
+ * ParseJWT verifies and parses JWT and returns its claims.
*/
- update(table: string, cols: Params, where: Expression): (Query)
+ (token: string, verificationKey: string): jwt.MapClaims
+ }
+ interface newJWT {
/**
- * Delete creates a Query that represents a DELETE SQL statement.
- * If the "where" expression is nil, the DELETE SQL statement will have no WHERE clause
- * (be careful in this case as the SQL statement will delete ALL rows in the table).
+ * NewJWT generates and returns new HS256 signed JWT.
*/
- delete(table: string, where: Expression): (Query)
+ (payload: jwt.MapClaims, signingKey: string, duration: time.Duration): string
+ }
+ // @ts-ignore
+ import cryptoRand = rand
+ // @ts-ignore
+ import mathRand = rand
+ interface randomString {
/**
- * CreateTable creates a Query that represents a CREATE TABLE SQL statement.
- * The keys of cols are the column names, while the values of cols are the corresponding column types.
- * The optional "options" parameters will be appended to the generated SQL statement.
+ * RandomString generates a cryptographically random string with the specified length.
+ *
+ * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding.
*/
- createTable(table: string, cols: _TygojaDict, ...options: string[]): (Query)
+ (length: number): string
+ }
+ interface randomStringWithAlphabet {
/**
- * RenameTable creates a Query that can be used to rename a table.
+ * RandomStringWithAlphabet generates a cryptographically random string
+ * with the specified length and characters set.
+ *
+ * It panics if for some reason rand.Int returns a non-nil error.
*/
- renameTable(oldName: string, newName: string): (Query)
+ (length: number, alphabet: string): string
+ }
+ interface pseudorandomString {
/**
- * DropTable creates a Query that can be used to drop a table.
+ * PseudorandomString generates a pseudorandom string with the specified length.
+ *
+ * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding.
+ *
+ * For a cryptographically random string (but a little bit slower) use RandomString instead.
*/
- dropTable(table: string): (Query)
+ (length: number): string
+ }
+ interface pseudorandomStringWithAlphabet {
/**
- * TruncateTable creates a Query that can be used to truncate a table.
+ * PseudorandomStringWithAlphabet generates a pseudorandom string
+ * with the specified length and characters set.
+ *
+ * For a cryptographically random (but a little bit slower) use RandomStringWithAlphabet instead.
*/
- truncateTable(table: string): (Query)
+ (length: number, alphabet: string): string
+ }
+ interface randomStringByRegex {
/**
- * AddColumn creates a Query that can be used to add a column to a table.
+ * RandomStringByRegex generates a random string matching the regex pattern.
+ * If optFlags is not set, fallbacks to [syntax.Perl].
+ *
+ * NB! While the source of the randomness comes from [crypto/rand] this method
+ * is not recommended to be used on its own in critical secure contexts because
+ * the generated length could vary too much on the used pattern and may not be
+ * as secure as simply calling [security.RandomString].
+ * If you still insist on using it for such purposes, consider at least
+ * a large enough minimum length for the generated string, e.g. `[a-z0-9]{30}`.
+ *
+ * This function is inspired by github.com/pipe01/revregexp, github.com/lucasjones/reggen and other similar packages.
*/
- addColumn(table: string, col: string, typ: string): (Query)
+ (pattern: string, ...optFlags: syntax.Flags[]): string
+ }
+}
+
+/**
+ * Package template is a thin wrapper around the standard html/template
+ * and text/template packages that implements a convenient registry to
+ * load and cache templates on the fly concurrently.
+ *
+ * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code.
+ *
+ * Example:
+ *
+ * ```
+ * registry := template.NewRegistry()
+ *
+ * html1, err := registry.LoadFiles(
+ * // the files set wil be parsed only once and then cached
+ * "layout.html",
+ * "content.html",
+ * ).Render(map[string]any{"name": "John"})
+ *
+ * html2, err := registry.LoadFiles(
+ * // reuse the already parsed and cached files set
+ * "layout.html",
+ * "content.html",
+ * ).Render(map[string]any{"name": "Jane"})
+ * ```
+ */
+namespace template {
+ interface newRegistry {
/**
- * DropColumn creates a Query that can be used to drop a column from a table.
+ * NewRegistry creates and initializes a new templates registry with
+ * some defaults (eg. global "raw" template function for unescaped HTML).
+ *
+ * Use the Registry.Load* methods to load templates into the registry.
*/
- dropColumn(table: string, col: string): (Query)
+ (): (Registry)
+ }
+ /**
+ * Registry defines a templates registry that is safe to be used by multiple goroutines.
+ *
+ * Use the Registry.Load* methods to load templates into the registry.
+ */
+ interface Registry {
+ }
+ interface Registry {
/**
- * RenameColumn creates a Query that can be used to rename a column in a table.
+ * AddFuncs registers new global template functions.
+ *
+ * The key of each map entry is the function name that will be used in the templates.
+ * If a function with the map entry name already exists it will be replaced with the new one.
+ *
+ * The value of each map entry is a function that must have either a
+ * single return value, or two return values of which the second has type error.
+ *
+ * Example:
+ *
+ * ```
+ * r.AddFuncs(map[string]any{
+ * "toUpper": func(str string) string {
+ * return strings.ToUppser(str)
+ * },
+ * ...
+ * })
+ * ```
+ */
+ addFuncs(funcs: _TygojaDict): (Registry)
+ }
+ interface Registry {
+ /**
+ * LoadFiles caches (if not already) the specified filenames set as a
+ * single template and returns a ready to use Renderer instance.
+ *
+ * There must be at least 1 filename specified.
+ */
+ loadFiles(...filenames: string[]): (Renderer)
+ }
+ interface Registry {
+ /**
+ * LoadString caches (if not already) the specified inline string as a
+ * single template and returns a ready to use Renderer instance.
+ */
+ loadString(text: string): (Renderer)
+ }
+ interface Registry {
+ /**
+ * LoadFS caches (if not already) the specified fs and globPatterns
+ * pair as single template and returns a ready to use Renderer instance.
+ *
+ * There must be at least 1 file matching the provided globPattern(s)
+ * (note that most file names serves as glob patterns matching themselves).
+ */
+ loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer)
+ }
+ /**
+ * Renderer defines a single parsed template.
+ */
+ interface Renderer {
+ }
+ interface Renderer {
+ /**
+ * Render executes the template with the specified data as the dot object
+ * and returns the result as plain string.
+ */
+ render(data: any): string
+ }
+}
+
+/**
+ * Package validation provides configurable and extensible rules for validating data of various types.
+ */
+namespace ozzo_validation {
+ /**
+ * Error interface represents an validation error
+ */
+ interface Error {
+ [key:string]: any;
+ error(): string
+ code(): string
+ message(): string
+ setMessage(_arg0: string): Error
+ params(): _TygojaDict
+ setParams(_arg0: _TygojaDict): Error
+ }
+}
+
+/**
+ * Package dbx provides a set of DB-agnostic and easy-to-use query building methods for relational databases.
+ */
+namespace dbx {
+ /**
+ * Builder supports building SQL statements in a DB-agnostic way.
+ * Builder mainly provides two sets of query building methods: those building SELECT statements
+ * and those manipulating DB data or schema (e.g. INSERT statements, CREATE TABLE statements).
+ */
+ interface Builder {
+ [key:string]: any;
+ /**
+ * NewQuery creates a new Query object with the given SQL statement.
+ * The SQL statement may contain parameter placeholders which can be bound with actual parameter
+ * values before the statement is executed.
+ */
+ newQuery(_arg0: string): (Query)
+ /**
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ */
+ select(..._arg0: string[]): (SelectQuery)
+ /**
+ * ModelQuery returns a new ModelQuery object that can be used to perform model insertion, update, and deletion.
+ * The parameter to this method should be a pointer to the model struct that needs to be inserted, updated, or deleted.
+ */
+ model(_arg0: {
+ }): (ModelQuery)
+ /**
+ * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ */
+ generatePlaceholder(_arg0: number): string
+ /**
+ * Quote quotes a string so that it can be embedded in a SQL statement as a string value.
+ */
+ quote(_arg0: string): string
+ /**
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
+ */
+ quoteSimpleTableName(_arg0: string): string
+ /**
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
+ */
+ quoteSimpleColumnName(_arg0: string): string
+ /**
+ * QueryBuilder returns the query builder supporting the current DB.
+ */
+ queryBuilder(): QueryBuilder
+ /**
+ * Insert creates a Query that represents an INSERT SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
+ */
+ insert(table: string, cols: Params): (Query)
+ /**
+ * Upsert creates a Query that represents an UPSERT SQL statement.
+ * Upsert inserts a row into the table if the primary key or unique index is not found.
+ * Otherwise it will update the row with the new values.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
+ */
+ upsert(table: string, cols: Params, ...constraints: string[]): (Query)
+ /**
+ * Update creates a Query that represents an UPDATE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding new column
+ * values. If the "where" expression is nil, the UPDATE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will update ALL rows in the table).
+ */
+ update(table: string, cols: Params, where: Expression): (Query)
+ /**
+ * Delete creates a Query that represents a DELETE SQL statement.
+ * If the "where" expression is nil, the DELETE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will delete ALL rows in the table).
+ */
+ delete(table: string, where: Expression): (Query)
+ /**
+ * CreateTable creates a Query that represents a CREATE TABLE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column types.
+ * The optional "options" parameters will be appended to the generated SQL statement.
+ */
+ createTable(table: string, cols: _TygojaDict, ...options: string[]): (Query)
+ /**
+ * RenameTable creates a Query that can be used to rename a table.
+ */
+ renameTable(oldName: string, newName: string): (Query)
+ /**
+ * DropTable creates a Query that can be used to drop a table.
+ */
+ dropTable(table: string): (Query)
+ /**
+ * TruncateTable creates a Query that can be used to truncate a table.
+ */
+ truncateTable(table: string): (Query)
+ /**
+ * AddColumn creates a Query that can be used to add a column to a table.
+ */
+ addColumn(table: string, col: string, typ: string): (Query)
+ /**
+ * DropColumn creates a Query that can be used to drop a column from a table.
+ */
+ dropColumn(table: string, col: string): (Query)
+ /**
+ * RenameColumn creates a Query that can be used to rename a column in a table.
*/
renameColumn(table: string, oldName: string, newName: string): (Query)
/**
@@ -3207,14 +3487,14 @@ namespace dbx {
/**
* MssqlBuilder is the builder for SQL Server databases.
*/
- type _sLlglaO = BaseBuilder
- interface MssqlBuilder extends _sLlglaO {
+ type _sWbMyfz = BaseBuilder
+ interface MssqlBuilder extends _sWbMyfz {
}
/**
* MssqlQueryBuilder is the query builder for SQL Server databases.
*/
- type _sIerFva = BaseQueryBuilder
- interface MssqlQueryBuilder extends _sIerFva {
+ type _saKuASF = BaseQueryBuilder
+ interface MssqlQueryBuilder extends _saKuASF {
}
interface newMssqlBuilder {
/**
@@ -3285,8 +3565,8 @@ namespace dbx {
/**
* MysqlBuilder is the builder for MySQL databases.
*/
- type _sJRJGqZ = BaseBuilder
- interface MysqlBuilder extends _sJRJGqZ {
+ type _sTGSKBe = BaseBuilder
+ interface MysqlBuilder extends _sTGSKBe {
}
interface newMysqlBuilder {
/**
@@ -3361,14 +3641,14 @@ namespace dbx {
/**
* OciBuilder is the builder for Oracle databases.
*/
- type _sHTbvIJ = BaseBuilder
- interface OciBuilder extends _sHTbvIJ {
+ type _swAOXYP = BaseBuilder
+ interface OciBuilder extends _swAOXYP {
}
/**
* OciQueryBuilder is the query builder for Oracle databases.
*/
- type _sGNFgrc = BaseQueryBuilder
- interface OciQueryBuilder extends _sGNFgrc {
+ type _swJVwGn = BaseQueryBuilder
+ interface OciQueryBuilder extends _swJVwGn {
}
interface newOciBuilder {
/**
@@ -3431,8 +3711,8 @@ namespace dbx {
/**
* PgsqlBuilder is the builder for PostgreSQL databases.
*/
- type _sALdjlO = BaseBuilder
- interface PgsqlBuilder extends _sALdjlO {
+ type _siYYFzP = BaseBuilder
+ interface PgsqlBuilder extends _siYYFzP {
}
interface newPgsqlBuilder {
/**
@@ -3499,8 +3779,8 @@ namespace dbx {
/**
* SqliteBuilder is the builder for SQLite databases.
*/
- type _spUmBhh = BaseBuilder
- interface SqliteBuilder extends _spUmBhh {
+ type _smcDSmy = BaseBuilder
+ interface SqliteBuilder extends _smcDSmy {
}
interface newSqliteBuilder {
/**
@@ -3599,8 +3879,8 @@ namespace dbx {
/**
* StandardBuilder is the builder that is used by DB for an unknown driver.
*/
- type _sWjnuHQ = BaseBuilder
- interface StandardBuilder extends _sWjnuHQ {
+ type _shFlgBJ = BaseBuilder
+ interface StandardBuilder extends _shFlgBJ {
}
interface newStandardBuilder {
/**
@@ -3666,8 +3946,8 @@ namespace dbx {
* DB enhances sql.DB by providing a set of DB-agnostic query building methods.
* DB allows easier query building and population of data into Go variables.
*/
- type _swIUbPb = Builder
- interface DB extends _swIUbPb {
+ type _saVuAwy = Builder
+ interface DB extends _saVuAwy {
/**
* FieldMapper maps struct fields to DB columns. Defaults to DefaultFieldMapFunc.
*/
@@ -4471,8 +4751,8 @@ namespace dbx {
* Rows enhances sql.Rows by providing additional data query methods.
* Rows can be obtained by calling Query.Rows(). It is mainly used to populate data row by row.
*/
- type _sguxiye = sql.Rows
- interface Rows extends _sguxiye {
+ type _srmrCaw = sql.Rows
+ interface Rows extends _srmrCaw {
}
interface Rows {
/**
@@ -4844,8 +5124,8 @@ namespace dbx {
}): string }
interface structInfo {
}
- type _sPncttL = structInfo
- interface structValue extends _sPncttL {
+ type _smmBUkg = structInfo
+ interface structValue extends _smmBUkg {
}
interface fieldInfo {
}
@@ -4884,8 +5164,8 @@ namespace dbx {
/**
* Tx enhances sql.Tx with additional querying methods.
*/
- type _sbhRiou = Builder
- interface Tx extends _sbhRiou {
+ type _sZSxrxS = Builder
+ interface Tx extends _sZSxrxS {
}
interface Tx {
/**
@@ -4901,197 +5181,54 @@ namespace dbx {
}
}
-namespace security {
- interface s256Challenge {
- /**
- * S256Challenge creates base64 encoded sha256 challenge string derived from code.
- * The padding of the result base64 string is stripped per [RFC 7636].
- *
- * [RFC 7636]: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2
- */
- (code: string): string
- }
- interface md5 {
- /**
- * MD5 creates md5 hash from the provided plain text.
- */
- (text: string): string
+namespace filesystem {
+ /**
+ * FileReader defines an interface for a file resource reader.
+ */
+ interface FileReader {
+ [key:string]: any;
+ open(): io.ReadSeekCloser
}
- interface sha256 {
- /**
- * SHA256 creates sha256 hash as defined in FIPS 180-4 from the provided text.
- */
- (text: string): string
+ /**
+ * File defines a single file [io.ReadSeekCloser] resource.
+ *
+ * The file could be from a local path, multipart/form-data header, etc.
+ */
+ interface File {
+ reader: FileReader
+ name: string
+ originalName: string
+ size: number
}
- interface sha512 {
+ interface File {
/**
- * SHA512 creates sha512 hash as defined in FIPS 180-4 from the provided text.
+ * AsMap implements [core.mapExtractor] and returns a value suitable
+ * to be used in an API rule expression.
*/
- (text: string): string
+ asMap(): _TygojaDict
}
- interface hs256 {
+ interface newFileFromPath {
/**
- * HS256 creates a HMAC hash with sha256 digest algorithm.
+ * NewFileFromPath creates a new File instance from the provided local file path.
*/
- (text: string, secret: string): string
+ (path: string): (File)
}
- interface hs512 {
+ interface newFileFromBytes {
/**
- * HS512 creates a HMAC hash with sha512 digest algorithm.
+ * NewFileFromBytes creates a new File instance from the provided byte slice.
*/
- (text: string, secret: string): string
+ (b: string|Array, name: string): (File)
}
- interface equal {
+ interface newFileFromMultipart {
/**
- * Equal compares two hash strings for equality without leaking timing information.
+ * NewFileFromMultipart creates a new File from the provided multipart header.
*/
- (hash1: string, hash2: string): boolean
+ (mh: multipart.FileHeader): (File)
}
- // @ts-ignore
- import crand = rand
- interface encrypt {
+ interface newFileFromURL {
/**
- * Encrypt encrypts "data" with the specified "key" (must be valid 32 char AES key).
- *
- * This method uses AES-256-GCM block cypher mode.
- */
- (data: string|Array, key: string): string
- }
- interface decrypt {
- /**
- * Decrypt decrypts encrypted text with key (must be valid 32 chars AES key).
- *
- * This method uses AES-256-GCM block cypher mode.
- */
- (cipherText: string, key: string): string|Array
- }
- interface parseUnverifiedJWT {
- /**
- * ParseUnverifiedJWT parses JWT and returns its claims
- * but DOES NOT verify the signature.
- *
- * It verifies only the exp, iat and nbf claims.
- */
- (token: string): jwt.MapClaims
- }
- interface parseJWT {
- /**
- * ParseJWT verifies and parses JWT and returns its claims.
- */
- (token: string, verificationKey: string): jwt.MapClaims
- }
- interface newJWT {
- /**
- * NewJWT generates and returns new HS256 signed JWT.
- */
- (payload: jwt.MapClaims, signingKey: string, duration: time.Duration): string
- }
- // @ts-ignore
- import cryptoRand = rand
- // @ts-ignore
- import mathRand = rand
- interface randomString {
- /**
- * RandomString generates a cryptographically random string with the specified length.
- *
- * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding.
- */
- (length: number): string
- }
- interface randomStringWithAlphabet {
- /**
- * RandomStringWithAlphabet generates a cryptographically random string
- * with the specified length and characters set.
- *
- * It panics if for some reason rand.Int returns a non-nil error.
- */
- (length: number, alphabet: string): string
- }
- interface pseudorandomString {
- /**
- * PseudorandomString generates a pseudorandom string with the specified length.
- *
- * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding.
- *
- * For a cryptographically random string (but a little bit slower) use RandomString instead.
- */
- (length: number): string
- }
- interface pseudorandomStringWithAlphabet {
- /**
- * PseudorandomStringWithAlphabet generates a pseudorandom string
- * with the specified length and characters set.
- *
- * For a cryptographically random (but a little bit slower) use RandomStringWithAlphabet instead.
- */
- (length: number, alphabet: string): string
- }
- interface randomStringByRegex {
- /**
- * RandomStringByRegex generates a random string matching the regex pattern.
- * If optFlags is not set, fallbacks to [syntax.Perl].
- *
- * NB! While the source of the randomness comes from [crypto/rand] this method
- * is not recommended to be used on its own in critical secure contexts because
- * the generated length could vary too much on the used pattern and may not be
- * as secure as simply calling [security.RandomString].
- * If you still insist on using it for such purposes, consider at least
- * a large enough minimum length for the generated string, e.g. `[a-z0-9]{30}`.
- *
- * This function is inspired by github.com/pipe01/revregexp, github.com/lucasjones/reggen and other similar packages.
- */
- (pattern: string, ...optFlags: syntax.Flags[]): string
- }
-}
-
-namespace filesystem {
- /**
- * FileReader defines an interface for a file resource reader.
- */
- interface FileReader {
- [key:string]: any;
- open(): io.ReadSeekCloser
- }
- /**
- * File defines a single file [io.ReadSeekCloser] resource.
- *
- * The file could be from a local path, multipart/form-data header, etc.
- */
- interface File {
- reader: FileReader
- name: string
- originalName: string
- size: number
- }
- interface File {
- /**
- * AsMap implements [core.mapExtractor] and returns a value suitable
- * to be used in an API rule expression.
- */
- asMap(): _TygojaDict
- }
- interface newFileFromPath {
- /**
- * NewFileFromPath creates a new File instance from the provided local file path.
- */
- (path: string): (File)
- }
- interface newFileFromBytes {
- /**
- * NewFileFromBytes creates a new File instance from the provided byte slice.
- */
- (b: string|Array, name: string): (File)
- }
- interface newFileFromMultipart {
- /**
- * NewFileFromMultipart creates a new File from the provided multipart header.
- */
- (mh: multipart.FileHeader): (File)
- }
- interface newFileFromURL {
- /**
- * NewFileFromURL creates a new File from the provided url by
- * downloading the resource and load it as BytesReader.
+ * NewFileFromURL creates a new File from the provided url by
+ * downloading the resource and load it as BytesReader.
*
* Example
*
@@ -5140,8 +5277,8 @@ namespace filesystem {
*/
open(): io.ReadSeekCloser
}
- type _sgrUUXz = bytes.Reader
- interface bytesReadSeekCloser extends _sgrUUXz {
+ type _ssHjdzq = bytes.Reader
+ interface bytesReadSeekCloser extends _ssHjdzq {
}
interface bytesReadSeekCloser {
/**
@@ -7106,8 +7243,8 @@ namespace core {
/**
* AuthOrigin defines a Record proxy for working with the authOrigins collection.
*/
- type _sUmzkLl = Record
- interface AuthOrigin extends _sUmzkLl {
+ type _sogJtsm = Record
+ interface AuthOrigin extends _sogJtsm {
}
interface newAuthOrigin {
/**
@@ -7852,8 +7989,8 @@ namespace core {
/**
* @todo experiment eventually replacing the rules *string with a struct?
*/
- type _smUOmdS = BaseModel
- interface baseCollection extends _smUOmdS {
+ type _syDotjM = BaseModel
+ interface baseCollection extends _syDotjM {
listRule?: string
viewRule?: string
createRule?: string
@@ -7880,8 +8017,8 @@ namespace core {
/**
* Collection defines the table, fields and various options related to a set of records.
*/
- type _sKRFDEg = baseCollection&collectionAuthOptions&collectionViewOptions
- interface Collection extends _sKRFDEg {
+ type _sxZhUzw = baseCollection&collectionAuthOptions&collectionViewOptions
+ interface Collection extends _sxZhUzw {
}
interface newCollection {
/**
@@ -8891,8 +9028,8 @@ namespace core {
/**
* RequestEvent defines the PocketBase router handler event.
*/
- type _sKhraUa = router.Event
- interface RequestEvent extends _sKhraUa {
+ type _sTsQEpM = router.Event
+ interface RequestEvent extends _sTsQEpM {
app: App
auth?: Record
}
@@ -8952,8 +9089,8 @@ namespace core {
*/
clone(): (RequestInfo)
}
- type _sbXFFhl = hook.Event&RequestEvent
- interface BatchRequestEvent extends _sbXFFhl {
+ type _sZBeKMv = hook.Event&RequestEvent
+ interface BatchRequestEvent extends _sZBeKMv {
batch: Array<(InternalRequest | undefined)>
}
interface InternalRequest {
@@ -8990,28 +9127,34 @@ namespace core {
interface baseCollectionEventData {
tags(): Array
}
- type _spyRtBo = hook.Event
- interface BootstrapEvent extends _spyRtBo {
+ type _sMlkbbr = hook.Event
+ interface BootstrapEvent extends _sMlkbbr {
app: App
}
- type _scmVEbS = hook.Event
- interface TerminateEvent extends _scmVEbS {
+ type _sDYmrRL = hook.Event
+ interface TerminateEvent extends _sDYmrRL {
app: App
isRestart: boolean
}
- type _sLJKyMQ = hook.Event
- interface BackupEvent extends _sLJKyMQ {
+ type _sEAECNa = hook.Event
+ interface BackupEvent extends _sEAECNa {
app: App
context: context.Context
name: string // the name of the backup to create/restore.
exclude: Array // list of dir entries to exclude from the backup create/restore.
}
- type _sZsAFoN = hook.Event
- interface ServeEvent extends _sZsAFoN {
+ type _sNlEcow = hook.Event
+ interface ServeEvent extends _sNlEcow {
app: App
router?: router.Router
server?: http.Server
certManager?: any
+ /**
+ * Listener allow specifying a custom network listener.
+ *
+ * Leave it nil to use the default net.Listen("tcp", e.Server.Addr).
+ */
+ listener: net.Listener
/**
* InstallerFunc is the "installer" function that is called after
* successful server tcp bind but only if there is no explicit
@@ -9030,31 +9173,31 @@ namespace core {
*/
installerFunc: (app: App, systemSuperuser: Record, baseURL: string) => void
}
- type _szpyhbL = hook.Event&RequestEvent
- interface SettingsListRequestEvent extends _szpyhbL {
+ type _saCrNkL = hook.Event&RequestEvent
+ interface SettingsListRequestEvent extends _saCrNkL {
settings?: Settings
}
- type _sUrpTPz = hook.Event&RequestEvent
- interface SettingsUpdateRequestEvent extends _sUrpTPz {
+ type _sQZlNsK = hook.Event&RequestEvent
+ interface SettingsUpdateRequestEvent extends _sQZlNsK {
oldSettings?: Settings
newSettings?: Settings
}
- type _sOIBUYK = hook.Event
- interface SettingsReloadEvent extends _sOIBUYK {
+ type _sVDykXO = hook.Event
+ interface SettingsReloadEvent extends _sVDykXO {
app: App
}
- type _srxGkIn = hook.Event
- interface MailerEvent extends _srxGkIn {
+ type _sdQeTzw = hook.Event
+ interface MailerEvent extends _sdQeTzw {
app: App
mailer: mailer.Mailer
message?: mailer.Message
}
- type _spcbBxx = MailerEvent&baseRecordEventData
- interface MailerRecordEvent extends _spcbBxx {
+ type _suqcYkb = MailerEvent&baseRecordEventData
+ interface MailerRecordEvent extends _suqcYkb {
meta: _TygojaDict
}
- type _seMeelk = hook.Event&baseModelEventData
- interface ModelEvent extends _seMeelk {
+ type _sPgsLeR = hook.Event&baseModelEventData
+ interface ModelEvent extends _sPgsLeR {
app: App
context: context.Context
/**
@@ -9066,12 +9209,12 @@ namespace core {
*/
type: string
}
- type _stQeXll = ModelEvent
- interface ModelErrorEvent extends _stQeXll {
+ type _swBypkn = ModelEvent
+ interface ModelErrorEvent extends _swBypkn {
error: Error
}
- type _sLYFFhx = hook.Event&baseRecordEventData
- interface RecordEvent extends _sLYFFhx {
+ type _ssrrJfe = hook.Event&baseRecordEventData
+ interface RecordEvent extends _ssrrJfe {
app: App
context: context.Context
/**
@@ -9083,12 +9226,12 @@ namespace core {
*/
type: string
}
- type _sWPmnLi = RecordEvent
- interface RecordErrorEvent extends _sWPmnLi {
+ type _sNIiUoL = RecordEvent
+ interface RecordErrorEvent extends _sNIiUoL {
error: Error
}
- type _seGYfWU = hook.Event&baseCollectionEventData
- interface CollectionEvent extends _seGYfWU {
+ type _stnXgYd = hook.Event&baseCollectionEventData
+ interface CollectionEvent extends _stnXgYd {
app: App
context: context.Context
/**
@@ -9100,95 +9243,95 @@ namespace core {
*/
type: string
}
- type _sOEsoWv = CollectionEvent
- interface CollectionErrorEvent extends _sOEsoWv {
+ type _sTqxVKy = CollectionEvent
+ interface CollectionErrorEvent extends _sTqxVKy {
error: Error
}
- type _swJRVXC = hook.Event&RequestEvent&baseRecordEventData
- interface FileTokenRequestEvent extends _swJRVXC {
+ type _sYiQxMC = hook.Event&RequestEvent&baseRecordEventData
+ interface FileTokenRequestEvent extends _sYiQxMC {
token: string
}
- type _skWTzPe = hook.Event&RequestEvent&baseCollectionEventData
- interface FileDownloadRequestEvent extends _skWTzPe {
+ type _sEgyETZ = hook.Event&RequestEvent&baseCollectionEventData
+ interface FileDownloadRequestEvent extends _sEgyETZ {
record?: Record
fileField?: FileField
servedPath: string
servedName: string
}
- type _sjZNtLd = hook.Event&RequestEvent
- interface CollectionsListRequestEvent extends _sjZNtLd {
+ type _sBqgvuD = hook.Event&RequestEvent
+ interface CollectionsListRequestEvent extends _sBqgvuD {
collections: Array<(Collection | undefined)>
result?: search.Result
}
- type _sQwJwmj = hook.Event&RequestEvent
- interface CollectionsImportRequestEvent extends _sQwJwmj {
+ type _soslurq = hook.Event&RequestEvent
+ interface CollectionsImportRequestEvent extends _soslurq {
collectionsData: Array<_TygojaDict>
deleteMissing: boolean
}
- type _ssHWEJn = hook.Event&RequestEvent&baseCollectionEventData
- interface CollectionRequestEvent extends _ssHWEJn {
+ type _sTOKiZB = hook.Event&RequestEvent&baseCollectionEventData
+ interface CollectionRequestEvent extends _sTOKiZB {
}
- type _sZMxaAS = hook.Event&RequestEvent
- interface RealtimeConnectRequestEvent extends _sZMxaAS {
+ type _sheMBrI = hook.Event&RequestEvent
+ interface RealtimeConnectRequestEvent extends _sheMBrI {
client: subscriptions.Client
/**
* note: modifying it after the connect has no effect
*/
idleTimeout: time.Duration
}
- type _sirsirI = hook.Event&RequestEvent
- interface RealtimeMessageEvent extends _sirsirI {
+ type _svjkqqb = hook.Event&RequestEvent
+ interface RealtimeMessageEvent extends _svjkqqb {
client: subscriptions.Client
message?: subscriptions.Message
}
- type _sGVLXGI = hook.Event&RequestEvent
- interface RealtimeSubscribeRequestEvent extends _sGVLXGI {
+ type _slnQrin = hook.Event&RequestEvent
+ interface RealtimeSubscribeRequestEvent extends _slnQrin {
client: subscriptions.Client
subscriptions: Array
}
- type _sNkctgx = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordsListRequestEvent extends _sNkctgx {
+ type _smONdLz = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordsListRequestEvent extends _smONdLz {
/**
* @todo consider removing and maybe add as generic to the search.Result?
*/
records: Array<(Record | undefined)>
result?: search.Result
}
- type _sesXIeC = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordRequestEvent extends _sesXIeC {
+ type _stEEWjn = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordRequestEvent extends _stEEWjn {
record?: Record
}
- type _sNqEfxw = hook.Event&baseRecordEventData
- interface RecordEnrichEvent extends _sNqEfxw {
+ type _sMbQhat = hook.Event&baseRecordEventData
+ interface RecordEnrichEvent extends _sMbQhat {
app: App
requestInfo?: RequestInfo
}
- type _saJisDV = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordCreateOTPRequestEvent extends _saJisDV {
+ type _sVZGMgc = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordCreateOTPRequestEvent extends _sVZGMgc {
record?: Record
password: string
}
- type _sOSfgvh = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordAuthWithOTPRequestEvent extends _sOSfgvh {
+ type _smpkDaM = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordAuthWithOTPRequestEvent extends _smpkDaM {
record?: Record
otp?: OTP
}
- type _sAHiJnG = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordAuthRequestEvent extends _sAHiJnG {
+ type _sZjcQOZ = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordAuthRequestEvent extends _sZjcQOZ {
record?: Record
token: string
meta: any
authMethod: string
}
- type _syiXgiO = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordAuthWithPasswordRequestEvent extends _syiXgiO {
+ type _sIXWIde = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordAuthWithPasswordRequestEvent extends _sIXWIde {
record?: Record
identity: string
identityField: string
password: string
}
- type _sRauTZa = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordAuthWithOAuth2RequestEvent extends _sRauTZa {
+ type _scuTtNw = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordAuthWithOAuth2RequestEvent extends _scuTtNw {
providerName: string
providerClient: auth.Provider
record?: Record
@@ -9196,41 +9339,41 @@ namespace core {
createData: _TygojaDict
isNewRecord: boolean
}
- type _srGgmEJ = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordAuthRefreshRequestEvent extends _srGgmEJ {
+ type _srYufQD = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordAuthRefreshRequestEvent extends _srYufQD {
record?: Record
}
- type _sFfmUxk = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordRequestPasswordResetRequestEvent extends _sFfmUxk {
+ type _sPriDVN = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordRequestPasswordResetRequestEvent extends _sPriDVN {
record?: Record
}
- type _sLGZbki = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordConfirmPasswordResetRequestEvent extends _sLGZbki {
+ type _sFiSyGB = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordConfirmPasswordResetRequestEvent extends _sFiSyGB {
record?: Record
}
- type _sQcbAzh = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordRequestVerificationRequestEvent extends _sQcbAzh {
+ type _sxbGIrS = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordRequestVerificationRequestEvent extends _sxbGIrS {
record?: Record
}
- type _sIJpEBx = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordConfirmVerificationRequestEvent extends _sIJpEBx {
+ type _sBERXHo = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordConfirmVerificationRequestEvent extends _sBERXHo {
record?: Record
}
- type _sGcXDVI = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordRequestEmailChangeRequestEvent extends _sGcXDVI {
+ type _sbpJxPU = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordRequestEmailChangeRequestEvent extends _sbpJxPU {
record?: Record
newEmail: string
}
- type _sFyRDNw = hook.Event&RequestEvent&baseCollectionEventData
- interface RecordConfirmEmailChangeRequestEvent extends _sFyRDNw {
+ type _sfnHuJK = hook.Event&RequestEvent&baseCollectionEventData
+ interface RecordConfirmEmailChangeRequestEvent extends _sfnHuJK {
record?: Record
newEmail: string
}
/**
* ExternalAuth defines a Record proxy for working with the externalAuths collection.
*/
- type _sBktZEa = Record
- interface ExternalAuth extends _sBktZEa {
+ type _sakXxeh = Record
+ interface ExternalAuth extends _sakXxeh {
}
interface newExternalAuth {
/**
@@ -11692,8 +11835,8 @@ namespace core {
interface onlyFieldType {
type: string
}
- type _sHqecYH = Field
- interface fieldWithType extends _sHqecYH {
+ type _svvecbM = Field
+ interface fieldWithType extends _svvecbM {
type: string
}
interface fieldWithType {
@@ -11725,8 +11868,8 @@ namespace core {
*/
scan(value: any): void
}
- type _sUWXjNf = BaseModel
- interface Log extends _sUWXjNf {
+ type _sOtLTVu = BaseModel
+ interface Log extends _sOtLTVu {
created: types.DateTime
data: types.JSONMap
message: string
@@ -11772,8 +11915,8 @@ namespace core {
/**
* MFA defines a Record proxy for working with the mfas collection.
*/
- type _sobFmjd = Record
- interface MFA extends _sobFmjd {
+ type _sSoYaam = Record
+ interface MFA extends _sSoYaam {
}
interface newMFA {
/**
@@ -11995,8 +12138,8 @@ namespace core {
/**
* OTP defines a Record proxy for working with the otps collection.
*/
- type _swyrTJF = Record
- interface OTP extends _swyrTJF {
+ type _seIGqwi = Record
+ interface OTP extends _seIGqwi {
}
interface newOTP {
/**
@@ -12232,8 +12375,8 @@ namespace core {
}
interface runner {
}
- type _sksdbkz = BaseModel
- interface Record extends _sksdbkz {
+ type _szRDXSw = BaseModel
+ interface Record extends _szRDXSw {
}
interface newRecord {
/**
@@ -12708,8 +12851,8 @@ namespace core {
* BaseRecordProxy implements the [RecordProxy] interface and it is intended
* to be used as embed to custom user provided Record proxy structs.
*/
- type _sPOlOfX = Record
- interface BaseRecordProxy extends _sPOlOfX {
+ type _sXLUgJH = Record
+ interface BaseRecordProxy extends _sXLUgJH {
}
interface BaseRecordProxy {
/**
@@ -12958,8 +13101,8 @@ namespace core {
/**
* Settings defines the PocketBase app settings.
*/
- type _sXtvxjh = settings
- interface Settings extends _sXtvxjh {
+ type _sYgABiW = settings
+ interface Settings extends _sYgABiW {
}
interface Settings {
/**
@@ -13260,8 +13403,14 @@ namespace core {
*/
durationTime(): time.Duration
}
- type _sCbDiPV = BaseModel
- interface Param extends _sCbDiPV {
+ interface RateLimitRule {
+ /**
+ * String returns a string representation of the rule.
+ */
+ string(): string
+ }
+ type _sxQNvkZ = BaseModel
+ interface Param extends _sxQNvkZ {
created: types.DateTime
updated: types.DateTime
value: types.JSONRaw
@@ -13775,8 +13924,8 @@ namespace apis {
*/
(limitBytes: number): (hook.Handler)
}
- type _svzUclG = io.ReadCloser
- interface limitedReader extends _svzUclG {
+ type _sMtQFon = io.ReadCloser
+ interface limitedReader extends _sMtQFon {
}
interface limitedReader {
read(b: string|Array): number
@@ -13927,8 +14076,8 @@ namespace apis {
*/
(config: GzipConfig): (hook.Handler)
}
- type _sGCwxMx = http.ResponseWriter&io.Writer
- interface gzipResponseWriter extends _sGCwxMx {
+ type _scaWhIM = http.ResponseWriter&io.Writer
+ interface gzipResponseWriter extends _scaWhIM {
}
interface gzipResponseWriter {
writeHeader(code: number): void
@@ -13948,11 +14097,11 @@ namespace apis {
interface gzipResponseWriter {
unwrap(): http.ResponseWriter
}
- type _sXBRDsb = sync.RWMutex
- interface rateLimiter extends _sXBRDsb {
+ type _sjyibNu = sync.RWMutex
+ interface rateLimiter extends _sjyibNu {
}
- type _sxuiJKx = sync.Mutex
- interface fixedWindow extends _sxuiJKx {
+ type _sxhfghT = sync.Mutex
+ interface fixedWindow extends _sxhfghT {
}
interface realtimeSubscribeForm {
clientId: string
@@ -14193,8 +14342,8 @@ namespace pocketbase {
* It implements [CoreApp] via embedding and all of the app interface methods
* could be accessed directly through the instance (eg. PocketBase.DataDir()).
*/
- type _sQnaLxt = CoreApp
- interface PocketBase extends _sQnaLxt {
+ type _scTZvLO = CoreApp
+ interface PocketBase extends _scTZvLO {
/**
* RootCmd is the main console command
*/
@@ -14279,111 +14428,6 @@ namespace pocketbase {
}
}
-/**
- * Package template is a thin wrapper around the standard html/template
- * and text/template packages that implements a convenient registry to
- * load and cache templates on the fly concurrently.
- *
- * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code.
- *
- * Example:
- *
- * ```
- * registry := template.NewRegistry()
- *
- * html1, err := registry.LoadFiles(
- * // the files set wil be parsed only once and then cached
- * "layout.html",
- * "content.html",
- * ).Render(map[string]any{"name": "John"})
- *
- * html2, err := registry.LoadFiles(
- * // reuse the already parsed and cached files set
- * "layout.html",
- * "content.html",
- * ).Render(map[string]any{"name": "Jane"})
- * ```
- */
-namespace template {
- interface newRegistry {
- /**
- * NewRegistry creates and initializes a new templates registry with
- * some defaults (eg. global "raw" template function for unescaped HTML).
- *
- * Use the Registry.Load* methods to load templates into the registry.
- */
- (): (Registry)
- }
- /**
- * Registry defines a templates registry that is safe to be used by multiple goroutines.
- *
- * Use the Registry.Load* methods to load templates into the registry.
- */
- interface Registry {
- }
- interface Registry {
- /**
- * AddFuncs registers new global template functions.
- *
- * The key of each map entry is the function name that will be used in the templates.
- * If a function with the map entry name already exists it will be replaced with the new one.
- *
- * The value of each map entry is a function that must have either a
- * single return value, or two return values of which the second has type error.
- *
- * Example:
- *
- * ```
- * r.AddFuncs(map[string]any{
- * "toUpper": func(str string) string {
- * return strings.ToUppser(str)
- * },
- * ...
- * })
- * ```
- */
- addFuncs(funcs: _TygojaDict): (Registry)
- }
- interface Registry {
- /**
- * LoadFiles caches (if not already) the specified filenames set as a
- * single template and returns a ready to use Renderer instance.
- *
- * There must be at least 1 filename specified.
- */
- loadFiles(...filenames: string[]): (Renderer)
- }
- interface Registry {
- /**
- * LoadString caches (if not already) the specified inline string as a
- * single template and returns a ready to use Renderer instance.
- */
- loadString(text: string): (Renderer)
- }
- interface Registry {
- /**
- * LoadFS caches (if not already) the specified fs and globPatterns
- * pair as single template and returns a ready to use Renderer instance.
- *
- * There must be at least 1 file matching the provided globPattern(s)
- * (note that most file names serves as glob patterns matching themselves).
- */
- loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer)
- }
- /**
- * Renderer defines a single parsed template.
- */
- interface Renderer {
- }
- interface Renderer {
- /**
- * Render executes the template with the specified data as the dot object
- * and returns the result as plain string.
- */
- render(data: any): string
- }
-}
-
/**
* Package sync provides basic synchronization primitives such as mutual
* exclusion locks. Other than the [Once] and [WaitGroup] types, most are intended
@@ -14531,169 +14575,6 @@ namespace sync {
}
}
-/**
- * Package io provides basic interfaces to I/O primitives.
- * Its primary job is to wrap existing implementations of such primitives,
- * such as those in package os, into shared public interfaces that
- * abstract the functionality, plus some other related primitives.
- *
- * Because these interfaces and primitives wrap lower-level operations with
- * various implementations, unless otherwise informed clients should not
- * assume they are safe for parallel execution.
- */
-namespace io {
- /**
- * Reader is the interface that wraps the basic Read method.
- *
- * Read reads up to len(p) bytes into p. It returns the number of bytes
- * read (0 <= n <= len(p)) and any error encountered. Even if Read
- * returns n < len(p), it may use all of p as scratch space during the call.
- * If some data is available but not len(p) bytes, Read conventionally
- * returns what is available instead of waiting for more.
- *
- * When Read encounters an error or end-of-file condition after
- * successfully reading n > 0 bytes, it returns the number of
- * bytes read. It may return the (non-nil) error from the same call
- * or return the error (and n == 0) from a subsequent call.
- * An instance of this general case is that a Reader returning
- * a non-zero number of bytes at the end of the input stream may
- * return either err == EOF or err == nil. The next Read should
- * return 0, EOF.
- *
- * Callers should always process the n > 0 bytes returned before
- * considering the error err. Doing so correctly handles I/O errors
- * that happen after reading some bytes and also both of the
- * allowed EOF behaviors.
- *
- * If len(p) == 0, Read should always return n == 0. It may return a
- * non-nil error if some error condition is known, such as EOF.
- *
- * Implementations of Read are discouraged from returning a
- * zero byte count with a nil error, except when len(p) == 0.
- * Callers should treat a return of 0 and nil as indicating that
- * nothing happened; in particular it does not indicate EOF.
- *
- * Implementations must not retain p.
- */
- interface Reader {
- [key:string]: any;
- read(p: string|Array): number
- }
- /**
- * Writer is the interface that wraps the basic Write method.
- *
- * Write writes len(p) bytes from p to the underlying data stream.
- * It returns the number of bytes written from p (0 <= n <= len(p))
- * and any error encountered that caused the write to stop early.
- * Write must return a non-nil error if it returns n < len(p).
- * Write must not modify the slice data, even temporarily.
- *
- * Implementations must not retain p.
- */
- interface Writer {
- [key:string]: any;
- write(p: string|Array): number
- }
- /**
- * ReadCloser is the interface that groups the basic Read and Close methods.
- */
- interface ReadCloser {
- [key:string]: any;
- }
- /**
- * ReadSeekCloser is the interface that groups the basic Read, Seek and Close
- * methods.
- */
- interface ReadSeekCloser {
- [key:string]: any;
- }
-}
-
-/**
- * Package bytes implements functions for the manipulation of byte slices.
- * It is analogous to the facilities of the [strings] package.
- */
-namespace bytes {
- /**
- * A Reader implements the [io.Reader], [io.ReaderAt], [io.WriterTo], [io.Seeker],
- * [io.ByteScanner], and [io.RuneScanner] interfaces by reading from
- * a byte slice.
- * Unlike a [Buffer], a Reader is read-only and supports seeking.
- * The zero value for Reader operates like a Reader of an empty slice.
- */
- interface Reader {
- }
- interface Reader {
- /**
- * Len returns the number of bytes of the unread portion of the
- * slice.
- */
- len(): number
- }
- interface Reader {
- /**
- * Size returns the original length of the underlying byte slice.
- * Size is the number of bytes available for reading via [Reader.ReadAt].
- * The result is unaffected by any method calls except [Reader.Reset].
- */
- size(): number
- }
- interface Reader {
- /**
- * Read implements the [io.Reader] interface.
- */
- read(b: string|Array): number
- }
- interface Reader {
- /**
- * ReadAt implements the [io.ReaderAt] interface.
- */
- readAt(b: string|Array, off: number): number
- }
- interface Reader {
- /**
- * ReadByte implements the [io.ByteReader] interface.
- */
- readByte(): number
- }
- interface Reader {
- /**
- * UnreadByte complements [Reader.ReadByte] in implementing the [io.ByteScanner] interface.
- */
- unreadByte(): void
- }
- interface Reader {
- /**
- * ReadRune implements the [io.RuneReader] interface.
- */
- readRune(): [number, number]
- }
- interface Reader {
- /**
- * UnreadRune complements [Reader.ReadRune] in implementing the [io.RuneScanner] interface.
- */
- unreadRune(): void
- }
- interface Reader {
- /**
- * Seek implements the [io.Seeker] interface.
- */
- seek(offset: number, whence: number): number
- }
- interface Reader {
- /**
- * WriteTo implements the [io.WriterTo] interface.
- */
- writeTo(w: io.Writer): number
- }
- interface Reader {
- /**
- * Reset resets the [Reader] to be reading from b.
- */
- reset(b: string|Array): void
- }
-}
-
/**
* Package syscall contains an interface to the low-level operating system
* primitives. The details vary depending on the underlying system, and
@@ -15428,68 +15309,246 @@ namespace time {
}
/**
- * Package fs defines basic interfaces to a file system.
- * A file system can be provided by the host operating system
- * but also by other packages.
+ * Package io provides basic interfaces to I/O primitives.
+ * Its primary job is to wrap existing implementations of such primitives,
+ * such as those in package os, into shared public interfaces that
+ * abstract the functionality, plus some other related primitives.
*
- * See the [testing/fstest] package for support with testing
- * implementations of file systems.
+ * Because these interfaces and primitives wrap lower-level operations with
+ * various implementations, unless otherwise informed clients should not
+ * assume they are safe for parallel execution.
*/
-namespace fs {
+namespace io {
/**
- * An FS provides access to a hierarchical file system.
+ * Reader is the interface that wraps the basic Read method.
*
- * The FS interface is the minimum implementation required of the file system.
- * A file system may implement additional interfaces,
- * such as [ReadFileFS], to provide additional or optimized functionality.
+ * Read reads up to len(p) bytes into p. It returns the number of bytes
+ * read (0 <= n <= len(p)) and any error encountered. Even if Read
+ * returns n < len(p), it may use all of p as scratch space during the call.
+ * If some data is available but not len(p) bytes, Read conventionally
+ * returns what is available instead of waiting for more.
*
- * [testing/fstest.TestFS] may be used to test implementations of an FS for
- * correctness.
+ * When Read encounters an error or end-of-file condition after
+ * successfully reading n > 0 bytes, it returns the number of
+ * bytes read. It may return the (non-nil) error from the same call
+ * or return the error (and n == 0) from a subsequent call.
+ * An instance of this general case is that a Reader returning
+ * a non-zero number of bytes at the end of the input stream may
+ * return either err == EOF or err == nil. The next Read should
+ * return 0, EOF.
+ *
+ * Callers should always process the n > 0 bytes returned before
+ * considering the error err. Doing so correctly handles I/O errors
+ * that happen after reading some bytes and also both of the
+ * allowed EOF behaviors.
+ *
+ * If len(p) == 0, Read should always return n == 0. It may return a
+ * non-nil error if some error condition is known, such as EOF.
+ *
+ * Implementations of Read are discouraged from returning a
+ * zero byte count with a nil error, except when len(p) == 0.
+ * Callers should treat a return of 0 and nil as indicating that
+ * nothing happened; in particular it does not indicate EOF.
+ *
+ * Implementations must not retain p.
*/
- interface FS {
+ interface Reader {
[key:string]: any;
- /**
- * Open opens the named file.
- *
- * When Open returns an error, it should be of type *PathError
- * with the Op field set to "open", the Path field set to name,
- * and the Err field describing the problem.
- *
- * Open should reject attempts to open names that do not satisfy
- * ValidPath(name), returning a *PathError with Err set to
- * ErrInvalid or ErrNotExist.
- */
- open(name: string): File
+ read(p: string|Array): number
}
/**
- * A File provides access to a single file.
- * The File interface is the minimum implementation required of the file.
- * Directory files should also implement [ReadDirFile].
- * A file may implement [io.ReaderAt] or [io.Seeker] as optimizations.
+ * Writer is the interface that wraps the basic Write method.
+ *
+ * Write writes len(p) bytes from p to the underlying data stream.
+ * It returns the number of bytes written from p (0 <= n <= len(p))
+ * and any error encountered that caused the write to stop early.
+ * Write must return a non-nil error if it returns n < len(p).
+ * Write must not modify the slice data, even temporarily.
+ *
+ * Implementations must not retain p.
*/
- interface File {
+ interface Writer {
[key:string]: any;
- stat(): FileInfo
- read(_arg0: string|Array): number
- close(): void
+ write(p: string|Array): number
}
/**
- * A DirEntry is an entry read from a directory
- * (using the [ReadDir] function or a [ReadDirFile]'s ReadDir method).
+ * ReadCloser is the interface that groups the basic Read and Close methods.
*/
- interface DirEntry {
+ interface ReadCloser {
+ [key:string]: any;
+ }
+ /**
+ * ReadSeekCloser is the interface that groups the basic Read, Seek and Close
+ * methods.
+ */
+ interface ReadSeekCloser {
+ [key:string]: any;
+ }
+}
+
+/**
+ * Package bytes implements functions for the manipulation of byte slices.
+ * It is analogous to the facilities of the [strings] package.
+ */
+namespace bytes {
+ /**
+ * A Reader implements the [io.Reader], [io.ReaderAt], [io.WriterTo], [io.Seeker],
+ * [io.ByteScanner], and [io.RuneScanner] interfaces by reading from
+ * a byte slice.
+ * Unlike a [Buffer], a Reader is read-only and supports seeking.
+ * The zero value for Reader operates like a Reader of an empty slice.
+ */
+ interface Reader {
+ }
+ interface Reader {
+ /**
+ * Len returns the number of bytes of the unread portion of the
+ * slice.
+ */
+ len(): number
+ }
+ interface Reader {
+ /**
+ * Size returns the original length of the underlying byte slice.
+ * Size is the number of bytes available for reading via [Reader.ReadAt].
+ * The result is unaffected by any method calls except [Reader.Reset].
+ */
+ size(): number
+ }
+ interface Reader {
+ /**
+ * Read implements the [io.Reader] interface.
+ */
+ read(b: string|Array): number
+ }
+ interface Reader {
+ /**
+ * ReadAt implements the [io.ReaderAt] interface.
+ */
+ readAt(b: string|Array, off: number): number
+ }
+ interface Reader {
+ /**
+ * ReadByte implements the [io.ByteReader] interface.
+ */
+ readByte(): number
+ }
+ interface Reader {
+ /**
+ * UnreadByte complements [Reader.ReadByte] in implementing the [io.ByteScanner] interface.
+ */
+ unreadByte(): void
+ }
+ interface Reader {
+ /**
+ * ReadRune implements the [io.RuneReader] interface.
+ */
+ readRune(): [number, number]
+ }
+ interface Reader {
+ /**
+ * UnreadRune complements [Reader.ReadRune] in implementing the [io.RuneScanner] interface.
+ */
+ unreadRune(): void
+ }
+ interface Reader {
+ /**
+ * Seek implements the [io.Seeker] interface.
+ */
+ seek(offset: number, whence: number): number
+ }
+ interface Reader {
+ /**
+ * WriteTo implements the [io.WriterTo] interface.
+ */
+ writeTo(w: io.Writer): number
+ }
+ interface Reader {
+ /**
+ * Reset resets the [Reader] to be reading from b.
+ */
+ reset(b: string|Array): void
+ }
+}
+
+/**
+ * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
+ * object, creating another object (Reader or Writer) that also implements
+ * the interface but provides buffering and some help for textual I/O.
+ */
+namespace bufio {
+ /**
+ * ReadWriter stores pointers to a [Reader] and a [Writer].
+ * It implements [io.ReadWriter].
+ */
+ type _sXxwmFy = Reader&Writer
+ interface ReadWriter extends _sXxwmFy {
+ }
+}
+
+/**
+ * Package fs defines basic interfaces to a file system.
+ * A file system can be provided by the host operating system
+ * but also by other packages.
+ *
+ * See the [testing/fstest] package for support with testing
+ * implementations of file systems.
+ */
+namespace fs {
+ /**
+ * An FS provides access to a hierarchical file system.
+ *
+ * The FS interface is the minimum implementation required of the file system.
+ * A file system may implement additional interfaces,
+ * such as [ReadFileFS], to provide additional or optimized functionality.
+ *
+ * [testing/fstest.TestFS] may be used to test implementations of an FS for
+ * correctness.
+ */
+ interface FS {
+ [key:string]: any;
+ /**
+ * Open opens the named file.
+ *
+ * When Open returns an error, it should be of type *PathError
+ * with the Op field set to "open", the Path field set to name,
+ * and the Err field describing the problem.
+ *
+ * Open should reject attempts to open names that do not satisfy
+ * ValidPath(name), returning a *PathError with Err set to
+ * ErrInvalid or ErrNotExist.
+ */
+ open(name: string): File
+ }
+ /**
+ * A File provides access to a single file.
+ * The File interface is the minimum implementation required of the file.
+ * Directory files should also implement [ReadDirFile].
+ * A file may implement [io.ReaderAt] or [io.Seeker] as optimizations.
+ */
+ interface File {
+ [key:string]: any;
+ stat(): FileInfo
+ read(_arg0: string|Array): number
+ close(): void
+ }
+ /**
+ * A DirEntry is an entry read from a directory
+ * (using the [ReadDir] function or a [ReadDirFile]'s ReadDir method).
+ */
+ interface DirEntry {
[key:string]: any;
/**
- * Name returns the name of the file (or subdirectory) described by the entry.
- * This name is only the final element of the path (the base name), not the entire path.
- * For example, Name would return "hello.go" not "home/gopher/hello.go".
- */
- name(): string
- /**
- * IsDir reports whether the entry describes a directory.
- */
- isDir(): boolean
- /**
+ * Name returns the name of the file (or subdirectory) described by the entry.
+ * This name is only the final element of the path (the base name), not the entire path.
+ * For example, Name would return "hello.go" not "home/gopher/hello.go".
+ */
+ name(): string
+ /**
+ * IsDir reports whether the entry describes a directory.
+ */
+ isDir(): boolean
+ /**
* Type returns the type bits for the entry.
* The type bits are a subset of the usual FileMode bits, those returned by the FileMode.Type method.
*/
@@ -15791,2369 +15850,1783 @@ namespace context {
}
/**
- * Package sql provides a generic interface around SQL (or SQL-like)
- * databases.
+ * Package net provides a portable interface for network I/O, including
+ * TCP/IP, UDP, domain name resolution, and Unix domain sockets.
*
- * The sql package must be used in conjunction with a database driver.
- * See https://golang.org/s/sqldrivers for a list of drivers.
+ * Although the package provides access to low-level networking
+ * primitives, most clients will need only the basic interface provided
+ * by the [Dial], [Listen], and Accept functions and the associated
+ * [Conn] and [Listener] interfaces. The crypto/tls package uses
+ * the same interfaces and similar Dial and Listen functions.
*
- * Drivers that do not support context cancellation will not return until
- * after the query is completed.
+ * The Dial function connects to a server:
*
- * For usage examples, see the wiki page at
- * https://golang.org/s/sqlwiki.
- */
-namespace sql {
- /**
- * TxOptions holds the transaction options to be used in [DB.BeginTx].
- */
- interface TxOptions {
- /**
- * Isolation is the transaction isolation level.
- * If zero, the driver or database's default level is used.
- */
- isolation: IsolationLevel
- readOnly: boolean
- }
- /**
- * NullString represents a string that may be null.
- * NullString implements the [Scanner] interface so
- * it can be used as a scan destination:
- *
- * ```
- * var s NullString
- * err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
- * ...
- * if s.Valid {
- * // use s.String
- * } else {
- * // NULL value
- * }
- * ```
- */
- interface NullString {
- string: string
- valid: boolean // Valid is true if String is not NULL
- }
- interface NullString {
- /**
- * Scan implements the [Scanner] interface.
- */
- scan(value: any): void
- }
- interface NullString {
- /**
- * Value implements the [driver.Valuer] interface.
- */
- value(): any
- }
- /**
- * DB is a database handle representing a pool of zero or more
- * underlying connections. It's safe for concurrent use by multiple
- * goroutines.
- *
- * The sql package creates and frees connections automatically; it
- * also maintains a free pool of idle connections. If the database has
- * a concept of per-connection state, such state can be reliably observed
- * within a transaction ([Tx]) or connection ([Conn]). Once [DB.Begin] is called, the
- * returned [Tx] is bound to a single connection. Once [Tx.Commit] or
- * [Tx.Rollback] is called on the transaction, that transaction's
- * connection is returned to [DB]'s idle connection pool. The pool size
- * can be controlled with [DB.SetMaxIdleConns].
- */
- interface DB {
- }
- interface DB {
- /**
- * PingContext verifies a connection to the database is still alive,
- * establishing a connection if necessary.
- */
- pingContext(ctx: context.Context): void
- }
- interface DB {
- /**
- * Ping verifies a connection to the database is still alive,
- * establishing a connection if necessary.
- *
- * Ping uses [context.Background] internally; to specify the context, use
- * [DB.PingContext].
+ * ```
+ * conn, err := net.Dial("tcp", "golang.org:80")
+ * if err != nil {
+ * // handle error
+ * }
+ * fmt.Fprintf(conn, "GET / HTTP/1.0\r\n\r\n")
+ * status, err := bufio.NewReader(conn).ReadString('\n')
+ * // ...
+ * ```
+ *
+ * The Listen function creates servers:
+ *
+ * ```
+ * ln, err := net.Listen("tcp", ":8080")
+ * if err != nil {
+ * // handle error
+ * }
+ * for {
+ * conn, err := ln.Accept()
+ * if err != nil {
+ * // handle error
+ * }
+ * go handleConnection(conn)
+ * }
+ * ```
+ *
+ * # Name Resolution
+ *
+ * The method for resolving domain names, whether indirectly with functions like Dial
+ * or directly with functions like [LookupHost] and [LookupAddr], varies by operating system.
+ *
+ * On Unix systems, the resolver has two options for resolving names.
+ * It can use a pure Go resolver that sends DNS requests directly to the servers
+ * listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C
+ * library routines such as getaddrinfo and getnameinfo.
+ *
+ * On Unix the pure Go resolver is preferred over the cgo resolver, because a blocked DNS
+ * request consumes only a goroutine, while a blocked C call consumes an operating system thread.
+ * When cgo is available, the cgo-based resolver is used instead under a variety of
+ * conditions: on systems that do not let programs make direct DNS requests (OS X),
+ * when the LOCALDOMAIN environment variable is present (even if empty),
+ * when the RES_OPTIONS or HOSTALIASES environment variable is non-empty,
+ * when the ASR_CONFIG environment variable is non-empty (OpenBSD only),
+ * when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the
+ * Go resolver does not implement.
+ *
+ * On all systems (except Plan 9), when the cgo resolver is being used
+ * this package applies a concurrent cgo lookup limit to prevent the system
+ * from running out of system threads. Currently, it is limited to 500 concurrent lookups.
+ *
+ * The resolver decision can be overridden by setting the netdns value of the
+ * GODEBUG environment variable (see package runtime) to go or cgo, as in:
+ *
+ * ```
+ * export GODEBUG=netdns=go # force pure Go resolver
+ * export GODEBUG=netdns=cgo # force native resolver (cgo, win32)
+ * ```
+ *
+ * The decision can also be forced while building the Go source tree
+ * by setting the netgo or netcgo build tag.
+ *
+ * A numeric netdns setting, as in GODEBUG=netdns=1, causes the resolver
+ * to print debugging information about its decisions.
+ * To force a particular resolver while also printing debugging information,
+ * join the two settings by a plus sign, as in GODEBUG=netdns=go+1.
+ *
+ * The Go resolver will send an EDNS0 additional header with a DNS request,
+ * to signal a willingness to accept a larger DNS packet size.
+ * This can reportedly cause sporadic failures with the DNS server run
+ * by some modems and routers. Setting GODEBUG=netedns0=0 will disable
+ * sending the additional header.
+ *
+ * On macOS, if Go code that uses the net package is built with
+ * -buildmode=c-archive, linking the resulting archive into a C program
+ * requires passing -lresolv when linking the C code.
+ *
+ * On Plan 9, the resolver always accesses /net/cs and /net/dns.
+ *
+ * On Windows, in Go 1.18.x and earlier, the resolver always used C
+ * library functions, such as GetAddrInfo and DnsQuery.
+ */
+namespace net {
+ /**
+ * Conn is a generic stream-oriented network connection.
+ *
+ * Multiple goroutines may invoke methods on a Conn simultaneously.
+ */
+ interface Conn {
+ [key:string]: any;
+ /**
+ * Read reads data from the connection.
+ * Read can be made to time out and return an error after a fixed
+ * time limit; see SetDeadline and SetReadDeadline.
*/
- ping(): void
- }
- interface DB {
+ read(b: string|Array): number
/**
- * Close closes the database and prevents new queries from starting.
- * Close then waits for all queries that have started processing on the server
- * to finish.
- *
- * It is rare to Close a [DB], as the [DB] handle is meant to be
- * long-lived and shared between many goroutines.
+ * Write writes data to the connection.
+ * Write can be made to time out and return an error after a fixed
+ * time limit; see SetDeadline and SetWriteDeadline.
*/
- close(): void
- }
- interface DB {
+ write(b: string|Array): number
/**
- * SetMaxIdleConns sets the maximum number of connections in the idle
- * connection pool.
- *
- * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns,
- * then the new MaxIdleConns will be reduced to match the MaxOpenConns limit.
- *
- * If n <= 0, no idle connections are retained.
- *
- * The default max idle connections is currently 2. This may change in
- * a future release.
+ * Close closes the connection.
+ * Any blocked Read or Write operations will be unblocked and return errors.
*/
- setMaxIdleConns(n: number): void
- }
- interface DB {
+ close(): void
/**
- * SetMaxOpenConns sets the maximum number of open connections to the database.
- *
- * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than
- * MaxIdleConns, then MaxIdleConns will be reduced to match the new
- * MaxOpenConns limit.
- *
- * If n <= 0, then there is no limit on the number of open connections.
- * The default is 0 (unlimited).
+ * LocalAddr returns the local network address, if known.
*/
- setMaxOpenConns(n: number): void
- }
- interface DB {
+ localAddr(): Addr
/**
- * SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
- *
- * Expired connections may be closed lazily before reuse.
- *
- * If d <= 0, connections are not closed due to a connection's age.
+ * RemoteAddr returns the remote network address, if known.
*/
- setConnMaxLifetime(d: time.Duration): void
- }
- interface DB {
+ remoteAddr(): Addr
/**
- * SetConnMaxIdleTime sets the maximum amount of time a connection may be idle.
+ * SetDeadline sets the read and write deadlines associated
+ * with the connection. It is equivalent to calling both
+ * SetReadDeadline and SetWriteDeadline.
*
- * Expired connections may be closed lazily before reuse.
+ * A deadline is an absolute time after which I/O operations
+ * fail instead of blocking. The deadline applies to all future
+ * and pending I/O, not just the immediately following call to
+ * Read or Write. After a deadline has been exceeded, the
+ * connection can be refreshed by setting a deadline in the future.
*
- * If d <= 0, connections are not closed due to a connection's idle time.
- */
- setConnMaxIdleTime(d: time.Duration): void
- }
- interface DB {
- /**
- * Stats returns database statistics.
- */
- stats(): DBStats
- }
- interface DB {
- /**
- * PrepareContext creates a prepared statement for later queries or executions.
- * Multiple queries or executions may be run concurrently from the
- * returned statement.
- * The caller must call the statement's [*Stmt.Close] method
- * when the statement is no longer needed.
+ * If the deadline is exceeded a call to Read or Write or to other
+ * I/O methods will return an error that wraps os.ErrDeadlineExceeded.
+ * This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
+ * The error's Timeout method will return true, but note that there
+ * are other possible errors for which the Timeout method will
+ * return true even if the deadline has not been exceeded.
*
- * The provided context is used for the preparation of the statement, not for the
- * execution of the statement.
+ * An idle timeout can be implemented by repeatedly extending
+ * the deadline after successful Read or Write calls.
+ *
+ * A zero value for t means I/O operations will not time out.
*/
- prepareContext(ctx: context.Context, query: string): (Stmt)
- }
- interface DB {
+ setDeadline(t: time.Time): void
/**
- * Prepare creates a prepared statement for later queries or executions.
- * Multiple queries or executions may be run concurrently from the
- * returned statement.
- * The caller must call the statement's [*Stmt.Close] method
- * when the statement is no longer needed.
- *
- * Prepare uses [context.Background] internally; to specify the context, use
- * [DB.PrepareContext].
+ * SetReadDeadline sets the deadline for future Read calls
+ * and any currently-blocked Read call.
+ * A zero value for t means Read will not time out.
*/
- prepare(query: string): (Stmt)
- }
- interface DB {
+ setReadDeadline(t: time.Time): void
/**
- * ExecContext executes a query without returning any rows.
- * The args are for any placeholder parameters in the query.
+ * SetWriteDeadline sets the deadline for future Write calls
+ * and any currently-blocked Write call.
+ * Even if write times out, it may return n > 0, indicating that
+ * some of the data was successfully written.
+ * A zero value for t means Write will not time out.
*/
- execContext(ctx: context.Context, query: string, ...args: any[]): Result
+ setWriteDeadline(t: time.Time): void
}
- interface DB {
+ /**
+ * A Listener is a generic network listener for stream-oriented protocols.
+ *
+ * Multiple goroutines may invoke methods on a Listener simultaneously.
+ */
+ interface Listener {
+ [key:string]: any;
/**
- * Exec executes a query without returning any rows.
- * The args are for any placeholder parameters in the query.
- *
- * Exec uses [context.Background] internally; to specify the context, use
- * [DB.ExecContext].
+ * Accept waits for and returns the next connection to the listener.
*/
- exec(query: string, ...args: any[]): Result
- }
- interface DB {
+ accept(): Conn
/**
- * QueryContext executes a query that returns rows, typically a SELECT.
- * The args are for any placeholder parameters in the query.
+ * Close closes the listener.
+ * Any blocked Accept operations will be unblocked and return errors.
*/
- queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows)
- }
- interface DB {
+ close(): void
/**
- * Query executes a query that returns rows, typically a SELECT.
- * The args are for any placeholder parameters in the query.
- *
- * Query uses [context.Background] internally; to specify the context, use
- * [DB.QueryContext].
+ * Addr returns the listener's network address.
*/
- query(query: string, ...args: any[]): (Rows)
+ addr(): Addr
}
- interface DB {
- /**
- * QueryRowContext executes a query that is expected to return at most one row.
- * QueryRowContext always returns a non-nil value. Errors are deferred until
- * [Row]'s Scan method is called.
- * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
- * Otherwise, [*Row.Scan] scans the first selected row and discards
- * the rest.
- */
- queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row)
+}
+
+/**
+ * Package multipart implements MIME multipart parsing, as defined in RFC
+ * 2046.
+ *
+ * The implementation is sufficient for HTTP (RFC 2388) and the multipart
+ * bodies generated by popular browsers.
+ *
+ * # Limits
+ *
+ * To protect against malicious inputs, this package sets limits on the size
+ * of the MIME data it processes.
+ *
+ * [Reader.NextPart] and [Reader.NextRawPart] limit the number of headers in a
+ * part to 10000 and [Reader.ReadForm] limits the total number of headers in all
+ * FileHeaders to 10000.
+ * These limits may be adjusted with the GODEBUG=multipartmaxheaders=
+ * setting.
+ *
+ * Reader.ReadForm further limits the number of parts in a form to 1000.
+ * This limit may be adjusted with the GODEBUG=multipartmaxparts=
+ * setting.
+ */
+namespace multipart {
+ /**
+ * A FileHeader describes a file part of a multipart request.
+ */
+ interface FileHeader {
+ filename: string
+ header: textproto.MIMEHeader
+ size: number
}
- interface DB {
+ interface FileHeader {
/**
- * QueryRow executes a query that is expected to return at most one row.
- * QueryRow always returns a non-nil value. Errors are deferred until
- * [Row]'s Scan method is called.
- * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
- * Otherwise, [*Row.Scan] scans the first selected row and discards
- * the rest.
- *
- * QueryRow uses [context.Background] internally; to specify the context, use
- * [DB.QueryRowContext].
+ * Open opens and returns the [FileHeader]'s associated File.
*/
- queryRow(query: string, ...args: any[]): (Row)
+ open(): File
}
- interface DB {
+}
+
+namespace store {
+ /**
+ * Store defines a concurrent safe in memory key-value data store.
+ */
+ interface Store {
+ }
+ interface Store {
/**
- * BeginTx starts a transaction.
- *
- * The provided context is used until the transaction is committed or rolled back.
- * If the context is canceled, the sql package will roll back
- * the transaction. [Tx.Commit] will return an error if the context provided to
- * BeginTx is canceled.
- *
- * The provided [TxOptions] is optional and may be nil if defaults should be used.
- * If a non-default isolation level is used that the driver doesn't support,
- * an error will be returned.
+ * Reset clears the store and replaces the store data with a
+ * shallow copy of the provided newData.
*/
- beginTx(ctx: context.Context, opts: TxOptions): (Tx)
+ reset(newData: _TygojaDict): void
}
- interface DB {
+ interface Store {
/**
- * Begin starts a transaction. The default isolation level is dependent on
- * the driver.
- *
- * Begin uses [context.Background] internally; to specify the context, use
- * [DB.BeginTx].
+ * Length returns the current number of elements in the store.
*/
- begin(): (Tx)
+ length(): number
}
- interface DB {
+ interface Store {
/**
- * Driver returns the database's underlying driver.
+ * RemoveAll removes all the existing store entries.
*/
- driver(): any
+ removeAll(): void
}
- interface DB {
+ interface Store {
/**
- * Conn returns a single connection by either opening a new connection
- * or returning an existing connection from the connection pool. Conn will
- * block until either a connection is returned or ctx is canceled.
- * Queries run on the same Conn will be run in the same database session.
+ * Remove removes a single entry from the store.
*
- * Every Conn must be returned to the database pool after use by
- * calling [Conn.Close].
+ * Remove does nothing if key doesn't exist in the store.
*/
- conn(ctx: context.Context): (Conn)
+ remove(key: K): void
}
- /**
- * Tx is an in-progress database transaction.
- *
- * A transaction must end with a call to [Tx.Commit] or [Tx.Rollback].
- *
- * After a call to [Tx.Commit] or [Tx.Rollback], all operations on the
- * transaction fail with [ErrTxDone].
- *
- * The statements prepared for a transaction by calling
- * the transaction's [Tx.Prepare] or [Tx.Stmt] methods are closed
- * by the call to [Tx.Commit] or [Tx.Rollback].
- */
- interface Tx {
+ interface Store {
+ /**
+ * Has checks if element with the specified key exist or not.
+ */
+ has(key: K): boolean
}
- interface Tx {
+ interface Store {
/**
- * Commit commits the transaction.
+ * Get returns a single element value from the store.
+ *
+ * If key is not set, the zero T value is returned.
*/
- commit(): void
+ get(key: K): T
}
- interface Tx {
+ interface Store {
/**
- * Rollback aborts the transaction.
+ * GetOk is similar to Get but returns also a boolean indicating whether the key exists or not.
*/
- rollback(): void
+ getOk(key: K): [T, boolean]
}
- interface Tx {
+ interface Store {
/**
- * PrepareContext creates a prepared statement for use within a transaction.
- *
- * The returned statement operates within the transaction and will be closed
- * when the transaction has been committed or rolled back.
- *
- * To use an existing prepared statement on this transaction, see [Tx.Stmt].
- *
- * The provided context will be used for the preparation of the context, not
- * for the execution of the returned statement. The returned statement
- * will run in the transaction context.
+ * GetAll returns a shallow copy of the current store data.
*/
- prepareContext(ctx: context.Context, query: string): (Stmt)
+ getAll(): _TygojaDict
}
- interface Tx {
+ interface Store {
/**
- * Prepare creates a prepared statement for use within a transaction.
- *
- * The returned statement operates within the transaction and will be closed
- * when the transaction has been committed or rolled back.
- *
- * To use an existing prepared statement on this transaction, see [Tx.Stmt].
- *
- * Prepare uses [context.Background] internally; to specify the context, use
- * [Tx.PrepareContext].
+ * Values returns a slice with all of the current store values.
*/
- prepare(query: string): (Stmt)
+ values(): Array
}
- interface Tx {
+ interface Store {
/**
- * StmtContext returns a transaction-specific prepared statement from
- * an existing statement.
- *
- * Example:
- *
- * ```
- * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
- * ...
- * tx, err := db.Begin()
- * ...
- * res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203)
- * ```
- *
- * The provided context is used for the preparation of the statement, not for the
- * execution of the statement.
- *
- * The returned statement operates within the transaction and will be closed
- * when the transaction has been committed or rolled back.
+ * Set sets (or overwrite if already exists) a new value for key.
*/
- stmtContext(ctx: context.Context, stmt: Stmt): (Stmt)
+ set(key: K, value: T): void
}
- interface Tx {
+ interface Store {
/**
- * Stmt returns a transaction-specific prepared statement from
- * an existing statement.
+ * SetFunc sets (or overwrite if already exists) a new value resolved
+ * from the function callback for the provided key.
+ *
+ * The function callback receives as argument the old store element value (if exists).
+ * If there is no old store element, the argument will be the T zero value.
*
* Example:
*
* ```
- * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
- * ...
- * tx, err := db.Begin()
- * ...
- * res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
+ * s := store.New[string, int](nil)
+ * s.SetFunc("count", func(old int) int {
+ * return old + 1
+ * })
* ```
- *
- * The returned statement operates within the transaction and will be closed
- * when the transaction has been committed or rolled back.
- *
- * Stmt uses [context.Background] internally; to specify the context, use
- * [Tx.StmtContext].
*/
- stmt(stmt: Stmt): (Stmt)
+ setFunc(key: K, fn: (old: T) => T): void
}
- interface Tx {
+ interface Store {
/**
- * ExecContext executes a query that doesn't return rows.
- * For example: an INSERT and UPDATE.
+ * GetOrSet retrieves a single existing value for the provided key
+ * or stores a new one if it doesn't exist.
*/
- execContext(ctx: context.Context, query: string, ...args: any[]): Result
+ getOrSet(key: K, setFunc: () => T): T
}
- interface Tx {
+ interface Store {
/**
- * Exec executes a query that doesn't return rows.
- * For example: an INSERT and UPDATE.
+ * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key.
*
- * Exec uses [context.Background] internally; to specify the context, use
- * [Tx.ExecContext].
+ * This method is similar to Set() but **it will skip adding new elements**
+ * to the store if the store length has reached the specified limit.
+ * false is returned if maxAllowedElements limit is reached.
*/
- exec(query: string, ...args: any[]): Result
+ setIfLessThanLimit(key: K, value: T, maxAllowedElements: number): boolean
}
- interface Tx {
+ interface Store {
/**
- * QueryContext executes a query that returns rows, typically a SELECT.
+ * UnmarshalJSON implements [json.Unmarshaler] and imports the
+ * provided JSON data into the store.
+ *
+ * The store entries that match with the ones from the data will be overwritten with the new value.
*/
- queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows)
+ unmarshalJSON(data: string|Array): void
}
- interface Tx {
+ interface Store {
/**
- * Query executes a query that returns rows, typically a SELECT.
- *
- * Query uses [context.Background] internally; to specify the context, use
- * [Tx.QueryContext].
+ * MarshalJSON implements [json.Marshaler] and exports the current
+ * store data into valid JSON.
*/
- query(query: string, ...args: any[]): (Rows)
+ marshalJSON(): string|Array
}
- interface Tx {
+}
+
+/**
+ * Package syntax parses regular expressions into parse trees and compiles
+ * parse trees into programs. Most clients of regular expressions will use the
+ * facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package.
+ *
+ * # Syntax
+ *
+ * The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows.
+ * Parts of the syntax can be disabled by passing alternate flags to [Parse].
+ *
+ * Single characters:
+ *
+ * ```
+ * . any character, possibly including newline (flag s=true)
+ * [xyz] character class
+ * [^xyz] negated character class
+ * \d Perl character class
+ * \D negated Perl character class
+ * [[:alpha:]] ASCII character class
+ * [[:^alpha:]] negated ASCII character class
+ * \pN Unicode character class (one-letter name)
+ * \p{Greek} Unicode character class
+ * \PN negated Unicode character class (one-letter name)
+ * \P{Greek} negated Unicode character class
+ * ```
+ *
+ * Composites:
+ *
+ * ```
+ * xy x followed by y
+ * x|y x or y (prefer x)
+ * ```
+ *
+ * Repetitions:
+ *
+ * ```
+ * x* zero or more x, prefer more
+ * x+ one or more x, prefer more
+ * x? zero or one x, prefer one
+ * x{n,m} n or n+1 or ... or m x, prefer more
+ * x{n,} n or more x, prefer more
+ * x{n} exactly n x
+ * x*? zero or more x, prefer fewer
+ * x+? one or more x, prefer fewer
+ * x?? zero or one x, prefer zero
+ * x{n,m}? n or n+1 or ... or m x, prefer fewer
+ * x{n,}? n or more x, prefer fewer
+ * x{n}? exactly n x
+ * ```
+ *
+ * Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n}
+ * reject forms that create a minimum or maximum repetition count above 1000.
+ * Unlimited repetitions are not subject to this restriction.
+ *
+ * Grouping:
+ *
+ * ```
+ * (re) numbered capturing group (submatch)
+ * (?P<name>re) named & numbered capturing group (submatch)
+ * (?<name>re) named & numbered capturing group (submatch)
+ * (?:re) non-capturing group
+ * (?flags) set flags within current group; non-capturing
+ * (?flags:re) set flags during re; non-capturing
+ *
+ * Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are:
+ *
+ * i case-insensitive (default false)
+ * m multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false)
+ * s let . match \n (default false)
+ * U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false)
+ * ```
+ *
+ * Empty strings:
+ *
+ * ```
+ * ^ at beginning of text or line (flag m=true)
+ * $ at end of text (like \z not \Z) or line (flag m=true)
+ * \A at beginning of text
+ * \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other)
+ * \B not at ASCII word boundary
+ * \z at end of text
+ * ```
+ *
+ * Escape sequences:
+ *
+ * ```
+ * \a bell (== \007)
+ * \f form feed (== \014)
+ * \t horizontal tab (== \011)
+ * \n newline (== \012)
+ * \r carriage return (== \015)
+ * \v vertical tab character (== \013)
+ * \* literal *, for any punctuation character *
+ * \123 octal character code (up to three digits)
+ * \x7F hex character code (exactly two digits)
+ * \x{10FFFF} hex character code
+ * \Q...\E literal text ... even if ... has punctuation
+ * ```
+ *
+ * Character class elements:
+ *
+ * ```
+ * x single character
+ * A-Z character range (inclusive)
+ * \d Perl character class
+ * [:foo:] ASCII character class foo
+ * \p{Foo} Unicode character class Foo
+ * \pF Unicode character class F (one-letter name)
+ * ```
+ *
+ * Named character classes as character class elements:
+ *
+ * ```
+ * [\d] digits (== \d)
+ * [^\d] not digits (== \D)
+ * [\D] not digits (== \D)
+ * [^\D] not not digits (== \d)
+ * [[:name:]] named ASCII class inside character class (== [:name:])
+ * [^[:name:]] named ASCII class inside negated character class (== [:^name:])
+ * [\p{Name}] named Unicode property inside character class (== \p{Name})
+ * [^\p{Name}] named Unicode property inside negated character class (== \P{Name})
+ * ```
+ *
+ * Perl character classes (all ASCII-only):
+ *
+ * ```
+ * \d digits (== [0-9])
+ * \D not digits (== [^0-9])
+ * \s whitespace (== [\t\n\f\r ])
+ * \S not whitespace (== [^\t\n\f\r ])
+ * \w word characters (== [0-9A-Za-z_])
+ * \W not word characters (== [^0-9A-Za-z_])
+ * ```
+ *
+ * ASCII character classes:
+ *
+ * ```
+ * [[:alnum:]] alphanumeric (== [0-9A-Za-z])
+ * [[:alpha:]] alphabetic (== [A-Za-z])
+ * [[:ascii:]] ASCII (== [\x00-\x7F])
+ * [[:blank:]] blank (== [\t ])
+ * [[:cntrl:]] control (== [\x00-\x1F\x7F])
+ * [[:digit:]] digits (== [0-9])
+ * [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~])
+ * [[:lower:]] lower case (== [a-z])
+ * [[:print:]] printable (== [ -~] == [ [:graph:]])
+ * [[:punct:]] punctuation (== [!-/:-@[-`{-~])
+ * [[:space:]] whitespace (== [\t\n\v\f\r ])
+ * [[:upper:]] upper case (== [A-Z])
+ * [[:word:]] word characters (== [0-9A-Za-z_])
+ * [[:xdigit:]] hex digit (== [0-9A-Fa-f])
+ * ```
+ *
+ * Unicode character classes are those in [unicode.Categories] and [unicode.Scripts].
+ */
+namespace syntax {
+ /**
+ * Flags control the behavior of the parser and record information about regexp context.
+ */
+ interface Flags extends Number{}
+}
+
+/**
+ * Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+ *
+ * See README.md for more info.
+ */
+namespace jwt {
+ /**
+ * MapClaims is a claims type that uses the map[string]any for JSON
+ * decoding. This is the default claims type if you don't supply one
+ */
+ interface MapClaims extends _TygojaDict{}
+ interface MapClaims {
/**
- * QueryRowContext executes a query that is expected to return at most one row.
- * QueryRowContext always returns a non-nil value. Errors are deferred until
- * [Row]'s Scan method is called.
- * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
- * Otherwise, the [*Row.Scan] scans the first selected row and discards
- * the rest.
+ * GetExpirationTime implements the Claims interface.
*/
- queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row)
+ getExpirationTime(): (NumericDate)
}
- interface Tx {
+ interface MapClaims {
/**
- * QueryRow executes a query that is expected to return at most one row.
- * QueryRow always returns a non-nil value. Errors are deferred until
- * [Row]'s Scan method is called.
- * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
- * Otherwise, the [*Row.Scan] scans the first selected row and discards
- * the rest.
- *
- * QueryRow uses [context.Background] internally; to specify the context, use
- * [Tx.QueryRowContext].
+ * GetNotBefore implements the Claims interface.
*/
- queryRow(query: string, ...args: any[]): (Row)
- }
- /**
- * Stmt is a prepared statement.
- * A Stmt is safe for concurrent use by multiple goroutines.
- *
- * If a Stmt is prepared on a [Tx] or [Conn], it will be bound to a single
- * underlying connection forever. If the [Tx] or [Conn] closes, the Stmt will
- * become unusable and all operations will return an error.
- * If a Stmt is prepared on a [DB], it will remain usable for the lifetime of the
- * [DB]. When the Stmt needs to execute on a new underlying connection, it will
- * prepare itself on the new connection automatically.
- */
- interface Stmt {
+ getNotBefore(): (NumericDate)
}
- interface Stmt {
+ interface MapClaims {
/**
- * ExecContext executes a prepared statement with the given arguments and
- * returns a [Result] summarizing the effect of the statement.
+ * GetIssuedAt implements the Claims interface.
*/
- execContext(ctx: context.Context, ...args: any[]): Result
+ getIssuedAt(): (NumericDate)
}
- interface Stmt {
+ interface MapClaims {
/**
- * Exec executes a prepared statement with the given arguments and
- * returns a [Result] summarizing the effect of the statement.
- *
- * Exec uses [context.Background] internally; to specify the context, use
- * [Stmt.ExecContext].
+ * GetAudience implements the Claims interface.
*/
- exec(...args: any[]): Result
+ getAudience(): ClaimStrings
}
- interface Stmt {
+ interface MapClaims {
/**
- * QueryContext executes a prepared query statement with the given arguments
- * and returns the query results as a [*Rows].
+ * GetIssuer implements the Claims interface.
*/
- queryContext(ctx: context.Context, ...args: any[]): (Rows)
+ getIssuer(): string
}
- interface Stmt {
+ interface MapClaims {
/**
- * Query executes a prepared query statement with the given arguments
- * and returns the query results as a *Rows.
- *
- * Query uses [context.Background] internally; to specify the context, use
- * [Stmt.QueryContext].
+ * GetSubject implements the Claims interface.
*/
- query(...args: any[]): (Rows)
+ getSubject(): string
}
- interface Stmt {
+}
+
+namespace subscriptions {
+ /**
+ * Broker defines a struct for managing subscriptions clients.
+ */
+ interface Broker {
+ }
+ interface Broker {
/**
- * QueryRowContext executes a prepared query statement with the given arguments.
- * If an error occurs during the execution of the statement, that error will
- * be returned by a call to Scan on the returned [*Row], which is always non-nil.
- * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
- * Otherwise, the [*Row.Scan] scans the first selected row and discards
- * the rest.
+ * Clients returns a shallow copy of all registered clients indexed
+ * with their connection id.
*/
- queryRowContext(ctx: context.Context, ...args: any[]): (Row)
+ clients(): _TygojaDict
}
- interface Stmt {
+ interface Broker {
/**
- * QueryRow executes a prepared query statement with the given arguments.
- * If an error occurs during the execution of the statement, that error will
- * be returned by a call to Scan on the returned [*Row], which is always non-nil.
- * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
- * Otherwise, the [*Row.Scan] scans the first selected row and discards
- * the rest.
- *
- * Example usage:
- *
- * ```
- * var name string
- * err := nameByUseridStmt.QueryRow(id).Scan(&name)
- * ```
- *
- * QueryRow uses [context.Background] internally; to specify the context, use
- * [Stmt.QueryRowContext].
+ * ChunkedClients splits the current clients into a chunked slice.
*/
- queryRow(...args: any[]): (Row)
+ chunkedClients(chunkSize: number): Array>
}
- interface Stmt {
+ interface Broker {
/**
- * Close closes the statement.
+ * TotalClients returns the total number of registered clients.
*/
- close(): void
- }
- /**
- * Rows is the result of a query. Its cursor starts before the first row
- * of the result set. Use [Rows.Next] to advance from row to row.
- */
- interface Rows {
+ totalClients(): number
}
- interface Rows {
+ interface Broker {
/**
- * Next prepares the next result row for reading with the [Rows.Scan] method. It
- * returns true on success, or false if there is no next result row or an error
- * happened while preparing it. [Rows.Err] should be consulted to distinguish between
- * the two cases.
+ * ClientById finds a registered client by its id.
*
- * Every call to [Rows.Scan], even the first one, must be preceded by a call to [Rows.Next].
+ * Returns non-nil error when client with clientId is not registered.
*/
- next(): boolean
+ clientById(clientId: string): Client
}
- interface Rows {
+ interface Broker {
/**
- * NextResultSet prepares the next result set for reading. It reports whether
- * there is further result sets, or false if there is no further result set
- * or if there is an error advancing to it. The [Rows.Err] method should be consulted
- * to distinguish between the two cases.
- *
- * After calling NextResultSet, the [Rows.Next] method should always be called before
- * scanning. If there are further result sets they may not have rows in the result
- * set.
+ * Register adds a new client to the broker instance.
*/
- nextResultSet(): boolean
+ register(client: Client): void
}
- interface Rows {
+ interface Broker {
/**
- * Err returns the error, if any, that was encountered during iteration.
- * Err may be called after an explicit or implicit [Rows.Close].
+ * Unregister removes a single client by its id and marks it as discarded.
+ *
+ * If client with clientId doesn't exist, this method does nothing.
*/
- err(): void
+ unregister(clientId: string): void
}
- interface Rows {
+ /**
+ * Client is an interface for a generic subscription client.
+ */
+ interface Client {
+ [key:string]: any;
/**
- * Columns returns the column names.
- * Columns returns an error if the rows are closed.
+ * Id Returns the unique id of the client.
*/
- columns(): Array
- }
- interface Rows {
+ id(): string
/**
- * ColumnTypes returns column information such as column type, length,
- * and nullable. Some information may not be available from some drivers.
+ * Channel returns the client's communication channel.
+ *
+ * NB! The channel shouldn't be used after calling Discard().
*/
- columnTypes(): Array<(ColumnType | undefined)>
- }
- interface Rows {
+ channel(): undefined
/**
- * Scan copies the columns in the current row into the values pointed
- * at by dest. The number of values in dest must be the same as the
- * number of columns in [Rows].
+ * Subscriptions returns a shallow copy of the client subscriptions matching the prefixes.
+ * If no prefix is specified, returns all subscriptions.
+ */
+ subscriptions(...prefixes: string[]): _TygojaDict
+ /**
+ * Subscribe subscribes the client to the provided subscriptions list.
*
- * Scan converts columns read from the database into the following
- * common Go types and special types provided by the sql package:
+ * Each subscription can also have "options" (json serialized SubscriptionOptions) as query parameter.
+ *
+ * Example:
*
* ```
- * *string
- * *[]byte
- * *int, *int8, *int16, *int32, *int64
- * *uint, *uint8, *uint16, *uint32, *uint64
- * *bool
- * *float32, *float64
- * *interface{}
- * *RawBytes
- * *Rows (cursor value)
- * any type implementing Scanner (see Scanner docs)
+ * Subscribe(
+ * "subscriptionA",
+ * `subscriptionB?options={"query":{"a":1},"headers":{"x_token":"abc"}}`,
+ * )
* ```
+ */
+ subscribe(...subs: string[]): void
+ /**
+ * Unsubscribe unsubscribes the client from the provided subscriptions list.
+ */
+ unsubscribe(...subs: string[]): void
+ /**
+ * HasSubscription checks if the client is subscribed to `sub`.
+ */
+ hasSubscription(sub: string): boolean
+ /**
+ * Set stores any value to the client's context.
+ */
+ set(key: string, value: any): void
+ /**
+ * Unset removes a single value from the client's context.
+ */
+ unset(key: string): void
+ /**
+ * Get retrieves the key value from the client's context.
+ */
+ get(key: string): any
+ /**
+ * Discard marks the client as "discarded" (and closes its channel),
+ * meaning that it shouldn't be used anymore for sending new messages.
*
- * In the most simple case, if the type of the value from the source
- * column is an integer, bool or string type T and dest is of type *T,
- * Scan simply assigns the value through the pointer.
- *
- * Scan also converts between string and numeric types, as long as no
- * information would be lost. While Scan stringifies all numbers
- * scanned from numeric database columns into *string, scans into
- * numeric types are checked for overflow. For example, a float64 with
- * value 300 or a string with value "300" can scan into a uint16, but
- * not into a uint8, though float64(255) or "255" can scan into a
- * uint8. One exception is that scans of some float64 numbers to
- * strings may lose information when stringifying. In general, scan
- * floating point columns into *float64.
- *
- * If a dest argument has type *[]byte, Scan saves in that argument a
- * copy of the corresponding data. The copy is owned by the caller and
- * can be modified and held indefinitely. The copy can be avoided by
- * using an argument of type [*RawBytes] instead; see the documentation
- * for [RawBytes] for restrictions on its use.
- *
- * If an argument has type *interface{}, Scan copies the value
- * provided by the underlying driver without conversion. When scanning
- * from a source value of type []byte to *interface{}, a copy of the
- * slice is made and the caller owns the result.
- *
- * Source values of type [time.Time] may be scanned into values of type
- * *time.Time, *interface{}, *string, or *[]byte. When converting to
- * the latter two, [time.RFC3339Nano] is used.
- *
- * Source values of type bool may be scanned into types *bool,
- * *interface{}, *string, *[]byte, or [*RawBytes].
- *
- * For scanning into *bool, the source may be true, false, 1, 0, or
- * string inputs parseable by [strconv.ParseBool].
- *
- * Scan can also convert a cursor returned from a query, such as
- * "select cursor(select * from my_table) from dual", into a
- * [*Rows] value that can itself be scanned from. The parent
- * select query will close any cursor [*Rows] if the parent [*Rows] is closed.
- *
- * If any of the first arguments implementing [Scanner] returns an error,
- * that error will be wrapped in the returned error.
+ * It is safe to call Discard() multiple times.
*/
- scan(...dest: any[]): void
- }
- interface Rows {
+ discard(): void
/**
- * Close closes the [Rows], preventing further enumeration. If [Rows.Next] is called
- * and returns false and there are no further result sets,
- * the [Rows] are closed automatically and it will suffice to check the
- * result of [Rows.Err]. Close is idempotent and does not affect the result of [Rows.Err].
+ * IsDiscarded indicates whether the client has been "discarded"
+ * and should no longer be used.
*/
- close(): void
+ isDiscarded(): boolean
+ /**
+ * Send sends the specified message to the client's channel (if not discarded).
+ */
+ send(m: Message): void
}
/**
- * A Result summarizes an executed SQL command.
+ * Message defines a client's channel data.
*/
- interface Result {
- [key:string]: any;
- /**
- * LastInsertId returns the integer generated by the database
- * in response to a command. Typically this will be from an
- * "auto increment" column when inserting a new row. Not all
- * databases support this feature, and the syntax of such
- * statements varies.
- */
- lastInsertId(): number
+ interface Message {
+ name: string
+ data: string|Array
+ }
+ interface Message {
/**
- * RowsAffected returns the number of rows affected by an
- * update, insert, or delete. Not every database or database
- * driver may support this.
+ * WriteSSE writes the current message in a SSE format into the provided writer.
+ *
+ * For example, writing to a router.Event:
+ *
+ * ```
+ * m := Message{Name: "users/create", Data: []byte{...}}
+ * m.WriteSSE(e.Response, "yourEventId")
+ * e.Flush()
+ * ```
*/
- rowsAffected(): number
+ writeSSE(w: io.Writer, eventId: string): void
}
}
/**
- * Package syntax parses regular expressions into parse trees and compiles
- * parse trees into programs. Most clients of regular expressions will use the
- * facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package.
- *
- * # Syntax
+ * Package sql provides a generic interface around SQL (or SQL-like)
+ * databases.
*
- * The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows.
- * Parts of the syntax can be disabled by passing alternate flags to [Parse].
+ * The sql package must be used in conjunction with a database driver.
+ * See https://golang.org/s/sqldrivers for a list of drivers.
*
- * Single characters:
+ * Drivers that do not support context cancellation will not return until
+ * after the query is completed.
*
- * ```
- * . any character, possibly including newline (flag s=true)
- * [xyz] character class
- * [^xyz] negated character class
- * \d Perl character class
- * \D negated Perl character class
- * [[:alpha:]] ASCII character class
- * [[:^alpha:]] negated ASCII character class
- * \pN Unicode character class (one-letter name)
- * \p{Greek} Unicode character class
- * \PN negated Unicode character class (one-letter name)
- * \P{Greek} negated Unicode character class
- * ```
- *
- * Composites:
- *
- * ```
- * xy x followed by y
- * x|y x or y (prefer x)
- * ```
- *
- * Repetitions:
- *
- * ```
- * x* zero or more x, prefer more
- * x+ one or more x, prefer more
- * x? zero or one x, prefer one
- * x{n,m} n or n+1 or ... or m x, prefer more
- * x{n,} n or more x, prefer more
- * x{n} exactly n x
- * x*? zero or more x, prefer fewer
- * x+? one or more x, prefer fewer
- * x?? zero or one x, prefer zero
- * x{n,m}? n or n+1 or ... or m x, prefer fewer
- * x{n,}? n or more x, prefer fewer
- * x{n}? exactly n x
- * ```
- *
- * Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n}
- * reject forms that create a minimum or maximum repetition count above 1000.
- * Unlimited repetitions are not subject to this restriction.
- *
- * Grouping:
- *
- * ```
- * (re) numbered capturing group (submatch)
- * (?Pre) named & numbered capturing group (submatch)
- * (?re) named & numbered capturing group (submatch)
- * (?:re) non-capturing group
- * (?flags) set flags within current group; non-capturing
- * (?flags:re) set flags during re; non-capturing
- *
- * Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are:
- *
- * i case-insensitive (default false)
- * m multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false)
- * s let . match \n (default false)
- * U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false)
- * ```
- *
- * Empty strings:
- *
- * ```
- * ^ at beginning of text or line (flag m=true)
- * $ at end of text (like \z not \Z) or line (flag m=true)
- * \A at beginning of text
- * \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other)
- * \B not at ASCII word boundary
- * \z at end of text
- * ```
- *
- * Escape sequences:
- *
- * ```
- * \a bell (== \007)
- * \f form feed (== \014)
- * \t horizontal tab (== \011)
- * \n newline (== \012)
- * \r carriage return (== \015)
- * \v vertical tab character (== \013)
- * \* literal *, for any punctuation character *
- * \123 octal character code (up to three digits)
- * \x7F hex character code (exactly two digits)
- * \x{10FFFF} hex character code
- * \Q...\E literal text ... even if ... has punctuation
- * ```
- *
- * Character class elements:
- *
- * ```
- * x single character
- * A-Z character range (inclusive)
- * \d Perl character class
- * [:foo:] ASCII character class foo
- * \p{Foo} Unicode character class Foo
- * \pF Unicode character class F (one-letter name)
- * ```
- *
- * Named character classes as character class elements:
- *
- * ```
- * [\d] digits (== \d)
- * [^\d] not digits (== \D)
- * [\D] not digits (== \D)
- * [^\D] not not digits (== \d)
- * [[:name:]] named ASCII class inside character class (== [:name:])
- * [^[:name:]] named ASCII class inside negated character class (== [:^name:])
- * [\p{Name}] named Unicode property inside character class (== \p{Name})
- * [^\p{Name}] named Unicode property inside negated character class (== \P{Name})
- * ```
- *
- * Perl character classes (all ASCII-only):
- *
- * ```
- * \d digits (== [0-9])
- * \D not digits (== [^0-9])
- * \s whitespace (== [\t\n\f\r ])
- * \S not whitespace (== [^\t\n\f\r ])
- * \w word characters (== [0-9A-Za-z_])
- * \W not word characters (== [^0-9A-Za-z_])
- * ```
- *
- * ASCII character classes:
- *
- * ```
- * [[:alnum:]] alphanumeric (== [0-9A-Za-z])
- * [[:alpha:]] alphabetic (== [A-Za-z])
- * [[:ascii:]] ASCII (== [\x00-\x7F])
- * [[:blank:]] blank (== [\t ])
- * [[:cntrl:]] control (== [\x00-\x1F\x7F])
- * [[:digit:]] digits (== [0-9])
- * [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~])
- * [[:lower:]] lower case (== [a-z])
- * [[:print:]] printable (== [ -~] == [ [:graph:]])
- * [[:punct:]] punctuation (== [!-/:-@[-`{-~])
- * [[:space:]] whitespace (== [\t\n\v\f\r ])
- * [[:upper:]] upper case (== [A-Z])
- * [[:word:]] word characters (== [0-9A-Za-z_])
- * [[:xdigit:]] hex digit (== [0-9A-Fa-f])
- * ```
- *
- * Unicode character classes are those in [unicode.Categories] and [unicode.Scripts].
+ * For usage examples, see the wiki page at
+ * https://golang.org/s/sqlwiki.
*/
-namespace syntax {
+namespace sql {
/**
- * Flags control the behavior of the parser and record information about regexp context.
+ * TxOptions holds the transaction options to be used in [DB.BeginTx].
*/
- interface Flags extends Number{}
-}
-
-namespace store {
+ interface TxOptions {
+ /**
+ * Isolation is the transaction isolation level.
+ * If zero, the driver or database's default level is used.
+ */
+ isolation: IsolationLevel
+ readOnly: boolean
+ }
/**
- * Store defines a concurrent safe in memory key-value data store.
+ * NullString represents a string that may be null.
+ * NullString implements the [Scanner] interface so
+ * it can be used as a scan destination:
+ *
+ * ```
+ * var s NullString
+ * err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
+ * ...
+ * if s.Valid {
+ * // use s.String
+ * } else {
+ * // NULL value
+ * }
+ * ```
*/
- interface Store {
+ interface NullString {
+ string: string
+ valid: boolean // Valid is true if String is not NULL
}
- interface Store {
+ interface NullString {
/**
- * Reset clears the store and replaces the store data with a
- * shallow copy of the provided newData.
+ * Scan implements the [Scanner] interface.
*/
- reset(newData: _TygojaDict): void
+ scan(value: any): void
}
- interface Store {
+ interface NullString {
/**
- * Length returns the current number of elements in the store.
+ * Value implements the [driver.Valuer] interface.
*/
- length(): number
+ value(): any
}
- interface Store {
+ /**
+ * DB is a database handle representing a pool of zero or more
+ * underlying connections. It's safe for concurrent use by multiple
+ * goroutines.
+ *
+ * The sql package creates and frees connections automatically; it
+ * also maintains a free pool of idle connections. If the database has
+ * a concept of per-connection state, such state can be reliably observed
+ * within a transaction ([Tx]) or connection ([Conn]). Once [DB.Begin] is called, the
+ * returned [Tx] is bound to a single connection. Once [Tx.Commit] or
+ * [Tx.Rollback] is called on the transaction, that transaction's
+ * connection is returned to [DB]'s idle connection pool. The pool size
+ * can be controlled with [DB.SetMaxIdleConns].
+ */
+ interface DB {
+ }
+ interface DB {
/**
- * RemoveAll removes all the existing store entries.
+ * PingContext verifies a connection to the database is still alive,
+ * establishing a connection if necessary.
*/
- removeAll(): void
+ pingContext(ctx: context.Context): void
}
- interface Store {
+ interface DB {
/**
- * Remove removes a single entry from the store.
+ * Ping verifies a connection to the database is still alive,
+ * establishing a connection if necessary.
*
- * Remove does nothing if key doesn't exist in the store.
+ * Ping uses [context.Background] internally; to specify the context, use
+ * [DB.PingContext].
*/
- remove(key: K): void
+ ping(): void
}
- interface Store {
+ interface DB {
/**
- * Has checks if element with the specified key exist or not.
+ * Close closes the database and prevents new queries from starting.
+ * Close then waits for all queries that have started processing on the server
+ * to finish.
+ *
+ * It is rare to Close a [DB], as the [DB] handle is meant to be
+ * long-lived and shared between many goroutines.
*/
- has(key: K): boolean
+ close(): void
}
- interface Store {
+ interface DB {
/**
- * Get returns a single element value from the store.
+ * SetMaxIdleConns sets the maximum number of connections in the idle
+ * connection pool.
*
- * If key is not set, the zero T value is returned.
+ * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns,
+ * then the new MaxIdleConns will be reduced to match the MaxOpenConns limit.
+ *
+ * If n <= 0, no idle connections are retained.
+ *
+ * The default max idle connections is currently 2. This may change in
+ * a future release.
*/
- get(key: K): T
+ setMaxIdleConns(n: number): void
}
- interface Store {
+ interface DB {
/**
- * GetOk is similar to Get but returns also a boolean indicating whether the key exists or not.
- */
- getOk(key: K): [T, boolean]
- }
- interface Store {
- /**
- * GetAll returns a shallow copy of the current store data.
- */
- getAll(): _TygojaDict
- }
- interface Store {
- /**
- * Values returns a slice with all of the current store values.
+ * SetMaxOpenConns sets the maximum number of open connections to the database.
+ *
+ * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than
+ * MaxIdleConns, then MaxIdleConns will be reduced to match the new
+ * MaxOpenConns limit.
+ *
+ * If n <= 0, then there is no limit on the number of open connections.
+ * The default is 0 (unlimited).
*/
- values(): Array
+ setMaxOpenConns(n: number): void
}
- interface Store {
+ interface DB {
/**
- * Set sets (or overwrite if already exists) a new value for key.
+ * SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
+ *
+ * Expired connections may be closed lazily before reuse.
+ *
+ * If d <= 0, connections are not closed due to a connection's age.
*/
- set(key: K, value: T): void
+ setConnMaxLifetime(d: time.Duration): void
}
- interface Store {
+ interface DB {
/**
- * SetFunc sets (or overwrite if already exists) a new value resolved
- * from the function callback for the provided key.
- *
- * The function callback receives as argument the old store element value (if exists).
- * If there is no old store element, the argument will be the T zero value.
+ * SetConnMaxIdleTime sets the maximum amount of time a connection may be idle.
*
- * Example:
+ * Expired connections may be closed lazily before reuse.
*
- * ```
- * s := store.New[string, int](nil)
- * s.SetFunc("count", func(old int) int {
- * return old + 1
- * })
- * ```
+ * If d <= 0, connections are not closed due to a connection's idle time.
*/
- setFunc(key: K, fn: (old: T) => T): void
+ setConnMaxIdleTime(d: time.Duration): void
}
- interface Store {
+ interface DB {
/**
- * GetOrSet retrieves a single existing value for the provided key
- * or stores a new one if it doesn't exist.
+ * Stats returns database statistics.
*/
- getOrSet(key: K, setFunc: () => T): T
+ stats(): DBStats
}
- interface Store {
+ interface DB {
/**
- * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key.
+ * PrepareContext creates a prepared statement for later queries or executions.
+ * Multiple queries or executions may be run concurrently from the
+ * returned statement.
+ * The caller must call the statement's [*Stmt.Close] method
+ * when the statement is no longer needed.
*
- * This method is similar to Set() but **it will skip adding new elements**
- * to the store if the store length has reached the specified limit.
- * false is returned if maxAllowedElements limit is reached.
+ * The provided context is used for the preparation of the statement, not for the
+ * execution of the statement.
*/
- setIfLessThanLimit(key: K, value: T, maxAllowedElements: number): boolean
+ prepareContext(ctx: context.Context, query: string): (Stmt)
}
- interface Store {
+ interface DB {
/**
- * UnmarshalJSON implements [json.Unmarshaler] and imports the
- * provided JSON data into the store.
+ * Prepare creates a prepared statement for later queries or executions.
+ * Multiple queries or executions may be run concurrently from the
+ * returned statement.
+ * The caller must call the statement's [*Stmt.Close] method
+ * when the statement is no longer needed.
*
- * The store entries that match with the ones from the data will be overwritten with the new value.
+ * Prepare uses [context.Background] internally; to specify the context, use
+ * [DB.PrepareContext].
*/
- unmarshalJSON(data: string|Array): void
+ prepare(query: string): (Stmt)
}
- interface Store {
+ interface DB {
/**
- * MarshalJSON implements [json.Marshaler] and export the current
- * store data into valid JSON.
+ * ExecContext executes a query without returning any rows.
+ * The args are for any placeholder parameters in the query.
*/
- marshalJSON(): string|Array
+ execContext(ctx: context.Context, query: string, ...args: any[]): Result
}
-}
-
-/**
- * Package net provides a portable interface for network I/O, including
- * TCP/IP, UDP, domain name resolution, and Unix domain sockets.
- *
- * Although the package provides access to low-level networking
- * primitives, most clients will need only the basic interface provided
- * by the [Dial], [Listen], and Accept functions and the associated
- * [Conn] and [Listener] interfaces. The crypto/tls package uses
- * the same interfaces and similar Dial and Listen functions.
- *
- * The Dial function connects to a server:
- *
- * ```
- * conn, err := net.Dial("tcp", "golang.org:80")
- * if err != nil {
- * // handle error
- * }
- * fmt.Fprintf(conn, "GET / HTTP/1.0\r\n\r\n")
- * status, err := bufio.NewReader(conn).ReadString('\n')
- * // ...
- * ```
- *
- * The Listen function creates servers:
- *
- * ```
- * ln, err := net.Listen("tcp", ":8080")
- * if err != nil {
- * // handle error
- * }
- * for {
- * conn, err := ln.Accept()
- * if err != nil {
- * // handle error
- * }
- * go handleConnection(conn)
- * }
- * ```
- *
- * # Name Resolution
- *
- * The method for resolving domain names, whether indirectly with functions like Dial
- * or directly with functions like [LookupHost] and [LookupAddr], varies by operating system.
- *
- * On Unix systems, the resolver has two options for resolving names.
- * It can use a pure Go resolver that sends DNS requests directly to the servers
- * listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C
- * library routines such as getaddrinfo and getnameinfo.
- *
- * On Unix the pure Go resolver is preferred over the cgo resolver, because a blocked DNS
- * request consumes only a goroutine, while a blocked C call consumes an operating system thread.
- * When cgo is available, the cgo-based resolver is used instead under a variety of
- * conditions: on systems that do not let programs make direct DNS requests (OS X),
- * when the LOCALDOMAIN environment variable is present (even if empty),
- * when the RES_OPTIONS or HOSTALIASES environment variable is non-empty,
- * when the ASR_CONFIG environment variable is non-empty (OpenBSD only),
- * when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the
- * Go resolver does not implement.
- *
- * On all systems (except Plan 9), when the cgo resolver is being used
- * this package applies a concurrent cgo lookup limit to prevent the system
- * from running out of system threads. Currently, it is limited to 500 concurrent lookups.
- *
- * The resolver decision can be overridden by setting the netdns value of the
- * GODEBUG environment variable (see package runtime) to go or cgo, as in:
- *
- * ```
- * export GODEBUG=netdns=go # force pure Go resolver
- * export GODEBUG=netdns=cgo # force native resolver (cgo, win32)
- * ```
- *
- * The decision can also be forced while building the Go source tree
- * by setting the netgo or netcgo build tag.
- *
- * A numeric netdns setting, as in GODEBUG=netdns=1, causes the resolver
- * to print debugging information about its decisions.
- * To force a particular resolver while also printing debugging information,
- * join the two settings by a plus sign, as in GODEBUG=netdns=go+1.
- *
- * The Go resolver will send an EDNS0 additional header with a DNS request,
- * to signal a willingness to accept a larger DNS packet size.
- * This can reportedly cause sporadic failures with the DNS server run
- * by some modems and routers. Setting GODEBUG=netedns0=0 will disable
- * sending the additional header.
- *
- * On macOS, if Go code that uses the net package is built with
- * -buildmode=c-archive, linking the resulting archive into a C program
- * requires passing -lresolv when linking the C code.
- *
- * On Plan 9, the resolver always accesses /net/cs and /net/dns.
- *
- * On Windows, in Go 1.18.x and earlier, the resolver always used C
- * library functions, such as GetAddrInfo and DnsQuery.
- */
-namespace net {
- /**
- * Conn is a generic stream-oriented network connection.
- *
- * Multiple goroutines may invoke methods on a Conn simultaneously.
- */
- interface Conn {
- [key:string]: any;
+ interface DB {
/**
- * Read reads data from the connection.
- * Read can be made to time out and return an error after a fixed
- * time limit; see SetDeadline and SetReadDeadline.
+ * Exec executes a query without returning any rows.
+ * The args are for any placeholder parameters in the query.
+ *
+ * Exec uses [context.Background] internally; to specify the context, use
+ * [DB.ExecContext].
*/
- read(b: string|Array): number
+ exec(query: string, ...args: any[]): Result
+ }
+ interface DB {
/**
- * Write writes data to the connection.
- * Write can be made to time out and return an error after a fixed
- * time limit; see SetDeadline and SetWriteDeadline.
+ * QueryContext executes a query that returns rows, typically a SELECT.
+ * The args are for any placeholder parameters in the query.
*/
- write(b: string|Array): number
+ queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows)
+ }
+ interface DB {
/**
- * Close closes the connection.
- * Any blocked Read or Write operations will be unblocked and return errors.
+ * Query executes a query that returns rows, typically a SELECT.
+ * The args are for any placeholder parameters in the query.
+ *
+ * Query uses [context.Background] internally; to specify the context, use
+ * [DB.QueryContext].
*/
- close(): void
+ query(query: string, ...args: any[]): (Rows)
+ }
+ interface DB {
/**
- * LocalAddr returns the local network address, if known.
+ * QueryRowContext executes a query that is expected to return at most one row.
+ * QueryRowContext always returns a non-nil value. Errors are deferred until
+ * [Row]'s Scan method is called.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, [*Row.Scan] scans the first selected row and discards
+ * the rest.
*/
- localAddr(): Addr
+ queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row)
+ }
+ interface DB {
/**
- * RemoteAddr returns the remote network address, if known.
+ * QueryRow executes a query that is expected to return at most one row.
+ * QueryRow always returns a non-nil value. Errors are deferred until
+ * [Row]'s Scan method is called.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, [*Row.Scan] scans the first selected row and discards
+ * the rest.
+ *
+ * QueryRow uses [context.Background] internally; to specify the context, use
+ * [DB.QueryRowContext].
*/
- remoteAddr(): Addr
+ queryRow(query: string, ...args: any[]): (Row)
+ }
+ interface DB {
/**
- * SetDeadline sets the read and write deadlines associated
- * with the connection. It is equivalent to calling both
- * SetReadDeadline and SetWriteDeadline.
- *
- * A deadline is an absolute time after which I/O operations
- * fail instead of blocking. The deadline applies to all future
- * and pending I/O, not just the immediately following call to
- * Read or Write. After a deadline has been exceeded, the
- * connection can be refreshed by setting a deadline in the future.
- *
- * If the deadline is exceeded a call to Read or Write or to other
- * I/O methods will return an error that wraps os.ErrDeadlineExceeded.
- * This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
- * The error's Timeout method will return true, but note that there
- * are other possible errors for which the Timeout method will
- * return true even if the deadline has not been exceeded.
+ * BeginTx starts a transaction.
*
- * An idle timeout can be implemented by repeatedly extending
- * the deadline after successful Read or Write calls.
+ * The provided context is used until the transaction is committed or rolled back.
+ * If the context is canceled, the sql package will roll back
+ * the transaction. [Tx.Commit] will return an error if the context provided to
+ * BeginTx is canceled.
*
- * A zero value for t means I/O operations will not time out.
- */
- setDeadline(t: time.Time): void
- /**
- * SetReadDeadline sets the deadline for future Read calls
- * and any currently-blocked Read call.
- * A zero value for t means Read will not time out.
- */
- setReadDeadline(t: time.Time): void
- /**
- * SetWriteDeadline sets the deadline for future Write calls
- * and any currently-blocked Write call.
- * Even if write times out, it may return n > 0, indicating that
- * some of the data was successfully written.
- * A zero value for t means Write will not time out.
- */
- setWriteDeadline(t: time.Time): void
- }
-}
-
-/**
- * Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
- *
- * See README.md for more info.
- */
-namespace jwt {
- /**
- * MapClaims is a claims type that uses the map[string]interface{} for JSON
- * decoding. This is the default claims type if you don't supply one
- */
- interface MapClaims extends _TygojaDict{}
- interface MapClaims {
- /**
- * GetExpirationTime implements the Claims interface.
- */
- getExpirationTime(): (NumericDate)
- }
- interface MapClaims {
- /**
- * GetNotBefore implements the Claims interface.
- */
- getNotBefore(): (NumericDate)
- }
- interface MapClaims {
- /**
- * GetIssuedAt implements the Claims interface.
+ * The provided [TxOptions] is optional and may be nil if defaults should be used.
+ * If a non-default isolation level is used that the driver doesn't support,
+ * an error will be returned.
*/
- getIssuedAt(): (NumericDate)
+ beginTx(ctx: context.Context, opts: TxOptions): (Tx)
}
- interface MapClaims {
+ interface DB {
/**
- * GetAudience implements the Claims interface.
+ * Begin starts a transaction. The default isolation level is dependent on
+ * the driver.
+ *
+ * Begin uses [context.Background] internally; to specify the context, use
+ * [DB.BeginTx].
*/
- getAudience(): ClaimStrings
+ begin(): (Tx)
}
- interface MapClaims {
+ interface DB {
/**
- * GetIssuer implements the Claims interface.
+ * Driver returns the database's underlying driver.
*/
- getIssuer(): string
+ driver(): any
}
- interface MapClaims {
+ interface DB {
/**
- * GetSubject implements the Claims interface.
+ * Conn returns a single connection by either opening a new connection
+ * or returning an existing connection from the connection pool. Conn will
+ * block until either a connection is returned or ctx is canceled.
+ * Queries run on the same Conn will be run in the same database session.
+ *
+ * Every Conn must be returned to the database pool after use by
+ * calling [Conn.Close].
*/
- getSubject(): string
+ conn(ctx: context.Context): (Conn)
}
-}
-
-/**
- * Package types implements some commonly used db serializable types
- * like datetime, json, etc.
- */
-namespace types {
/**
- * DateTime represents a [time.Time] instance in UTC that is wrapped
- * and serialized using the app default date layout.
+ * Tx is an in-progress database transaction.
+ *
+ * A transaction must end with a call to [Tx.Commit] or [Tx.Rollback].
+ *
+ * After a call to [Tx.Commit] or [Tx.Rollback], all operations on the
+ * transaction fail with [ErrTxDone].
+ *
+ * The statements prepared for a transaction by calling
+ * the transaction's [Tx.Prepare] or [Tx.Stmt] methods are closed
+ * by the call to [Tx.Commit] or [Tx.Rollback].
*/
- interface DateTime {
+ interface Tx {
}
- interface DateTime {
+ interface Tx {
/**
- * Time returns the internal [time.Time] instance.
+ * Commit commits the transaction.
*/
- time(): time.Time
+ commit(): void
}
- interface DateTime {
+ interface Tx {
/**
- * Add returns a new DateTime based on the current DateTime + the specified duration.
+ * Rollback aborts the transaction.
*/
- add(duration: time.Duration): DateTime
+ rollback(): void
}
- interface DateTime {
+ interface Tx {
/**
- * Sub returns a [time.Duration] by subtracting the specified DateTime from the current one.
+ * PrepareContext creates a prepared statement for use within a transaction.
*
- * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration],
- * the maximum (or minimum) duration will be returned.
- */
- sub(u: DateTime): time.Duration
- }
- interface DateTime {
- /**
- * AddDate returns a new DateTime based on the current one + duration.
+ * The returned statement operates within the transaction and will be closed
+ * when the transaction has been committed or rolled back.
*
- * It follows the same rules as [time.AddDate].
+ * To use an existing prepared statement on this transaction, see [Tx.Stmt].
+ *
+ * The provided context will be used for the preparation of the statement, not
+ * for the execution of the returned statement. The returned statement
+ * will run in the transaction context.
*/
- addDate(years: number, months: number, days: number): DateTime
+ prepareContext(ctx: context.Context, query: string): (Stmt)
}
- interface DateTime {
+ interface Tx {
/**
- * After reports whether the current DateTime instance is after u.
+ * Prepare creates a prepared statement for use within a transaction.
+ *
+ * The returned statement operates within the transaction and will be closed
+ * when the transaction has been committed or rolled back.
+ *
+ * To use an existing prepared statement on this transaction, see [Tx.Stmt].
+ *
+ * Prepare uses [context.Background] internally; to specify the context, use
+ * [Tx.PrepareContext].
*/
- after(u: DateTime): boolean
+ prepare(query: string): (Stmt)
}
- interface DateTime {
+ interface Tx {
/**
- * Before reports whether the current DateTime instance is before u.
+ * StmtContext returns a transaction-specific prepared statement from
+ * an existing statement.
+ *
+ * Example:
+ *
+ * ```
+ * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
+ * ...
+ * tx, err := db.Begin()
+ * ...
+ * res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203)
+ * ```
+ *
+ * The provided context is used for the preparation of the statement, not for the
+ * execution of the statement.
+ *
+ * The returned statement operates within the transaction and will be closed
+ * when the transaction has been committed or rolled back.
*/
- before(u: DateTime): boolean
+ stmtContext(ctx: context.Context, stmt: Stmt): (Stmt)
}
- interface DateTime {
+ interface Tx {
/**
- * Compare compares the current DateTime instance with u.
- * If the current instance is before u, it returns -1.
- * If the current instance is after u, it returns +1.
- * If they're the same, it returns 0.
+ * Stmt returns a transaction-specific prepared statement from
+ * an existing statement.
+ *
+ * Example:
+ *
+ * ```
+ * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
+ * ...
+ * tx, err := db.Begin()
+ * ...
+ * res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
+ * ```
+ *
+ * The returned statement operates within the transaction and will be closed
+ * when the transaction has been committed or rolled back.
+ *
+ * Stmt uses [context.Background] internally; to specify the context, use
+ * [Tx.StmtContext].
*/
- compare(u: DateTime): number
+ stmt(stmt: Stmt): (Stmt)
}
- interface DateTime {
+ interface Tx {
/**
- * Equal reports whether the current DateTime and u represent the same time instant.
- * Two DateTime can be equal even if they are in different locations.
- * For example, 6:00 +0200 and 4:00 UTC are Equal.
+ * ExecContext executes a query that doesn't return rows.
+ * For example: an INSERT and UPDATE.
*/
- equal(u: DateTime): boolean
+ execContext(ctx: context.Context, query: string, ...args: any[]): Result
}
- interface DateTime {
+ interface Tx {
/**
- * Unix returns the current DateTime as a Unix time, aka.
- * the number of seconds elapsed since January 1, 1970 UTC.
+ * Exec executes a query that doesn't return rows.
+ * For example: an INSERT and UPDATE.
+ *
+ * Exec uses [context.Background] internally; to specify the context, use
+ * [Tx.ExecContext].
*/
- unix(): number
+ exec(query: string, ...args: any[]): Result
}
- interface DateTime {
+ interface Tx {
/**
- * IsZero checks whether the current DateTime instance has zero time value.
+ * QueryContext executes a query that returns rows, typically a SELECT.
*/
- isZero(): boolean
+ queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows)
}
- interface DateTime {
+ interface Tx {
/**
- * String serializes the current DateTime instance into a formatted
- * UTC date string.
+ * Query executes a query that returns rows, typically a SELECT.
*
- * The zero value is serialized to an empty string.
+ * Query uses [context.Background] internally; to specify the context, use
+ * [Tx.QueryContext].
*/
- string(): string
+ query(query: string, ...args: any[]): (Rows)
}
- interface DateTime {
+ interface Tx {
/**
- * MarshalJSON implements the [json.Marshaler] interface.
- */
- marshalJSON(): string|Array
- }
- interface DateTime {
- /**
- * UnmarshalJSON implements the [json.Unmarshaler] interface.
- */
- unmarshalJSON(b: string|Array): void
- }
- interface DateTime {
- /**
- * Value implements the [driver.Valuer] interface.
+ * QueryRowContext executes a query that is expected to return at most one row.
+ * QueryRowContext always returns a non-nil value. Errors are deferred until
+ * [Row]'s Scan method is called.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, the [*Row.Scan] scans the first selected row and discards
+ * the rest.
*/
- value(): any
+ queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row)
}
- interface DateTime {
+ interface Tx {
/**
- * Scan implements [sql.Scanner] interface to scan the provided value
- * into the current DateTime instance.
+ * QueryRow executes a query that is expected to return at most one row.
+ * QueryRow always returns a non-nil value. Errors are deferred until
+ * [Row]'s Scan method is called.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, the [*Row.Scan] scans the first selected row and discards
+ * the rest.
+ *
+ * QueryRow uses [context.Background] internally; to specify the context, use
+ * [Tx.QueryRowContext].
*/
- scan(value: any): void
+ queryRow(query: string, ...args: any[]): (Row)
}
/**
- * GeoPoint defines a struct for storing geo coordinates as serialized json object
- * (e.g. {lon:0,lat:0}).
+ * Stmt is a prepared statement.
+ * A Stmt is safe for concurrent use by multiple goroutines.
*
- * Note: using object notation and not a plain array to avoid the confusion
- * as there doesn't seem to be a fixed standard for the coordinates order.
+ * If a Stmt is prepared on a [Tx] or [Conn], it will be bound to a single
+ * underlying connection forever. If the [Tx] or [Conn] closes, the Stmt will
+ * become unusable and all operations will return an error.
+ * If a Stmt is prepared on a [DB], it will remain usable for the lifetime of the
+ * [DB]. When the Stmt needs to execute on a new underlying connection, it will
+ * prepare itself on the new connection automatically.
*/
- interface GeoPoint {
- lon: number
- lat: number
- }
- interface GeoPoint {
- /**
- * String returns the string representation of the current GeoPoint instance.
- */
- string(): string
- }
- interface GeoPoint {
- /**
- * AsMap implements [core.mapExtractor] and returns a value suitable
- * to be used in an API rule expression.
- */
- asMap(): _TygojaDict
+ interface Stmt {
}
- interface GeoPoint {
+ interface Stmt {
/**
- * Value implements the [driver.Valuer] interface.
+ * ExecContext executes a prepared statement with the given arguments and
+ * returns a [Result] summarizing the effect of the statement.
*/
- value(): any
+ execContext(ctx: context.Context, ...args: any[]): Result
}
- interface GeoPoint {
+ interface Stmt {
/**
- * Scan implements [sql.Scanner] interface to scan the provided value
- * into the current GeoPoint instance.
+ * Exec executes a prepared statement with the given arguments and
+ * returns a [Result] summarizing the effect of the statement.
*
- * The value argument could be nil (no-op), another GeoPoint instance,
- * map or serialized json object with lat-lon props.
+ * Exec uses [context.Background] internally; to specify the context, use
+ * [Stmt.ExecContext].
*/
- scan(value: any): void
+ exec(...args: any[]): Result
}
- /**
- * JSONArray defines a slice that is safe for json and db read/write.
- */
- interface JSONArray extends Array{}
- /**
- * JSONMap defines a map that is safe for json and db read/write.
- */
- interface JSONMap extends _TygojaDict{}
- /**
- * JSONRaw defines a json value type that is safe for db read/write.
- */
- interface JSONRaw extends Array{}
- interface JSONRaw {
+ interface Stmt {
/**
- * String returns the current JSONRaw instance as a json encoded string.
+ * QueryContext executes a prepared query statement with the given arguments
+ * and returns the query results as a [*Rows].
*/
- string(): string
+ queryContext(ctx: context.Context, ...args: any[]): (Rows)
}
- interface JSONRaw {
+ interface Stmt {
/**
- * MarshalJSON implements the [json.Marshaler] interface.
+ * Query executes a prepared query statement with the given arguments
+ * and returns the query results as a *Rows.
+ *
+ * Query uses [context.Background] internally; to specify the context, use
+ * [Stmt.QueryContext].
*/
- marshalJSON(): string|Array
+ query(...args: any[]): (Rows)
}
- interface JSONRaw {
+ interface Stmt {
/**
- * UnmarshalJSON implements the [json.Unmarshaler] interface.
+ * QueryRowContext executes a prepared query statement with the given arguments.
+ * If an error occurs during the execution of the statement, that error will
+ * be returned by a call to Scan on the returned [*Row], which is always non-nil.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, the [*Row.Scan] scans the first selected row and discards
+ * the rest.
*/
- unmarshalJSON(b: string|Array): void
+ queryRowContext(ctx: context.Context, ...args: any[]): (Row)
}
- interface JSONRaw {
+ interface Stmt {
/**
- * Value implements the [driver.Valuer] interface.
+ * QueryRow executes a prepared query statement with the given arguments.
+ * If an error occurs during the execution of the statement, that error will
+ * be returned by a call to Scan on the returned [*Row], which is always non-nil.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, the [*Row.Scan] scans the first selected row and discards
+ * the rest.
+ *
+ * Example usage:
+ *
+ * ```
+ * var name string
+ * err := nameByUseridStmt.QueryRow(id).Scan(&name)
+ * ```
+ *
+ * QueryRow uses [context.Background] internally; to specify the context, use
+ * [Stmt.QueryRowContext].
*/
- value(): any
+ queryRow(...args: any[]): (Row)
}
- interface JSONRaw {
+ interface Stmt {
/**
- * Scan implements [sql.Scanner] interface to scan the provided value
- * into the current JSONRaw instance.
+ * Close closes the statement.
*/
- scan(value: any): void
+ close(): void
}
-}
-
-namespace search {
/**
- * Result defines the returned search result structure.
+ * Rows is the result of a query. Its cursor starts before the first row
+ * of the result set. Use [Rows.Next] to advance from row to row.
*/
- interface Result {
- items: any
- page: number
- perPage: number
- totalItems: number
- totalPages: number
+ interface Rows {
}
- /**
- * ResolverResult defines a single FieldResolver.Resolve() successfully parsed result.
- */
- interface ResolverResult {
+ interface Rows {
/**
- * Identifier is the plain SQL identifier/column that will be used
- * in the final db expression as left or right operand.
+ * Next prepares the next result row for reading with the [Rows.Scan] method. It
+ * returns true on success, or false if there is no next result row or an error
+ * happened while preparing it. [Rows.Err] should be consulted to distinguish between
+ * the two cases.
+ *
+ * Every call to [Rows.Scan], even the first one, must be preceded by a call to [Rows.Next].
*/
- identifier: string
+ next(): boolean
+ }
+ interface Rows {
/**
- * NoCoalesce instructs to not use COALESCE or NULL fallbacks
- * when building the identifier expression.
+ * NextResultSet prepares the next result set for reading. It reports whether
+ * there are further result sets, or false if there is no further result set
+ * or if there is an error advancing to it. The [Rows.Err] method should be consulted
+ * to distinguish between the two cases.
+ *
+ * After calling NextResultSet, the [Rows.Next] method should always be called before
+ * scanning. If there are further result sets they may not have rows in the result
+ * set.
*/
- noCoalesce: boolean
+ nextResultSet(): boolean
+ }
+ interface Rows {
/**
- * Params is a map with db placeholder->value pairs that will be added
- * to the query when building both resolved operands/sides in a single expression.
+ * Err returns the error, if any, that was encountered during iteration.
+ * Err may be called after an explicit or implicit [Rows.Close].
*/
- params: dbx.Params
+ err(): void
+ }
+ interface Rows {
/**
- * MultiMatchSubQuery is an optional sub query expression that will be added
- * in addition to the combined ResolverResult expression during build.
+ * Columns returns the column names.
+ * Columns returns an error if the rows are closed.
*/
- multiMatchSubQuery: dbx.Expression
+ columns(): Array
+ }
+ interface Rows {
/**
- * AfterBuild is an optional function that will be called after building
- * and combining the result of both resolved operands/sides in a single expression.
+ * ColumnTypes returns column information such as column type, length,
+ * and nullable. Some information may not be available from some drivers.
*/
- afterBuild: (expr: dbx.Expression) => dbx.Expression
+ columnTypes(): Array<(ColumnType | undefined)>
}
-}
-
-/**
- * Package slog provides structured logging,
- * in which log records include a message,
- * a severity level, and various other attributes
- * expressed as key-value pairs.
- *
- * It defines a type, [Logger],
- * which provides several methods (such as [Logger.Info] and [Logger.Error])
- * for reporting events of interest.
- *
- * Each Logger is associated with a [Handler].
- * A Logger output method creates a [Record] from the method arguments
- * and passes it to the Handler, which decides how to handle it.
- * There is a default Logger accessible through top-level functions
- * (such as [Info] and [Error]) that call the corresponding Logger methods.
- *
- * A log record consists of a time, a level, a message, and a set of key-value
- * pairs, where the keys are strings and the values may be of any type.
- * As an example,
- *
- * ```
- * slog.Info("hello", "count", 3)
- * ```
- *
- * creates a record containing the time of the call,
- * a level of Info, the message "hello", and a single
- * pair with key "count" and value 3.
- *
- * The [Info] top-level function calls the [Logger.Info] method on the default Logger.
- * In addition to [Logger.Info], there are methods for Debug, Warn and Error levels.
- * Besides these convenience methods for common levels,
- * there is also a [Logger.Log] method which takes the level as an argument.
- * Each of these methods has a corresponding top-level function that uses the
- * default logger.
- *
- * The default handler formats the log record's message, time, level, and attributes
- * as a string and passes it to the [log] package.
- *
- * ```
- * 2022/11/08 15:28:26 INFO hello count=3
- * ```
- *
- * For more control over the output format, create a logger with a different handler.
- * This statement uses [New] to create a new logger with a [TextHandler]
- * that writes structured records in text form to standard error:
- *
- * ```
- * logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
- * ```
+ interface Rows {
+ /**
+ * Scan copies the columns in the current row into the values pointed
+ * at by dest. The number of values in dest must be the same as the
+ * number of columns in [Rows].
+ *
+ * Scan converts columns read from the database into the following
+ * common Go types and special types provided by the sql package:
+ *
+ * ```
+ * *string
+ * *[]byte
+ * *int, *int8, *int16, *int32, *int64
+ * *uint, *uint8, *uint16, *uint32, *uint64
+ * *bool
+ * *float32, *float64
+ * *interface{}
+ * *RawBytes
+ * *Rows (cursor value)
+ * any type implementing Scanner (see Scanner docs)
+ * ```
+ *
+ * In the most simple case, if the type of the value from the source
+ * column is an integer, bool or string type T and dest is of type *T,
+ * Scan simply assigns the value through the pointer.
+ *
+ * Scan also converts between string and numeric types, as long as no
+ * information would be lost. While Scan stringifies all numbers
+ * scanned from numeric database columns into *string, scans into
+ * numeric types are checked for overflow. For example, a float64 with
+ * value 300 or a string with value "300" can scan into a uint16, but
+ * not into a uint8, though float64(255) or "255" can scan into a
+ * uint8. One exception is that scans of some float64 numbers to
+ * strings may lose information when stringifying. In general, scan
+ * floating point columns into *float64.
+ *
+ * If a dest argument has type *[]byte, Scan saves in that argument a
+ * copy of the corresponding data. The copy is owned by the caller and
+ * can be modified and held indefinitely. The copy can be avoided by
+ * using an argument of type [*RawBytes] instead; see the documentation
+ * for [RawBytes] for restrictions on its use.
+ *
+ * If an argument has type *interface{}, Scan copies the value
+ * provided by the underlying driver without conversion. When scanning
+ * from a source value of type []byte to *interface{}, a copy of the
+ * slice is made and the caller owns the result.
+ *
+ * Source values of type [time.Time] may be scanned into values of type
+ * *time.Time, *interface{}, *string, or *[]byte. When converting to
+ * the latter two, [time.RFC3339Nano] is used.
+ *
+ * Source values of type bool may be scanned into types *bool,
+ * *interface{}, *string, *[]byte, or [*RawBytes].
+ *
+ * For scanning into *bool, the source may be true, false, 1, 0, or
+ * string inputs parseable by [strconv.ParseBool].
+ *
+ * Scan can also convert a cursor returned from a query, such as
+ * "select cursor(select * from my_table) from dual", into a
+ * [*Rows] value that can itself be scanned from. The parent
+ * select query will close any cursor [*Rows] if the parent [*Rows] is closed.
+ *
+ * If any of the first arguments implementing [Scanner] returns an error,
+ * that error will be wrapped in the returned error.
+ */
+ scan(...dest: any[]): void
+ }
+ interface Rows {
+ /**
+ * Close closes the [Rows], preventing further enumeration. If [Rows.Next] is called
+ * and returns false and there are no further result sets,
+ * the [Rows] are closed automatically and it will suffice to check the
+ * result of [Rows.Err]. Close is idempotent and does not affect the result of [Rows.Err].
+ */
+ close(): void
+ }
+ /**
+ * A Result summarizes an executed SQL command.
+ */
+ interface Result {
+ [key:string]: any;
+ /**
+ * LastInsertId returns the integer generated by the database
+ * in response to a command. Typically this will be from an
+ * "auto increment" column when inserting a new row. Not all
+ * databases support this feature, and the syntax of such
+ * statements varies.
+ */
+ lastInsertId(): number
+ /**
+ * RowsAffected returns the number of rows affected by an
+ * update, insert, or delete. Not every database or database
+ * driver may support this.
+ */
+ rowsAffected(): number
+ }
+}
+
+/**
+ * Package http provides HTTP client and server implementations.
*
- * [TextHandler] output is a sequence of key=value pairs, easily and unambiguously
- * parsed by machine. This statement:
+ * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests:
*
* ```
- * logger.Info("hello", "count", 3)
+ * resp, err := http.Get("http://example.com/")
+ * ...
+ * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
+ * ...
+ * resp, err := http.PostForm("http://example.com/form",
+ * url.Values{"key": {"Value"}, "id": {"123"}})
* ```
*
- * produces this output:
+ * The caller must close the response body when finished with it:
*
* ```
- * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3
+ * resp, err := http.Get("http://example.com/")
+ * if err != nil {
+ * // handle error
+ * }
+ * defer resp.Body.Close()
+ * body, err := io.ReadAll(resp.Body)
+ * // ...
* ```
*
- * The package also provides [JSONHandler], whose output is line-delimited JSON:
- *
- * ```
- * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
- * logger.Info("hello", "count", 3)
- * ```
+ * # Clients and Transports
*
- * produces this output:
+ * For control over HTTP client headers, redirect policy, and other
+ * settings, create a [Client]:
*
* ```
- * {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3}
- * ```
- *
- * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions].
- * There are options for setting the minimum level (see Levels, below),
- * displaying the source file and line of the log call, and
- * modifying attributes before they are logged.
+ * client := &http.Client{
+ * CheckRedirect: redirectPolicyFunc,
+ * }
*
- * Setting a logger as the default with
+ * resp, err := client.Get("http://example.com")
+ * // ...
*
+ * req, err := http.NewRequest("GET", "http://example.com", nil)
+ * // ...
+ * req.Header.Add("If-None-Match", `W/"wyzzy"`)
+ * resp, err := client.Do(req)
+ * // ...
* ```
- * slog.SetDefault(logger)
- * ```
- *
- * will cause the top-level functions like [Info] to use it.
- * [SetDefault] also updates the default logger used by the [log] package,
- * so that existing applications that use [log.Printf] and related functions
- * will send log records to the logger's handler without needing to be rewritten.
*
- * Some attributes are common to many log calls.
- * For example, you may wish to include the URL or trace identifier of a server request
- * with all log events arising from the request.
- * Rather than repeat the attribute with every log call, you can use [Logger.With]
- * to construct a new Logger containing the attributes:
+ * For control over proxies, TLS configuration, keep-alives,
+ * compression, and other settings, create a [Transport]:
*
* ```
- * logger2 := logger.With("url", r.URL)
+ * tr := &http.Transport{
+ * MaxIdleConns: 10,
+ * IdleConnTimeout: 30 * time.Second,
+ * DisableCompression: true,
+ * }
+ * client := &http.Client{Transport: tr}
+ * resp, err := client.Get("https://example.com")
* ```
*
- * The arguments to With are the same key-value pairs used in [Logger.Info].
- * The result is a new Logger with the same handler as the original, but additional
- * attributes that will appear in the output of every call.
- *
- * # Levels
- *
- * A [Level] is an integer representing the importance or severity of a log event.
- * The higher the level, the more severe the event.
- * This package defines constants for the most common levels,
- * but any int can be used as a level.
+ * Clients and Transports are safe for concurrent use by multiple
+ * goroutines and for efficiency should only be created once and re-used.
*
- * In an application, you may wish to log messages only at a certain level or greater.
- * One common configuration is to log messages at Info or higher levels,
- * suppressing debug logging until it is needed.
- * The built-in handlers can be configured with the minimum level to output by
- * setting [HandlerOptions.Level].
- * The program's `main` function typically does this.
- * The default value is LevelInfo.
+ * # Servers
*
- * Setting the [HandlerOptions.Level] field to a [Level] value
- * fixes the handler's minimum level throughout its lifetime.
- * Setting it to a [LevelVar] allows the level to be varied dynamically.
- * A LevelVar holds a Level and is safe to read or write from multiple
- * goroutines.
- * To vary the level dynamically for an entire program, first initialize
- * a global LevelVar:
+ * ListenAndServe starts an HTTP server with a given address and handler.
+ * The handler is usually nil, which means to use [DefaultServeMux].
+ * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]:
*
* ```
- * var programLevel = new(slog.LevelVar) // Info by default
- * ```
+ * http.Handle("/foo", fooHandler)
*
- * Then use the LevelVar to construct a handler, and make it the default:
+ * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
+ * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
+ * })
*
- * ```
- * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel})
- * slog.SetDefault(slog.New(h))
+ * log.Fatal(http.ListenAndServe(":8080", nil))
* ```
*
- * Now the program can change its logging level with a single statement:
+ * More control over the server's behavior is available by creating a
+ * custom Server:
*
* ```
- * programLevel.Set(slog.LevelDebug)
+ * s := &http.Server{
+ * Addr: ":8080",
+ * Handler: myHandler,
+ * ReadTimeout: 10 * time.Second,
+ * WriteTimeout: 10 * time.Second,
+ * MaxHeaderBytes: 1 << 20,
+ * }
+ * log.Fatal(s.ListenAndServe())
* ```
*
- * # Groups
- *
- * Attributes can be collected into groups.
- * A group has a name that is used to qualify the names of its attributes.
- * How this qualification is displayed depends on the handler.
- * [TextHandler] separates the group and attribute names with a dot.
- * [JSONHandler] treats each group as a separate JSON object, with the group name as the key.
+ * # HTTP/2
*
- * Use [Group] to create a Group attribute from a name and a list of key-value pairs:
+ * Starting with Go 1.6, the http package has transparent support for the
+ * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2
+ * can do so by setting [Transport.TLSNextProto] (for clients) or
+ * [Server.TLSNextProto] (for servers) to a non-nil, empty
+ * map. Alternatively, the following GODEBUG settings are
+ * currently supported:
*
* ```
- * slog.Group("request",
- * "method", r.Method,
- * "url", r.URL)
+ * GODEBUG=http2client=0 # disable HTTP/2 client support
+ * GODEBUG=http2server=0 # disable HTTP/2 server support
+ * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs
+ * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps
* ```
*
- * TextHandler would display this group as
- *
- * ```
- * request.method=GET request.url=http://example.com
- * ```
- *
- * JSONHandler would display it as
- *
- * ```
- * "request":{"method":"GET","url":"http://example.com"}
- * ```
- *
- * Use [Logger.WithGroup] to qualify all of a Logger's output
- * with a group name. Calling WithGroup on a Logger results in a
- * new Logger with the same Handler as the original, but with all
- * its attributes qualified by the group name.
- *
- * This can help prevent duplicate attribute keys in large systems,
- * where subsystems might use the same keys.
- * Pass each subsystem a different Logger with its own group name so that
- * potential duplicates are qualified:
- *
- * ```
- * logger := slog.Default().With("id", systemID)
- * parserLogger := logger.WithGroup("parser")
- * parseInput(input, parserLogger)
- * ```
- *
- * When parseInput logs with parserLogger, its keys will be qualified with "parser",
- * so even if it uses the common key "id", the log line will have distinct keys.
- *
- * # Contexts
- *
- * Some handlers may wish to include information from the [context.Context] that is
- * available at the call site. One example of such information
- * is the identifier for the current span when tracing is enabled.
- *
- * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first
- * argument, as do their corresponding top-level functions.
- *
- * Although the convenience methods on Logger (Info and so on) and the
- * corresponding top-level functions do not take a context, the alternatives ending
- * in "Context" do. For example,
- *
- * ```
- * slog.InfoContext(ctx, "message")
- * ```
- *
- * It is recommended to pass a context to an output method if one is available.
- *
- * # Attrs and Values
- *
- * An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as
- * alternating keys and values. The statement
- *
- * ```
- * slog.Info("hello", slog.Int("count", 3))
- * ```
- *
- * behaves the same as
- *
- * ```
- * slog.Info("hello", "count", 3)
- * ```
- *
- * There are convenience constructors for [Attr] such as [Int], [String], and [Bool]
- * for common types, as well as the function [Any] for constructing Attrs of any
- * type.
- *
- * The value part of an Attr is a type called [Value].
- * Like an [any], a Value can hold any Go value,
- * but it can represent typical values, including all numbers and strings,
- * without an allocation.
- *
- * For the most efficient log output, use [Logger.LogAttrs].
- * It is similar to [Logger.Log] but accepts only Attrs, not alternating
- * keys and values; this allows it, too, to avoid allocation.
- *
- * The call
- *
- * ```
- * logger.LogAttrs(ctx, slog.LevelInfo, "hello", slog.Int("count", 3))
- * ```
- *
- * is the most efficient way to achieve the same output as
- *
- * ```
- * slog.InfoContext(ctx, "hello", "count", 3)
- * ```
- *
- * # Customizing a type's logging behavior
- *
- * If a type implements the [LogValuer] interface, the [Value] returned from its LogValue
- * method is used for logging. You can use this to control how values of the type
- * appear in logs. For example, you can redact secret information like passwords,
- * or gather a struct's fields in a Group. See the examples under [LogValuer] for
- * details.
- *
- * A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve]
- * method handles these cases carefully, avoiding infinite loops and unbounded recursion.
- * Handler authors and others may wish to use [Value.Resolve] instead of calling LogValue directly.
- *
- * # Wrapping output methods
- *
- * The logger functions use reflection over the call stack to find the file name
- * and line number of the logging call within the application. This can produce
- * incorrect source information for functions that wrap slog. For instance, if you
- * define this function in file mylog.go:
- *
- * ```
- * func Infof(logger *slog.Logger, format string, args ...any) {
- * logger.Info(fmt.Sprintf(format, args...))
- * }
- * ```
- *
- * and you call it like this in main.go:
- *
- * ```
- * Infof(slog.Default(), "hello, %s", "world")
- * ```
- *
- * then slog will report the source file as mylog.go, not main.go.
- *
- * A correct implementation of Infof will obtain the source location
- * (pc) and pass it to NewRecord.
- * The Infof function in the package-level example called "wrapping"
- * demonstrates how to do this.
- *
- * # Working with Records
- *
- * Sometimes a Handler will need to modify a Record
- * before passing it on to another Handler or backend.
- * A Record contains a mixture of simple public fields (e.g. Time, Level, Message)
- * and hidden fields that refer to state (such as attributes) indirectly. This
- * means that modifying a simple copy of a Record (e.g. by calling
- * [Record.Add] or [Record.AddAttrs] to add attributes)
- * may have unexpected effects on the original.
- * Before modifying a Record, use [Record.Clone] to
- * create a copy that shares no state with the original,
- * or create a new Record with [NewRecord]
- * and build up its Attrs by traversing the old ones with [Record.Attrs].
- *
- * # Performance considerations
- *
- * If profiling your application demonstrates that logging is taking significant time,
- * the following suggestions may help.
- *
- * If many log lines have a common attribute, use [Logger.With] to create a Logger with
- * that attribute. The built-in handlers will format that attribute only once, at the
- * call to [Logger.With]. The [Handler] interface is designed to allow that optimization,
- * and a well-written Handler should take advantage of it.
- *
- * The arguments to a log call are always evaluated, even if the log event is discarded.
- * If possible, defer computation so that it happens only if the value is actually logged.
- * For example, consider the call
- *
- * ```
- * slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily
- * ```
- *
- * The URL.String method will be called even if the logger discards Info-level events.
- * Instead, pass the URL directly:
- *
- * ```
- * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed
- * ```
- *
- * The built-in [TextHandler] will call its String method, but only
- * if the log event is enabled.
- * Avoiding the call to String also preserves the structure of the underlying value.
- * For example [JSONHandler] emits the components of the parsed URL as a JSON object.
- * If you want to avoid eagerly paying the cost of the String call
- * without causing the handler to potentially inspect the structure of the value,
- * wrap the value in a fmt.Stringer implementation that hides its Marshal methods.
- *
- * You can also use the [LogValuer] interface to avoid unnecessary work in disabled log
- * calls. Say you need to log some expensive value:
- *
- * ```
- * slog.Debug("frobbing", "value", computeExpensiveValue(arg))
- * ```
- *
- * Even if this line is disabled, computeExpensiveValue will be called.
- * To avoid that, define a type implementing LogValuer:
- *
- * ```
- * type expensive struct { arg int }
- *
- * func (e expensive) LogValue() slog.Value {
- * return slog.AnyValue(computeExpensiveValue(e.arg))
- * }
- * ```
- *
- * Then use a value of that type in log calls:
- *
- * ```
- * slog.Debug("frobbing", "value", expensive{arg})
- * ```
- *
- * Now computeExpensiveValue will only be called when the line is enabled.
- *
- * The built-in handlers acquire a lock before calling [io.Writer.Write]
- * to ensure that exactly one [Record] is written at a time in its entirety.
- * Although each log record has a timestamp,
- * the built-in handlers do not use that time to sort the written records.
- * User-defined handlers are responsible for their own locking and sorting.
- *
- * # Writing a handler
+ * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug
*
- * For a guide to writing a custom handler, see https://golang.org/s/slog-handler-guide.
+ * The http package's [Transport] and [Server] both automatically enable
+ * HTTP/2 support for simple configurations. To enable HTTP/2 for more
+ * complex configurations, to use lower-level HTTP/2 features, or to use
+ * a newer version of Go's http2 package, import "golang.org/x/net/http2"
+ * directly and use its ConfigureTransport and/or ConfigureServer
+ * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2
+ * package takes precedence over the net/http package's built-in HTTP/2
+ * support.
*/
-namespace slog {
+namespace http {
// @ts-ignore
- import loginternal = internal
+ import mathrand = rand
/**
- * A Logger records structured information about each call to its
- * Log, Debug, Info, Warn, and Error methods.
- * For each call, it creates a [Record] and passes it to a [Handler].
- *
- * To create a new Logger, call [New] or a Logger method
- * that begins "With".
+ * PushOptions describes options for [Pusher.Push].
*/
- interface Logger {
- }
- interface Logger {
+ interface PushOptions {
/**
- * Handler returns l's Handler.
+ * Method specifies the HTTP method for the promised request.
+ * If set, it must be "GET" or "HEAD". Empty means "GET".
*/
- handler(): Handler
- }
- interface Logger {
+ method: string
/**
- * With returns a Logger that includes the given attributes
- * in each output operation. Arguments are converted to
- * attributes as if by [Logger.Log].
+ * Header specifies additional promised request headers. This cannot
+ * include HTTP/2 pseudo header fields like ":path" and ":scheme",
+ * which will be added automatically.
*/
- with(...args: any[]): (Logger)
+ header: Header
}
- interface Logger {
- /**
- * WithGroup returns a Logger that starts a group, if name is non-empty.
- * The keys of all attributes added to the Logger will be qualified by the given
- * name. (How that qualification happens depends on the [Handler.WithGroup]
- * method of the Logger's Handler.)
- *
- * If name is empty, WithGroup returns the receiver.
+ // @ts-ignore
+ import urlpkg = url
+ /**
+ * A Request represents an HTTP request received by a server
+ * or to be sent by a client.
+ *
+ * The field semantics differ slightly between client and server
+ * usage. In addition to the notes on the fields below, see the
+ * documentation for [Request.Write] and [RoundTripper].
+ */
+ interface Request {
+ /**
+ * Method specifies the HTTP method (GET, POST, PUT, etc.).
+ * For client requests, an empty string means GET.
*/
- withGroup(name: string): (Logger)
- }
- interface Logger {
+ method: string
/**
- * Enabled reports whether l emits log records at the given context and level.
+ * URL specifies either the URI being requested (for server
+ * requests) or the URL to access (for client requests).
+ *
+ * For server requests, the URL is parsed from the URI
+ * supplied on the Request-Line as stored in RequestURI. For
+ * most requests, fields other than Path and RawQuery will be
+ * empty. (See RFC 7230, Section 5.3)
+ *
+ * For client requests, the URL's Host specifies the server to
+ * connect to, while the Request's Host field optionally
+ * specifies the Host header value to send in the HTTP
+ * request.
*/
- enabled(ctx: context.Context, level: Level): boolean
- }
- interface Logger {
+ url?: url.URL
/**
- * Log emits a log record with the current time and the given level and message.
- * The Record's Attrs consist of the Logger's attributes followed by
- * the Attrs specified by args.
+ * The protocol version for incoming server requests.
+ *
+ * For client requests, these fields are ignored. The HTTP
+ * client code always uses either HTTP/1.1 or HTTP/2.
+ * See the docs on Transport for details.
+ */
+ proto: string // "HTTP/1.0"
+ protoMajor: number // 1
+ protoMinor: number // 0
+ /**
+ * Header contains the request header fields either received
+ * by the server or to be sent by the client.
+ *
+ * If a server received a request with header lines,
*
- * The attribute arguments are processed as follows:
* ```
- * - If an argument is an Attr, it is used as is.
- * - If an argument is a string and this is not the last argument,
- * the following argument is treated as the value and the two are combined
- * into an Attr.
- * - Otherwise, the argument is treated as a value with key "!BADKEY".
+ * Host: example.com
+ * accept-encoding: gzip, deflate
+ * Accept-Language: en-us
+ * fOO: Bar
+ * foo: two
+ * ```
+ *
+ * then
+ *
+ * ```
+ * Header = map[string][]string{
+ * "Accept-Encoding": {"gzip, deflate"},
+ * "Accept-Language": {"en-us"},
+ * "Foo": {"Bar", "two"},
+ * }
* ```
+ *
+ * For incoming requests, the Host header is promoted to the
+ * Request.Host field and removed from the Header map.
+ *
+ * HTTP defines that header names are case-insensitive. The
+ * request parser implements this by using CanonicalHeaderKey,
+ * making the first character and any characters following a
+ * hyphen uppercase and the rest lowercase.
+ *
+ * For client requests, certain headers such as Content-Length
+ * and Connection are automatically written when needed and
+ * values in Header may be ignored. See the documentation
+ * for the Request.Write method.
*/
- log(ctx: context.Context, level: Level, msg: string, ...args: any[]): void
- }
- interface Logger {
+ header: Header
/**
- * LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs.
+ * Body is the request's body.
+ *
+ * For client requests, a nil body means the request has no
+ * body, such as a GET request. The HTTP Client's Transport
+ * is responsible for calling the Close method.
+ *
+ * For server requests, the Request Body is always non-nil
+ * but will return EOF immediately when no body is present.
+ * The Server will close the request body. The ServeHTTP
+ * Handler does not need to.
+ *
+ * Body must allow Read to be called concurrently with Close.
+ * In particular, calling Close should unblock a Read waiting
+ * for input.
*/
- logAttrs(ctx: context.Context, level: Level, msg: string, ...attrs: Attr[]): void
- }
- interface Logger {
+ body: io.ReadCloser
/**
- * Debug logs at [LevelDebug].
+ * GetBody defines an optional func to return a new copy of
+ * Body. It is used for client requests when a redirect requires
+ * reading the body more than once. Use of GetBody still
+ * requires setting Body.
+ *
+ * For server requests, it is unused.
*/
- debug(msg: string, ...args: any[]): void
- }
- interface Logger {
+ getBody: () => io.ReadCloser
/**
- * DebugContext logs at [LevelDebug] with the given context.
+ * ContentLength records the length of the associated content.
+ * The value -1 indicates that the length is unknown.
+ * Values >= 0 indicate that the given number of bytes may
+ * be read from Body.
+ *
+ * For client requests, a value of 0 with a non-nil Body is
+ * also treated as unknown.
*/
- debugContext(ctx: context.Context, msg: string, ...args: any[]): void
- }
- interface Logger {
+ contentLength: number
/**
- * Info logs at [LevelInfo].
+ * TransferEncoding lists the transfer encodings from outermost to
+ * innermost. An empty list denotes the "identity" encoding.
+ * TransferEncoding can usually be ignored; chunked encoding is
+ * automatically added and removed as necessary when sending and
+ * receiving requests.
*/
- info(msg: string, ...args: any[]): void
- }
- interface Logger {
+ transferEncoding: Array
/**
- * InfoContext logs at [LevelInfo] with the given context.
+ * Close indicates whether to close the connection after
+ * replying to this request (for servers) or after sending this
+ * request and reading its response (for clients).
+ *
+ * For server requests, the HTTP server handles this automatically
+ * and this field is not needed by Handlers.
+ *
+ * For client requests, setting this field prevents re-use of
+ * TCP connections between requests to the same hosts, as if
+ * Transport.DisableKeepAlives were set.
*/
- infoContext(ctx: context.Context, msg: string, ...args: any[]): void
- }
- interface Logger {
+ close: boolean
/**
- * Warn logs at [LevelWarn].
+ * For server requests, Host specifies the host on which the
+ * URL is sought. For HTTP/1 (per RFC 7230, section 5.4), this
+ * is either the value of the "Host" header or the host name
+ * given in the URL itself. For HTTP/2, it is the value of the
+ * ":authority" pseudo-header field.
+ * It may be of the form "host:port". For international domain
+ * names, Host may be in Punycode or Unicode form. Use
+ * golang.org/x/net/idna to convert it to either format if
+ * needed.
+ * To prevent DNS rebinding attacks, server Handlers should
+ * validate that the Host header has a value for which the
+ * Handler considers itself authoritative. The included
+ * ServeMux supports patterns registered to particular host
+ * names and thus protects its registered Handlers.
+ *
+ * For client requests, Host optionally overrides the Host
+ * header to send. If empty, the Request.Write method uses
+ * the value of URL.Host. Host may contain an international
+ * domain name.
*/
- warn(msg: string, ...args: any[]): void
- }
- interface Logger {
+ host: string
/**
- * WarnContext logs at [LevelWarn] with the given context.
+ * Form contains the parsed form data, including both the URL
+ * field's query parameters and the PATCH, POST, or PUT form data.
+ * This field is only available after ParseForm is called.
+ * The HTTP client ignores Form and uses Body instead.
*/
- warnContext(ctx: context.Context, msg: string, ...args: any[]): void
- }
- interface Logger {
+ form: url.Values
/**
- * Error logs at [LevelError].
+ * PostForm contains the parsed form data from PATCH, POST
+ * or PUT body parameters.
+ *
+ * This field is only available after ParseForm is called.
+ * The HTTP client ignores PostForm and uses Body instead.
*/
- error(msg: string, ...args: any[]): void
- }
- interface Logger {
+ postForm: url.Values
/**
- * ErrorContext logs at [LevelError] with the given context.
+ * MultipartForm is the parsed multipart form, including file uploads.
+ * This field is only available after ParseMultipartForm is called.
+ * The HTTP client ignores MultipartForm and uses Body instead.
*/
- errorContext(ctx: context.Context, msg: string, ...args: any[]): void
- }
-}
-
-/**
- * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
- * object, creating another object (Reader or Writer) that also implements
- * the interface but provides buffering and some help for textual I/O.
- */
-namespace bufio {
- /**
- * ReadWriter stores pointers to a [Reader] and a [Writer].
- * It implements [io.ReadWriter].
- */
- type _sFTbWSf = Reader&Writer
- interface ReadWriter extends _sFTbWSf {
- }
-}
-
-/**
- * Package multipart implements MIME multipart parsing, as defined in RFC
- * 2046.
- *
- * The implementation is sufficient for HTTP (RFC 2388) and the multipart
- * bodies generated by popular browsers.
- *
- * # Limits
- *
- * To protect against malicious inputs, this package sets limits on the size
- * of the MIME data it processes.
- *
- * [Reader.NextPart] and [Reader.NextRawPart] limit the number of headers in a
- * part to 10000 and [Reader.ReadForm] limits the total number of headers in all
- * FileHeaders to 10000.
- * These limits may be adjusted with the GODEBUG=multipartmaxheaders=
- * setting.
- *
- * Reader.ReadForm further limits the number of parts in a form to 1000.
- * This limit may be adjusted with the GODEBUG=multipartmaxparts=
- * setting.
- */
-namespace multipart {
- /**
- * A FileHeader describes a file part of a multipart request.
- */
- interface FileHeader {
- filename: string
- header: textproto.MIMEHeader
- size: number
- }
- interface FileHeader {
+ multipartForm?: multipart.Form
/**
- * Open opens and returns the [FileHeader]'s associated File.
- */
- open(): File
- }
-}
-
-/**
- * Package http provides HTTP client and server implementations.
- *
- * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests:
- *
- * ```
- * resp, err := http.Get("http://example.com/")
- * ...
- * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
- * ...
- * resp, err := http.PostForm("http://example.com/form",
- * url.Values{"key": {"Value"}, "id": {"123"}})
- * ```
- *
- * The caller must close the response body when finished with it:
- *
- * ```
- * resp, err := http.Get("http://example.com/")
- * if err != nil {
- * // handle error
- * }
- * defer resp.Body.Close()
- * body, err := io.ReadAll(resp.Body)
- * // ...
- * ```
- *
- * # Clients and Transports
- *
- * For control over HTTP client headers, redirect policy, and other
- * settings, create a [Client]:
- *
- * ```
- * client := &http.Client{
- * CheckRedirect: redirectPolicyFunc,
- * }
- *
- * resp, err := client.Get("http://example.com")
- * // ...
- *
- * req, err := http.NewRequest("GET", "http://example.com", nil)
- * // ...
- * req.Header.Add("If-None-Match", `W/"wyzzy"`)
- * resp, err := client.Do(req)
- * // ...
- * ```
- *
- * For control over proxies, TLS configuration, keep-alives,
- * compression, and other settings, create a [Transport]:
- *
- * ```
- * tr := &http.Transport{
- * MaxIdleConns: 10,
- * IdleConnTimeout: 30 * time.Second,
- * DisableCompression: true,
- * }
- * client := &http.Client{Transport: tr}
- * resp, err := client.Get("https://example.com")
- * ```
- *
- * Clients and Transports are safe for concurrent use by multiple
- * goroutines and for efficiency should only be created once and re-used.
- *
- * # Servers
- *
- * ListenAndServe starts an HTTP server with a given address and handler.
- * The handler is usually nil, which means to use [DefaultServeMux].
- * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]:
- *
- * ```
- * http.Handle("/foo", fooHandler)
- *
- * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
- * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
- * })
- *
- * log.Fatal(http.ListenAndServe(":8080", nil))
- * ```
- *
- * More control over the server's behavior is available by creating a
- * custom Server:
- *
- * ```
- * s := &http.Server{
- * Addr: ":8080",
- * Handler: myHandler,
- * ReadTimeout: 10 * time.Second,
- * WriteTimeout: 10 * time.Second,
- * MaxHeaderBytes: 1 << 20,
- * }
- * log.Fatal(s.ListenAndServe())
- * ```
- *
- * # HTTP/2
- *
- * Starting with Go 1.6, the http package has transparent support for the
- * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2
- * can do so by setting [Transport.TLSNextProto] (for clients) or
- * [Server.TLSNextProto] (for servers) to a non-nil, empty
- * map. Alternatively, the following GODEBUG settings are
- * currently supported:
- *
- * ```
- * GODEBUG=http2client=0 # disable HTTP/2 client support
- * GODEBUG=http2server=0 # disable HTTP/2 server support
- * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs
- * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps
- * ```
- *
- * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug
- *
- * The http package's [Transport] and [Server] both automatically enable
- * HTTP/2 support for simple configurations. To enable HTTP/2 for more
- * complex configurations, to use lower-level HTTP/2 features, or to use
- * a newer version of Go's http2 package, import "golang.org/x/net/http2"
- * directly and use its ConfigureTransport and/or ConfigureServer
- * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2
- * package takes precedence over the net/http package's built-in HTTP/2
- * support.
- */
-namespace http {
- // @ts-ignore
- import mathrand = rand
- /**
- * PushOptions describes options for [Pusher.Push].
- */
- interface PushOptions {
+ * Trailer specifies additional headers that are sent after the request
+ * body.
+ *
+ * For server requests, the Trailer map initially contains only the
+ * trailer keys, with nil values. (The client declares which trailers it
+ * will later send.) While the handler is reading from Body, it must
+ * not reference Trailer. After reading from Body returns EOF, Trailer
+ * can be read again and will contain non-nil values, if they were sent
+ * by the client.
+ *
+ * For client requests, Trailer must be initialized to a map containing
+ * the trailer keys to later send. The values may be nil or their final
+ * values. The ContentLength must be 0 or -1, to send a chunked request.
+ * After the HTTP request is sent the map values can be updated while
+ * the request body is read. Once the body returns EOF, the caller must
+ * not mutate Trailer.
+ *
+ * Few HTTP clients, servers, or proxies support HTTP trailers.
+ */
+ trailer: Header
/**
- * Method specifies the HTTP method for the promised request.
- * If set, it must be "GET" or "HEAD". Empty means "GET".
+ * RemoteAddr allows HTTP servers and other software to record
+ * the network address that sent the request, usually for
+ * logging. This field is not filled in by ReadRequest and
+ * has no defined format. The HTTP server in this package
+ * sets RemoteAddr to an "IP:port" address before invoking a
+ * handler.
+ * This field is ignored by the HTTP client.
*/
- method: string
+ remoteAddr: string
/**
- * Header specifies additional promised request headers. This cannot
- * include HTTP/2 pseudo header fields like ":path" and ":scheme",
- * which will be added automatically.
+ * RequestURI is the unmodified request-target of the
+ * Request-Line (RFC 7230, Section 3.1.1) as sent by the client
+ * to a server. Usually the URL field should be used instead.
+ * It is an error to set this field in an HTTP client request.
*/
- header: Header
- }
- // @ts-ignore
- import urlpkg = url
- /**
- * A Request represents an HTTP request received by a server
- * or to be sent by a client.
- *
- * The field semantics differ slightly between client and server
- * usage. In addition to the notes on the fields below, see the
- * documentation for [Request.Write] and [RoundTripper].
- */
- interface Request {
+ requestURI: string
/**
- * Method specifies the HTTP method (GET, POST, PUT, etc.).
- * For client requests, an empty string means GET.
+ * TLS allows HTTP servers and other software to record
+ * information about the TLS connection on which the request
+ * was received. This field is not filled in by ReadRequest.
+ * The HTTP server in this package sets the field for
+ * TLS-enabled connections before invoking a handler;
+ * otherwise it leaves the field nil.
+ * This field is ignored by the HTTP client.
*/
- method: string
+ tls?: any
/**
- * URL specifies either the URI being requested (for server
- * requests) or the URL to access (for client requests).
+ * Cancel is an optional channel whose closure indicates that the client
+ * request should be regarded as canceled. Not all implementations of
+ * RoundTripper may support Cancel.
*
- * For server requests, the URL is parsed from the URI
- * supplied on the Request-Line as stored in RequestURI. For
- * most requests, fields other than Path and RawQuery will be
- * empty. (See RFC 7230, Section 5.3)
+ * For server requests, this field is not applicable.
*
- * For client requests, the URL's Host specifies the server to
- * connect to, while the Request's Host field optionally
- * specifies the Host header value to send in the HTTP
- * request.
+ * Deprecated: Set the Request's context with NewRequestWithContext
+ * instead. If a Request's Cancel field and context are both
+ * set, it is undefined whether Cancel is respected.
*/
- url?: url.URL
+ cancel: undefined
/**
- * The protocol version for incoming server requests.
- *
- * For client requests, these fields are ignored. The HTTP
- * client code always uses either HTTP/1.1 or HTTP/2.
- * See the docs on Transport for details.
+ * Response is the redirect response which caused this request
+ * to be created. This field is only populated during client
+ * redirects.
*/
- proto: string // "HTTP/1.0"
- protoMajor: number // 1
- protoMinor: number // 0
+ response?: Response
/**
- * Header contains the request header fields either received
- * by the server or to be sent by the client.
- *
- * If a server received a request with header lines,
- *
- * ```
- * Host: example.com
- * accept-encoding: gzip, deflate
- * Accept-Language: en-us
- * fOO: Bar
- * foo: two
- * ```
+ * Pattern is the [ServeMux] pattern that matched the request.
+ * It is empty if the request was not matched against a pattern.
+ */
+ pattern: string
+ }
+ interface Request {
+ /**
+ * Context returns the request's context. To change the context, use
+ * [Request.Clone] or [Request.WithContext].
*
- * then
+ * The returned context is always non-nil; it defaults to the
+ * background context.
*
- * ```
- * Header = map[string][]string{
- * "Accept-Encoding": {"gzip, deflate"},
- * "Accept-Language": {"en-us"},
- * "Foo": {"Bar", "two"},
- * }
- * ```
- *
- * For incoming requests, the Host header is promoted to the
- * Request.Host field and removed from the Header map.
- *
- * HTTP defines that header names are case-insensitive. The
- * request parser implements this by using CanonicalHeaderKey,
- * making the first character and any characters following a
- * hyphen uppercase and the rest lowercase.
- *
- * For client requests, certain headers such as Content-Length
- * and Connection are automatically written when needed and
- * values in Header may be ignored. See the documentation
- * for the Request.Write method.
- */
- header: Header
- /**
- * Body is the request's body.
- *
- * For client requests, a nil body means the request has no
- * body, such as a GET request. The HTTP Client's Transport
- * is responsible for calling the Close method.
- *
- * For server requests, the Request Body is always non-nil
- * but will return EOF immediately when no body is present.
- * The Server will close the request body. The ServeHTTP
- * Handler does not need to.
- *
- * Body must allow Read to be called concurrently with Close.
- * In particular, calling Close should unblock a Read waiting
- * for input.
- */
- body: io.ReadCloser
- /**
- * GetBody defines an optional func to return a new copy of
- * Body. It is used for client requests when a redirect requires
- * reading the body more than once. Use of GetBody still
- * requires setting Body.
- *
- * For server requests, it is unused.
- */
- getBody: () => io.ReadCloser
- /**
- * ContentLength records the length of the associated content.
- * The value -1 indicates that the length is unknown.
- * Values >= 0 indicate that the given number of bytes may
- * be read from Body.
- *
- * For client requests, a value of 0 with a non-nil Body is
- * also treated as unknown.
- */
- contentLength: number
- /**
- * TransferEncoding lists the transfer encodings from outermost to
- * innermost. An empty list denotes the "identity" encoding.
- * TransferEncoding can usually be ignored; chunked encoding is
- * automatically added and removed as necessary when sending and
- * receiving requests.
- */
- transferEncoding: Array
- /**
- * Close indicates whether to close the connection after
- * replying to this request (for servers) or after sending this
- * request and reading its response (for clients).
- *
- * For server requests, the HTTP server handles this automatically
- * and this field is not needed by Handlers.
- *
- * For client requests, setting this field prevents re-use of
- * TCP connections between requests to the same hosts, as if
- * Transport.DisableKeepAlives were set.
- */
- close: boolean
- /**
- * For server requests, Host specifies the host on which the
- * URL is sought. For HTTP/1 (per RFC 7230, section 5.4), this
- * is either the value of the "Host" header or the host name
- * given in the URL itself. For HTTP/2, it is the value of the
- * ":authority" pseudo-header field.
- * It may be of the form "host:port". For international domain
- * names, Host may be in Punycode or Unicode form. Use
- * golang.org/x/net/idna to convert it to either format if
- * needed.
- * To prevent DNS rebinding attacks, server Handlers should
- * validate that the Host header has a value for which the
- * Handler considers itself authoritative. The included
- * ServeMux supports patterns registered to particular host
- * names and thus protects its registered Handlers.
- *
- * For client requests, Host optionally overrides the Host
- * header to send. If empty, the Request.Write method uses
- * the value of URL.Host. Host may contain an international
- * domain name.
- */
- host: string
- /**
- * Form contains the parsed form data, including both the URL
- * field's query parameters and the PATCH, POST, or PUT form data.
- * This field is only available after ParseForm is called.
- * The HTTP client ignores Form and uses Body instead.
- */
- form: url.Values
- /**
- * PostForm contains the parsed form data from PATCH, POST
- * or PUT body parameters.
- *
- * This field is only available after ParseForm is called.
- * The HTTP client ignores PostForm and uses Body instead.
- */
- postForm: url.Values
- /**
- * MultipartForm is the parsed multipart form, including file uploads.
- * This field is only available after ParseMultipartForm is called.
- * The HTTP client ignores MultipartForm and uses Body instead.
- */
- multipartForm?: multipart.Form
- /**
- * Trailer specifies additional headers that are sent after the request
- * body.
- *
- * For server requests, the Trailer map initially contains only the
- * trailer keys, with nil values. (The client declares which trailers it
- * will later send.) While the handler is reading from Body, it must
- * not reference Trailer. After reading from Body returns EOF, Trailer
- * can be read again and will contain non-nil values, if they were sent
- * by the client.
- *
- * For client requests, Trailer must be initialized to a map containing
- * the trailer keys to later send. The values may be nil or their final
- * values. The ContentLength must be 0 or -1, to send a chunked request.
- * After the HTTP request is sent the map values can be updated while
- * the request body is read. Once the body returns EOF, the caller must
- * not mutate Trailer.
- *
- * Few HTTP clients, servers, or proxies support HTTP trailers.
- */
- trailer: Header
- /**
- * RemoteAddr allows HTTP servers and other software to record
- * the network address that sent the request, usually for
- * logging. This field is not filled in by ReadRequest and
- * has no defined format. The HTTP server in this package
- * sets RemoteAddr to an "IP:port" address before invoking a
- * handler.
- * This field is ignored by the HTTP client.
- */
- remoteAddr: string
- /**
- * RequestURI is the unmodified request-target of the
- * Request-Line (RFC 7230, Section 3.1.1) as sent by the client
- * to a server. Usually the URL field should be used instead.
- * It is an error to set this field in an HTTP client request.
- */
- requestURI: string
- /**
- * TLS allows HTTP servers and other software to record
- * information about the TLS connection on which the request
- * was received. This field is not filled in by ReadRequest.
- * The HTTP server in this package sets the field for
- * TLS-enabled connections before invoking a handler;
- * otherwise it leaves the field nil.
- * This field is ignored by the HTTP client.
- */
- tls?: any
- /**
- * Cancel is an optional channel whose closure indicates that the client
- * request should be regarded as canceled. Not all implementations of
- * RoundTripper may support Cancel.
- *
- * For server requests, this field is not applicable.
- *
- * Deprecated: Set the Request's context with NewRequestWithContext
- * instead. If a Request's Cancel field and context are both
- * set, it is undefined whether Cancel is respected.
- */
- cancel: undefined
- /**
- * Response is the redirect response which caused this request
- * to be created. This field is only populated during client
- * redirects.
- */
- response?: Response
- /**
- * Pattern is the [ServeMux] pattern that matched the request.
- * It is empty if the request was not matched against a pattern.
- */
- pattern: string
- }
- interface Request {
- /**
- * Context returns the request's context. To change the context, use
- * [Request.Clone] or [Request.WithContext].
- *
- * The returned context is always non-nil; it defaults to the
- * background context.
- *
- * For outgoing client requests, the context controls cancellation.
+ * For outgoing client requests, the context controls cancellation.
*
* For incoming server requests, the context is canceled when the
* client's connection closes, the request is canceled (with HTTP/2),
@@ -18744,51 +18217,211 @@ namespace http {
}
}
-namespace hook {
+/**
+ * Package blob defines a lightweight abstration for interacting with
+ * various storage services (local filesystem, S3, etc.).
+ *
+ * NB!
+ * For compatibility with earlier PocketBase versions and to prevent
+ * unnecessary breaking changes, this package is based and implemented
+ * as a minimal, stripped down version of the previously used gocloud.dev/blob.
+ * While there is no promise that it won't diverge in the future to accommodate
+ * better some PocketBase specific use cases, currently it copies and
+ * tries to follow as close as possible the same implementations,
+ * conventions and rules for the key escaping/unescaping, blob read/write
+ * interfaces and struct options as gocloud.dev/blob, therefore the
+ * credits goes to the original Go Cloud Development Kit Authors.
+ */
+namespace blob {
/**
- * Event implements [Resolver] and it is intended to be used as a base
- * Hook event that you can embed in your custom typed event structs.
- *
- * Example:
- *
- * ```
- * type CustomEvent struct {
- * hook.Event
- *
- * SomeField int
- * }
- * ```
+ * ListObject represents a single blob returned from List.
*/
- interface Event {
- }
- interface Event {
+ interface ListObject {
/**
- * Next calls the next hook handler.
+ * Key is the key for this blob.
*/
- next(): void
- }
- /**
- * Handler defines a single Hook handler.
- * Multiple handlers can share the same id.
- * If Id is not explicitly set it will be autogenerated by Hook.Add and Hook.AddHandler.
- */
- interface Handler {
+ key: string
/**
- * Func defines the handler function to execute.
- *
- * Note that users need to call e.Next() in order to proceed with
- * the execution of the hook chain.
+ * ModTime is the time the blob was last modified.
*/
- func: (_arg0: T) => void
+ modTime: time.Time
/**
- * Id is the unique identifier of the handler.
- *
- * It could be used later to remove the handler from a hook via [Hook.Remove].
- *
- * If missing, an autogenerated value will be assigned when adding
- * the handler to a hook.
+ * Size is the size of the blob's content in bytes.
*/
- id: string
+ size: number
+ /**
+ * MD5 is an MD5 hash of the blob contents or nil if not available.
+ */
+ md5: string|Array
+ /**
+ * IsDir indicates that this result represents a "directory" in the
+ * hierarchical namespace, ending in ListOptions.Delimiter. Key can be
+ * passed as ListOptions.Prefix to list items in the "directory".
+ * Fields other than Key and IsDir will not be set if IsDir is true.
+ */
+ isDir: boolean
+ }
+ /**
+ * Attributes contains attributes about a blob.
+ */
+ interface Attributes {
+ /**
+ * CacheControl specifies caching attributes that services may use
+ * when serving the blob.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
+ */
+ cacheControl: string
+ /**
+ * ContentDisposition specifies whether the blob content is expected to be
+ * displayed inline or as an attachment.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
+ */
+ contentDisposition: string
+ /**
+ * ContentEncoding specifies the encoding used for the blob's content, if any.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
+ */
+ contentEncoding: string
+ /**
+ * ContentLanguage specifies the language used in the blob's content, if any.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
+ */
+ contentLanguage: string
+ /**
+ * ContentType is the MIME type of the blob. It will not be empty.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
+ */
+ contentType: string
+ /**
+ * Metadata holds key/value pairs associated with the blob.
+ * Keys are guaranteed to be in lowercase, even if the backend service
+ * has case-sensitive keys (although note that Metadata written via
+ * this package will always be lowercased). If there are duplicate
+ * case-insensitive keys (e.g., "foo" and "FOO"), only one value
+ * will be kept, and it is undefined which one.
+ */
+ metadata: _TygojaDict
+ /**
+ * CreateTime is the time the blob was created, if available. If not available,
+ * CreateTime will be the zero time.
+ */
+ createTime: time.Time
+ /**
+ * ModTime is the time the blob was last modified.
+ */
+ modTime: time.Time
+ /**
+ * Size is the size of the blob's content in bytes.
+ */
+ size: number
+ /**
+ * MD5 is an MD5 hash of the blob contents or nil if not available.
+ */
+ md5: string|Array
+ /**
+ * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag.
+ */
+ eTag: string
+ }
+ /**
+ * Reader reads bytes from a blob.
+ * It implements io.ReadSeekCloser, and must be closed after reads are finished.
+ */
+ interface Reader {
+ }
+ interface Reader {
+ /**
+ * Read implements io.Reader (https://golang.org/pkg/io/#Reader).
+ */
+ read(p: string|Array): number
+ }
+ interface Reader {
+ /**
+ * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker).
+ */
+ seek(offset: number, whence: number): number
+ }
+ interface Reader {
+ /**
+ * Close implements io.Closer (https://golang.org/pkg/io/#Closer).
+ */
+ close(): void
+ }
+ interface Reader {
+ /**
+ * ContentType returns the MIME type of the blob.
+ */
+ contentType(): string
+ }
+ interface Reader {
+ /**
+ * ModTime returns the time the blob was last modified.
+ */
+ modTime(): time.Time
+ }
+ interface Reader {
+ /**
+ * Size returns the size of the blob content in bytes.
+ */
+ size(): number
+ }
+ interface Reader {
+ /**
+ * WriteTo reads from r and writes to w until there's no more data or
+ * an error occurs.
+ * The return value is the number of bytes written to w.
+ *
+ * It implements the io.WriterTo interface.
+ */
+ writeTo(w: io.Writer): number
+ }
+}
+
+namespace hook {
+ /**
+ * Event implements [Resolver] and it is intended to be used as a base
+ * Hook event that you can embed in your custom typed event structs.
+ *
+ * Example:
+ *
+ * ```
+ * type CustomEvent struct {
+ * hook.Event
+ *
+ * SomeField int
+ * }
+ * ```
+ */
+ interface Event {
+ }
+ interface Event {
+ /**
+ * Next calls the next hook handler.
+ */
+ next(): void
+ }
+ /**
+ * Handler defines a single Hook handler.
+ * Multiple handlers can share the same id.
+ * If Id is not explicitly set it will be autogenerated by Hook.Add and Hook.AddHandler.
+ */
+ interface Handler {
+ /**
+ * Func defines the handler function to execute.
+ *
+ * Note that users need to call e.Next() in order to proceed with
+ * the execution of the hook chain.
+ */
+ func: (_arg0: T) => void
+ /**
+ * Id is the unique identifier of the handler.
+ *
+ * It could be used later to remove the handler from a hook via [Hook.Remove].
+ *
+ * If missing, an autogenerated value will be assigned when adding
+ * the handler to a hook.
+ */
+ id: string
/**
* Priority allows changing the default exec priority of the handler within a hook.
*
@@ -18826,2126 +18459,2558 @@ namespace hook {
* TaggedHook defines a proxy hook which register handlers that are triggered only
* if the TaggedHook.tags are empty or includes at least one of the event data tag(s).
*/
- type _sSpEQZd = mainHook
- interface TaggedHook extends _sSpEQZd {
+ type _srVyIru = mainHook
+ interface TaggedHook extends _srVyIru {
}
}
-namespace exec {
+/**
+ * Package types implements some commonly used db serializable types
+ * like datetime, json, etc.
+ */
+namespace types {
/**
- * Cmd represents an external command being prepared or run.
- *
- * A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput]
- * methods.
+ * DateTime represents a [time.Time] instance in UTC that is wrapped
+ * and serialized using the app default date layout.
*/
- interface Cmd {
+ interface DateTime {
+ }
+ interface DateTime {
/**
- * Path is the path of the command to run.
- *
- * This is the only field that must be set to a non-zero
- * value. If Path is relative, it is evaluated relative
- * to Dir.
+ * Time returns the internal [time.Time] instance.
*/
- path: string
+ time(): time.Time
+ }
+ interface DateTime {
/**
- * Args holds command line arguments, including the command as Args[0].
- * If the Args field is empty or nil, Run uses {Path}.
- *
- * In typical use, both Path and Args are set by calling Command.
+ * Add returns a new DateTime based on the current DateTime + the specified duration.
*/
- args: Array
+ add(duration: time.Duration): DateTime
+ }
+ interface DateTime {
/**
- * Env specifies the environment of the process.
- * Each entry is of the form "key=value".
- * If Env is nil, the new process uses the current process's
- * environment.
- * If Env contains duplicate environment keys, only the last
- * value in the slice for each duplicate key is used.
- * As a special case on Windows, SYSTEMROOT is always added if
- * missing and not explicitly set to the empty string.
+ * Sub returns a [time.Duration] by subtracting the specified DateTime from the current one.
+ *
+ * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration],
+ * the maximum (or minimum) duration will be returned.
*/
- env: Array
+ sub(u: DateTime): time.Duration
+ }
+ interface DateTime {
/**
- * Dir specifies the working directory of the command.
- * If Dir is the empty string, Run runs the command in the
- * calling process's current directory.
+ * AddDate returns a new DateTime based on the current one + duration.
+ *
+ * It follows the same rules as [time.AddDate].
*/
- dir: string
+ addDate(years: number, months: number, days: number): DateTime
+ }
+ interface DateTime {
/**
- * Stdin specifies the process's standard input.
- *
- * If Stdin is nil, the process reads from the null device (os.DevNull).
- *
- * If Stdin is an *os.File, the process's standard input is connected
- * directly to that file.
- *
- * Otherwise, during the execution of the command a separate
- * goroutine reads from Stdin and delivers that data to the command
- * over a pipe. In this case, Wait does not complete until the goroutine
- * stops copying, either because it has reached the end of Stdin
- * (EOF or a read error), or because writing to the pipe returned an error,
- * or because a nonzero WaitDelay was set and expired.
+ * After reports whether the current DateTime instance is after u.
*/
- stdin: io.Reader
+ after(u: DateTime): boolean
+ }
+ interface DateTime {
/**
- * Stdout and Stderr specify the process's standard output and error.
- *
- * If either is nil, Run connects the corresponding file descriptor
- * to the null device (os.DevNull).
- *
- * If either is an *os.File, the corresponding output from the process
- * is connected directly to that file.
- *
- * Otherwise, during the execution of the command a separate goroutine
- * reads from the process over a pipe and delivers that data to the
- * corresponding Writer. In this case, Wait does not complete until the
- * goroutine reaches EOF or encounters an error or a nonzero WaitDelay
- * expires.
- *
- * If Stdout and Stderr are the same writer, and have a type that can
- * be compared with ==, at most one goroutine at a time will call Write.
- */
- stdout: io.Writer
- stderr: io.Writer
- /**
- * ExtraFiles specifies additional open files to be inherited by the
- * new process. It does not include standard input, standard output, or
- * standard error. If non-nil, entry i becomes file descriptor 3+i.
- *
- * ExtraFiles is not supported on Windows.
+ * Before reports whether the current DateTime instance is before u.
*/
- extraFiles: Array<(os.File | undefined)>
+ before(u: DateTime): boolean
+ }
+ interface DateTime {
/**
- * SysProcAttr holds optional, operating system-specific attributes.
- * Run passes it to os.StartProcess as the os.ProcAttr's Sys field.
+ * Compare compares the current DateTime instance with u.
+ * If the current instance is before u, it returns -1.
+ * If the current instance is after u, it returns +1.
+ * If they're the same, it returns 0.
*/
- sysProcAttr?: syscall.SysProcAttr
+ compare(u: DateTime): number
+ }
+ interface DateTime {
/**
- * Process is the underlying process, once started.
+ * Equal reports whether the current DateTime and u represent the same time instant.
+ * Two DateTime can be equal even if they are in different locations.
+ * For example, 6:00 +0200 and 4:00 UTC are Equal.
*/
- process?: os.Process
+ equal(u: DateTime): boolean
+ }
+ interface DateTime {
/**
- * ProcessState contains information about an exited process.
- * If the process was started successfully, Wait or Run will
- * populate its ProcessState when the command completes.
+ * Unix returns the current DateTime as a Unix time, aka.
+ * the number of seconds elapsed since January 1, 1970 UTC.
*/
- processState?: os.ProcessState
- err: Error // LookPath error, if any.
+ unix(): number
+ }
+ interface DateTime {
/**
- * If Cancel is non-nil, the command must have been created with
- * CommandContext and Cancel will be called when the command's
- * Context is done. By default, CommandContext sets Cancel to
- * call the Kill method on the command's Process.
- *
- * Typically a custom Cancel will send a signal to the command's
- * Process, but it may instead take other actions to initiate cancellation,
- * such as closing a stdin or stdout pipe or sending a shutdown request on a
- * network socket.
- *
- * If the command exits with a success status after Cancel is
- * called, and Cancel does not return an error equivalent to
- * os.ErrProcessDone, then Wait and similar methods will return a non-nil
- * error: either an error wrapping the one returned by Cancel,
- * or the error from the Context.
- * (If the command exits with a non-success status, or Cancel
- * returns an error that wraps os.ErrProcessDone, Wait and similar methods
- * continue to return the command's usual exit status.)
- *
- * If Cancel is set to nil, nothing will happen immediately when the command's
- * Context is done, but a nonzero WaitDelay will still take effect. That may
- * be useful, for example, to work around deadlocks in commands that do not
- * support shutdown signals but are expected to always finish quickly.
- *
- * Cancel will not be called if Start returns a non-nil error.
+ * IsZero checks whether the current DateTime instance has zero time value.
*/
- cancel: () => void
+ isZero(): boolean
+ }
+ interface DateTime {
/**
- * If WaitDelay is non-zero, it bounds the time spent waiting on two sources
- * of unexpected delay in Wait: a child process that fails to exit after the
- * associated Context is canceled, and a child process that exits but leaves
- * its I/O pipes unclosed.
- *
- * The WaitDelay timer starts when either the associated Context is done or a
- * call to Wait observes that the child process has exited, whichever occurs
- * first. When the delay has elapsed, the command shuts down the child process
- * and/or its I/O pipes.
- *
- * If the child process has failed to exit — perhaps because it ignored or
- * failed to receive a shutdown signal from a Cancel function, or because no
- * Cancel function was set — then it will be terminated using os.Process.Kill.
- *
- * Then, if the I/O pipes communicating with the child process are still open,
- * those pipes are closed in order to unblock any goroutines currently blocked
- * on Read or Write calls.
- *
- * If pipes are closed due to WaitDelay, no Cancel call has occurred,
- * and the command has otherwise exited with a successful status, Wait and
- * similar methods will return ErrWaitDelay instead of nil.
+ * String serializes the current DateTime instance into a formatted
+ * UTC date string.
*
- * If WaitDelay is zero (the default), I/O pipes will be read until EOF,
- * which might not occur until orphaned subprocesses of the command have
- * also closed their descriptors for the pipes.
+ * The zero value is serialized to an empty string.
*/
- waitDelay: time.Duration
+ string(): string
}
- interface Cmd {
+ interface DateTime {
/**
- * String returns a human-readable description of c.
- * It is intended only for debugging.
- * In particular, it is not suitable for use as input to a shell.
- * The output of String may vary across Go releases.
+ * MarshalJSON implements the [json.Marshaler] interface.
*/
- string(): string
+ marshalJSON(): string|Array
}
- interface Cmd {
+ interface DateTime {
/**
- * Run starts the specified command and waits for it to complete.
- *
- * The returned error is nil if the command runs, has no problems
- * copying stdin, stdout, and stderr, and exits with a zero exit
- * status.
- *
- * If the command starts but does not complete successfully, the error is of
- * type [*ExitError]. Other error types may be returned for other situations.
- *
- * If the calling goroutine has locked the operating system thread
- * with [runtime.LockOSThread] and modified any inheritable OS-level
- * thread state (for example, Linux or Plan 9 name spaces), the new
- * process will inherit the caller's thread state.
+ * UnmarshalJSON implements the [json.Unmarshaler] interface.
*/
- run(): void
+ unmarshalJSON(b: string|Array): void
}
- interface Cmd {
+ interface DateTime {
/**
- * Start starts the specified command but does not wait for it to complete.
- *
- * If Start returns successfully, the c.Process field will be set.
- *
- * After a successful call to Start the [Cmd.Wait] method must be called in
- * order to release associated system resources.
+ * Value implements the [driver.Valuer] interface.
*/
- start(): void
+ value(): any
}
- interface Cmd {
+ interface DateTime {
/**
- * Wait waits for the command to exit and waits for any copying to
- * stdin or copying from stdout or stderr to complete.
- *
- * The command must have been started by [Cmd.Start].
- *
- * The returned error is nil if the command runs, has no problems
- * copying stdin, stdout, and stderr, and exits with a zero exit
- * status.
- *
- * If the command fails to run or doesn't complete successfully, the
- * error is of type [*ExitError]. Other error types may be
- * returned for I/O problems.
- *
- * If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits
- * for the respective I/O loop copying to or from the process to complete.
- *
- * Wait releases any resources associated with the [Cmd].
+ * Scan implements [sql.Scanner] interface to scan the provided value
+ * into the current DateTime instance.
*/
- wait(): void
+ scan(value: any): void
}
- interface Cmd {
+ /**
+ * GeoPoint defines a struct for storing geo coordinates as serialized json object
+ * (e.g. {lon:0,lat:0}).
+ *
+ * Note: using object notation and not a plain array to avoid the confusion
+ * as there doesn't seem to be a fixed standard for the coordinates order.
+ */
+ interface GeoPoint {
+ lon: number
+ lat: number
+ }
+ interface GeoPoint {
/**
- * Output runs the command and returns its standard output.
- * Any returned error will usually be of type [*ExitError].
- * If c.Stderr was nil, Output populates [ExitError.Stderr].
+ * String returns the string representation of the current GeoPoint instance.
*/
- output(): string|Array
+ string(): string
}
- interface Cmd {
+ interface GeoPoint {
/**
- * CombinedOutput runs the command and returns its combined standard
- * output and standard error.
+ * AsMap implements [core.mapExtractor] and returns a value suitable
+ * to be used in an API rule expression.
*/
- combinedOutput(): string|Array
+ asMap(): _TygojaDict
}
- interface Cmd {
+ interface GeoPoint {
/**
- * StdinPipe returns a pipe that will be connected to the command's
- * standard input when the command starts.
- * The pipe will be closed automatically after [Cmd.Wait] sees the command exit.
- * A caller need only call Close to force the pipe to close sooner.
- * For example, if the command being run will not exit until standard input
- * is closed, the caller must close the pipe.
+ * Value implements the [driver.Valuer] interface.
*/
- stdinPipe(): io.WriteCloser
+ value(): any
}
- interface Cmd {
+ interface GeoPoint {
/**
- * StdoutPipe returns a pipe that will be connected to the command's
- * standard output when the command starts.
+ * Scan implements [sql.Scanner] interface to scan the provided value
+ * into the current GeoPoint instance.
*
- * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers
- * need not close the pipe themselves. It is thus incorrect to call Wait
- * before all reads from the pipe have completed.
- * For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe.
- * See the example for idiomatic usage.
+ * The value argument could be nil (no-op), another GeoPoint instance,
+ * map or serialized json object with lat-lon props.
*/
- stdoutPipe(): io.ReadCloser
+ scan(value: any): void
}
- interface Cmd {
+ /**
+ * JSONArray defines a slice that is safe for json and db read/write.
+ */
+ interface JSONArray extends Array{}
+ /**
+ * JSONMap defines a map that is safe for json and db read/write.
+ */
+ interface JSONMap extends _TygojaDict{}
+ /**
+ * JSONRaw defines a json value type that is safe for db read/write.
+ */
+ interface JSONRaw extends Array{}
+ interface JSONRaw {
/**
- * StderrPipe returns a pipe that will be connected to the command's
- * standard error when the command starts.
- *
- * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers
- * need not close the pipe themselves. It is thus incorrect to call Wait
- * before all reads from the pipe have completed.
- * For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe.
- * See the StdoutPipe example for idiomatic usage.
+ * String returns the current JSONRaw instance as a json encoded string.
*/
- stderrPipe(): io.ReadCloser
+ string(): string
}
- interface Cmd {
+ interface JSONRaw {
/**
- * Environ returns a copy of the environment in which the command would be run
- * as it is currently configured.
+ * MarshalJSON implements the [json.Marshaler] interface.
*/
- environ(): Array
+ marshalJSON(): string|Array
}
-}
-
-namespace mailer {
- /**
- * Message defines a generic email message struct.
- */
- interface Message {
- from: { address: string; name?: string; }
- to: Array<{ address: string; name?: string; }>
- bcc: Array<{ address: string; name?: string; }>
- cc: Array<{ address: string; name?: string; }>
- subject: string
- html: string
- text: string
- headers: _TygojaDict
- attachments: _TygojaDict
- inlineAttachments: _TygojaDict
+ interface JSONRaw {
+ /**
+ * UnmarshalJSON implements the [json.Unmarshaler] interface.
+ */
+ unmarshalJSON(b: string|Array): void
}
- /**
- * Mailer defines a base mail client interface.
- */
- interface Mailer {
- [key:string]: any;
+ interface JSONRaw {
/**
- * Send sends an email with the provided Message.
+ * Value implements the [driver.Valuer] interface.
*/
- send(message: Message): void
+ value(): any
+ }
+ interface JSONRaw {
+ /**
+ * Scan implements [sql.Scanner] interface to scan the provided value
+ * into the current JSONRaw instance.
+ */
+ scan(value: any): void
}
}
-/**
- * Package blob defines a lightweight abstration for interacting with
- * various storage services (local filesystem, S3, etc.).
- *
- * NB!
- * For compatibility with earlier PocketBase versions and to prevent
- * unnecessary breaking changes, this package is based and implemented
- * as a minimal, stripped down version of the previously used gocloud.dev/blob.
- * While there is no promise that it won't diverge in the future to accommodate
- * better some PocketBase specific use cases, currently it copies and
- * tries to follow as close as possible the same implementations,
- * conventions and rules for the key escaping/unescaping, blob read/write
- * interfaces and struct options as gocloud.dev/blob, therefore the
- * credits goes to the original Go Cloud Development Kit Authors.
- */
-namespace blob {
+namespace search {
/**
- * ListObject represents a single blob returned from List.
+ * Result defines the returned search result structure.
*/
- interface ListObject {
+ interface Result {
+ items: any
+ page: number
+ perPage: number
+ totalItems: number
+ totalPages: number
+ }
+ /**
+ * ResolverResult defines a single FieldResolver.Resolve() successfully parsed result.
+ */
+ interface ResolverResult {
/**
- * Key is the key for this blob.
+ * Identifier is the plain SQL identifier/column that will be used
+ * in the final db expression as left or right operand.
*/
- key: string
+ identifier: string
/**
- * ModTime is the time the blob was last modified.
+ * NoCoalesce instructs to not use COALESCE or NULL fallbacks
+ * when building the identifier expression.
*/
- modTime: time.Time
+ noCoalesce: boolean
/**
- * Size is the size of the blob's content in bytes.
+ * Params is a map with db placeholder->value pairs that will be added
+ * to the query when building both resolved operands/sides in a single expression.
*/
- size: number
+ params: dbx.Params
/**
- * MD5 is an MD5 hash of the blob contents or nil if not available.
+ * MultiMatchSubQuery is an optional sub query expression that will be added
+ * in addition to the combined ResolverResult expression during build.
*/
- md5: string|Array
+ multiMatchSubQuery: dbx.Expression
/**
- * IsDir indicates that this result represents a "directory" in the
- * hierarchical namespace, ending in ListOptions.Delimiter. Key can be
- * passed as ListOptions.Prefix to list items in the "directory".
- * Fields other than Key and IsDir will not be set if IsDir is true.
+ * AfterBuild is an optional function that will be called after building
+ * and combining the result of both resolved operands/sides in a single expression.
*/
- isDir: boolean
+ afterBuild: (expr: dbx.Expression) => dbx.Expression
}
+}
+
+namespace router {
+ // @ts-ignore
+ import validation = ozzo_validation
/**
- * Attributes contains attributes about a blob.
+ * ApiError defines the struct for a basic api error response.
*/
- interface Attributes {
+ interface ApiError {
+ data: _TygojaDict
+ message: string
+ status: number
+ }
+ interface ApiError {
/**
- * CacheControl specifies caching attributes that services may use
- * when serving the blob.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
+ * Error makes it compatible with the `error` interface.
*/
- cacheControl: string
+ error(): string
+ }
+ interface ApiError {
/**
- * ContentDisposition specifies whether the blob content is expected to be
- * displayed inline or as an attachment.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
+ * RawData returns the unformatted error data (could be an internal error, text, etc.)
*/
- contentDisposition: string
+ rawData(): any
+ }
+ interface ApiError {
/**
- * ContentEncoding specifies the encoding used for the blob's content, if any.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
+ * Is reports whether the current ApiError wraps the target.
*/
- contentEncoding: string
+ is(target: Error): boolean
+ }
+ /**
+ * Event specifies a base Route handler event that is usually intended
+ * to be embedded as part of a custom event struct.
+ *
+ * NB! It is expected that the Response and Request fields are always set.
+ */
+ type _sSZTUqv = hook.Event
+ interface Event extends _sSZTUqv {
+ response: http.ResponseWriter
+ request?: http.Request
+ }
+ interface Event {
/**
- * ContentLanguage specifies the language used in the blob's content, if any.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
+ * Written reports whether the current response has already been written.
+ *
+ * This method always returns false if e.Response doesn't implement the WriteTracker interface
+ * (all router package handlers receive a ResponseWriter that implements it unless explicitly replaced with a custom one).
*/
- contentLanguage: string
+ written(): boolean
+ }
+ interface Event {
/**
- * ContentType is the MIME type of the blob. It will not be empty.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
+ * Status reports the status code of the current response.
+ *
+ * This method always returns 0 if e.Response doesn't implement the StatusTracker interface
+ * (all router package handlers receive a ResponseWriter that implements it unless explicitly replaced with a custom one).
*/
- contentType: string
+ status(): number
+ }
+ interface Event {
/**
- * Metadata holds key/value pairs associated with the blob.
- * Keys are guaranteed to be in lowercase, even if the backend service
- * has case-sensitive keys (although note that Metadata written via
- * this package will always be lowercased). If there are duplicate
- * case-insensitive keys (e.g., "foo" and "FOO"), only one value
- * will be kept, and it is undefined which one.
+ * Flush flushes buffered data to the current response.
+ *
+ * Returns [http.ErrNotSupported] if e.Response doesn't implement the [http.Flusher] interface
+ * (all router package handlers receive a ResponseWriter that implements it unless explicitly replaced with a custom one).
*/
- metadata: _TygojaDict
+ flush(): void
+ }
+ interface Event {
/**
- * CreateTime is the time the blob was created, if available. If not available,
- * CreateTime will be the zero time.
+ * IsTLS reports whether the connection on which the request was received is TLS.
*/
- createTime: time.Time
+ isTLS(): boolean
+ }
+ interface Event {
/**
- * ModTime is the time the blob was last modified.
+ * SetCookie is an alias for [http.SetCookie].
+ *
+ * SetCookie adds a Set-Cookie header to the current response's headers.
+ * The provided cookie must have a valid Name.
+ * Invalid cookies may be silently dropped.
*/
- modTime: time.Time
+ setCookie(cookie: http.Cookie): void
+ }
+ interface Event {
/**
- * Size is the size of the blob's content in bytes.
+ * RemoteIP returns the IP address of the client that sent the request.
+ *
+ * IPv6 addresses are returned expanded.
+ * For example, "2001:db8::1" becomes "2001:0db8:0000:0000:0000:0000:0000:0001".
+ *
+ * Note that if you are behind reverse proxy(ies), this method returns
+ * the IP of the last connecting proxy.
*/
- size: number
+ remoteIP(): string
+ }
+ interface Event {
/**
- * MD5 is an MD5 hash of the blob contents or nil if not available.
+ * FindUploadedFiles extracts all form files of "key" from a http request
+ * and returns a slice with filesystem.File instances (if any).
*/
- md5: string|Array
+ findUploadedFiles(key: string): Array<(filesystem.File | undefined)>
+ }
+ interface Event {
/**
- * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag.
+ * Get retrieves single value from the current event data store.
*/
- eTag: string
- }
- /**
- * Reader reads bytes from a blob.
- * It implements io.ReadSeekCloser, and must be closed after reads are finished.
- */
- interface Reader {
+ get(key: string): any
}
- interface Reader {
+ interface Event {
/**
- * Read implements io.Reader (https://golang.org/pkg/io/#Reader).
+ * GetAll returns a copy of the current event data store.
*/
- read(p: string|Array): number
+ getAll(): _TygojaDict
}
- interface Reader {
+ interface Event {
/**
- * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker).
+ * Set saves single value into the current event data store.
*/
- seek(offset: number, whence: number): number
+ set(key: string, value: any): void
}
- interface Reader {
+ interface Event {
/**
- * Close implements io.Closer (https://golang.org/pkg/io/#Closer).
+ * SetAll saves all items from m into the current event data store.
*/
- close(): void
+ setAll(m: _TygojaDict): void
}
- interface Reader {
+ interface Event {
/**
- * ContentType returns the MIME type of the blob.
+ * String writes a plain string response.
*/
- contentType(): string
+ string(status: number, data: string): void
}
- interface Reader {
+ interface Event {
/**
- * ModTime returns the time the blob was last modified.
+ * HTML writes an HTML response.
*/
- modTime(): time.Time
+ html(status: number, data: string): void
}
- interface Reader {
+ interface Event {
/**
- * Size returns the size of the blob content in bytes.
+ * JSON writes a JSON response.
+ *
+ * It also provides a generic response data fields picker if the "fields" query parameter is set.
+ * For example, if you are requesting `?fields=a,b` for `e.JSON(200, map[string]int{ "a":1, "b":2, "c":3 })`,
+ * it should result in a JSON response like: `{"a":1, "b": 2}`.
*/
- size(): number
+ json(status: number, data: any): void
}
- interface Reader {
+ interface Event {
/**
- * WriteTo reads from r and writes to w until there's no more data or
- * an error occurs.
- * The return value is the number of bytes written to w.
+ * XML writes an XML response.
+ * It automatically prepends the generic [xml.Header] string to the response.
+ */
+ xml(status: number, data: any): void
+ }
+ interface Event {
+ /**
+ * Stream streams the specified reader into the response.
+ */
+ stream(status: number, contentType: string, reader: io.Reader): void
+ }
+ interface Event {
+ /**
+ * Blob writes a blob (bytes slice) response.
+ */
+ blob(status: number, contentType: string, b: string|Array): void
+ }
+ interface Event {
+ /**
+ * FileFS serves the specified filename from fsys.
*
- * It implements the io.WriterTo interface.
+ * It is similar to [echo.FileFS] for consistency with earlier versions.
*/
- writeTo(w: io.Writer): number
+ fileFS(fsys: fs.FS, filename: string): void
+ }
+ interface Event {
+ /**
+ * NoContent writes a response with no body (ex. 204).
+ */
+ noContent(status: number): void
+ }
+ interface Event {
+ /**
+ * Redirect writes a redirect response to the specified url.
+ * The status code must be in between 300 – 399 range.
+ */
+ redirect(status: number, url: string): void
+ }
+ interface Event {
+ error(status: number, message: string, errData: any): (ApiError)
+ }
+ interface Event {
+ badRequestError(message: string, errData: any): (ApiError)
+ }
+ interface Event {
+ notFoundError(message: string, errData: any): (ApiError)
+ }
+ interface Event {
+ forbiddenError(message: string, errData: any): (ApiError)
+ }
+ interface Event {
+ unauthorizedError(message: string, errData: any): (ApiError)
+ }
+ interface Event {
+ tooManyRequestsError(message: string, errData: any): (ApiError)
+ }
+ interface Event {
+ internalServerError(message: string, errData: any): (ApiError)
+ }
+ interface Event {
+ /**
+ * BindBody unmarshals the request body into the provided dst.
+ *
+ * dst must be either a struct pointer or map[string]any.
+ *
+ * The rules how the body will be scanned depends on the request Content-Type.
+ *
+ * Currently the following Content-Types are supported:
+ * ```
+ * - application/json
+ * - text/xml, application/xml
+ * - multipart/form-data, application/x-www-form-urlencoded
+ * ```
+ *
+ * Respectively the following struct tags are supported (again, which one will be used depends on the Content-Type):
+ * ```
+ * - "json" (json body)- uses the builtin Go json package for unmarshaling.
+ * - "xml" (xml body) - uses the builtin Go xml package for unmarshaling.
+ * - "form" (form data) - utilizes the custom [router.UnmarshalRequestData] method.
+ * ```
+ *
+ * NB! When dst is a struct make sure that it doesn't have public fields
+ * that shouldn't be bindable and it is advisable for such fields to be unexported
+ * or have a separate struct just for the binding. For example:
+ *
+ * ```
+ * data := struct{
+ * somethingPrivate string
+ *
+ * Title string `json:"title" form:"title"`
+ * Total int `json:"total" form:"total"`
+ * }
+ * err := e.BindBody(&data)
+ * ```
+ */
+ bindBody(dst: any): void
+ }
+ /**
+ * Router defines a thin wrapper around the standard Go [http.ServeMux] by
+ * adding support for routing sub-groups, middlewares and other common utils.
+ *
+ * Example:
+ *
+ * ```
+ * r := NewRouter[*MyEvent](eventFactory)
+ *
+ * // middlewares
+ * r.BindFunc(m1, m2)
+ *
+ * // routes
+ * r.GET("/test", handler1)
+ *
+ * // sub-routers/groups
+ * api := r.Group("/api")
+ * api.GET("/admins", handler2)
+ *
+ * // generate a http.ServeMux instance based on the router configurations
+ * mux, _ := r.BuildMux()
+ *
+ * http.ListenAndServe("localhost:8090", mux)
+ * ```
+ */
+ type _svQvLRA = RouterGroup
+ interface Router extends _svQvLRA {
}
}
/**
- * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
- * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
- */
-namespace cobra {
- interface Command {
+ * Package slog provides structured logging,
+ * in which log records include a message,
+ * a severity level, and various other attributes
+ * expressed as key-value pairs.
+ *
+ * It defines a type, [Logger],
+ * which provides several methods (such as [Logger.Info] and [Logger.Error])
+ * for reporting events of interest.
+ *
+ * Each Logger is associated with a [Handler].
+ * A Logger output method creates a [Record] from the method arguments
+ * and passes it to the Handler, which decides how to handle it.
+ * There is a default Logger accessible through top-level functions
+ * (such as [Info] and [Error]) that call the corresponding Logger methods.
+ *
+ * A log record consists of a time, a level, a message, and a set of key-value
+ * pairs, where the keys are strings and the values may be of any type.
+ * As an example,
+ *
+ * ```
+ * slog.Info("hello", "count", 3)
+ * ```
+ *
+ * creates a record containing the time of the call,
+ * a level of Info, the message "hello", and a single
+ * pair with key "count" and value 3.
+ *
+ * The [Info] top-level function calls the [Logger.Info] method on the default Logger.
+ * In addition to [Logger.Info], there are methods for Debug, Warn and Error levels.
+ * Besides these convenience methods for common levels,
+ * there is also a [Logger.Log] method which takes the level as an argument.
+ * Each of these methods has a corresponding top-level function that uses the
+ * default logger.
+ *
+ * The default handler formats the log record's message, time, level, and attributes
+ * as a string and passes it to the [log] package.
+ *
+ * ```
+ * 2022/11/08 15:28:26 INFO hello count=3
+ * ```
+ *
+ * For more control over the output format, create a logger with a different handler.
+ * This statement uses [New] to create a new logger with a [TextHandler]
+ * that writes structured records in text form to standard error:
+ *
+ * ```
+ * logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
+ * ```
+ *
+ * [TextHandler] output is a sequence of key=value pairs, easily and unambiguously
+ * parsed by machine. This statement:
+ *
+ * ```
+ * logger.Info("hello", "count", 3)
+ * ```
+ *
+ * produces this output:
+ *
+ * ```
+ * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3
+ * ```
+ *
+ * The package also provides [JSONHandler], whose output is line-delimited JSON:
+ *
+ * ```
+ * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
+ * logger.Info("hello", "count", 3)
+ * ```
+ *
+ * produces this output:
+ *
+ * ```
+ * {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3}
+ * ```
+ *
+ * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions].
+ * There are options for setting the minimum level (see Levels, below),
+ * displaying the source file and line of the log call, and
+ * modifying attributes before they are logged.
+ *
+ * Setting a logger as the default with
+ *
+ * ```
+ * slog.SetDefault(logger)
+ * ```
+ *
+ * will cause the top-level functions like [Info] to use it.
+ * [SetDefault] also updates the default logger used by the [log] package,
+ * so that existing applications that use [log.Printf] and related functions
+ * will send log records to the logger's handler without needing to be rewritten.
+ *
+ * Some attributes are common to many log calls.
+ * For example, you may wish to include the URL or trace identifier of a server request
+ * with all log events arising from the request.
+ * Rather than repeat the attribute with every log call, you can use [Logger.With]
+ * to construct a new Logger containing the attributes:
+ *
+ * ```
+ * logger2 := logger.With("url", r.URL)
+ * ```
+ *
+ * The arguments to With are the same key-value pairs used in [Logger.Info].
+ * The result is a new Logger with the same handler as the original, but additional
+ * attributes that will appear in the output of every call.
+ *
+ * # Levels
+ *
+ * A [Level] is an integer representing the importance or severity of a log event.
+ * The higher the level, the more severe the event.
+ * This package defines constants for the most common levels,
+ * but any int can be used as a level.
+ *
+ * In an application, you may wish to log messages only at a certain level or greater.
+ * One common configuration is to log messages at Info or higher levels,
+ * suppressing debug logging until it is needed.
+ * The built-in handlers can be configured with the minimum level to output by
+ * setting [HandlerOptions.Level].
+ * The program's `main` function typically does this.
+ * The default value is LevelInfo.
+ *
+ * Setting the [HandlerOptions.Level] field to a [Level] value
+ * fixes the handler's minimum level throughout its lifetime.
+ * Setting it to a [LevelVar] allows the level to be varied dynamically.
+ * A LevelVar holds a Level and is safe to read or write from multiple
+ * goroutines.
+ * To vary the level dynamically for an entire program, first initialize
+ * a global LevelVar:
+ *
+ * ```
+ * var programLevel = new(slog.LevelVar) // Info by default
+ * ```
+ *
+ * Then use the LevelVar to construct a handler, and make it the default:
+ *
+ * ```
+ * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel})
+ * slog.SetDefault(slog.New(h))
+ * ```
+ *
+ * Now the program can change its logging level with a single statement:
+ *
+ * ```
+ * programLevel.Set(slog.LevelDebug)
+ * ```
+ *
+ * # Groups
+ *
+ * Attributes can be collected into groups.
+ * A group has a name that is used to qualify the names of its attributes.
+ * How this qualification is displayed depends on the handler.
+ * [TextHandler] separates the group and attribute names with a dot.
+ * [JSONHandler] treats each group as a separate JSON object, with the group name as the key.
+ *
+ * Use [Group] to create a Group attribute from a name and a list of key-value pairs:
+ *
+ * ```
+ * slog.Group("request",
+ * "method", r.Method,
+ * "url", r.URL)
+ * ```
+ *
+ * TextHandler would display this group as
+ *
+ * ```
+ * request.method=GET request.url=http://example.com
+ * ```
+ *
+ * JSONHandler would display it as
+ *
+ * ```
+ * "request":{"method":"GET","url":"http://example.com"}
+ * ```
+ *
+ * Use [Logger.WithGroup] to qualify all of a Logger's output
+ * with a group name. Calling WithGroup on a Logger results in a
+ * new Logger with the same Handler as the original, but with all
+ * its attributes qualified by the group name.
+ *
+ * This can help prevent duplicate attribute keys in large systems,
+ * where subsystems might use the same keys.
+ * Pass each subsystem a different Logger with its own group name so that
+ * potential duplicates are qualified:
+ *
+ * ```
+ * logger := slog.Default().With("id", systemID)
+ * parserLogger := logger.WithGroup("parser")
+ * parseInput(input, parserLogger)
+ * ```
+ *
+ * When parseInput logs with parserLogger, its keys will be qualified with "parser",
+ * so even if it uses the common key "id", the log line will have distinct keys.
+ *
+ * # Contexts
+ *
+ * Some handlers may wish to include information from the [context.Context] that is
+ * available at the call site. One example of such information
+ * is the identifier for the current span when tracing is enabled.
+ *
+ * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first
+ * argument, as do their corresponding top-level functions.
+ *
+ * Although the convenience methods on Logger (Info and so on) and the
+ * corresponding top-level functions do not take a context, the alternatives ending
+ * in "Context" do. For example,
+ *
+ * ```
+ * slog.InfoContext(ctx, "message")
+ * ```
+ *
+ * It is recommended to pass a context to an output method if one is available.
+ *
+ * # Attrs and Values
+ *
+ * An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as
+ * alternating keys and values. The statement
+ *
+ * ```
+ * slog.Info("hello", slog.Int("count", 3))
+ * ```
+ *
+ * behaves the same as
+ *
+ * ```
+ * slog.Info("hello", "count", 3)
+ * ```
+ *
+ * There are convenience constructors for [Attr] such as [Int], [String], and [Bool]
+ * for common types, as well as the function [Any] for constructing Attrs of any
+ * type.
+ *
+ * The value part of an Attr is a type called [Value].
+ * Like an [any], a Value can hold any Go value,
+ * but it can represent typical values, including all numbers and strings,
+ * without an allocation.
+ *
+ * For the most efficient log output, use [Logger.LogAttrs].
+ * It is similar to [Logger.Log] but accepts only Attrs, not alternating
+ * keys and values; this allows it, too, to avoid allocation.
+ *
+ * The call
+ *
+ * ```
+ * logger.LogAttrs(ctx, slog.LevelInfo, "hello", slog.Int("count", 3))
+ * ```
+ *
+ * is the most efficient way to achieve the same output as
+ *
+ * ```
+ * slog.InfoContext(ctx, "hello", "count", 3)
+ * ```
+ *
+ * # Customizing a type's logging behavior
+ *
+ * If a type implements the [LogValuer] interface, the [Value] returned from its LogValue
+ * method is used for logging. You can use this to control how values of the type
+ * appear in logs. For example, you can redact secret information like passwords,
+ * or gather a struct's fields in a Group. See the examples under [LogValuer] for
+ * details.
+ *
+ * A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve]
+ * method handles these cases carefully, avoiding infinite loops and unbounded recursion.
+ * Handler authors and others may wish to use [Value.Resolve] instead of calling LogValue directly.
+ *
+ * # Wrapping output methods
+ *
+ * The logger functions use reflection over the call stack to find the file name
+ * and line number of the logging call within the application. This can produce
+ * incorrect source information for functions that wrap slog. For instance, if you
+ * define this function in file mylog.go:
+ *
+ * ```
+ * func Infof(logger *slog.Logger, format string, args ...any) {
+ * logger.Info(fmt.Sprintf(format, args...))
+ * }
+ * ```
+ *
+ * and you call it like this in main.go:
+ *
+ * ```
+ * Infof(slog.Default(), "hello, %s", "world")
+ * ```
+ *
+ * then slog will report the source file as mylog.go, not main.go.
+ *
+ * A correct implementation of Infof will obtain the source location
+ * (pc) and pass it to NewRecord.
+ * The Infof function in the package-level example called "wrapping"
+ * demonstrates how to do this.
+ *
+ * # Working with Records
+ *
+ * Sometimes a Handler will need to modify a Record
+ * before passing it on to another Handler or backend.
+ * A Record contains a mixture of simple public fields (e.g. Time, Level, Message)
+ * and hidden fields that refer to state (such as attributes) indirectly. This
+ * means that modifying a simple copy of a Record (e.g. by calling
+ * [Record.Add] or [Record.AddAttrs] to add attributes)
+ * may have unexpected effects on the original.
+ * Before modifying a Record, use [Record.Clone] to
+ * create a copy that shares no state with the original,
+ * or create a new Record with [NewRecord]
+ * and build up its Attrs by traversing the old ones with [Record.Attrs].
+ *
+ * # Performance considerations
+ *
+ * If profiling your application demonstrates that logging is taking significant time,
+ * the following suggestions may help.
+ *
+ * If many log lines have a common attribute, use [Logger.With] to create a Logger with
+ * that attribute. The built-in handlers will format that attribute only once, at the
+ * call to [Logger.With]. The [Handler] interface is designed to allow that optimization,
+ * and a well-written Handler should take advantage of it.
+ *
+ * The arguments to a log call are always evaluated, even if the log event is discarded.
+ * If possible, defer computation so that it happens only if the value is actually logged.
+ * For example, consider the call
+ *
+ * ```
+ * slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily
+ * ```
+ *
+ * The URL.String method will be called even if the logger discards Info-level events.
+ * Instead, pass the URL directly:
+ *
+ * ```
+ * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed
+ * ```
+ *
+ * The built-in [TextHandler] will call its String method, but only
+ * if the log event is enabled.
+ * Avoiding the call to String also preserves the structure of the underlying value.
+ * For example [JSONHandler] emits the components of the parsed URL as a JSON object.
+ * If you want to avoid eagerly paying the cost of the String call
+ * without causing the handler to potentially inspect the structure of the value,
+ * wrap the value in a fmt.Stringer implementation that hides its Marshal methods.
+ *
+ * You can also use the [LogValuer] interface to avoid unnecessary work in disabled log
+ * calls. Say you need to log some expensive value:
+ *
+ * ```
+ * slog.Debug("frobbing", "value", computeExpensiveValue(arg))
+ * ```
+ *
+ * Even if this line is disabled, computeExpensiveValue will be called.
+ * To avoid that, define a type implementing LogValuer:
+ *
+ * ```
+ * type expensive struct { arg int }
+ *
+ * func (e expensive) LogValue() slog.Value {
+ * return slog.AnyValue(computeExpensiveValue(e.arg))
+ * }
+ * ```
+ *
+ * Then use a value of that type in log calls:
+ *
+ * ```
+ * slog.Debug("frobbing", "value", expensive{arg})
+ * ```
+ *
+ * Now computeExpensiveValue will only be called when the line is enabled.
+ *
+ * The built-in handlers acquire a lock before calling [io.Writer.Write]
+ * to ensure that exactly one [Record] is written at a time in its entirety.
+ * Although each log record has a timestamp,
+ * the built-in handlers do not use that time to sort the written records.
+ * User-defined handlers are responsible for their own locking and sorting.
+ *
+ * # Writing a handler
+ *
+ * For a guide to writing a custom handler, see https://golang.org/s/slog-handler-guide.
+ */
+namespace slog {
+ // @ts-ignore
+ import loginternal = internal
+ /**
+ * A Logger records structured information about each call to its
+ * Log, Debug, Info, Warn, and Error methods.
+ * For each call, it creates a [Record] and passes it to a [Handler].
+ *
+ * To create a new Logger, call [New] or a Logger method
+ * that begins "With".
+ */
+ interface Logger {
+ }
+ interface Logger {
/**
- * GenBashCompletion generates bash completion file and writes to the passed writer.
+ * Handler returns l's Handler.
*/
- genBashCompletion(w: io.Writer): void
+ handler(): Handler
}
- interface Command {
+ interface Logger {
/**
- * GenBashCompletionFile generates bash completion file.
+ * With returns a Logger that includes the given attributes
+ * in each output operation. Arguments are converted to
+ * attributes as if by [Logger.Log].
*/
- genBashCompletionFile(filename: string): void
+ with(...args: any[]): (Logger)
}
- interface Command {
+ interface Logger {
/**
- * GenBashCompletionFileV2 generates Bash completion version 2.
+ * WithGroup returns a Logger that starts a group, if name is non-empty.
+ * The keys of all attributes added to the Logger will be qualified by the given
+ * name. (How that qualification happens depends on the [Handler.WithGroup]
+ * method of the Logger's Handler.)
+ *
+ * If name is empty, WithGroup returns the receiver.
*/
- genBashCompletionFileV2(filename: string, includeDesc: boolean): void
+ withGroup(name: string): (Logger)
}
- interface Command {
+ interface Logger {
/**
- * GenBashCompletionV2 generates Bash completion file version 2
- * and writes it to the passed writer.
+ * Enabled reports whether l emits log records at the given context and level.
*/
- genBashCompletionV2(w: io.Writer, includeDesc: boolean): void
+ enabled(ctx: context.Context, level: Level): boolean
}
- // @ts-ignore
- import flag = pflag
- /**
- * Command is just that, a command for your application.
- * E.g. 'go run ...' - 'run' is the command. Cobra requires
- * you to define the usage and description as part of your command
- * definition to ensure usability.
- */
- interface Command {
+ interface Logger {
/**
- * Use is the one-line usage message.
- * Recommended syntax is as follows:
+ * Log emits a log record with the current time and the given level and message.
+ * The Record's Attrs consist of the Logger's attributes followed by
+ * the Attrs specified by args.
+ *
+ * The attribute arguments are processed as follows:
* ```
- * [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
- * ... indicates that you can specify multiple values for the previous argument.
- * | indicates mutually exclusive information. You can use the argument to the left of the separator or the
- * argument to the right of the separator. You cannot use both arguments in a single use of the command.
- * { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
- * optional, they are enclosed in brackets ([ ]).
+ * - If an argument is an Attr, it is used as is.
+ * - If an argument is a string and this is not the last argument,
+ * the following argument is treated as the value and the two are combined
+ * into an Attr.
+ * - Otherwise, the argument is treated as a value with key "!BADKEY".
* ```
- * Example: add [-F file | -D dir]... [-f format] profile
- */
- use: string
- /**
- * Aliases is an array of aliases that can be used instead of the first word in Use.
*/
- aliases: Array
+ log(ctx: context.Context, level: Level, msg: string, ...args: any[]): void
+ }
+ interface Logger {
/**
- * SuggestFor is an array of command names for which this command will be suggested -
- * similar to aliases but only suggests.
+ * LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs.
*/
- suggestFor: Array
+ logAttrs(ctx: context.Context, level: Level, msg: string, ...attrs: Attr[]): void
+ }
+ interface Logger {
/**
- * Short is the short description shown in the 'help' output.
+ * Debug logs at [LevelDebug].
*/
- short: string
+ debug(msg: string, ...args: any[]): void
+ }
+ interface Logger {
/**
- * The group id under which this subcommand is grouped in the 'help' output of its parent.
+ * DebugContext logs at [LevelDebug] with the given context.
*/
- groupID: string
+ debugContext(ctx: context.Context, msg: string, ...args: any[]): void
+ }
+ interface Logger {
/**
- * Long is the long message shown in the 'help ' output.
+ * Info logs at [LevelInfo].
*/
- long: string
+ info(msg: string, ...args: any[]): void
+ }
+ interface Logger {
/**
- * Example is examples of how to use the command.
+ * InfoContext logs at [LevelInfo] with the given context.
*/
- example: string
+ infoContext(ctx: context.Context, msg: string, ...args: any[]): void
+ }
+ interface Logger {
/**
- * ValidArgs is list of all valid non-flag arguments that are accepted in shell completions
+ * Warn logs at [LevelWarn].
*/
- validArgs: Array
+ warn(msg: string, ...args: any[]): void
+ }
+ interface Logger {
/**
- * ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
- * It is a dynamic version of using ValidArgs.
- * Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ * WarnContext logs at [LevelWarn] with the given context.
*/
- validArgsFunction: CompletionFunc
+ warnContext(ctx: context.Context, msg: string, ...args: any[]): void
+ }
+ interface Logger {
/**
- * Expected arguments
+ * Error logs at [LevelError].
*/
- args: PositionalArgs
+ error(msg: string, ...args: any[]): void
+ }
+ interface Logger {
/**
- * ArgAliases is List of aliases for ValidArgs.
- * These are not suggested to the user in the shell completion,
- * but accepted if entered manually.
+ * ErrorContext logs at [LevelError] with the given context.
*/
- argAliases: Array
+ errorContext(ctx: context.Context, msg: string, ...args: any[]): void
+ }
+}
+
+namespace exec {
+ /**
+ * Cmd represents an external command being prepared or run.
+ *
+ * A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput]
+ * methods.
+ */
+ interface Cmd {
/**
- * BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator.
- * For portability with other shells, it is recommended to instead use ValidArgsFunction
+ * Path is the path of the command to run.
+ *
+ * This is the only field that must be set to a non-zero
+ * value. If Path is relative, it is evaluated relative
+ * to Dir.
*/
- bashCompletionFunction: string
+ path: string
/**
- * Deprecated defines, if this command is deprecated and should print this string when used.
+ * Args holds command line arguments, including the command as Args[0].
+ * If the Args field is empty or nil, Run uses {Path}.
+ *
+ * In typical use, both Path and Args are set by calling Command.
*/
- deprecated: string
+ args: Array
/**
- * Annotations are key/value pairs that can be used by applications to identify or
- * group commands or set special options.
+ * Env specifies the environment of the process.
+ * Each entry is of the form "key=value".
+ * If Env is nil, the new process uses the current process's
+ * environment.
+ * If Env contains duplicate environment keys, only the last
+ * value in the slice for each duplicate key is used.
+ * As a special case on Windows, SYSTEMROOT is always added if
+ * missing and not explicitly set to the empty string.
*/
- annotations: _TygojaDict
+ env: Array
/**
- * Version defines the version for this command. If this value is non-empty and the command does not
- * define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
- * will print content of the "Version" variable. A shorthand "v" flag will also be added if the
- * command does not define one.
+ * Dir specifies the working directory of the command.
+ * If Dir is the empty string, Run runs the command in the
+ * calling process's current directory.
*/
- version: string
+ dir: string
/**
- * The *Run functions are executed in the following order:
- * ```
- * * PersistentPreRun()
- * * PreRun()
- * * Run()
- * * PostRun()
- * * PersistentPostRun()
- * ```
- * All functions get the same args, the arguments after the command name.
- * The *PreRun and *PostRun functions will only be executed if the Run function of the current
- * command has been declared.
+ * Stdin specifies the process's standard input.
*
- * PersistentPreRun: children of this command will inherit and execute.
- */
- persistentPreRun: (cmd: Command, args: Array) => void
- /**
- * PersistentPreRunE: PersistentPreRun but returns an error.
- */
- persistentPreRunE: (cmd: Command, args: Array) => void
- /**
- * PreRun: children of this command will not inherit.
+ * If Stdin is nil, the process reads from the null device (os.DevNull).
+ *
+ * If Stdin is an *os.File, the process's standard input is connected
+ * directly to that file.
+ *
+ * Otherwise, during the execution of the command a separate
+ * goroutine reads from Stdin and delivers that data to the command
+ * over a pipe. In this case, Wait does not complete until the goroutine
+ * stops copying, either because it has reached the end of Stdin
+ * (EOF or a read error), or because writing to the pipe returned an error,
+ * or because a nonzero WaitDelay was set and expired.
*/
- preRun: (cmd: Command, args: Array) => void
+ stdin: io.Reader
/**
- * PreRunE: PreRun but returns an error.
+ * Stdout and Stderr specify the process's standard output and error.
+ *
+ * If either is nil, Run connects the corresponding file descriptor
+ * to the null device (os.DevNull).
+ *
+ * If either is an *os.File, the corresponding output from the process
+ * is connected directly to that file.
+ *
+ * Otherwise, during the execution of the command a separate goroutine
+ * reads from the process over a pipe and delivers that data to the
+ * corresponding Writer. In this case, Wait does not complete until the
+ * goroutine reaches EOF or encounters an error or a nonzero WaitDelay
+ * expires.
+ *
+ * If Stdout and Stderr are the same writer, and have a type that can
+ * be compared with ==, at most one goroutine at a time will call Write.
*/
- preRunE: (cmd: Command, args: Array) => void
+ stdout: io.Writer
+ stderr: io.Writer
/**
- * Run: Typically the actual work function. Most commands will only implement this.
+ * ExtraFiles specifies additional open files to be inherited by the
+ * new process. It does not include standard input, standard output, or
+ * standard error. If non-nil, entry i becomes file descriptor 3+i.
+ *
+ * ExtraFiles is not supported on Windows.
*/
- run: (cmd: Command, args: Array) => void
+ extraFiles: Array<(os.File | undefined)>
/**
- * RunE: Run but returns an error.
+ * SysProcAttr holds optional, operating system-specific attributes.
+ * Run passes it to os.StartProcess as the os.ProcAttr's Sys field.
*/
- runE: (cmd: Command, args: Array) => void
+ sysProcAttr?: syscall.SysProcAttr
/**
- * PostRun: run after the Run command.
+ * Process is the underlying process, once started.
*/
- postRun: (cmd: Command, args: Array) => void
+ process?: os.Process
/**
- * PostRunE: PostRun but returns an error.
+ * ProcessState contains information about an exited process.
+ * If the process was started successfully, Wait or Run will
+ * populate its ProcessState when the command completes.
*/
- postRunE: (cmd: Command, args: Array) => void
+ processState?: os.ProcessState
+ err: Error // LookPath error, if any.
/**
- * PersistentPostRun: children of this command will inherit and execute after PostRun.
+ * If Cancel is non-nil, the command must have been created with
+ * CommandContext and Cancel will be called when the command's
+ * Context is done. By default, CommandContext sets Cancel to
+ * call the Kill method on the command's Process.
+ *
+ * Typically a custom Cancel will send a signal to the command's
+ * Process, but it may instead take other actions to initiate cancellation,
+ * such as closing a stdin or stdout pipe or sending a shutdown request on a
+ * network socket.
+ *
+ * If the command exits with a success status after Cancel is
+ * called, and Cancel does not return an error equivalent to
+ * os.ErrProcessDone, then Wait and similar methods will return a non-nil
+ * error: either an error wrapping the one returned by Cancel,
+ * or the error from the Context.
+ * (If the command exits with a non-success status, or Cancel
+ * returns an error that wraps os.ErrProcessDone, Wait and similar methods
+ * continue to return the command's usual exit status.)
+ *
+ * If Cancel is set to nil, nothing will happen immediately when the command's
+ * Context is done, but a nonzero WaitDelay will still take effect. That may
+ * be useful, for example, to work around deadlocks in commands that do not
+ * support shutdown signals but are expected to always finish quickly.
+ *
+ * Cancel will not be called if Start returns a non-nil error.
*/
- persistentPostRun: (cmd: Command, args: Array) => void
+ cancel: () => void
/**
- * PersistentPostRunE: PersistentPostRun but returns an error.
+ * If WaitDelay is non-zero, it bounds the time spent waiting on two sources
+ * of unexpected delay in Wait: a child process that fails to exit after the
+ * associated Context is canceled, and a child process that exits but leaves
+ * its I/O pipes unclosed.
+ *
+ * The WaitDelay timer starts when either the associated Context is done or a
+ * call to Wait observes that the child process has exited, whichever occurs
+ * first. When the delay has elapsed, the command shuts down the child process
+ * and/or its I/O pipes.
+ *
+ * If the child process has failed to exit — perhaps because it ignored or
+ * failed to receive a shutdown signal from a Cancel function, or because no
+ * Cancel function was set — then it will be terminated using os.Process.Kill.
+ *
+ * Then, if the I/O pipes communicating with the child process are still open,
+ * those pipes are closed in order to unblock any goroutines currently blocked
+ * on Read or Write calls.
+ *
+ * If pipes are closed due to WaitDelay, no Cancel call has occurred,
+ * and the command has otherwise exited with a successful status, Wait and
+ * similar methods will return ErrWaitDelay instead of nil.
+ *
+ * If WaitDelay is zero (the default), I/O pipes will be read until EOF,
+ * which might not occur until orphaned subprocesses of the command have
+ * also closed their descriptors for the pipes.
*/
- persistentPostRunE: (cmd: Command, args: Array) => void
+ waitDelay: time.Duration
+ }
+ interface Cmd {
/**
- * FParseErrWhitelist flag parse errors to be ignored
+ * String returns a human-readable description of c.
+ * It is intended only for debugging.
+ * In particular, it is not suitable for use as input to a shell.
+ * The output of String may vary across Go releases.
*/
- fParseErrWhitelist: FParseErrWhitelist
+ string(): string
+ }
+ interface Cmd {
/**
- * CompletionOptions is a set of options to control the handling of shell completion
+ * Run starts the specified command and waits for it to complete.
+ *
+ * The returned error is nil if the command runs, has no problems
+ * copying stdin, stdout, and stderr, and exits with a zero exit
+ * status.
+ *
+ * If the command starts but does not complete successfully, the error is of
+ * type [*ExitError]. Other error types may be returned for other situations.
+ *
+ * If the calling goroutine has locked the operating system thread
+ * with [runtime.LockOSThread] and modified any inheritable OS-level
+ * thread state (for example, Linux or Plan 9 name spaces), the new
+ * process will inherit the caller's thread state.
*/
- completionOptions: CompletionOptions
+ run(): void
+ }
+ interface Cmd {
/**
- * TraverseChildren parses flags on all parents before executing child command.
+ * Start starts the specified command but does not wait for it to complete.
+ *
+ * If Start returns successfully, the c.Process field will be set.
+ *
+ * After a successful call to Start the [Cmd.Wait] method must be called in
+ * order to release associated system resources.
*/
- traverseChildren: boolean
+ start(): void
+ }
+ interface Cmd {
/**
- * Hidden defines, if this command is hidden and should NOT show up in the list of available commands.
+ * Wait waits for the command to exit and waits for any copying to
+ * stdin or copying from stdout or stderr to complete.
+ *
+ * The command must have been started by [Cmd.Start].
+ *
+ * The returned error is nil if the command runs, has no problems
+ * copying stdin, stdout, and stderr, and exits with a zero exit
+ * status.
+ *
+ * If the command fails to run or doesn't complete successfully, the
+ * error is of type [*ExitError]. Other error types may be
+ * returned for I/O problems.
+ *
+ * If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits
+ * for the respective I/O loop copying to or from the process to complete.
+ *
+ * Wait releases any resources associated with the [Cmd].
*/
- hidden: boolean
+ wait(): void
+ }
+ interface Cmd {
/**
- * SilenceErrors is an option to quiet errors down stream.
+ * Output runs the command and returns its standard output.
+ * Any returned error will usually be of type [*ExitError].
+ * If c.Stderr was nil, Output populates [ExitError.Stderr].
*/
- silenceErrors: boolean
+ output(): string|Array
+ }
+ interface Cmd {
/**
- * SilenceUsage is an option to silence usage when an error occurs.
+ * CombinedOutput runs the command and returns its combined standard
+ * output and standard error.
*/
- silenceUsage: boolean
+ combinedOutput(): string|Array
+ }
+ interface Cmd {
/**
- * DisableFlagParsing disables the flag parsing.
- * If this is true all flags will be passed to the command as arguments.
+ * StdinPipe returns a pipe that will be connected to the command's
+ * standard input when the command starts.
+ * The pipe will be closed automatically after [Cmd.Wait] sees the command exit.
+ * A caller need only call Close to force the pipe to close sooner.
+ * For example, if the command being run will not exit until standard input
+ * is closed, the caller must close the pipe.
*/
- disableFlagParsing: boolean
+ stdinPipe(): io.WriteCloser
+ }
+ interface Cmd {
/**
- * DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
- * will be printed by generating docs for this command.
+ * StdoutPipe returns a pipe that will be connected to the command's
+ * standard output when the command starts.
+ *
+ * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers
+ * need not close the pipe themselves. It is thus incorrect to call Wait
+ * before all reads from the pipe have completed.
+ * For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe.
+ * See the example for idiomatic usage.
*/
- disableAutoGenTag: boolean
+ stdoutPipe(): io.ReadCloser
+ }
+ interface Cmd {
/**
- * DisableFlagsInUseLine will disable the addition of [flags] to the usage
- * line of a command when printing help or generating docs
+ * StderrPipe returns a pipe that will be connected to the command's
+ * standard error when the command starts.
+ *
+ * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers
+ * need not close the pipe themselves. It is thus incorrect to call Wait
+ * before all reads from the pipe have completed.
+ * For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe.
+ * See the StdoutPipe example for idiomatic usage.
*/
- disableFlagsInUseLine: boolean
+ stderrPipe(): io.ReadCloser
+ }
+ interface Cmd {
/**
- * DisableSuggestions disables the suggestions based on Levenshtein distance
- * that go along with 'unknown command' messages.
+ * Environ returns a copy of the environment in which the command would be run
+ * as it is currently configured.
*/
- disableSuggestions: boolean
+ environ(): Array
+ }
+}
+
+namespace mailer {
+ /**
+ * Message defines a generic email message struct.
+ */
+ interface Message {
+ from: { address: string; name?: string; }
+ to: Array<{ address: string; name?: string; }>
+ bcc: Array<{ address: string; name?: string; }>
+ cc: Array<{ address: string; name?: string; }>
+ subject: string
+ html: string
+ text: string
+ headers: _TygojaDict
+ attachments: _TygojaDict
+ inlineAttachments: _TygojaDict
+ }
+ /**
+ * Mailer defines a base mail client interface.
+ */
+ interface Mailer {
+ [key:string]: any;
/**
- * SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
- * Must be > 0.
+ * Send sends an email with the provided Message.
*/
- suggestionsMinimumDistance: number
+ send(message: Message): void
}
- interface Command {
+}
+
+namespace auth {
+ /**
+ * Provider defines a common interface for an OAuth2 client.
+ */
+ interface Provider {
+ [key:string]: any;
/**
- * Context returns underlying command context. If command was executed
- * with ExecuteContext or the context was set with SetContext, the
- * previously set context will be returned. Otherwise, nil is returned.
- *
- * Notice that a call to Execute and ExecuteC will replace a nil context of
- * a command with a context.Background, so a background context will be
- * returned by Context after one of these functions has been called.
+ * Context returns the context associated with the provider (if any).
*/
context(): context.Context
- }
- interface Command {
/**
- * SetContext sets context for the command. This context will be overwritten by
- * Command.ExecuteContext or Command.ExecuteContextC.
+ * SetContext assigns the specified context to the current provider.
*/
setContext(ctx: context.Context): void
- }
- interface Command {
/**
- * SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
- * particularly useful when testing.
+ * PKCE indicates whether the provider can use the PKCE flow.
*/
- setArgs(a: Array): void
- }
- interface Command {
+ pkce(): boolean
/**
- * SetOutput sets the destination for usage and error messages.
- * If output is nil, os.Stderr is used.
- *
- * Deprecated: Use SetOut and/or SetErr instead
+ * SetPKCE toggles the state whether the provider can use the PKCE flow or not.
*/
- setOutput(output: io.Writer): void
- }
- interface Command {
+ setPKCE(enable: boolean): void
/**
- * SetOut sets the destination for usage messages.
- * If newOut is nil, os.Stdout is used.
+ * DisplayName usually returns provider name as it is officially written
+ * and it could be used directly in the UI.
*/
- setOut(newOut: io.Writer): void
- }
- interface Command {
+ displayName(): string
/**
- * SetErr sets the destination for error messages.
- * If newErr is nil, os.Stderr is used.
+ * SetDisplayName sets the provider's display name.
*/
- setErr(newErr: io.Writer): void
- }
- interface Command {
+ setDisplayName(displayName: string): void
/**
- * SetIn sets the source for input data
- * If newIn is nil, os.Stdin is used.
+ * Scopes returns the provider access permissions that will be requested.
*/
- setIn(newIn: io.Reader): void
- }
- interface Command {
+ scopes(): Array
/**
- * SetUsageFunc sets usage function. Usage can be defined by application.
+ * SetScopes sets the provider access permissions that will be requested later.
*/
- setUsageFunc(f: (_arg0: Command) => void): void
- }
- interface Command {
+ setScopes(scopes: Array): void
/**
- * SetUsageTemplate sets usage template. Can be defined by Application.
+ * ClientId returns the provider client's app ID.
*/
- setUsageTemplate(s: string): void
- }
- interface Command {
+ clientId(): string
/**
- * SetFlagErrorFunc sets a function to generate an error when flag parsing
- * fails.
+ * SetClientId sets the provider client's ID.
*/
- setFlagErrorFunc(f: (_arg0: Command, _arg1: Error) => void): void
- }
- interface Command {
+ setClientId(clientId: string): void
/**
- * SetHelpFunc sets help function. Can be defined by Application.
+ * ClientSecret returns the provider client's app secret.
*/
- setHelpFunc(f: (_arg0: Command, _arg1: Array) => void): void
- }
- interface Command {
+ clientSecret(): string
/**
- * SetHelpCommand sets help command.
+ * SetClientSecret sets the provider client's app secret.
*/
- setHelpCommand(cmd: Command): void
- }
- interface Command {
+ setClientSecret(secret: string): void
/**
- * SetHelpCommandGroupID sets the group id of the help command.
+ * RedirectURL returns the end address to redirect the user
+ * going through the OAuth flow.
*/
- setHelpCommandGroupID(groupID: string): void
- }
- interface Command {
+ redirectURL(): string
/**
- * SetCompletionCommandGroupID sets the group id of the completion command.
+ * SetRedirectURL sets the provider's RedirectURL.
*/
- setCompletionCommandGroupID(groupID: string): void
- }
- interface Command {
+ setRedirectURL(url: string): void
/**
- * SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+ * AuthURL returns the provider's authorization service url.
*/
- setHelpTemplate(s: string): void
- }
- interface Command {
+ authURL(): string
/**
- * SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+ * SetAuthURL sets the provider's AuthURL.
*/
- setVersionTemplate(s: string): void
- }
- interface Command {
+ setAuthURL(url: string): void
/**
- * SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix.
+ * TokenURL returns the provider's token exchange service url.
*/
- setErrPrefix(s: string): void
- }
- interface Command {
+ tokenURL(): string
/**
- * SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
- * The user should not have a cyclic dependency on commands.
+ * SetTokenURL sets the provider's TokenURL.
*/
- setGlobalNormalizationFunc(n: (f: any, name: string) => any): void
- }
- interface Command {
+ setTokenURL(url: string): void
/**
- * OutOrStdout returns output to stdout.
+ * UserInfoURL returns the provider's user info api url.
*/
- outOrStdout(): io.Writer
- }
- interface Command {
+ userInfoURL(): string
/**
- * OutOrStderr returns output to stderr
+ * SetUserInfoURL sets the provider's UserInfoURL.
*/
- outOrStderr(): io.Writer
- }
- interface Command {
+ setUserInfoURL(url: string): void
/**
- * ErrOrStderr returns output to stderr
+ * Extra returns a shallow copy of any custom config data
+ * that the provider may need.
*/
- errOrStderr(): io.Writer
- }
- interface Command {
+ extra(): _TygojaDict
/**
- * InOrStdin returns input to stdin
+ * SetExtra updates the provider's custom config data.
*/
- inOrStdin(): io.Reader
- }
- interface Command {
+ setExtra(data: _TygojaDict): void
/**
- * UsageFunc returns either the function set by SetUsageFunc for this command
- * or a parent, or it returns a default usage function.
+ * Client returns an http client using the provided token.
*/
- usageFunc(): (_arg0: Command) => void
- }
- interface Command {
+ client(token: oauth2.Token): (any)
/**
- * Usage puts out the usage for the command.
- * Used when a user provides invalid input.
- * Can be defined by user by overriding UsageFunc.
+ * BuildAuthURL returns a URL to the provider's consent page
+ * that asks for permissions for the required scopes explicitly.
*/
- usage(): void
- }
- interface Command {
+ buildAuthURL(state: string, ...opts: oauth2.AuthCodeOption[]): string
/**
- * HelpFunc returns either the function set by SetHelpFunc for this command
- * or a parent, or it returns a function with default help behavior.
+ * FetchToken converts an authorization code to token.
*/
- helpFunc(): (_arg0: Command, _arg1: Array) => void
- }
- interface Command {
+ fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token)
/**
- * Help puts out the help for the command.
- * Used when a user calls help [command].
- * Can be defined by user by overriding HelpFunc.
+ * FetchRawUserInfo requests and marshalizes into `result` the
+ * OAuth user api response.
*/
- help(): void
- }
- interface Command {
+ fetchRawUserInfo(token: oauth2.Token): string|Array
/**
- * UsageString returns usage string.
+ * FetchAuthUser is similar to FetchRawUserInfo, but normalizes and
+ * marshalizes the user api response into a standardized AuthUser struct.
*/
- usageString(): string
+ fetchAuthUser(token: oauth2.Token): (AuthUser)
}
- interface Command {
+ /**
+ * AuthUser defines a standardized OAuth2 user data structure.
+ */
+ interface AuthUser {
+ expiry: types.DateTime
+ rawUser: _TygojaDict
+ id: string
+ name: string
+ username: string
+ email: string
+ avatarURL: string
+ accessToken: string
+ refreshToken: string
/**
- * FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
- * command or a parent, or it returns a function which returns the original
- * error.
+ * @todo
+ * deprecated: use AvatarURL instead
+ * AvatarUrl will be removed after dropping v0.22 support
*/
- flagErrorFunc(): (_arg0: Command, _arg1: Error) => void
+ avatarUrl: string
}
- interface Command {
+ interface AuthUser {
/**
- * UsagePadding return padding for the usage.
+ * MarshalJSON implements the [json.Marshaler] interface.
+ *
+ * @todo remove after dropping v0.22 support
*/
- usagePadding(): number
+ marshalJSON(): string|Array
}
+}
+
+/**
+ * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+ * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+ */
+namespace cobra {
interface Command {
/**
- * CommandPathPadding return padding for the command path.
+ * GenBashCompletion generates bash completion file and writes to the passed writer.
*/
- commandPathPadding(): number
+ genBashCompletion(w: io.Writer): void
}
interface Command {
/**
- * NamePadding returns padding for the name.
+ * GenBashCompletionFile generates bash completion file.
*/
- namePadding(): number
+ genBashCompletionFile(filename: string): void
}
interface Command {
/**
- * UsageTemplate returns usage template for the command.
- * This function is kept for backwards-compatibility reasons.
+ * GenBashCompletionFileV2 generates Bash completion version 2.
*/
- usageTemplate(): string
+ genBashCompletionFileV2(filename: string, includeDesc: boolean): void
}
interface Command {
/**
- * HelpTemplate return help template for the command.
- * This function is kept for backwards-compatibility reasons.
+ * GenBashCompletionV2 generates Bash completion file version 2
+ * and writes it to the passed writer.
*/
- helpTemplate(): string
+ genBashCompletionV2(w: io.Writer, includeDesc: boolean): void
}
+ // @ts-ignore
+ import flag = pflag
+ /**
+ * Command is just that, a command for your application.
+ * E.g. 'go run ...' - 'run' is the command. Cobra requires
+ * you to define the usage and description as part of your command
+ * definition to ensure usability.
+ */
interface Command {
/**
- * VersionTemplate return version template for the command.
- * This function is kept for backwards-compatibility reasons.
+ * Use is the one-line usage message.
+ * Recommended syntax is as follows:
+ * ```
+ * [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
+ * ... indicates that you can specify multiple values for the previous argument.
+ * | indicates mutually exclusive information. You can use the argument to the left of the separator or the
+ * argument to the right of the separator. You cannot use both arguments in a single use of the command.
+ * { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
+ * optional, they are enclosed in brackets ([ ]).
+ * ```
+ * Example: add [-F file | -D dir]... [-f format] profile
*/
- versionTemplate(): string
- }
- interface Command {
+ use: string
/**
- * ErrPrefix return error message prefix for the command
+ * Aliases is an array of aliases that can be used instead of the first word in Use.
*/
- errPrefix(): string
- }
- interface Command {
+ aliases: Array
/**
- * Find the target command given the args and command tree
- * Meant to be run on the highest node. Only searches down.
+ * SuggestFor is an array of command names for which this command will be suggested -
+ * similar to aliases but only suggests.
*/
- find(args: Array): [(Command), Array]
- }
- interface Command {
+ suggestFor: Array
/**
- * Traverse the command tree to find the command, and parse args for
- * each parent.
+ * Short is the short description shown in the 'help' output.
*/
- traverse(args: Array): [(Command), Array]
- }
- interface Command {
+ short: string
/**
- * SuggestionsFor provides suggestions for the typedName.
+ * The group id under which this subcommand is grouped in the 'help' output of its parent.
*/
- suggestionsFor(typedName: string): Array
- }
- interface Command {
+ groupID: string
/**
- * VisitParents visits all parents of the command and invokes fn on each parent.
+ * Long is the long message shown in the 'help <this-command>' output.
*/
- visitParents(fn: (_arg0: Command) => void): void
- }
- interface Command {
+ long: string
/**
- * Root finds root command.
+ * Example is examples of how to use the command.
*/
- root(): (Command)
- }
- interface Command {
+ example: string
/**
- * ArgsLenAtDash will return the length of c.Flags().Args at the moment
- * when a -- was found during args parsing.
+ * ValidArgs is list of all valid non-flag arguments that are accepted in shell completions
*/
- argsLenAtDash(): number
- }
- interface Command {
+ validArgs: Array
/**
- * ExecuteContext is the same as Execute(), but sets the ctx on the command.
- * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
- * functions.
+ * ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
+ * It is a dynamic version of using ValidArgs.
+ * Only one of ValidArgs and ValidArgsFunction can be used for a command.
*/
- executeContext(ctx: context.Context): void
- }
- interface Command {
+ validArgsFunction: CompletionFunc
/**
- * Execute uses the args (os.Args[1:] by default)
- * and run through the command tree finding appropriate matches
- * for commands and then corresponding flags.
+ * Expected arguments
*/
- execute(): void
- }
- interface Command {
+ args: PositionalArgs
/**
- * ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
- * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
- * functions.
+ * ArgAliases is List of aliases for ValidArgs.
+ * These are not suggested to the user in the shell completion,
+ * but accepted if entered manually.
*/
- executeContextC(ctx: context.Context): (Command)
- }
- interface Command {
+ argAliases: Array
/**
- * ExecuteC executes the command.
+ * BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator.
+ * For portability with other shells, it is recommended to instead use ValidArgsFunction
*/
- executeC(): (Command)
- }
- interface Command {
- validateArgs(args: Array): void
- }
- interface Command {
+ bashCompletionFunction: string
/**
- * ValidateRequiredFlags validates all required flags are present and returns an error otherwise
+ * Deprecated defines, if this command is deprecated and should print this string when used.
*/
- validateRequiredFlags(): void
- }
- interface Command {
+ deprecated: string
/**
- * InitDefaultHelpFlag adds default help flag to c.
- * It is called automatically by executing the c or by calling help and usage.
- * If c already has help flag, it will do nothing.
+ * Annotations are key/value pairs that can be used by applications to identify or
+ * group commands or set special options.
*/
- initDefaultHelpFlag(): void
- }
- interface Command {
+ annotations: _TygojaDict
/**
- * InitDefaultVersionFlag adds default version flag to c.
- * It is called automatically by executing the c.
- * If c already has a version flag, it will do nothing.
- * If c.Version is empty, it will do nothing.
+ * Version defines the version for this command. If this value is non-empty and the command does not
+ * define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ * will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ * command does not define one.
*/
- initDefaultVersionFlag(): void
- }
- interface Command {
+ version: string
/**
- * InitDefaultHelpCmd adds default help command to c.
- * It is called automatically by executing the c or by calling help and usage.
- * If c already has help command or c has no subcommands, it will do nothing.
+ * The *Run functions are executed in the following order:
+ * ```
+ * * PersistentPreRun()
+ * * PreRun()
+ * * Run()
+ * * PostRun()
+ * * PersistentPostRun()
+ * ```
+ * All functions get the same args, the arguments after the command name.
+ * The *PreRun and *PostRun functions will only be executed if the Run function of the current
+ * command has been declared.
+ *
+ * PersistentPreRun: children of this command will inherit and execute.
*/
- initDefaultHelpCmd(): void
- }
- interface Command {
+ persistentPreRun: (cmd: Command, args: Array) => void
/**
- * ResetCommands delete parent, subcommand and help command from c.
+ * PersistentPreRunE: PersistentPreRun but returns an error.
*/
- resetCommands(): void
- }
- interface Command {
+ persistentPreRunE: (cmd: Command, args: Array) => void
/**
- * Commands returns a sorted slice of child commands.
+ * PreRun: children of this command will not inherit.
*/
- commands(): Array<(Command | undefined)>
- }
- interface Command {
+ preRun: (cmd: Command, args: Array) => void
/**
- * AddCommand adds one or more commands to this parent command.
+ * PreRunE: PreRun but returns an error.
*/
- addCommand(...cmds: (Command | undefined)[]): void
- }
- interface Command {
+ preRunE: (cmd: Command, args: Array) => void
/**
- * Groups returns a slice of child command groups.
+ * Run: Typically the actual work function. Most commands will only implement this.
*/
- groups(): Array<(Group | undefined)>
- }
- interface Command {
+ run: (cmd: Command, args: Array) => void
/**
- * AllChildCommandsHaveGroup returns if all subcommands are assigned to a group
+ * RunE: Run but returns an error.
*/
- allChildCommandsHaveGroup(): boolean
- }
- interface Command {
+ runE: (cmd: Command, args: Array) => void
/**
- * ContainsGroup return if groupID exists in the list of command groups.
+ * PostRun: run after the Run command.
*/
- containsGroup(groupID: string): boolean
- }
- interface Command {
+ postRun: (cmd: Command, args: Array) => void
/**
- * AddGroup adds one or more command groups to this parent command.
+ * PostRunE: PostRun but returns an error.
*/
- addGroup(...groups: (Group | undefined)[]): void
- }
- interface Command {
+ postRunE: (cmd: Command, args: Array) => void
/**
- * RemoveCommand removes one or more commands from a parent command.
+ * PersistentPostRun: children of this command will inherit and execute after PostRun.
*/
- removeCommand(...cmds: (Command | undefined)[]): void
- }
- interface Command {
+ persistentPostRun: (cmd: Command, args: Array) => void
/**
- * Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+ * PersistentPostRunE: PersistentPostRun but returns an error.
*/
- print(...i: {
- }[]): void
- }
- interface Command {
+ persistentPostRunE: (cmd: Command, args: Array) => void
/**
- * Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+ * FParseErrWhitelist flag parse errors to be ignored
*/
- println(...i: {
- }[]): void
- }
- interface Command {
+ fParseErrWhitelist: FParseErrWhitelist
/**
- * Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+ * CompletionOptions is a set of options to control the handling of shell completion
*/
- printf(format: string, ...i: {
- }[]): void
- }
- interface Command {
+ completionOptions: CompletionOptions
/**
- * PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
+ * TraverseChildren parses flags on all parents before executing child command.
*/
- printErr(...i: {
- }[]): void
- }
- interface Command {
+ traverseChildren: boolean
/**
- * PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
+ * Hidden defines, if this command is hidden and should NOT show up in the list of available commands.
*/
- printErrln(...i: {
- }[]): void
- }
- interface Command {
+ hidden: boolean
/**
- * PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
+ * SilenceErrors is an option to quiet errors down stream.
*/
- printErrf(format: string, ...i: {
- }[]): void
- }
- interface Command {
+ silenceErrors: boolean
/**
- * CommandPath returns the full path to this command.
+ * SilenceUsage is an option to silence usage when an error occurs.
*/
- commandPath(): string
- }
- interface Command {
+ silenceUsage: boolean
/**
- * DisplayName returns the name to display in help text. Returns command Name()
- * If CommandDisplayNameAnnoation is not set
+ * DisableFlagParsing disables the flag parsing.
+ * If this is true all flags will be passed to the command as arguments.
*/
- displayName(): string
- }
- interface Command {
+ disableFlagParsing: boolean
/**
- * UseLine puts out the full usage for a given command (including parents).
+ * DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
+ * will be printed by generating docs for this command.
*/
- useLine(): string
- }
- interface Command {
+ disableAutoGenTag: boolean
/**
- * DebugFlags used to determine which flags have been assigned to which commands
- * and which persist.
+ * DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ * line of a command when printing help or generating docs
*/
- debugFlags(): void
- }
- interface Command {
+ disableFlagsInUseLine: boolean
/**
- * Name returns the command's name: the first word in the use line.
+ * DisableSuggestions disables the suggestions based on Levenshtein distance
+ * that go along with 'unknown command' messages.
*/
- name(): string
- }
- interface Command {
+ disableSuggestions: boolean
/**
- * HasAlias determines if a given string is an alias of the command.
+ * SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+ * Must be > 0.
*/
- hasAlias(s: string): boolean
+ suggestionsMinimumDistance: number
}
interface Command {
/**
- * CalledAs returns the command name or alias that was used to invoke
- * this command or an empty string if the command has not been called.
+ * Context returns underlying command context. If command was executed
+ * with ExecuteContext or the context was set with SetContext, the
+ * previously set context will be returned. Otherwise, nil is returned.
+ *
+ * Notice that a call to Execute and ExecuteC will replace a nil context of
+ * a command with a context.Background, so a background context will be
+ * returned by Context after one of these functions has been called.
*/
- calledAs(): string
+ context(): context.Context
}
interface Command {
/**
- * NameAndAliases returns a list of the command name and all aliases
+ * SetContext sets context for the command. This context will be overwritten by
+ * Command.ExecuteContext or Command.ExecuteContextC.
*/
- nameAndAliases(): string
+ setContext(ctx: context.Context): void
}
interface Command {
/**
- * HasExample determines if the command has example.
+ * SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
+ * particularly useful when testing.
*/
- hasExample(): boolean
+ setArgs(a: Array): void
}
interface Command {
/**
- * Runnable determines if the command is itself runnable.
+ * SetOutput sets the destination for usage and error messages.
+ * If output is nil, os.Stderr is used.
+ *
+ * Deprecated: Use SetOut and/or SetErr instead
*/
- runnable(): boolean
+ setOutput(output: io.Writer): void
}
interface Command {
/**
- * HasSubCommands determines if the command has children commands.
+ * SetOut sets the destination for usage messages.
+ * If newOut is nil, os.Stdout is used.
*/
- hasSubCommands(): boolean
+ setOut(newOut: io.Writer): void
}
interface Command {
/**
- * IsAvailableCommand determines if a command is available as a non-help command
- * (this includes all non deprecated/hidden commands).
+ * SetErr sets the destination for error messages.
+ * If newErr is nil, os.Stderr is used.
*/
- isAvailableCommand(): boolean
+ setErr(newErr: io.Writer): void
}
interface Command {
/**
- * IsAdditionalHelpTopicCommand determines if a command is an additional
- * help topic command; additional help topic command is determined by the
- * fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
- * are runnable/hidden/deprecated.
- * Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+ * SetIn sets the source for input data
+ * If newIn is nil, os.Stdin is used.
*/
- isAdditionalHelpTopicCommand(): boolean
+ setIn(newIn: io.Reader): void
}
interface Command {
/**
- * HasHelpSubCommands determines if a command has any available 'help' sub commands
- * that need to be shown in the usage/help default template under 'additional help
- * topics'.
+ * SetUsageFunc sets usage function. Usage can be defined by application.
*/
- hasHelpSubCommands(): boolean
+ setUsageFunc(f: (_arg0: Command) => void): void
}
interface Command {
/**
- * HasAvailableSubCommands determines if a command has available sub commands that
- * need to be shown in the usage/help default template under 'available commands'.
+ * SetUsageTemplate sets usage template. Can be defined by Application.
*/
- hasAvailableSubCommands(): boolean
+ setUsageTemplate(s: string): void
}
interface Command {
/**
- * HasParent determines if the command is a child command.
+ * SetFlagErrorFunc sets a function to generate an error when flag parsing
+ * fails.
*/
- hasParent(): boolean
+ setFlagErrorFunc(f: (_arg0: Command, _arg1: Error) => void): void
}
interface Command {
/**
- * GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+ * SetHelpFunc sets help function. Can be defined by Application.
*/
- globalNormalizationFunc(): (f: any, name: string) => any
+ setHelpFunc(f: (_arg0: Command, _arg1: Array) => void): void
}
interface Command {
/**
- * Flags returns the complete FlagSet that applies
- * to this command (local and persistent declared here and by all parents).
+ * SetHelpCommand sets help command.
*/
- flags(): (any)
+ setHelpCommand(cmd: Command): void
}
interface Command {
/**
- * LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
- * This function does not modify the flags of the current command, it's purpose is to return the current state.
+ * SetHelpCommandGroupID sets the group id of the help command.
*/
- localNonPersistentFlags(): (any)
+ setHelpCommandGroupID(groupID: string): void
}
interface Command {
/**
- * LocalFlags returns the local FlagSet specifically set in the current command.
- * This function does not modify the flags of the current command, it's purpose is to return the current state.
+ * SetCompletionCommandGroupID sets the group id of the completion command.
*/
- localFlags(): (any)
+ setCompletionCommandGroupID(groupID: string): void
}
interface Command {
/**
- * InheritedFlags returns all flags which were inherited from parent commands.
- * This function does not modify the flags of the current command, it's purpose is to return the current state.
+ * SetHelpTemplate sets help template to be used. Application can use it to set custom template.
*/
- inheritedFlags(): (any)
+ setHelpTemplate(s: string): void
}
interface Command {
/**
- * NonInheritedFlags returns all flags which were not inherited from parent commands.
- * This function does not modify the flags of the current command, it's purpose is to return the current state.
+ * SetVersionTemplate sets version template to be used. Application can use it to set custom template.
*/
- nonInheritedFlags(): (any)
+ setVersionTemplate(s: string): void
}
interface Command {
/**
- * PersistentFlags returns the persistent FlagSet specifically set in the current command.
+ * SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix.
*/
- persistentFlags(): (any)
+ setErrPrefix(s: string): void
}
interface Command {
/**
- * ResetFlags deletes all flags from command.
+ * SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+ * The user should not have a cyclic dependency on commands.
*/
- resetFlags(): void
+ setGlobalNormalizationFunc(n: (f: any, name: string) => any): void
}
interface Command {
/**
- * HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+ * OutOrStdout returns output to stdout.
*/
- hasFlags(): boolean
+ outOrStdout(): io.Writer
}
interface Command {
/**
- * HasPersistentFlags checks if the command contains persistent flags.
+ * OutOrStderr returns output to stderr
*/
- hasPersistentFlags(): boolean
+ outOrStderr(): io.Writer
}
interface Command {
/**
- * HasLocalFlags checks if the command has flags specifically declared locally.
+ * ErrOrStderr returns output to stderr
*/
- hasLocalFlags(): boolean
+ errOrStderr(): io.Writer
}
interface Command {
/**
- * HasInheritedFlags checks if the command has flags inherited from its parent command.
+ * InOrStdin returns input to stdin
*/
- hasInheritedFlags(): boolean
+ inOrStdin(): io.Reader
}
interface Command {
/**
- * HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
- * structure) which are not hidden or deprecated.
+ * UsageFunc returns either the function set by SetUsageFunc for this command
+ * or a parent, or it returns a default usage function.
*/
- hasAvailableFlags(): boolean
+ usageFunc(): (_arg0: Command) => void
}
interface Command {
/**
- * HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+ * Usage puts out the usage for the command.
+ * Used when a user provides invalid input.
+ * Can be defined by user by overriding UsageFunc.
*/
- hasAvailablePersistentFlags(): boolean
+ usage(): void
}
interface Command {
/**
- * HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
- * or deprecated.
+ * HelpFunc returns either the function set by SetHelpFunc for this command
+ * or a parent, or it returns a function with default help behavior.
*/
- hasAvailableLocalFlags(): boolean
+ helpFunc(): (_arg0: Command, _arg1: Array) => void
}
interface Command {
/**
- * HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
- * not hidden or deprecated.
+ * Help puts out the help for the command.
+ * Used when a user calls help [command].
+ * Can be defined by user by overriding HelpFunc.
*/
- hasAvailableInheritedFlags(): boolean
+ help(): void
}
interface Command {
/**
- * Flag climbs up the command tree looking for matching flag.
+ * UsageString returns usage string.
*/
- flag(name: string): (any)
+ usageString(): string
}
interface Command {
/**
- * ParseFlags parses persistent flag tree and local flags.
+ * FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+ * command or a parent, or it returns a function which returns the original
+ * error.
*/
- parseFlags(args: Array): void
+ flagErrorFunc(): (_arg0: Command, _arg1: Error) => void
}
interface Command {
/**
- * Parent returns a commands parent command.
+ * UsagePadding return padding for the usage.
*/
- parent(): (Command)
+ usagePadding(): number
}
interface Command {
/**
- * RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
- *
- * You can use pre-defined completion functions such as [FixedCompletions] or [NoFileCompletions],
- * or you can define your own.
+ * CommandPathPadding return padding for the command path.
*/
- registerFlagCompletionFunc(flagName: string, f: CompletionFunc): void
+ commandPathPadding(): number
}
interface Command {
/**
- * GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
+ * NamePadding returns padding for the name.
*/
- getFlagCompletionFunc(flagName: string): [CompletionFunc, boolean]
+ namePadding(): number
}
interface Command {
/**
- * InitDefaultCompletionCmd adds a default 'completion' command to c.
- * This function will do nothing if any of the following is true:
- * 1- the feature has been explicitly disabled by the program,
- * 2- c has no subcommands (to avoid creating one),
- * 3- c already has a 'completion' command provided by the program.
+ * UsageTemplate returns usage template for the command.
+ * This function is kept for backwards-compatibility reasons.
*/
- initDefaultCompletionCmd(...args: string[]): void
+ usageTemplate(): string
}
interface Command {
/**
- * GenFishCompletion generates fish completion file and writes to the passed writer.
+ * HelpTemplate return help template for the command.
+ * This function is kept for backwards-compatibility reasons.
*/
- genFishCompletion(w: io.Writer, includeDesc: boolean): void
+ helpTemplate(): string
}
interface Command {
/**
- * GenFishCompletionFile generates fish completion file.
+ * VersionTemplate return version template for the command.
+ * This function is kept for backwards-compatibility reasons.
*/
- genFishCompletionFile(filename: string, includeDesc: boolean): void
+ versionTemplate(): string
}
interface Command {
/**
- * MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
- * if the command is invoked with a subset (but not all) of the given flags.
+ * ErrPrefix return error message prefix for the command
*/
- markFlagsRequiredTogether(...flagNames: string[]): void
+ errPrefix(): string
}
interface Command {
/**
- * MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors
- * if the command is invoked without at least one flag from the given set of flags.
+ * Find the target command given the args and command tree
+ * Meant to be run on the highest node. Only searches down.
*/
- markFlagsOneRequired(...flagNames: string[]): void
+ find(args: Array): [(Command), Array]
}
interface Command {
/**
- * MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
- * if the command is invoked with more than one flag from the given set of flags.
+ * Traverse the command tree to find the command, and parse args for
+ * each parent.
*/
- markFlagsMutuallyExclusive(...flagNames: string[]): void
+ traverse(args: Array): [(Command), Array]
}
interface Command {
/**
- * ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the
- * first error encountered.
+ * SuggestionsFor provides suggestions for the typedName.
*/
- validateFlagGroups(): void
+ suggestionsFor(typedName: string): Array
}
interface Command {
/**
- * GenPowerShellCompletionFile generates powershell completion file without descriptions.
+ * VisitParents visits all parents of the command and invokes fn on each parent.
*/
- genPowerShellCompletionFile(filename: string): void
+ visitParents(fn: (_arg0: Command) => void): void
}
interface Command {
/**
- * GenPowerShellCompletion generates powershell completion file without descriptions
- * and writes it to the passed writer.
+ * Root finds root command.
*/
- genPowerShellCompletion(w: io.Writer): void
+ root(): (Command)
}
interface Command {
/**
- * GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
+ * ArgsLenAtDash will return the length of c.Flags().Args at the moment
+ * when a -- was found during args parsing.
*/
- genPowerShellCompletionFileWithDesc(filename: string): void
+ argsLenAtDash(): number
}
interface Command {
/**
- * GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
- * and writes it to the passed writer.
+ * ExecuteContext is the same as Execute(), but sets the ctx on the command.
+ * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+ * functions.
*/
- genPowerShellCompletionWithDesc(w: io.Writer): void
+ executeContext(ctx: context.Context): void
}
interface Command {
/**
- * MarkFlagRequired instructs the various shell completion implementations to
- * prioritize the named flag when performing completion,
- * and causes your command to report an error if invoked without the flag.
+ * Execute uses the args (os.Args[1:] by default)
+ * and run through the command tree finding appropriate matches
+ * for commands and then corresponding flags.
*/
- markFlagRequired(name: string): void
+ execute(): void
}
interface Command {
/**
- * MarkPersistentFlagRequired instructs the various shell completion implementations to
- * prioritize the named persistent flag when performing completion,
- * and causes your command to report an error if invoked without the flag.
+ * ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
+ * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+ * functions.
*/
- markPersistentFlagRequired(name: string): void
+ executeContextC(ctx: context.Context): (Command)
}
interface Command {
/**
- * MarkFlagFilename instructs the various shell completion implementations to
- * limit completions for the named flag to the specified file extensions.
+ * ExecuteC executes the command.
*/
- markFlagFilename(name: string, ...extensions: string[]): void
+ executeC(): (Command)
}
interface Command {
- /**
- * MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
- * The bash completion script will call the bash function f for the flag.
- *
- * This will only work for bash completion.
- * It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
- * to register a Go function which will work across all shells.
- */
- markFlagCustom(name: string, f: string): void
+ validateArgs(args: Array): void
}
interface Command {
/**
- * MarkPersistentFlagFilename instructs the various shell completion
- * implementations to limit completions for the named persistent flag to the
- * specified file extensions.
+ * ValidateRequiredFlags validates all required flags are present and returns an error otherwise
*/
- markPersistentFlagFilename(name: string, ...extensions: string[]): void
+ validateRequiredFlags(): void
}
interface Command {
/**
- * MarkFlagDirname instructs the various shell completion implementations to
- * limit completions for the named flag to directory names.
+ * InitDefaultHelpFlag adds default help flag to c.
+ * It is called automatically by executing the c or by calling help and usage.
+ * If c already has help flag, it will do nothing.
*/
- markFlagDirname(name: string): void
+ initDefaultHelpFlag(): void
}
interface Command {
/**
- * MarkPersistentFlagDirname instructs the various shell completion
- * implementations to limit completions for the named persistent flag to
- * directory names.
+ * InitDefaultVersionFlag adds default version flag to c.
+ * It is called automatically by executing the c.
+ * If c already has a version flag, it will do nothing.
+ * If c.Version is empty, it will do nothing.
*/
- markPersistentFlagDirname(name: string): void
+ initDefaultVersionFlag(): void
}
interface Command {
/**
- * GenZshCompletionFile generates zsh completion file including descriptions.
+ * InitDefaultHelpCmd adds default help command to c.
+ * It is called automatically by executing the c or by calling help and usage.
+ * If c already has help command or c has no subcommands, it will do nothing.
*/
- genZshCompletionFile(filename: string): void
+ initDefaultHelpCmd(): void
}
interface Command {
/**
- * GenZshCompletion generates zsh completion file including descriptions
- * and writes it to the passed writer.
+ * ResetCommands delete parent, subcommand and help command from c.
*/
- genZshCompletion(w: io.Writer): void
+ resetCommands(): void
}
interface Command {
/**
- * GenZshCompletionFileNoDesc generates zsh completion file without descriptions.
+ * Commands returns a sorted slice of child commands.
*/
- genZshCompletionFileNoDesc(filename: string): void
+ commands(): Array<(Command | undefined)>
}
interface Command {
/**
- * GenZshCompletionNoDesc generates zsh completion file without descriptions
- * and writes it to the passed writer.
+ * AddCommand adds one or more commands to this parent command.
*/
- genZshCompletionNoDesc(w: io.Writer): void
+ addCommand(...cmds: (Command | undefined)[]): void
}
interface Command {
/**
- * MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
- * not consistent with Bash completion. It has therefore been disabled.
- * Instead, when no other completion is specified, file completion is done by
- * default for every argument. One can disable file completion on a per-argument
- * basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
- * To achieve file extension filtering, one can use ValidArgsFunction and
- * ShellCompDirectiveFilterFileExt.
- *
- * Deprecated
+ * Groups returns a slice of child command groups.
*/
- markZshCompPositionalArgumentFile(argPosition: number, ...patterns: string[]): void
+ groups(): Array<(Group | undefined)>
}
interface Command {
/**
- * MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
- * been disabled.
- * To achieve the same behavior across all shells, one can use
- * ValidArgs (for the first argument only) or ValidArgsFunction for
- * any argument (can include the first one also).
- *
- * Deprecated
+ * AllChildCommandsHaveGroup returns if all subcommands are assigned to a group
*/
- markZshCompPositionalArgumentWords(argPosition: number, ...words: string[]): void
+ allChildCommandsHaveGroup(): boolean
}
-}
-
-namespace auth {
- /**
- * Provider defines a common interface for an OAuth2 client.
- */
- interface Provider {
- [key:string]: any;
- /**
- * Context returns the context associated with the provider (if any).
- */
- context(): context.Context
- /**
- * SetContext assigns the specified context to the current provider.
- */
- setContext(ctx: context.Context): void
- /**
- * PKCE indicates whether the provider can use the PKCE flow.
- */
- pkce(): boolean
- /**
- * SetPKCE toggles the state whether the provider can use the PKCE flow or not.
- */
- setPKCE(enable: boolean): void
- /**
- * DisplayName usually returns provider name as it is officially written
- * and it could be used directly in the UI.
- */
- displayName(): string
- /**
- * SetDisplayName sets the provider's display name.
- */
- setDisplayName(displayName: string): void
- /**
- * Scopes returns the provider access permissions that will be requested.
- */
- scopes(): Array
- /**
- * SetScopes sets the provider access permissions that will be requested later.
- */
- setScopes(scopes: Array): void
+ interface Command {
/**
- * ClientId returns the provider client's app ID.
+ * ContainsGroup return if groupID exists in the list of command groups.
*/
- clientId(): string
+ containsGroup(groupID: string): boolean
+ }
+ interface Command {
/**
- * SetClientId sets the provider client's ID.
+ * AddGroup adds one or more command groups to this parent command.
*/
- setClientId(clientId: string): void
+ addGroup(...groups: (Group | undefined)[]): void
+ }
+ interface Command {
/**
- * ClientSecret returns the provider client's app secret.
+ * RemoveCommand removes one or more commands from a parent command.
*/
- clientSecret(): string
+ removeCommand(...cmds: (Command | undefined)[]): void
+ }
+ interface Command {
/**
- * SetClientSecret sets the provider client's app secret.
+ * Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
*/
- setClientSecret(secret: string): void
+ print(...i: {
+ }[]): void
+ }
+ interface Command {
/**
- * RedirectURL returns the end address to redirect the user
- * going through the OAuth flow.
+ * Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
*/
- redirectURL(): string
+ println(...i: {
+ }[]): void
+ }
+ interface Command {
/**
- * SetRedirectURL sets the provider's RedirectURL.
+ * Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
*/
- setRedirectURL(url: string): void
+ printf(format: string, ...i: {
+ }[]): void
+ }
+ interface Command {
/**
- * AuthURL returns the provider's authorization service url.
+ * PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
*/
- authURL(): string
+ printErr(...i: {
+ }[]): void
+ }
+ interface Command {
/**
- * SetAuthURL sets the provider's AuthURL.
+ * PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
*/
- setAuthURL(url: string): void
+ printErrln(...i: {
+ }[]): void
+ }
+ interface Command {
/**
- * TokenURL returns the provider's token exchange service url.
+ * PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
*/
- tokenURL(): string
+ printErrf(format: string, ...i: {
+ }[]): void
+ }
+ interface Command {
/**
- * SetTokenURL sets the provider's TokenURL.
+ * CommandPath returns the full path to this command.
*/
- setTokenURL(url: string): void
+ commandPath(): string
+ }
+ interface Command {
/**
- * UserInfoURL returns the provider's user info api url.
+ * DisplayName returns the name to display in help text. Returns command Name()
+ * If CommandDisplayNameAnnoation is not set
*/
- userInfoURL(): string
+ displayName(): string
+ }
+ interface Command {
/**
- * SetUserInfoURL sets the provider's UserInfoURL.
+ * UseLine puts out the full usage for a given command (including parents).
*/
- setUserInfoURL(url: string): void
+ useLine(): string
+ }
+ interface Command {
/**
- * Extra returns a shallow copy of any custom config data
- * that the provider may be need.
+ * DebugFlags used to determine which flags have been assigned to which commands
+ * and which persist.
*/
- extra(): _TygojaDict
+ debugFlags(): void
+ }
+ interface Command {
/**
- * SetExtra updates the provider's custom config data.
+ * Name returns the command's name: the first word in the use line.
*/
- setExtra(data: _TygojaDict): void
+ name(): string
+ }
+ interface Command {
/**
- * Client returns an http client using the provided token.
+ * HasAlias determines if a given string is an alias of the command.
*/
- client(token: oauth2.Token): (any)
+ hasAlias(s: string): boolean
+ }
+ interface Command {
/**
- * BuildAuthURL returns a URL to the provider's consent page
- * that asks for permissions for the required scopes explicitly.
+ * CalledAs returns the command name or alias that was used to invoke
+ * this command or an empty string if the command has not been called.
*/
- buildAuthURL(state: string, ...opts: oauth2.AuthCodeOption[]): string
+ calledAs(): string
+ }
+ interface Command {
/**
- * FetchToken converts an authorization code to token.
+ * NameAndAliases returns a list of the command name and all aliases
*/
- fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token)
+ nameAndAliases(): string
+ }
+ interface Command {
/**
- * FetchRawUserInfo requests and marshalizes into `result` the
- * the OAuth user api response.
+ * HasExample determines if the command has example.
*/
- fetchRawUserInfo(token: oauth2.Token): string|Array
+ hasExample(): boolean
+ }
+ interface Command {
/**
- * FetchAuthUser is similar to FetchRawUserInfo, but normalizes and
- * marshalizes the user api response into a standardized AuthUser struct.
+ * Runnable determines if the command is itself runnable.
*/
- fetchAuthUser(token: oauth2.Token): (AuthUser)
+ runnable(): boolean
}
- /**
- * AuthUser defines a standardized OAuth2 user data structure.
- */
- interface AuthUser {
- expiry: types.DateTime
- rawUser: _TygojaDict
- id: string
- name: string
- username: string
- email: string
- avatarURL: string
- accessToken: string
- refreshToken: string
+ interface Command {
/**
- * @todo
- * deprecated: use AvatarURL instead
- * AvatarUrl will be removed after dropping v0.22 support
+ * HasSubCommands determines if the command has children commands.
*/
- avatarUrl: string
+ hasSubCommands(): boolean
}
- interface AuthUser {
+ interface Command {
/**
- * MarshalJSON implements the [json.Marshaler] interface.
- *
- * @todo remove after dropping v0.22 support
+ * IsAvailableCommand determines if a command is available as a non-help command
+ * (this includes all non deprecated/hidden commands).
*/
- marshalJSON(): string|Array
+ isAvailableCommand(): boolean
}
-}
-
-namespace router {
- // @ts-ignore
- import validation = ozzo_validation
- /**
- * ApiError defines the struct for a basic api error response.
- */
- interface ApiError {
- data: _TygojaDict
- message: string
- status: number
+ interface Command {
+ /**
+ * IsAdditionalHelpTopicCommand determines if a command is an additional
+ * help topic command; additional help topic command is determined by the
+ * fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+ * are runnable/hidden/deprecated.
+ * Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+ */
+ isAdditionalHelpTopicCommand(): boolean
}
- interface ApiError {
+ interface Command {
/**
- * Error makes it compatible with the `error` interface.
+ * HasHelpSubCommands determines if a command has any available 'help' sub commands
+ * that need to be shown in the usage/help default template under 'additional help
+ * topics'.
*/
- error(): string
+ hasHelpSubCommands(): boolean
}
- interface ApiError {
+ interface Command {
/**
- * RawData returns the unformatted error data (could be an internal error, text, etc.)
+ * HasAvailableSubCommands determines if a command has available sub commands that
+ * need to be shown in the usage/help default template under 'available commands'.
*/
- rawData(): any
+ hasAvailableSubCommands(): boolean
}
- interface ApiError {
+ interface Command {
/**
- * Is reports whether the current ApiError wraps the target.
+ * HasParent determines if the command is a child command.
*/
- is(target: Error): boolean
- }
- /**
- * Event specifies based Route handler event that is usually intended
- * to be embedded as part of a custom event struct.
- *
- * NB! It is expected that the Response and Request fields are always set.
- */
- type _sZPWzsb = hook.Event
- interface Event extends _sZPWzsb {
- response: http.ResponseWriter
- request?: http.Request
+ hasParent(): boolean
}
- interface Event {
+ interface Command {
/**
- * Written reports whether the current response has already been written.
- *
- * This method always returns false if e.ResponseWritter doesn't implement the WriteTracker interface
- * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one).
+ * GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
*/
- written(): boolean
+ globalNormalizationFunc(): (f: any, name: string) => any
}
- interface Event {
+ interface Command {
/**
- * Status reports the status code of the current response.
- *
- * This method always returns 0 if e.Response doesn't implement the StatusTracker interface
- * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one).
+ * Flags returns the complete FlagSet that applies
+ * to this command (local and persistent declared here and by all parents).
*/
- status(): number
+ flags(): (any)
}
- interface Event {
+ interface Command {
/**
- * Flush flushes buffered data to the current response.
- *
- * Returns [http.ErrNotSupported] if e.Response doesn't implement the [http.Flusher] interface
- * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one).
+ * LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+ * This function does not modify the flags of the current command, it's purpose is to return the current state.
*/
- flush(): void
+ localNonPersistentFlags(): (any)
}
- interface Event {
+ interface Command {
/**
- * IsTLS reports whether the connection on which the request was received is TLS.
+ * LocalFlags returns the local FlagSet specifically set in the current command.
+ * This function does not modify the flags of the current command, it's purpose is to return the current state.
*/
- isTLS(): boolean
+ localFlags(): (any)
}
- interface Event {
+ interface Command {
/**
- * SetCookie is an alias for [http.SetCookie].
- *
- * SetCookie adds a Set-Cookie header to the current response's headers.
- * The provided cookie must have a valid Name.
- * Invalid cookies may be silently dropped.
+ * InheritedFlags returns all flags which were inherited from parent commands.
+ * This function does not modify the flags of the current command, it's purpose is to return the current state.
*/
- setCookie(cookie: http.Cookie): void
+ inheritedFlags(): (any)
}
- interface Event {
+ interface Command {
/**
- * RemoteIP returns the IP address of the client that sent the request.
- *
- * IPv6 addresses are returned expanded.
- * For example, "2001:db8::1" becomes "2001:0db8:0000:0000:0000:0000:0000:0001".
- *
- * Note that if you are behind reverse proxy(ies), this method returns
- * the IP of the last connecting proxy.
+ * NonInheritedFlags returns all flags which were not inherited from parent commands.
+ * This function does not modify the flags of the current command, it's purpose is to return the current state.
*/
- remoteIP(): string
+ nonInheritedFlags(): (any)
}
- interface Event {
+ interface Command {
/**
- * FindUploadedFiles extracts all form files of "key" from a http request
- * and returns a slice with filesystem.File instances (if any).
+ * PersistentFlags returns the persistent FlagSet specifically set in the current command.
*/
- findUploadedFiles(key: string): Array<(filesystem.File | undefined)>
+ persistentFlags(): (any)
}
- interface Event {
+ interface Command {
/**
- * Get retrieves single value from the current event data store.
+ * ResetFlags deletes all flags from command.
*/
- get(key: string): any
+ resetFlags(): void
}
- interface Event {
+ interface Command {
/**
- * GetAll returns a copy of the current event data store.
+ * HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
*/
- getAll(): _TygojaDict
+ hasFlags(): boolean
}
- interface Event {
+ interface Command {
/**
- * Set saves single value into the current event data store.
+ * HasPersistentFlags checks if the command contains persistent flags.
*/
- set(key: string, value: any): void
+ hasPersistentFlags(): boolean
}
- interface Event {
+ interface Command {
/**
- * SetAll saves all items from m into the current event data store.
+ * HasLocalFlags checks if the command has flags specifically declared locally.
*/
- setAll(m: _TygojaDict): void
+ hasLocalFlags(): boolean
}
- interface Event {
+ interface Command {
/**
- * String writes a plain string response.
+ * HasInheritedFlags checks if the command has flags inherited from its parent command.
*/
- string(status: number, data: string): void
+ hasInheritedFlags(): boolean
}
- interface Event {
+ interface Command {
/**
- * HTML writes an HTML response.
+ * HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+ * structure) which are not hidden or deprecated.
*/
- html(status: number, data: string): void
+ hasAvailableFlags(): boolean
}
- interface Event {
+ interface Command {
/**
- * JSON writes a JSON response.
- *
- * It also provides a generic response data fields picker if the "fields" query parameter is set.
- * For example, if you are requesting `?fields=a,b` for `e.JSON(200, map[string]int{ "a":1, "b":2, "c":3 })`,
- * it should result in a JSON response like: `{"a":1, "b": 2}`.
+ * HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
*/
- json(status: number, data: any): void
+ hasAvailablePersistentFlags(): boolean
}
- interface Event {
+ interface Command {
/**
- * XML writes an XML response.
- * It automatically prepends the generic [xml.Header] string to the response.
+ * HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+ * or deprecated.
*/
- xml(status: number, data: any): void
+ hasAvailableLocalFlags(): boolean
}
- interface Event {
+ interface Command {
/**
- * Stream streams the specified reader into the response.
+ * HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+ * not hidden or deprecated.
*/
- stream(status: number, contentType: string, reader: io.Reader): void
+ hasAvailableInheritedFlags(): boolean
}
- interface Event {
+ interface Command {
/**
- * Blob writes a blob (bytes slice) response.
+ * Flag climbs up the command tree looking for matching flag.
*/
- blob(status: number, contentType: string, b: string|Array): void
+ flag(name: string): (any)
}
- interface Event {
+ interface Command {
/**
- * FileFS serves the specified filename from fsys.
- *
- * It is similar to [echo.FileFS] for consistency with earlier versions.
+ * ParseFlags parses persistent flag tree and local flags.
*/
- fileFS(fsys: fs.FS, filename: string): void
+ parseFlags(args: Array): void
}
- interface Event {
+ interface Command {
/**
- * NoContent writes a response with no body (ex. 204).
+ * Parent returns a commands parent command.
*/
- noContent(status: number): void
+ parent(): (Command)
}
- interface Event {
+ interface Command {
/**
- * Redirect writes a redirect response to the specified url.
- * The status code must be in between 300 – 399 range.
+ * RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+ *
+ * You can use pre-defined completion functions such as [FixedCompletions] or [NoFileCompletions],
+ * or you can define your own.
*/
- redirect(status: number, url: string): void
- }
- interface Event {
- error(status: number, message: string, errData: any): (ApiError)
- }
- interface Event {
- badRequestError(message: string, errData: any): (ApiError)
- }
- interface Event {
- notFoundError(message: string, errData: any): (ApiError)
- }
- interface Event {
- forbiddenError(message: string, errData: any): (ApiError)
- }
- interface Event {
- unauthorizedError(message: string, errData: any): (ApiError)
- }
- interface Event {
- tooManyRequestsError(message: string, errData: any): (ApiError)
+ registerFlagCompletionFunc(flagName: string, f: CompletionFunc): void
}
- interface Event {
- internalServerError(message: string, errData: any): (ApiError)
+ interface Command {
+ /**
+ * GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
+ */
+ getFlagCompletionFunc(flagName: string): [CompletionFunc, boolean]
}
- interface Event {
+ interface Command {
/**
- * BindBody unmarshal the request body into the provided dst.
- *
- * dst must be either a struct pointer or map[string]any.
- *
- * The rules how the body will be scanned depends on the request Content-Type.
- *
- * Currently the following Content-Types are supported:
- * ```
- * - application/json
- * - text/xml, application/xml
- * - multipart/form-data, application/x-www-form-urlencoded
- * ```
- *
- * Respectively the following struct tags are supported (again, which one will be used depends on the Content-Type):
- * ```
- * - "json" (json body)- uses the builtin Go json package for unmarshaling.
- * - "xml" (xml body) - uses the builtin Go xml package for unmarshaling.
- * - "form" (form data) - utilizes the custom [router.UnmarshalRequestData] method.
- * ```
- *
- * NB! When dst is a struct make sure that it doesn't have public fields
- * that shouldn't be bindable and it is advisible such fields to be unexported
- * or have a separate struct just for the binding. For example:
- *
- * ```
- * data := struct{
- * somethingPrivate string
- *
- * Title string `json:"title" form:"title"`
- * Total int `json:"total" form:"total"`
- * }
- * err := e.BindBody(&data)
- * ```
+ * InitDefaultCompletionCmd adds a default 'completion' command to c.
+ * This function will do nothing if any of the following is true:
+ * 1- the feature has been explicitly disabled by the program,
+ * 2- c has no subcommands (to avoid creating one),
+ * 3- c already has a 'completion' command provided by the program.
*/
- bindBody(dst: any): void
+ initDefaultCompletionCmd(...args: string[]): void
}
- /**
- * Router defines a thin wrapper around the standard Go [http.ServeMux] by
- * adding support for routing sub-groups, middlewares and other common utils.
- *
- * Example:
- *
- * ```
- * r := NewRouter[*MyEvent](eventFactory)
- *
- * // middlewares
- * r.BindFunc(m1, m2)
- *
- * // routes
- * r.GET("/test", handler1)
- *
- * // sub-routers/groups
- * api := r.Group("/api")
- * api.GET("/admins", handler2)
- *
- * // generate a http.ServeMux instance based on the router configurations
- * mux, _ := r.BuildMux()
- *
- * http.ListenAndServe("localhost:8090", mux)
- * ```
- */
- type _sAyDGzz = RouterGroup
- interface Router extends _sAyDGzz {
+ interface Command {
+ /**
+ * GenFishCompletion generates fish completion file and writes to the passed writer.
+ */
+ genFishCompletion(w: io.Writer, includeDesc: boolean): void
}
-}
-
-namespace subscriptions {
- /**
- * Broker defines a struct for managing subscriptions clients.
- */
- interface Broker {
+ interface Command {
+ /**
+ * GenFishCompletionFile generates fish completion file.
+ */
+ genFishCompletionFile(filename: string, includeDesc: boolean): void
}
- interface Broker {
+ interface Command {
/**
- * Clients returns a shallow copy of all registered clients indexed
- * with their connection id.
+ * MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
+ * if the command is invoked with a subset (but not all) of the given flags.
*/
- clients(): _TygojaDict
+ markFlagsRequiredTogether(...flagNames: string[]): void
}
- interface Broker {
+ interface Command {
/**
- * ChunkedClients splits the current clients into a chunked slice.
+ * MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors
+ * if the command is invoked without at least one flag from the given set of flags.
*/
- chunkedClients(chunkSize: number): Array>
+ markFlagsOneRequired(...flagNames: string[]): void
}
- interface Broker {
+ interface Command {
/**
- * TotalClients returns the total number of registered clients.
+ * MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
+ * if the command is invoked with more than one flag from the given set of flags.
*/
- totalClients(): number
+ markFlagsMutuallyExclusive(...flagNames: string[]): void
}
- interface Broker {
+ interface Command {
/**
- * ClientById finds a registered client by its id.
- *
- * Returns non-nil error when client with clientId is not registered.
+ * ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the
+ * first error encountered.
*/
- clientById(clientId: string): Client
+ validateFlagGroups(): void
}
- interface Broker {
+ interface Command {
/**
- * Register adds a new client to the broker instance.
+ * GenPowerShellCompletionFile generates powershell completion file without descriptions.
*/
- register(client: Client): void
+ genPowerShellCompletionFile(filename: string): void
}
- interface Broker {
+ interface Command {
/**
- * Unregister removes a single client by its id and marks it as discarded.
- *
- * If client with clientId doesn't exist, this method does nothing.
+ * GenPowerShellCompletion generates powershell completion file without descriptions
+ * and writes it to the passed writer.
*/
- unregister(clientId: string): void
+ genPowerShellCompletion(w: io.Writer): void
}
- /**
- * Client is an interface for a generic subscription client.
- */
- interface Client {
- [key:string]: any;
+ interface Command {
/**
- * Id Returns the unique id of the client.
+ * GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
*/
- id(): string
+ genPowerShellCompletionFileWithDesc(filename: string): void
+ }
+ interface Command {
/**
- * Channel returns the client's communication channel.
- *
- * NB! The channel shouldn't be used after calling Discard().
+ * GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
+ * and writes it to the passed writer.
*/
- channel(): undefined
+ genPowerShellCompletionWithDesc(w: io.Writer): void
+ }
+ interface Command {
/**
- * Subscriptions returns a shallow copy of the client subscriptions matching the prefixes.
- * If no prefix is specified, returns all subscriptions.
+ * MarkFlagRequired instructs the various shell completion implementations to
+ * prioritize the named flag when performing completion,
+ * and causes your command to report an error if invoked without the flag.
*/
- subscriptions(...prefixes: string[]): _TygojaDict
+ markFlagRequired(name: string): void
+ }
+ interface Command {
/**
- * Subscribe subscribes the client to the provided subscriptions list.
- *
- * Each subscription can also have "options" (json serialized SubscriptionOptions) as query parameter.
- *
- * Example:
- *
- * ```
- * Subscribe(
- * "subscriptionA",
- * `subscriptionB?options={"query":{"a":1},"headers":{"x_token":"abc"}}`,
- * )
- * ```
+ * MarkPersistentFlagRequired instructs the various shell completion implementations to
+ * prioritize the named persistent flag when performing completion,
+ * and causes your command to report an error if invoked without the flag.
*/
- subscribe(...subs: string[]): void
+ markPersistentFlagRequired(name: string): void
+ }
+ interface Command {
/**
- * Unsubscribe unsubscribes the client from the provided subscriptions list.
+ * MarkFlagFilename instructs the various shell completion implementations to
+ * limit completions for the named flag to the specified file extensions.
*/
- unsubscribe(...subs: string[]): void
+ markFlagFilename(name: string, ...extensions: string[]): void
+ }
+ interface Command {
/**
- * HasSubscription checks if the client is subscribed to `sub`.
+ * MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+ * The bash completion script will call the bash function f for the flag.
+ *
+ * This will only work for bash completion.
+ * It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+ * to register a Go function which will work across all shells.
*/
- hasSubscription(sub: string): boolean
+ markFlagCustom(name: string, f: string): void
+ }
+ interface Command {
/**
- * Set stores any value to the client's context.
+ * MarkPersistentFlagFilename instructs the various shell completion
+ * implementations to limit completions for the named persistent flag to the
+ * specified file extensions.
*/
- set(key: string, value: any): void
+ markPersistentFlagFilename(name: string, ...extensions: string[]): void
+ }
+ interface Command {
/**
- * Unset removes a single value from the client's context.
+ * MarkFlagDirname instructs the various shell completion implementations to
+ * limit completions for the named flag to directory names.
*/
- unset(key: string): void
+ markFlagDirname(name: string): void
+ }
+ interface Command {
/**
- * Get retrieves the key value from the client's context.
+ * MarkPersistentFlagDirname instructs the various shell completion
+ * implementations to limit completions for the named persistent flag to
+ * directory names.
*/
- get(key: string): any
+ markPersistentFlagDirname(name: string): void
+ }
+ interface Command {
/**
- * Discard marks the client as "discarded" (and closes its channel),
- * meaning that it shouldn't be used anymore for sending new messages.
- *
- * It is safe to call Discard() multiple times.
+ * GenZshCompletionFile generates zsh completion file including descriptions.
*/
- discard(): void
+ genZshCompletionFile(filename: string): void
+ }
+ interface Command {
/**
- * IsDiscarded indicates whether the client has been "discarded"
- * and should no longer be used.
+ * GenZshCompletion generates zsh completion file including descriptions
+ * and writes it to the passed writer.
*/
- isDiscarded(): boolean
+ genZshCompletion(w: io.Writer): void
+ }
+ interface Command {
/**
- * Send sends the specified message to the client's channel (if not discarded).
+ * GenZshCompletionFileNoDesc generates zsh completion file without descriptions.
*/
- send(m: Message): void
+ genZshCompletionFileNoDesc(filename: string): void
}
- /**
- * Message defines a client's channel data.
- */
- interface Message {
- name: string
- data: string|Array
+ interface Command {
+ /**
+ * GenZshCompletionNoDesc generates zsh completion file without descriptions
+ * and writes it to the passed writer.
+ */
+ genZshCompletionNoDesc(w: io.Writer): void
}
- interface Message {
+ interface Command {
/**
- * WriteSSE writes the current message in a SSE format into the provided writer.
+ * MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
+ * not consistent with Bash completion. It has therefore been disabled.
+ * Instead, when no other completion is specified, file completion is done by
+ * default for every argument. One can disable file completion on a per-argument
+ * basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
+ * To achieve file extension filtering, one can use ValidArgsFunction and
+ * ShellCompDirectiveFilterFileExt.
*
- * For example, writing to a router.Event:
+ * Deprecated
+ */
+ markZshCompPositionalArgumentFile(argPosition: number, ...patterns: string[]): void
+ }
+ interface Command {
+ /**
+ * MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
+ * been disabled.
+ * To achieve the same behavior across all shells, one can use
+ * ValidArgs (for the first argument only) or ValidArgsFunction for
+ * any argument (can include the first one also).
*
- * ```
- * m := Message{Name: "users/create", Data: []byte{...}}
- * m.Write(e.Response, "yourEventId")
- * e.Flush()
- * ```
+ * Deprecated
*/
- writeSSE(w: io.Writer, eventId: string): void
+ markZshCompPositionalArgumentWords(argPosition: number, ...words: string[]): void
}
}
@@ -21052,8 +21117,17 @@ namespace sync {
*/
interface Locker {
[key:string]: any;
- lock(): void
- unlock(): void
+ lock(): void
+ unlock(): void
+ }
+}
+
+namespace io {
+ /**
+ * WriteCloser is the interface that groups the basic Write and Close methods.
+ */
+ interface WriteCloser {
+ [key:string]: any;
}
}
@@ -21098,15 +21172,6 @@ namespace syscall {
}
}
-namespace io {
- /**
- * WriteCloser is the interface that groups the basic Write and Close methods.
- */
- interface WriteCloser {
- [key:string]: any;
- }
-}
-
namespace time {
/**
* A Month specifies a month of the year (January = 1, ...).
@@ -21152,244 +21217,263 @@ namespace time {
namespace fs {
}
-namespace store {
-}
-
-/**
- * Package url parses URLs and implements query escaping.
- */
-namespace url {
+namespace bufio {
/**
- * A URL represents a parsed URL (technically, a URI reference).
- *
- * The general form represented is:
- *
- * ```
- * [scheme:][//[userinfo@]host][/]path[?query][#fragment]
- * ```
- *
- * URLs that do not start with a slash after the scheme are interpreted as:
- *
- * ```
- * scheme:opaque[?query][#fragment]
- * ```
- *
- * The Host field contains the host and port subcomponents of the URL.
- * When the port is present, it is separated from the host with a colon.
- * When the host is an IPv6 address, it must be enclosed in square brackets:
- * "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port
- * into a string suitable for the Host field, adding square brackets to
- * the host when necessary.
- *
- * Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/.
- * A consequence is that it is impossible to tell which slashes in the Path were
- * slashes in the raw URL and which were %2f. This distinction is rarely important,
- * but when it is, the code should use the [URL.EscapedPath] method, which preserves
- * the original encoding of Path.
- *
- * The RawPath field is an optional field which is only set when the default
- * encoding of Path is different from the escaped path. See the EscapedPath method
- * for more details.
- *
- * URL's String method uses the EscapedPath method to obtain the path.
+ * Reader implements buffering for an io.Reader object.
*/
- interface URL {
- scheme: string
- opaque: string // encoded opaque data
- user?: Userinfo // username and password information
- host: string // host or host:port (see Hostname and Port methods)
- path: string // path (relative paths may omit leading slash)
- rawPath: string // encoded path hint (see EscapedPath method)
- omitHost: boolean // do not emit empty host (authority)
- forceQuery: boolean // append a query ('?') even if RawQuery is empty
- rawQuery: string // encoded query values, without '?'
- fragment: string // fragment for references, without '#'
- rawFragment: string // encoded fragment hint (see EscapedFragment method)
+ interface Reader {
}
- interface URL {
+ interface Reader {
/**
- * EscapedPath returns the escaped form of u.Path.
- * In general there are multiple possible escaped forms of any path.
- * EscapedPath returns u.RawPath when it is a valid escaping of u.Path.
- * Otherwise EscapedPath ignores u.RawPath and computes an escaped
- * form on its own.
- * The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct
- * their results.
- * In general, code should call EscapedPath instead of
- * reading u.RawPath directly.
+ * Size returns the size of the underlying buffer in bytes.
*/
- escapedPath(): string
+ size(): number
}
- interface URL {
+ interface Reader {
/**
- * EscapedFragment returns the escaped form of u.Fragment.
- * In general there are multiple possible escaped forms of any fragment.
- * EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment.
- * Otherwise EscapedFragment ignores u.RawFragment and computes an escaped
- * form on its own.
- * The [URL.String] method uses EscapedFragment to construct its result.
- * In general, code should call EscapedFragment instead of
- * reading u.RawFragment directly.
+ * Reset discards any buffered data, resets all state, and switches
+ * the buffered reader to read from r.
+ * Calling Reset on the zero value of [Reader] initializes the internal buffer
+ * to the default size.
+ * Calling b.Reset(b) (that is, resetting a [Reader] to itself) does nothing.
*/
- escapedFragment(): string
+ reset(r: io.Reader): void
}
- interface URL {
+ interface Reader {
/**
- * String reassembles the [URL] into a valid URL string.
- * The general form of the result is one of:
+ * Peek returns the next n bytes without advancing the reader. The bytes stop
+ * being valid at the next read call. If Peek returns fewer than n bytes, it
+ * also returns an error explaining why the read is short. The error is
+ * [ErrBufferFull] if n is larger than b's buffer size.
*
- * ```
- * scheme:opaque?query#fragment
- * scheme://userinfo@host/path?query#fragment
- * ```
+ * Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
+ * until the next read operation.
+ */
+ peek(n: number): string|Array
+ }
+ interface Reader {
+ /**
+ * Discard skips the next n bytes, returning the number of bytes discarded.
*
- * If u.Opaque is non-empty, String uses the first form;
- * otherwise it uses the second form.
- * Any non-ASCII characters in host are escaped.
- * To obtain the path, String uses u.EscapedPath().
+ * If Discard skips fewer than n bytes, it also returns an error.
+ * If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without
+ * reading from the underlying io.Reader.
+ */
+ discard(n: number): number
+ }
+ interface Reader {
+ /**
+ * Read reads data into p.
+ * It returns the number of bytes read into p.
+ * The bytes are taken from at most one Read on the underlying [Reader],
+ * hence n may be less than len(p).
+ * To read exactly len(p) bytes, use io.ReadFull(b, p).
+ * If the underlying [Reader] can return a non-zero count with io.EOF,
+ * then this Read method can do so as well; see the [io.Reader] docs.
+ */
+ read(p: string|Array): number
+ }
+ interface Reader {
+ /**
+ * ReadByte reads and returns a single byte.
+ * If no byte is available, returns an error.
+ */
+ readByte(): number
+ }
+ interface Reader {
+ /**
+ * UnreadByte unreads the last byte. Only the most recently read byte can be unread.
*
- * In the second form, the following rules apply:
- * ```
- * - if u.Scheme is empty, scheme: is omitted.
- * - if u.User is nil, userinfo@ is omitted.
- * - if u.Host is empty, host/ is omitted.
- * - if u.Scheme and u.Host are empty and u.User is nil,
- * the entire scheme://userinfo@host/ is omitted.
- * - if u.Host is non-empty and u.Path begins with a /,
- * the form host/path does not add its own /.
- * - if u.RawQuery is empty, ?query is omitted.
- * - if u.Fragment is empty, #fragment is omitted.
- * ```
+ * UnreadByte returns an error if the most recent method called on the
+ * [Reader] was not a read operation. Notably, [Reader.Peek], [Reader.Discard], and [Reader.WriteTo] are not
+ * considered read operations.
*/
- string(): string
+ unreadByte(): void
}
- interface URL {
+ interface Reader {
/**
- * Redacted is like [URL.String] but replaces any password with "xxxxx".
- * Only the password in u.User is redacted.
+ * ReadRune reads a single UTF-8 encoded Unicode character and returns the
+ * rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
+ * and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
*/
- redacted(): string
+ readRune(): [number, number]
}
- /**
- * Values maps a string key to a list of values.
- * It is typically used for query parameters and form values.
- * Unlike in the http.Header map, the keys in a Values map
- * are case-sensitive.
- */
- interface Values extends _TygojaDict{}
- interface Values {
+ interface Reader {
/**
- * Get gets the first value associated with the given key.
- * If there are no values associated with the key, Get returns
- * the empty string. To access multiple values, use the map
- * directly.
+ * UnreadRune unreads the last rune. If the most recent method called on
+ * the [Reader] was not a [Reader.ReadRune], [Reader.UnreadRune] returns an error. (In this
+ * regard it is stricter than [Reader.UnreadByte], which will unread the last byte
+ * from any read operation.)
+ */
+ unreadRune(): void
+ }
+ interface Reader {
+ /**
+ * Buffered returns the number of bytes that can be read from the current buffer.
+ */
+ buffered(): number
+ }
+ interface Reader {
+ /**
+ * ReadSlice reads until the first occurrence of delim in the input,
+ * returning a slice pointing at the bytes in the buffer.
+ * The bytes stop being valid at the next read.
+ * If ReadSlice encounters an error before finding a delimiter,
+ * it returns all the data in the buffer and the error itself (often io.EOF).
+ * ReadSlice fails with error [ErrBufferFull] if the buffer fills without a delim.
+ * Because the data returned from ReadSlice will be overwritten
+ * by the next I/O operation, most clients should use
+ * [Reader.ReadBytes] or ReadString instead.
+ * ReadSlice returns err != nil if and only if line does not end in delim.
+ */
+ readSlice(delim: number): string|Array
+ }
+ interface Reader {
+ /**
+ * ReadLine is a low-level line-reading primitive. Most callers should use
+ * [Reader.ReadBytes]('\n') or [Reader.ReadString]('\n') instead or use a [Scanner].
+ *
+ * ReadLine tries to return a single line, not including the end-of-line bytes.
+ * If the line was too long for the buffer then isPrefix is set and the
+ * beginning of the line is returned. The rest of the line will be returned
+ * from future calls. isPrefix will be false when returning the last fragment
+ * of the line. The returned buffer is only valid until the next call to
+ * ReadLine. ReadLine either returns a non-nil line or it returns an error,
+ * never both.
+ *
+ * The text returned from ReadLine does not include the line end ("\r\n" or "\n").
+ * No indication or error is given if the input ends without a final line end.
+ * Calling [Reader.UnreadByte] after ReadLine will always unread the last byte read
+ * (possibly a character belonging to the line end) even if that byte is not
+ * part of the line returned by ReadLine.
+ */
+ readLine(): [string|Array, boolean]
+ }
+ interface Reader {
+ /**
+ * ReadBytes reads until the first occurrence of delim in the input,
+ * returning a slice containing the data up to and including the delimiter.
+ * If ReadBytes encounters an error before finding a delimiter,
+ * it returns the data read before the error and the error itself (often io.EOF).
+ * ReadBytes returns err != nil if and only if the returned data does not end in
+ * delim.
+ * For simple uses, a Scanner may be more convenient.
+ */
+ readBytes(delim: number): string|Array
+ }
+ interface Reader {
+ /**
+ * ReadString reads until the first occurrence of delim in the input,
+ * returning a string containing the data up to and including the delimiter.
+ * If ReadString encounters an error before finding a delimiter,
+ * it returns the data read before the error and the error itself (often io.EOF).
+ * ReadString returns err != nil if and only if the returned data does not end in
+ * delim.
+ * For simple uses, a Scanner may be more convenient.
*/
- get(key: string): string
+ readString(delim: number): string
}
- interface Values {
+ interface Reader {
/**
- * Set sets the key to value. It replaces any existing
- * values.
+ * WriteTo implements io.WriterTo.
+ * This may make multiple calls to the [Reader.Read] method of the underlying [Reader].
+ * If the underlying reader supports the [Reader.WriteTo] method,
+ * this calls the underlying [Reader.WriteTo] without buffering.
*/
- set(key: string, value: string): void
+ writeTo(w: io.Writer): number
}
- interface Values {
- /**
- * Add adds the value to key. It appends to any existing
- * values associated with key.
- */
- add(key: string, value: string): void
+ /**
+ * Writer implements buffering for an [io.Writer] object.
+ * If an error occurs writing to a [Writer], no more data will be
+ * accepted and all subsequent writes, and [Writer.Flush], will return the error.
+ * After all data has been written, the client should call the
+ * [Writer.Flush] method to guarantee all data has been forwarded to
+ * the underlying [io.Writer].
+ */
+ interface Writer {
}
- interface Values {
+ interface Writer {
/**
- * Del deletes the values associated with key.
+ * Size returns the size of the underlying buffer in bytes.
*/
- del(key: string): void
+ size(): number
}
- interface Values {
+ interface Writer {
/**
- * Has checks whether a given key is set.
+ * Reset discards any unflushed buffered data, clears any error, and
+ * resets b to write its output to w.
+ * Calling Reset on the zero value of [Writer] initializes the internal buffer
+ * to the default size.
+ * Calling w.Reset(w) (that is, resetting a [Writer] to itself) does nothing.
*/
- has(key: string): boolean
+ reset(w: io.Writer): void
}
- interface Values {
+ interface Writer {
/**
- * Encode encodes the values into “URL encoded” form
- * ("bar=baz&foo=quux") sorted by key.
+ * Flush writes any buffered data to the underlying [io.Writer].
*/
- encode(): string
+ flush(): void
}
- interface URL {
+ interface Writer {
/**
- * IsAbs reports whether the [URL] is absolute.
- * Absolute means that it has a non-empty scheme.
+ * Available returns how many bytes are unused in the buffer.
*/
- isAbs(): boolean
+ available(): number
}
- interface URL {
+ interface Writer {
/**
- * Parse parses a [URL] in the context of the receiver. The provided URL
- * may be relative or absolute. Parse returns nil, err on parse
- * failure, otherwise its return value is the same as [URL.ResolveReference].
+ * AvailableBuffer returns an empty buffer with b.Available() capacity.
+ * This buffer is intended to be appended to and
+ * passed to an immediately succeeding [Writer.Write] call.
+ * The buffer is only valid until the next write operation on b.
*/
- parse(ref: string): (URL)
+ availableBuffer(): string|Array
}
- interface URL {
+ interface Writer {
/**
- * ResolveReference resolves a URI reference to an absolute URI from
- * an absolute base URI u, per RFC 3986 Section 5.2. The URI reference
- * may be relative or absolute. ResolveReference always returns a new
- * [URL] instance, even if the returned URL is identical to either the
- * base or reference. If ref is an absolute URL, then ResolveReference
- * ignores base and returns a copy of ref.
+ * Buffered returns the number of bytes that have been written into the current buffer.
*/
- resolveReference(ref: URL): (URL)
+ buffered(): number
}
- interface URL {
+ interface Writer {
/**
- * Query parses RawQuery and returns the corresponding values.
- * It silently discards malformed value pairs.
- * To check errors use [ParseQuery].
+ * Write writes the contents of p into the buffer.
+ * It returns the number of bytes written.
+ * If nn < len(p), it also returns an error explaining
+ * why the write is short.
*/
- query(): Values
+ write(p: string|Array): number
}
- interface URL {
+ interface Writer {
/**
- * RequestURI returns the encoded path?query or opaque?query
- * string that would be used in an HTTP request for u.
+ * WriteByte writes a single byte.
*/
- requestURI(): string
+ writeByte(c: number): void
}
- interface URL {
+ interface Writer {
/**
- * Hostname returns u.Host, stripping any valid port number if present.
- *
- * If the result is enclosed in square brackets, as literal IPv6 addresses are,
- * the square brackets are removed from the result.
+ * WriteRune writes a single Unicode code point, returning
+ * the number of bytes written and any error.
*/
- hostname(): string
+ writeRune(r: number): number
}
- interface URL {
+ interface Writer {
/**
- * Port returns the port part of u.Host, without the leading colon.
- *
- * If u.Host doesn't contain a valid numeric port, Port returns an empty string.
+ * WriteString writes a string.
+ * It returns the number of bytes written.
+ * If the count is less than len(s), it also returns an error explaining
+ * why the write is short.
*/
- port(): string
- }
- interface URL {
- marshalBinary(): string|Array
- }
- interface URL {
- unmarshalBinary(text: string|Array): void
+ writeString(s: string): number
}
- interface URL {
+ interface Writer {
/**
- * JoinPath returns a new [URL] with the provided path elements joined to
- * any existing path and the resulting path cleaned of any ./ or ../ elements.
- * Any sequences of multiple / characters will be reduced to a single /.
+ * ReadFrom implements [io.ReaderFrom]. If the underlying writer
+ * supports the ReadFrom method, this calls the underlying ReadFrom.
+ * If there is buffered data and an underlying ReadFrom, this fills
+ * the buffer and writes it before calling ReadFrom.
*/
- joinPath(...elem: string[]): (URL)
+ readFrom(r: io.Reader): number
}
}
@@ -21409,73 +21493,144 @@ namespace net {
network(): string // name of the network (for example, "tcp", "udp")
string(): string // string form of address (for example, "192.0.2.1:25", "[2001:db8::1]:80")
}
+}
+
+/**
+ * Package textproto implements generic support for text-based request/response
+ * protocols in the style of HTTP, NNTP, and SMTP.
+ *
+ * The package provides:
+ *
+ * [Error], which represents a numeric error response from
+ * a server.
+ *
+ * [Pipeline], to manage pipelined requests and responses
+ * in a client.
+ *
+ * [Reader], to read numeric response code lines,
+ * key: value headers, lines wrapped with leading spaces
+ * on continuation lines, and whole text blocks ending
+ * with a dot on a line by itself.
+ *
+ * [Writer], to write dot-encoded text blocks.
+ *
+ * [Conn], a convenient packaging of [Reader], [Writer], and [Pipeline] for use
+ * with a single network connection.
+ */
+namespace textproto {
/**
- * A Listener is a generic network listener for stream-oriented protocols.
- *
- * Multiple goroutines may invoke methods on a Listener simultaneously.
+ * A MIMEHeader represents a MIME-style header mapping
+ * keys to sets of values.
*/
- interface Listener {
- [key:string]: any;
+ interface MIMEHeader extends _TygojaDict{}
+ interface MIMEHeader {
/**
- * Accept waits for and returns the next connection to the listener.
+ * Add adds the key, value pair to the header.
+ * It appends to any existing values associated with key.
*/
- accept(): Conn
+ add(key: string, value: string): void
+ }
+ interface MIMEHeader {
/**
- * Close closes the listener.
- * Any blocked Accept operations will be unblocked and return errors.
+ * Set sets the header entries associated with key to
+ * the single element value. It replaces any existing
+ * values associated with key.
*/
- close(): void
+ set(key: string, value: string): void
+ }
+ interface MIMEHeader {
/**
- * Addr returns the listener's network address.
+ * Get gets the first value associated with the given key.
+ * It is case insensitive; [CanonicalMIMEHeaderKey] is used
+ * to canonicalize the provided key.
+ * If there are no values associated with the key, Get returns "".
+ * To use non-canonical keys, access the map directly.
*/
- addr(): Addr
+ get(key: string): string
+ }
+ interface MIMEHeader {
+ /**
+ * Values returns all values associated with the given key.
+ * It is case insensitive; [CanonicalMIMEHeaderKey] is
+ * used to canonicalize the provided key. To use non-canonical
+ * keys, access the map directly.
+ * The returned slice is not a copy.
+ */
+ values(key: string): Array
+ }
+ interface MIMEHeader {
+ /**
+ * Del deletes the values associated with key.
+ */
+ del(key: string): void
}
}
-namespace jwt {
+namespace multipart {
+ interface Reader {
+ /**
+ * ReadForm parses an entire multipart message whose parts have
+ * a Content-Disposition of "form-data".
+ * It stores up to maxMemory bytes + 10MB (reserved for non-file parts)
+ * in memory. File parts which can't be stored in memory will be stored on
+ * disk in temporary files.
+ * It returns [ErrMessageTooLarge] if all non-file parts can't be stored in
+ * memory.
+ */
+ readForm(maxMemory: number): (Form)
+ }
/**
- * NumericDate represents a JSON numeric date value, as referenced at
- * https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+ * Form is a parsed multipart form.
+ * Its File parts are stored either in memory or on disk,
+ * and are accessible via the [*FileHeader]'s Open method.
+ * Its Value parts are stored as strings.
+ * Both are keyed by field name.
*/
- type _sHxJrxk = time.Time
- interface NumericDate extends _sHxJrxk {
+ interface Form {
+ value: _TygojaDict
+ file: _TygojaDict
}
- interface NumericDate {
+ interface Form {
/**
- * MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch
- * represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+ * RemoveAll removes any temporary files associated with a [Form].
*/
- marshalJSON(): string|Array
+ removeAll(): void
}
- interface NumericDate {
- /**
- * UnmarshalJSON is an implementation of the json.RawMessage interface and
- * deserializes a [NumericDate] from a JSON representation, i.e. a
- * [json.Number]. This number represents an UNIX epoch with either integer or
- * non-integer seconds.
- */
- unmarshalJSON(b: string|Array): void
+ /**
+ * File is an interface to access the file part of a multipart message.
+ * Its contents may be either stored in memory or on disk.
+ * If stored on disk, the File's underlying concrete type will be an *os.File.
+ */
+ interface File {
+ [key:string]: any;
}
/**
- * ClaimStrings is basically just a slice of strings, but it can be either
- * serialized from a string array or just a string. This type is necessary,
- * since the "aud" claim can either be a single string or an array.
+ * Reader is an iterator over parts in a MIME multipart body.
+ * Reader's underlying parser consumes its input as needed. Seeking
+ * isn't supported.
*/
- interface ClaimStrings extends Array{}
- interface ClaimStrings {
- unmarshalJSON(data: string|Array): void
+ interface Reader {
}
- interface ClaimStrings {
- marshalJSON(): string|Array
+ interface Reader {
+ /**
+ * NextPart returns the next part in the multipart or an error.
+ * When there are no more parts, the error [io.EOF] is returned.
+ *
+ * As a special case, if the "Content-Transfer-Encoding" header
+ * has a value of "quoted-printable", that header is instead
+ * hidden and the body is transparently decoded during Read calls.
+ */
+ nextPart(): (Part)
}
-}
-
-namespace hook {
- /**
- * wrapped local Hook embedded struct to limit the public API surface.
- */
- type _sAafFuB = Hook
- interface mainHook extends _sAafFuB {
+ interface Reader {
+ /**
+ * NextRawPart returns the next part in the multipart or an error.
+ * When there are no more parts, the error [io.EOF] is returned.
+ *
+ * Unlike [Reader.NextPart], it does not have special handling for
+ * "Content-Transfer-Encoding: quoted-printable".
+ */
+ nextRawPart(): (Part)
}
}
@@ -21682,411 +21837,291 @@ namespace sql {
}
}
-namespace types {
-}
-
-namespace search {
+namespace store {
}
-namespace bufio {
- /**
- * Reader implements buffering for an io.Reader object.
- */
- interface Reader {
- }
- interface Reader {
- /**
- * Size returns the size of the underlying buffer in bytes.
- */
- size(): number
- }
- interface Reader {
- /**
- * Reset discards any buffered data, resets all state, and switches
- * the buffered reader to read from r.
- * Calling Reset on the zero value of [Reader] initializes the internal buffer
- * to the default size.
- * Calling b.Reset(b) (that is, resetting a [Reader] to itself) does nothing.
- */
- reset(r: io.Reader): void
- }
- interface Reader {
- /**
- * Peek returns the next n bytes without advancing the reader. The bytes stop
- * being valid at the next read call. If Peek returns fewer than n bytes, it
- * also returns an error explaining why the read is short. The error is
- * [ErrBufferFull] if n is larger than b's buffer size.
- *
- * Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
- * until the next read operation.
- */
- peek(n: number): string|Array
- }
- interface Reader {
- /**
- * Discard skips the next n bytes, returning the number of bytes discarded.
- *
- * If Discard skips fewer than n bytes, it also returns an error.
- * If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without
- * reading from the underlying io.Reader.
- */
- discard(n: number): number
- }
- interface Reader {
- /**
- * Read reads data into p.
- * It returns the number of bytes read into p.
- * The bytes are taken from at most one Read on the underlying [Reader],
- * hence n may be less than len(p).
- * To read exactly len(p) bytes, use io.ReadFull(b, p).
- * If the underlying [Reader] can return a non-zero count with io.EOF,
- * then this Read method can do so as well; see the [io.Reader] docs.
- */
- read(p: string|Array): number
- }
- interface Reader {
- /**
- * ReadByte reads and returns a single byte.
- * If no byte is available, returns an error.
- */
- readByte(): number
- }
- interface Reader {
- /**
- * UnreadByte unreads the last byte. Only the most recently read byte can be unread.
- *
- * UnreadByte returns an error if the most recent method called on the
- * [Reader] was not a read operation. Notably, [Reader.Peek], [Reader.Discard], and [Reader.WriteTo] are not
- * considered read operations.
- */
- unreadByte(): void
- }
- interface Reader {
- /**
- * ReadRune reads a single UTF-8 encoded Unicode character and returns the
- * rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
- * and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
- */
- readRune(): [number, number]
- }
- interface Reader {
- /**
- * UnreadRune unreads the last rune. If the most recent method called on
- * the [Reader] was not a [Reader.ReadRune], [Reader.UnreadRune] returns an error. (In this
- * regard it is stricter than [Reader.UnreadByte], which will unread the last byte
- * from any read operation.)
- */
- unreadRune(): void
- }
- interface Reader {
- /**
- * Buffered returns the number of bytes that can be read from the current buffer.
- */
- buffered(): number
- }
- interface Reader {
- /**
- * ReadSlice reads until the first occurrence of delim in the input,
- * returning a slice pointing at the bytes in the buffer.
- * The bytes stop being valid at the next read.
- * If ReadSlice encounters an error before finding a delimiter,
- * it returns all the data in the buffer and the error itself (often io.EOF).
- * ReadSlice fails with error [ErrBufferFull] if the buffer fills without a delim.
- * Because the data returned from ReadSlice will be overwritten
- * by the next I/O operation, most clients should use
- * [Reader.ReadBytes] or ReadString instead.
- * ReadSlice returns err != nil if and only if line does not end in delim.
- */
- readSlice(delim: number): string|Array
- }
- interface Reader {
- /**
- * ReadLine is a low-level line-reading primitive. Most callers should use
- * [Reader.ReadBytes]('\n') or [Reader.ReadString]('\n') instead or use a [Scanner].
- *
- * ReadLine tries to return a single line, not including the end-of-line bytes.
- * If the line was too long for the buffer then isPrefix is set and the
- * beginning of the line is returned. The rest of the line will be returned
- * from future calls. isPrefix will be false when returning the last fragment
- * of the line. The returned buffer is only valid until the next call to
- * ReadLine. ReadLine either returns a non-nil line or it returns an error,
- * never both.
- *
- * The text returned from ReadLine does not include the line end ("\r\n" or "\n").
- * No indication or error is given if the input ends without a final line end.
- * Calling [Reader.UnreadByte] after ReadLine will always unread the last byte read
- * (possibly a character belonging to the line end) even if that byte is not
- * part of the line returned by ReadLine.
- */
- readLine(): [string|Array, boolean]
- }
- interface Reader {
- /**
- * ReadBytes reads until the first occurrence of delim in the input,
- * returning a slice containing the data up to and including the delimiter.
- * If ReadBytes encounters an error before finding a delimiter,
- * it returns the data read before the error and the error itself (often io.EOF).
- * ReadBytes returns err != nil if and only if the returned data does not end in
- * delim.
- * For simple uses, a Scanner may be more convenient.
- */
- readBytes(delim: number): string|Array
- }
- interface Reader {
- /**
- * ReadString reads until the first occurrence of delim in the input,
- * returning a string containing the data up to and including the delimiter.
- * If ReadString encounters an error before finding a delimiter,
- * it returns the data read before the error and the error itself (often io.EOF).
- * ReadString returns err != nil if and only if the returned data does not end in
- * delim.
- * For simple uses, a Scanner may be more convenient.
- */
- readString(delim: number): string
- }
- interface Reader {
- /**
- * WriteTo implements io.WriterTo.
- * This may make multiple calls to the [Reader.Read] method of the underlying [Reader].
- * If the underlying reader supports the [Reader.WriteTo] method,
- * this calls the underlying [Reader.WriteTo] without buffering.
- */
- writeTo(w: io.Writer): number
- }
+/**
+ * Package url parses URLs and implements query escaping.
+ */
+namespace url {
/**
- * Writer implements buffering for an [io.Writer] object.
- * If an error occurs writing to a [Writer], no more data will be
- * accepted and all subsequent writes, and [Writer.Flush], will return the error.
- * After all data has been written, the client should call the
- * [Writer.Flush] method to guarantee all data has been forwarded to
- * the underlying [io.Writer].
+ * A URL represents a parsed URL (technically, a URI reference).
+ *
+ * The general form represented is:
+ *
+ * ```
+ * [scheme:][//[userinfo@]host][/]path[?query][#fragment]
+ * ```
+ *
+ * URLs that do not start with a slash after the scheme are interpreted as:
+ *
+ * ```
+ * scheme:opaque[?query][#fragment]
+ * ```
+ *
+ * The Host field contains the host and port subcomponents of the URL.
+ * When the port is present, it is separated from the host with a colon.
+ * When the host is an IPv6 address, it must be enclosed in square brackets:
+ * "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port
+ * into a string suitable for the Host field, adding square brackets to
+ * the host when necessary.
+ *
+ * Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/.
+ * A consequence is that it is impossible to tell which slashes in the Path were
+ * slashes in the raw URL and which were %2f. This distinction is rarely important,
+ * but when it is, the code should use the [URL.EscapedPath] method, which preserves
+ * the original encoding of Path.
+ *
+ * The RawPath field is an optional field which is only set when the default
+ * encoding of Path is different from the escaped path. See the EscapedPath method
+ * for more details.
+ *
+ * URL's String method uses the EscapedPath method to obtain the path.
*/
- interface Writer {
+ interface URL {
+ scheme: string
+ opaque: string // encoded opaque data
+ user?: Userinfo // username and password information
+ host: string // host or host:port (see Hostname and Port methods)
+ path: string // path (relative paths may omit leading slash)
+ rawPath: string // encoded path hint (see EscapedPath method)
+ omitHost: boolean // do not emit empty host (authority)
+ forceQuery: boolean // append a query ('?') even if RawQuery is empty
+ rawQuery: string // encoded query values, without '?'
+ fragment: string // fragment for references, without '#'
+ rawFragment: string // encoded fragment hint (see EscapedFragment method)
}
- interface Writer {
+ interface URL {
/**
- * Size returns the size of the underlying buffer in bytes.
+ * EscapedPath returns the escaped form of u.Path.
+ * In general there are multiple possible escaped forms of any path.
+ * EscapedPath returns u.RawPath when it is a valid escaping of u.Path.
+ * Otherwise EscapedPath ignores u.RawPath and computes an escaped
+ * form on its own.
+ * The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct
+ * their results.
+ * In general, code should call EscapedPath instead of
+ * reading u.RawPath directly.
*/
- size(): number
+ escapedPath(): string
}
- interface Writer {
+ interface URL {
/**
- * Reset discards any unflushed buffered data, clears any error, and
- * resets b to write its output to w.
- * Calling Reset on the zero value of [Writer] initializes the internal buffer
- * to the default size.
- * Calling w.Reset(w) (that is, resetting a [Writer] to itself) does nothing.
+ * EscapedFragment returns the escaped form of u.Fragment.
+ * In general there are multiple possible escaped forms of any fragment.
+ * EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment.
+ * Otherwise EscapedFragment ignores u.RawFragment and computes an escaped
+ * form on its own.
+ * The [URL.String] method uses EscapedFragment to construct its result.
+ * In general, code should call EscapedFragment instead of
+ * reading u.RawFragment directly.
*/
- reset(w: io.Writer): void
+ escapedFragment(): string
}
- interface Writer {
+ interface URL {
/**
- * Flush writes any buffered data to the underlying [io.Writer].
+ * String reassembles the [URL] into a valid URL string.
+ * The general form of the result is one of:
+ *
+ * ```
+ * scheme:opaque?query#fragment
+ * scheme://userinfo@host/path?query#fragment
+ * ```
+ *
+ * If u.Opaque is non-empty, String uses the first form;
+ * otherwise it uses the second form.
+ * Any non-ASCII characters in host are escaped.
+ * To obtain the path, String uses u.EscapedPath().
+ *
+ * In the second form, the following rules apply:
+ * ```
+ * - if u.Scheme is empty, scheme: is omitted.
+ * - if u.User is nil, userinfo@ is omitted.
+ * - if u.Host is empty, host/ is omitted.
+ * - if u.Scheme and u.Host are empty and u.User is nil,
+ * the entire scheme://userinfo@host/ is omitted.
+ * - if u.Host is non-empty and u.Path begins with a /,
+ * the form host/path does not add its own /.
+ * - if u.RawQuery is empty, ?query is omitted.
+ * - if u.Fragment is empty, #fragment is omitted.
+ * ```
*/
- flush(): void
+ string(): string
}
- interface Writer {
+ interface URL {
/**
- * Available returns how many bytes are unused in the buffer.
+ * Redacted is like [URL.String] but replaces any password with "xxxxx".
+ * Only the password in u.User is redacted.
*/
- available(): number
+ redacted(): string
}
- interface Writer {
+ /**
+ * Values maps a string key to a list of values.
+ * It is typically used for query parameters and form values.
+ * Unlike in the http.Header map, the keys in a Values map
+ * are case-sensitive.
+ */
+ interface Values extends _TygojaDict{}
+ interface Values {
/**
- * AvailableBuffer returns an empty buffer with b.Available() capacity.
- * This buffer is intended to be appended to and
- * passed to an immediately succeeding [Writer.Write] call.
- * The buffer is only valid until the next write operation on b.
+ * Get gets the first value associated with the given key.
+ * If there are no values associated with the key, Get returns
+ * the empty string. To access multiple values, use the map
+ * directly.
*/
- availableBuffer(): string|Array
+ get(key: string): string
}
- interface Writer {
+ interface Values {
/**
- * Buffered returns the number of bytes that have been written into the current buffer.
+ * Set sets the key to value. It replaces any existing
+ * values.
*/
- buffered(): number
+ set(key: string, value: string): void
}
- interface Writer {
+ interface Values {
/**
- * Write writes the contents of p into the buffer.
- * It returns the number of bytes written.
- * If nn < len(p), it also returns an error explaining
- * why the write is short.
+ * Add adds the value to key. It appends to any existing
+ * values associated with key.
*/
- write(p: string|Array): number
+ add(key: string, value: string): void
}
- interface Writer {
+ interface Values {
/**
- * WriteByte writes a single byte.
+ * Del deletes the values associated with key.
*/
- writeByte(c: number): void
+ del(key: string): void
}
- interface Writer {
+ interface Values {
/**
- * WriteRune writes a single Unicode code point, returning
- * the number of bytes written and any error.
+ * Has checks whether a given key is set.
*/
- writeRune(r: number): number
+ has(key: string): boolean
}
- interface Writer {
+ interface Values {
/**
- * WriteString writes a string.
- * It returns the number of bytes written.
- * If the count is less than len(s), it also returns an error explaining
- * why the write is short.
+ * Encode encodes the values into “URL encoded” form
+ * ("bar=baz&foo=quux") sorted by key.
*/
- writeString(s: string): number
+ encode(): string
}
- interface Writer {
+ interface URL {
/**
- * ReadFrom implements [io.ReaderFrom]. If the underlying writer
- * supports the ReadFrom method, this calls the underlying ReadFrom.
- * If there is buffered data and an underlying ReadFrom, this fills
- * the buffer and writes it before calling ReadFrom.
+ * IsAbs reports whether the [URL] is absolute.
+ * Absolute means that it has a non-empty scheme.
*/
- readFrom(r: io.Reader): number
+ isAbs(): boolean
}
-}
-
-/**
- * Package textproto implements generic support for text-based request/response
- * protocols in the style of HTTP, NNTP, and SMTP.
- *
- * The package provides:
- *
- * [Error], which represents a numeric error response from
- * a server.
- *
- * [Pipeline], to manage pipelined requests and responses
- * in a client.
- *
- * [Reader], to read numeric response code lines,
- * key: value headers, lines wrapped with leading spaces
- * on continuation lines, and whole text blocks ending
- * with a dot on a line by itself.
- *
- * [Writer], to write dot-encoded text blocks.
- *
- * [Conn], a convenient packaging of [Reader], [Writer], and [Pipeline] for use
- * with a single network connection.
- */
-namespace textproto {
- /**
- * A MIMEHeader represents a MIME-style header mapping
- * keys to sets of values.
- */
- interface MIMEHeader extends _TygojaDict{}
- interface MIMEHeader {
+ interface URL {
/**
- * Add adds the key, value pair to the header.
- * It appends to any existing values associated with key.
+ * Parse parses a [URL] in the context of the receiver. The provided URL
+ * may be relative or absolute. Parse returns nil, err on parse
+ * failure, otherwise its return value is the same as [URL.ResolveReference].
*/
- add(key: string, value: string): void
+ parse(ref: string): (URL)
}
- interface MIMEHeader {
+ interface URL {
/**
- * Set sets the header entries associated with key to
- * the single element value. It replaces any existing
- * values associated with key.
+ * ResolveReference resolves a URI reference to an absolute URI from
+ * an absolute base URI u, per RFC 3986 Section 5.2. The URI reference
+ * may be relative or absolute. ResolveReference always returns a new
+ * [URL] instance, even if the returned URL is identical to either the
+ * base or reference. If ref is an absolute URL, then ResolveReference
+ * ignores base and returns a copy of ref.
*/
- set(key: string, value: string): void
+ resolveReference(ref: URL): (URL)
}
- interface MIMEHeader {
+ interface URL {
/**
- * Get gets the first value associated with the given key.
- * It is case insensitive; [CanonicalMIMEHeaderKey] is used
- * to canonicalize the provided key.
- * If there are no values associated with the key, Get returns "".
- * To use non-canonical keys, access the map directly.
+ * Query parses RawQuery and returns the corresponding values.
+ * It silently discards malformed value pairs.
+ * To check errors use [ParseQuery].
*/
- get(key: string): string
+ query(): Values
}
- interface MIMEHeader {
+ interface URL {
/**
- * Values returns all values associated with the given key.
- * It is case insensitive; [CanonicalMIMEHeaderKey] is
- * used to canonicalize the provided key. To use non-canonical
- * keys, access the map directly.
- * The returned slice is not a copy.
+ * RequestURI returns the encoded path?query or opaque?query
+ * string that would be used in an HTTP request for u.
*/
- values(key: string): Array
+ requestURI(): string
}
- interface MIMEHeader {
+ interface URL {
/**
- * Del deletes the values associated with key.
+ * Hostname returns u.Host, stripping any valid port number if present.
+ *
+ * If the result is enclosed in square brackets, as literal IPv6 addresses are,
+ * the square brackets are removed from the result.
*/
- del(key: string): void
+ hostname(): string
}
-}
-
-namespace multipart {
- interface Reader {
+ interface URL {
/**
- * ReadForm parses an entire multipart message whose parts have
- * a Content-Disposition of "form-data".
- * It stores up to maxMemory bytes + 10MB (reserved for non-file parts)
- * in memory. File parts which can't be stored in memory will be stored on
- * disk in temporary files.
- * It returns [ErrMessageTooLarge] if all non-file parts can't be stored in
- * memory.
+ * Port returns the port part of u.Host, without the leading colon.
+ *
+ * If u.Host doesn't contain a valid numeric port, Port returns an empty string.
*/
- readForm(maxMemory: number): (Form)
+ port(): string
}
- /**
- * Form is a parsed multipart form.
- * Its File parts are stored either in memory or on disk,
- * and are accessible via the [*FileHeader]'s Open method.
- * Its Value parts are stored as strings.
- * Both are keyed by field name.
- */
- interface Form {
- value: _TygojaDict
- file: _TygojaDict
+ interface URL {
+ marshalBinary(): string|Array
}
- interface Form {
+ interface URL {
+ unmarshalBinary(text: string|Array): void
+ }
+ interface URL {
/**
- * RemoveAll removes any temporary files associated with a [Form].
+ * JoinPath returns a new [URL] with the provided path elements joined to
+ * any existing path and the resulting path cleaned of any ./ or ../ elements.
+ * Any sequences of multiple / characters will be reduced to a single /.
*/
- removeAll(): void
- }
- /**
- * File is an interface to access the file part of a multipart message.
- * Its contents may be either stored in memory or on disk.
- * If stored on disk, the File's underlying concrete type will be an *os.File.
- */
- interface File {
- [key:string]: any;
+ joinPath(...elem: string[]): (URL)
}
+}
+
+namespace jwt {
/**
- * Reader is an iterator over parts in a MIME multipart body.
- * Reader's underlying parser consumes its input as needed. Seeking
- * isn't supported.
+ * NumericDate represents a JSON numeric date value, as referenced at
+ * https://datatracker.ietf.org/doc/html/rfc7519#section-2.
*/
- interface Reader {
+ type _sMYpeaI = time.Time
+ interface NumericDate extends _sMYpeaI {
}
- interface Reader {
+ interface NumericDate {
/**
- * NextPart returns the next part in the multipart or an error.
- * When there are no more parts, the error [io.EOF] is returned.
- *
- * As a special case, if the "Content-Transfer-Encoding" header
- * has a value of "quoted-printable", that header is instead
- * hidden and the body is transparently decoded during Read calls.
+ * MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch
+ * represented in NumericDate to a byte array, using the precision specified in TimePrecision.
*/
- nextPart(): (Part)
+ marshalJSON(): string|Array
}
- interface Reader {
+ interface NumericDate {
/**
- * NextRawPart returns the next part in the multipart or an error.
- * When there are no more parts, the error [io.EOF] is returned.
- *
- * Unlike [Reader.NextPart], it does not have special handling for
- * "Content-Transfer-Encoding: quoted-printable".
+ * UnmarshalJSON is an implementation of the json.RawMessage interface and
+ * deserializes a [NumericDate] from a JSON representation, i.e. a
+ * [json.Number]. This number represents an UNIX epoch with either integer or
+ * non-integer seconds.
*/
- nextRawPart(): (Part)
+ unmarshalJSON(b: string|Array): void
+ }
+ /**
+ * ClaimStrings is basically just a slice of strings, but it can be either
+ * serialized from a string array or just a string. This type is necessary,
+ * since the "aud" claim can either be a single string or an array.
+ */
+ interface ClaimStrings extends Array{}
+ interface ClaimStrings {
+ unmarshalJSON(data: string|Array): void
+ }
+ interface ClaimStrings {
+ marshalJSON(): string|Array
}
}
+namespace types {
+}
+
+namespace search {
+}
+
namespace http {
/**
* A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
@@ -22464,9 +22499,6 @@ namespace oauth2 {
}
}
-namespace subscriptions {
-}
-
namespace cron {
/**
* Job defines a single registered cron job.
@@ -22500,6 +22532,73 @@ namespace cron {
}
}
+namespace hook {
+ /**
+ * wrapped local Hook embedded struct to limit the public API surface.
+ */
+ type _sjYfSXq = Hook
+ interface mainHook extends _sjYfSXq {
+ }
+}
+
+namespace cobra {
+ interface PositionalArgs {(cmd: Command, args: Array): void }
+ // @ts-ignore
+ import flag = pflag
+ /**
+ * FParseErrWhitelist configures Flag parse errors to be ignored
+ */
+ interface FParseErrWhitelist extends _TygojaAny{}
+ /**
+ * Group Structure to manage groups for commands
+ */
+ interface Group {
+ id: string
+ title: string
+ }
+ /**
+ * CompletionOptions are the options to control shell completion
+ */
+ interface CompletionOptions {
+ /**
+ * DisableDefaultCmd prevents Cobra from creating a default 'completion' command
+ */
+ disableDefaultCmd: boolean
+ /**
+ * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
+ * for shells that support completion descriptions
+ */
+ disableNoDescFlag: boolean
+ /**
+ * DisableDescriptions turns off all completion descriptions for shells
+ * that support them
+ */
+ disableDescriptions: boolean
+ /**
+ * HiddenDefaultCmd makes the default 'completion' command hidden
+ */
+ hiddenDefaultCmd: boolean
+ }
+ /**
+ * Completion is a string that can be used for completions
+ *
+ * two formats are supported:
+ * ```
+ * - the completion choice
+ * - the completion choice with a textual description (separated by a TAB).
+ * ```
+ *
+ * [CompletionWithDesc] can be used to create a completion string with a textual description.
+ *
+ * Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used.
+ */
+ interface Completion extends String{}
+ /**
+ * CompletionFunc is a function that provides completion results.
+ */
+ interface CompletionFunc {(cmd: Command, args: Array, toComplete: string): [Array, ShellCompDirective] }
+}
+
namespace router {
// @ts-ignore
import validation = ozzo_validation
@@ -22679,62 +22778,37 @@ namespace slog {
import loginternal = internal
}
-namespace cobra {
- interface PositionalArgs {(cmd: Command, args: Array): void }
- // @ts-ignore
- import flag = pflag
- /**
- * FParseErrWhitelist configures Flag parse errors to be ignored
- */
- interface FParseErrWhitelist extends _TygojaAny{}
+namespace subscriptions {
+}
+
+namespace url {
/**
- * Group Structure to manage groups for commands
+ * The Userinfo type is an immutable encapsulation of username and
+ * password details for a [URL]. An existing Userinfo value is guaranteed
+ * to have a username set (potentially empty, as allowed by RFC 2396),
+ * and optionally a password.
*/
- interface Group {
- id: string
- title: string
+ interface Userinfo {
}
- /**
- * CompletionOptions are the options to control shell completion
- */
- interface CompletionOptions {
- /**
- * DisableDefaultCmd prevents Cobra from creating a default 'completion' command
- */
- disableDefaultCmd: boolean
+ interface Userinfo {
/**
- * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
- * for shells that support completion descriptions
+ * Username returns the username.
*/
- disableNoDescFlag: boolean
+ username(): string
+ }
+ interface Userinfo {
/**
- * DisableDescriptions turns off all completion descriptions for shells
- * that support them
+ * Password returns the password in case it is set, and whether it is set.
*/
- disableDescriptions: boolean
+ password(): [string, boolean]
+ }
+ interface Userinfo {
/**
- * HiddenDefaultCmd makes the default 'completion' command hidden
+ * String returns the encoded userinfo information in the standard form
+ * of "username[:password]".
*/
- hiddenDefaultCmd: boolean
+ string(): string
}
- /**
- * Completion is a string that can be used for completions
- *
- * two formats are supported:
- * ```
- * - the completion choice
- * - the completion choice with a textual description (separated by a TAB).
- * ```
- *
- * [CompletionWithDesc] can be used to create a completion string with a textual description.
- *
- * Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used.
- */
- interface Completion extends String{}
- /**
- * CompletionFunc is a function that provides completion results.
- */
- interface CompletionFunc {(cmd: Command, args: Array, toComplete: string): [Array, ShellCompDirective] }
}
namespace multipart {
@@ -22776,36 +22850,6 @@ namespace multipart {
}
}
-namespace url {
- /**
- * The Userinfo type is an immutable encapsulation of username and
- * password details for a [URL]. An existing Userinfo value is guaranteed
- * to have a username set (potentially empty, as allowed by RFC 2396),
- * and optionally a password.
- */
- interface Userinfo {
- }
- interface Userinfo {
- /**
- * Username returns the username.
- */
- username(): string
- }
- interface Userinfo {
- /**
- * Password returns the password in case it is set, and whether it is set.
- */
- password(): [string, boolean]
- }
- interface Userinfo {
- /**
- * String returns the encoded userinfo information in the standard form
- * of "username[:password]".
- */
- string(): string
- }
-}
-
namespace http {
/**
* SameSite allows a server to define a cookie attribute making it impossible for
@@ -22825,6 +22869,16 @@ namespace http {
namespace oauth2 {
}
+namespace cobra {
+ // @ts-ignore
+ import flag = pflag
+ /**
+ * ShellCompDirective is a bit map representing the different behaviors the shell
+ * can be instructed to have once completions have been provided.
+ */
+ interface ShellCompDirective extends Number{}
+}
+
namespace slog {
// @ts-ignore
import loginternal = internal
@@ -22997,16 +23051,6 @@ namespace slog {
}
}
-namespace cobra {
- // @ts-ignore
- import flag = pflag
- /**
- * ShellCompDirective is a bit map representing the different behaviors the shell
- * can be instructed to have once completions have been provided.
- */
- interface ShellCompDirective extends Number{}
-}
-
namespace slog {
// @ts-ignore
import loginternal = internal