Moonshot AI (Kimi.ai) web interface turned into an API: https://github.com/LLM-Red-Team/kimi-free-api
Alibaba Tongyi (Qwen) web interface turned into an API: https://github.com/LLM-Red-Team/qwen-free-api
[...] Multiple tokens can be passed comma-separated in a single header: Authorization: Bearer TOKEN1,TOKEN2,TOKEN3
[...] Build your Web Service (New+ -> Build and deploy from a Git repository -> connect the repository you forked -> pick a deployment region -> choose the Free instance type -> Create Web Service).
Or deploy with Vercel:
npm i -g vercel --registry http://registry.npmmirror.com
vercel login
git clone https://github.com/LLM-Red-Team/deepseek-free-api
cd deepseek-free-api
vercel --prod
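For reference, a minimal sketch of calling one of these deployments, assuming it exposes the OpenAI-compatible /v1/chat/completions endpoint described in the projects' READMEs; the base URL, model name, and tokens below are placeholders:

// Hypothetical client for a deployed free-api instance.
// BASE_URL and the TOKENs are placeholders; the comma-separated Bearer
// value is what enables the round-robin token rotation shown above.
const BASE_URL = 'https://your-free-api.example.com'

async function chat(prompt: string): Promise<string> {
  const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: 'Bearer TOKEN1,TOKEN2,TOKEN3',
    },
    body: JSON.stringify({
      model: 'kimi', // assumption: these services largely ignore the model name
      messages: [{ role: 'user', content: prompt }],
    }),
  })
  if (!res.ok) throw new Error(`HTTP ${res.status}`)
  const data = await res.json()
  return data.choices[0].message.content
}

chat('Hello!').then(console.log).catch(console.error)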
ZTM is open-source network infrastructure software for running a decentralized network. It is built upon HTTP/2 tunnels and can run on any sort of IP network, such as LANs, containerized networks, and the Internet. ZTM lays the foundation for building decentralized applications by providing a set of core capabilities, including: network connectivity across Internet gateways and firewalls [...] If not, or if you're unsure whether your installed Pipy version is compatible with ZTM, follow these steps to build Pipy from source. [...] After the CA service is up and running, start a hub pointing to the CA service:
pipy hub/main.js -- --ca=localhost:9999
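Since ZTM's transport is plain HTTP/2, the tunnel primitive it builds on can be illustrated with Node's built-in http2 module. This is a conceptual sketch only, not ZTM's code or API; the port and path are invented:

// Illustration of a full-duplex byte stream over one HTTP/2 connection,
// the kind of primitive a ZTM-style tunnel is built from. Not ZTM's API;
// the endpoint and path are made up.
import * as http2 from 'node:http2'

const session = http2.connect('http://localhost:8080') // invented endpoint

// An HTTP/2 request stream is bidirectional: we can keep writing while
// reading the peer's bytes, which is exactly what tunneling needs.
const stream = session.request(
  { ':method': 'POST', ':path': '/tunnel' },
  { endStream: false }
)

stream.on('response', (headers) => console.log('status', headers[':status']))
stream.on('data', (chunk) => console.log('from peer:', chunk.toString()))
stream.on('end', () => session.close())

stream.write('hello through the tunnel')
stream.end()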
Omnivore: save articles and read them later in a distraction-free reader. https://omnivore.app/about
https://github.com/j178/chatgpt/releases
A CLI for ChatGPT, powered by the GPT-3.5-turbo and GPT-4 models. Get or create your OpenAI API key here: https://platform.openai.com/account/api-keys
💬 Start in chat mode [...]
💻 Use it in a pipeline:
cat config.yaml | chatgpt -p 'convert this yaml to json'
echo "Hello, world" | chatgpt -p translator | say
[...]
You can add more prompts in the config file, for example:
{
  "api_key": "sk-xxxxxx",
  "endpoint": "https://api.openai.com/v1",
  "prompts": {
    "default": "You are ChatGPT, a large language model trained by OpenAI. [...] "
  },
  "conversation": {
    "prompt": "default",
    "context_length": 6,
    "model": "gpt-3.5-turbo",
    "stream": true,
    "max_tokens": 1024
  }
}
then use the -p flag to switch prompts.
Note: the prompt can be a predefined one, or one you come up with on the fly.
// [...] The more words you use, the better
} else {
  const sourceLangCode = query.detectFrom
  const targetLangCode = query.detectTo
  const sourceLangName = lang.getLangName(sourceLangCode)
  const targetLangName = lang.getLangName(targetLangCode)
  console.debug('sourceLang', sourceLangName)
  console.debug('targetLang', targetLangName)
  const toChinese = chineseLangCodes.indexOf(targetLangCode) >= 0
  const targetLangConfig = getLangConfig(targetLangCode)
  const sourceLangConfig = getLangConfig(sourceLangCode)
  console.log('Source language is', sourceLangConfig)
  rolePrompt = targetLangConfig.rolePrompt
  switch (query.action.mode) {
    case null:
    case undefined:
      if (
        (query.action.rolePrompt ?? '').includes('${text}') ||
        (query.action.commandPrompt ?? '').includes('${text}')
      ) {
        contentPrompt = ''
      } else {
        contentPrompt = '"""' + query.text + '"""'
      }
      rolePrompt = (query.action.rolePrompt ?? '')
        .replace('${sourceLang}', sourceLangName)
        .replace('${targetLang}', targetLangName)
        .replace('${text}', query.text)
      commandPrompt = (query.action.commandPrompt ?? '')
        .replace('${sourceLang}', sourceLangName)
        .replace('${targetLang}', targetLangName)
        .replace('${text}', query.text)
      if (query.action.outputRenderingFormat) {
        commandPrompt += `. Format: ${query.action.outputRenderingFormat}`
      }
      break
    case 'translate':
      quoteProcessor = new QuoteProcessor()
      commandPrompt = targetLangConfig.genCommandPrompt(
        sourceLangConfig,
        quoteProcessor.quoteStart,
        quoteProcessor.quoteEnd
      )
      contentPrompt = `${quoteProcessor.quoteStart}${query.text}${quoteProcessor.quoteEnd}`
      // if (query.text.length [...]
      // [...] Only polish the text between ${quoteProcessor.quoteStart} and ${quoteProcessor.quoteEnd}.
      contentPrompt = `${quoteProcessor.quoteStart}${query.text}${quoteProcessor.quoteEnd}`
      break
    case 'summarize':
      rolePrompt = "You are a professional text summarizer, you can only summarize the text, don't interpret it."
      // [...]
  }
  // [...] (status) },
      onMessage: (msg) => {
        let resp
        try {
          resp = JSON.parse(msg)
          // eslint-disable-next-line no-empty
        } catch {
          query.onFinish('stop')
          return
        }
        if (!conversationId) {
          conversationId = resp.conversation_id
        }
        const { finish_details: finishDetails } = resp.message
        if (finishDetails) {
          query.onFinish(finishDetails.type)
          return
        }
        const { content, author } = resp.message
        if (author.role === 'assistant') {
          const targetTxt = content.parts.join('')
          let textDelta = targetTxt.slice(length)
          if (quoteProcessor) {
            textDelta = quoteProcessor.processText(textDelta)
          }
          query.onMessage({ content: textDelta, role: '', isWordMode })
          length = targetTxt.length
        }
      },
      onError: (err) => {
        if (err instanceof Error) {
          query.onError(err.message)
          return
        }
        if (typeof err === 'string') {
          query.onError(err)
          return
        }
        if (typeof err === 'object') {
          const { detail } = err
          if (detail) {
            const { message } = detail
            if (message) {
              query.onError(`ChatGPT Web: ${message}`)
              return
            }
          }
          query.onError(`ChatGPT Web: ${JSON.stringify(err)}`)
          return
        }
        const { error } = err
        if (error instanceof Error) {
          query.onError(error.message)
          return
        }
        if (typeof error === 'object') {
          const { message } = error
          if (message) {
            query.onError(message)
            return
          }
        }
        query.onError('Unknown error')
      },
    })
    if (conversationId) {
      await fetcher(`${utils.defaultChatGPTWebAPI}/conversation/${conversationId}`, {
        method: 'PATCH',
        headers,
        body: JSON.stringify({ is_visible: false }),
      })
    }
  } else {
    const url = urlJoin(settings.apiURL, settings.apiURLPath)
    await fetchSSE(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
      signal: query.signal,
      onMessage: (msg) => {
        let resp
        try {
          resp = JSON.parse(msg)
          // eslint-disable-next-line no-empty
        } catch {
          query.onFinish('stop')
          return
        }
        const { choices } = resp
        if (!choices || choices.length === 0) {
          return { error: 'No result' }
        }
        const { finish_reason: finishReason } = choices[0]
        if (finishReason) {
          query.onFinish(finishReason)
          return
        }
        let targetTxt = ''
        if (!isChatAPI) {
          // It's used for Azure OpenAI Service's legacy parameters.
          targetTxt = choices[0].text
          if (quoteProcessor) {
            targetTxt = quoteProcessor.processText(targetTxt)
          }
          query.onMessage({ content: targetTxt, role: '', isWordMode })
        } else {
          const { content = '', role } = choices[0].delta
          targetTxt = content
          if (quoteProcessor) {
            targetTxt = quoteProcessor.processText(targetTxt)
          }
          query.onMessage({ content: targetTxt, role, isWordMode })
        }
      },
      onError: (err) => {
        if (err instanceof Error) {
          query.onError(err.message)
          return
        }
        if (typeof err === 'string') {
          query.onError(err)
          return
        }
        if (typeof err === 'object') {
          const { detail } = err
          if (detail) {
            query.onError(detail)
            return
          }
        }
        const { error } = err
        if (error instanceof Error) {
          query.onError(error.message)
          return
        }
        if (typeof error === 'object') {
          const { message } = error
          if (message) {
            query.onError(message)
            return
          }
        }
        query.onError('Unknown error')
      },
    })
  }
}
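The QuoteProcessor used above isn't part of this excerpt. What it has to do is remove the quoteStart/quoteEnd sentinels from text that streams in with arbitrary chunk boundaries; a hypothetical minimal version, with made-up marker values, might look like:

// Sketch only, not the project's actual QuoteProcessor. It buffers a
// possible marker prefix at each chunk's tail so a sentinel split across
// two SSE chunks is still stripped.
class QuoteProcessor {
  readonly quoteStart = '<<<' // made-up sentinel values
  readonly quoteEnd = '>>>'
  private buffer = ''

  processText(chunk: string): string {
    this.buffer += chunk
    const maxMarker = Math.max(this.quoteStart.length, this.quoteEnd.length)
    // Hold back the longest tail that could still grow into a marker.
    let emitUpTo = this.buffer.length
    for (let k = Math.min(maxMarker, this.buffer.length); k > 0; k--) {
      const tail = this.buffer.slice(-k)
      if (this.quoteStart.startsWith(tail) || this.quoteEnd.startsWith(tail)) {
        emitUpTo = this.buffer.length - k
        break
      }
    }
    const ready = this.buffer.slice(0, emitUpTo)
    this.buffer = this.buffer.slice(emitUpTo)
    // Drop any complete markers from what we emit.
    return ready.split(this.quoteStart).join('').split(this.quoteEnd).join('')
  }
}

Feeding it 'ab<<' and then '<cd>>>' emits 'ab' then 'cd': the '<<<' marker split across the two chunks is held back and stripped once complete.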
Unified Model Serving Framework
🏹 Scalable with powerful performance optimizations
[...] abstraction scales model inference separately from your custom code and maximizes multi-core CPU utilization with automatic provisioning [...]
We strip out as much potentially sensitive information as possible, and we will never collect user code, model data, model names, or stack traces.