To speed up image pulls, you can set a registry mirror with the following command. To avoid exhausting the Worker quota, you can also manually pull an image, re-tag it, and push it to a local registry. Powered by Cloudflare Workers.
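The actual commands were not captured in this excerpt; as an illustrative sketch only (registry.example.com stands in for the project's mirror domain and localhost:5000 for a local registry, neither is from the original):

$ docker pull registry.example.com/library/nginx:latest   # pull through the mirror (placeholder domain)
$ docker tag registry.example.com/library/nginx:latest localhost:5000/library/nginx:latest
$ docker push localhost:5000/library/nginx:latest          # push the re-tagged image to the local registry

Alternatively, pulls can be routed through the mirror by listing it under "registry-mirrors" in Docker's /etc/docker/daemon.json (again with the placeholder domain): { "registry-mirrors": ["https://registry.example.com"] }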
It provides a simple and efficient way to monitor CPU and GPU usage, E-Cores and P-Cores, power consumption, and other system metrics directly from your terminal! Real-time CPU and GPU power usage display. Detailed metrics for different CPU clusters (E-Cores and P-Cores). [...] Example with flags: sudo mactop --interval 1000 --color green. --interval or -i: Set the powermetrics update interval in milliseconds. [...] What does mactop use to get real-time data? sysctl for CPU model information, system_profiler for GPU core count, psutil for memory and swap metrics, and powermetrics for the majority of CPU, GPU, network, and disk metrics.
Marker converts PDF to markdown quickly and accurately. Supports a wide range of documents (optimized for books and scientific papers) [...] Here are some known limitations that are on the roadmap to address: Marker will not convert 100% of equations to LaTeX. [...] marker /path/to/input/folder /path/to/output/folder --workers 10 --max 10 --metadata_file /path/to/metadata.json --min_length 10000 --workers is the number of pdfs to convert at once. [...] Then run benchmark.py like this: python benchmark.py data/pdfs data/references report.json --nougat. This will benchmark marker against other text extraction methods.
yt-dlp is a https://github.com/ytdl-org/youtube-dl fork based on the now inactive https://github.com/blackjack4494/yt-dlc. You can install yt-dlp using the release binaries, https://pypi.org/project/yt-dlp, or a third-party package manager. [...] # Download best format that contains video, # and if it doesn't already have an audio stream, merge it with best audio-only format $ yt-dlp -f "bv+ba/b" [...] # Download and merge the best format that has a video stream, # and the best 2 audio-only formats into one file $ yt-dlp -f "bv+ba+ba.2" --audio-multistreams [...] # Download the best mp4 video available, or the best video if no mp4 available $ yt-dlp -f "bv[ext=mp4]+ba[ext=m4a]/b[ext=mp4] / bv+ba/b"
The more words you use, the better`
} else {
    const sourceLangCode = query.detectFrom
    const targetLangCode = query.detectTo
    const sourceLangName = lang.getLangName(sourceLangCode)
    const targetLangName = lang.getLangName(targetLangCode)
    console.debug('sourceLang', sourceLangName)
    console.debug('targetLang', targetLangName)
    const toChinese = chineseLangCodes.indexOf(targetLangCode) >= 0
    const targetLangConfig = getLangConfig(targetLangCode)
    const sourceLangConfig = getLangConfig(sourceLangCode)
    console.log('Source language is', sourceLangConfig)
    rolePrompt = targetLangConfig.rolePrompt
    switch (query.action.mode) {
        case null:
        case undefined:
            if (
                (query.action.rolePrompt ?? '').includes('${text}') ||
                (query.action.commandPrompt ?? '').includes('${text}')
            ) {
                contentPrompt = ''
            } else {
                contentPrompt = '"""' + query.text + '"""'
            }
            rolePrompt = (query.action.rolePrompt ?? '')
                .replace('${sourceLang}', sourceLangName)
                .replace('${targetLang}', targetLangName)
                .replace('${text}', query.text)
            commandPrompt = (query.action.commandPrompt ?? '')
                .replace('${sourceLang}', sourceLangName)
                .replace('${targetLang}', targetLangName)
                .replace('${text}', query.text)
            if (query.action.outputRenderingFormat) {
                commandPrompt += `. Format: ${query.action.outputRenderingFormat}`
            }
            break
        case 'translate':
            quoteProcessor = new QuoteProcessor()
            commandPrompt = targetLangConfig.genCommandPrompt(
                sourceLangConfig,
                quoteProcessor.quoteStart,
                quoteProcessor.quoteEnd
            )
            contentPrompt = `${quoteProcessor.quoteStart}${query.text}${quoteProcessor.quoteEnd}`
            if (query.text.length [...]
            Only polish the text between ${quoteProcessor.quoteStart} and ${quoteProcessor.quoteEnd}.
            contentPrompt = `${quoteProcessor.quoteStart}${query.text}${quoteProcessor.quoteEnd}`
            break
        case 'summarize':
            rolePrompt = "You are a professional text summarizer, you can only summarize the text, don't interpret it."
            [...]
(status)
},
onMessage: (msg) => {
    let resp
    try {
        resp = JSON.parse(msg)
        // eslint-disable-next-line no-empty
    } catch {
        query.onFinish('stop')
        return
    }
    if (!conversationId) {
        conversationId = resp.conversation_id
    }
    const { finish_details: finishDetails } = resp.message
    if (finishDetails) {
        query.onFinish(finishDetails.type)
        return
    }
    const { content, author } = resp.message
    if (author.role === 'assistant') {
        const targetTxt = content.parts.join('')
        let textDelta = targetTxt.slice(length)
        if (quoteProcessor) {
            textDelta = quoteProcessor.processText(textDelta)
        }
        query.onMessage({ content: textDelta, role: '', isWordMode })
        length = targetTxt.length
    }
},
onError: (err) => {
    if (err instanceof Error) {
        query.onError(err.message)
        return
    }
    if (typeof err === 'string') {
        query.onError(err)
        return
    }
    if (typeof err === 'object') {
        const { detail } = err
        if (detail) {
            const { message } = detail
            if (message) {
                query.onError(`ChatGPT Web: ${message}`)
                return
            }
        }
        query.onError(`ChatGPT Web: ${JSON.stringify(err)}`)
        return
    }
    const { error } = err
    if (error instanceof Error) {
        query.onError(error.message)
        return
    }
    if (typeof error === 'object') {
        const { message } = error
        if (message) {
            query.onError(message)
            return
        }
    }
    query.onError('Unknown error')
},
})
if (conversationId) {
    await fetcher(`${utils.defaultChatGPTWebAPI}/conversation/${conversationId}`, {
        method: 'PATCH',
        headers,
        body: JSON.stringify({ is_visible: false }),
    })
}
} else {
    const url = urlJoin(settings.apiURL, settings.apiURLPath)
    await fetchSSE(url, {
        method: 'POST',
        headers,
        body: JSON.stringify(body),
        signal: query.signal,
        onMessage: (msg) => {
            let resp
            try {
                resp = JSON.parse(msg)
                // eslint-disable-next-line no-empty
            } catch {
                query.onFinish('stop')
                return
            }
            const { choices } = resp
            if (!choices || choices.length === 0) {
                return { error: 'No result' }
            }
            const { finish_reason: finishReason } = choices[0]
            if (finishReason) {
                query.onFinish(finishReason)
                return
            }
            let targetTxt = ''
            if (!isChatAPI) {
                // It's used for Azure OpenAI Service's legacy parameters.
                targetTxt = choices[0].text
                if (quoteProcessor) {
                    targetTxt = quoteProcessor.processText(targetTxt)
                }
                query.onMessage({ content: targetTxt, role: '', isWordMode })
            } else {
                const { content = '', role } = choices[0].delta
                targetTxt = content
                if (quoteProcessor) {
                    targetTxt = quoteProcessor.processText(targetTxt)
                }
                query.onMessage({ content: targetTxt, role, isWordMode })
            }
        },
        onError: (err) => {
            if (err instanceof Error) {
                query.onError(err.message)
                return
            }
            if (typeof err === 'string') {
                query.onError(err)
                return
            }
            if (typeof err === 'object') {
                const { detail } = err
                if (detail) {
                    query.onError(detail)
                    return
                }
            }
            const { error } = err
            if (error instanceof Error) {
                query.onError(error.message)
                return
            }
            if (typeof error === 'object') {
                const { message } = error
                if (message) {
                    query.onError(message)
                    return
                }
            }
            query.onError('Unknown error')
        },
    })
}
}
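The excerpt above leans on a QuoteProcessor: the text to translate is wrapped between quoteStart and quoteEnd markers in the prompt, and those markers are stripped back out of the streamed reply via processText before each delta reaches query.onMessage. A rough sketch of that idea only, not the project's actual implementation (the class name SimpleQuoteProcessor and the marker strings are invented here):

class SimpleQuoteProcessor {
    // NOTE: invented marker strings for illustration; not the project's real ones.
    readonly quoteStart = '<<QUOTE>>'
    readonly quoteEnd = '<</QUOTE>>'
    private pending = '' // tail of the stream that may still hold a partial marker

    // Feed one streamed chunk; returns only the text that is safe to display.
    processText(chunk: string): string {
        let text = this.pending + chunk
        // Drop every marker that has fully arrived.
        text = text.split(this.quoteStart).join('').split(this.quoteEnd).join('')
        // Hold back a suffix that could be the start of a marker split across chunks.
        const held = this.partialMarkerLength(text)
        this.pending = held > 0 ? text.slice(text.length - held) : ''
        return held > 0 ? text.slice(0, text.length - held) : text
    }

    // Flush whatever is still buffered once the stream has finished.
    finish(): string {
        const rest = this.pending
        this.pending = ''
        return rest
    }

    private partialMarkerLength(text: string): number {
        let longest = 0
        for (const marker of [this.quoteStart, this.quoteEnd]) {
            const max = Math.min(marker.length - 1, text.length)
            for (let n = max; n > longest; n--) {
                if (text.endsWith(marker.slice(0, n))) {
                    longest = n
                    break
                }
            }
        }
        return longest
    }
}

Holding back a possible partial marker at the end of each chunk is what lets the markers be removed even when the stream splits them across deltas; each delta would be passed through processText before being forwarded to query.onMessage.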
Master, help us awaken and become enlightened. Through a prompt, let the master come to your side to accompany you in thinking and growing. [...] Of course, I (GPT) can explain the commands in English: /help: Lists all the commands, descriptions, and rules I recognize. [...] /role: Lists all available master roles. [...] If you'd like a round-table discussion involving multiple roles, you can list multiple roles after the command.
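A purely illustrative invocation (the role names are placeholders for whatever /role actually lists in this prompt): /role Socrates, Laozi would invite both masters to discuss your question together at the round table.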