diff --git a/package.json b/package.json
index 19240a5e..9b720d7f 100644
--- a/package.json
+++ b/package.json
@@ -26,6 +26,8 @@
     "@modelcontextprotocol/server-slack": "*",
     "@modelcontextprotocol/server-brave-search": "*",
     "@modelcontextprotocol/server-memory": "*",
-    "@modelcontextprotocol/server-filesystem": "*"
+    "@modelcontextprotocol/server-filesystem": "*",
+    "@modelcontextprotocol/server-everart": "*",
+    "@modelcontextprotocol/server-sequential-thinking": "*"
   }
 }
diff --git a/src/everart/README.md b/src/everart/README.md
new file mode 100644
index 00000000..545f5dfa
--- /dev/null
+++ b/src/everart/README.md
@@ -0,0 +1,73 @@
+# EverArt MCP Server
+
+Image generation server for Claude Desktop using EverArt's API.
+
+## Install
+```bash
+npm install
+export EVERART_API_KEY=your_key_here
+```
+
+## Config
+Add to Claude Desktop config:
+```json
+{
+  "mcpServers": {
+    "everart": {
+      "command": "npx",
+      "args": ["-y", "@modelcontextprotocol/server-everart"],
+      "env": {
+        "EVERART_API_KEY": "your_key_here"
+      }
+    }
+  }
+}
+```
+
+## Tools
+
+### generate_image
+Generates images with a choice of models, opens the result in the default browser, and returns the URL.
+
+Parameters:
+```typescript
+{
+  prompt: string,       // Image description
+  model?: string,       // Model ID (default: "207910310772879360")
+  image_count?: number  // Number of images (default: 1)
+}
+```
+
+Models:
+- 5000: FLUX1.1 (standard)
+- 9000: FLUX1.1-ultra
+- 6000: SD3.5
+- 7000: Recraft-Real
+- 8000: Recraft-Vector
+
+All images are generated at 1024x1024.
+
+Sample usage:
+```javascript
+const result = await client.callTool({
+  name: "generate_image",
+  arguments: {
+    prompt: "A cat sitting elegantly",
+    model: "7000",
+    image_count: 1
+  }
+});
+```
+
+Response format:
+```
+Image generated successfully!
+The image has been opened in your default browser.
+
+Generation details:
+- Model: 7000
+- Prompt: "A cat sitting elegantly"
+- Image URL: https://storage.googleapis.com/...
+
+You can also click the URL above to view the image again.
+```
diff --git a/src/everart/index.ts b/src/everart/index.ts
new file mode 100644
index 00000000..bfdb2277
--- /dev/null
+++ b/src/everart/index.ts
@@ -0,0 +1,160 @@
+#!/usr/bin/env node
+import EverArt from "everart";
+import { Server } from "@modelcontextprotocol/sdk/server/index.js";
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import {
+  CallToolRequestSchema,
+  ListToolsRequestSchema,
+  ListResourcesRequestSchema,
+  ReadResourceRequestSchema,
+} from "@modelcontextprotocol/sdk/types.js";
+import fetch from "node-fetch";
+import open from "open";
+
+const server = new Server(
+  {
+    name: "example-servers/everart",
+    version: "0.2.0",
+  },
+  {
+    capabilities: {
+      tools: {},
+      resources: {}, // Required for image resources
+    },
+  },
+);
+
+if (!process.env.EVERART_API_KEY) {
+  console.error("EVERART_API_KEY environment variable is not set");
+  process.exit(1);
+}
+
+const client = new EverArt.default(process.env.EVERART_API_KEY); // .default due to CJS/ESM interop
+
+server.setRequestHandler(ListToolsRequestSchema, async () => ({
+  tools: [
+    {
+      name: "generate_image",
+      description:
+        "Generate images using EverArt models and return a clickable link to view the generated image. " +
+        "The tool will return a URL that can be clicked to view the image in a browser. " +
+        "Available models:\n" +
+        "- 5000:FLUX1.1: Standard quality\n" +
+        "- 9000:FLUX1.1-ultra: Ultra high quality\n" +
+        "- 6000:SD3.5: Stable Diffusion 3.5\n" +
+        "- 7000:Recraft-Real: Photorealistic style\n" +
+        "- 8000:Recraft-Vector: Vector art style\n" +
+        "\nThe response will contain a direct link to view the generated image.",
+      inputSchema: {
+        type: "object",
+        properties: {
+          prompt: {
+            type: "string",
+            description: "Text description of desired image",
+          },
+          model: {
+            type: "string",
+            description:
+              "Model ID (5000:FLUX1.1, 9000:FLUX1.1-ultra, 6000:SD3.5, 7000:Recraft-Real, 8000:Recraft-Vector)",
+            default: "5000",
+          },
+          image_count: {
+            type: "number",
+            description: "Number of images to generate",
+            default: 1,
+          },
+        },
+        required: ["prompt"],
+      },
+    },
+  ],
+}));
+
+server.setRequestHandler(ListResourcesRequestSchema, async () => {
+  return {
+    resources: [
+      {
+        uri: "everart://images",
+        mimeType: "image/png",
+        name: "Generated Images",
+      },
+    ],
+  };
+});
+
+server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
+  if (request.params.uri === "everart://images") {
+    return {
+      contents: [
+        {
+          uri: "everart://images",
+          mimeType: "image/png",
+          blob: "", // Empty since this is just for listing
+        },
+      ],
+    };
+  }
+  throw new Error("Resource not found");
+});
+
+server.setRequestHandler(CallToolRequestSchema, async (request) => {
+  if (request.params.name === "generate_image") {
+    try {
+      const {
+        prompt,
+        model = "207910310772879360", // full EverArt model ID used as the fallback (see README)
+        image_count = 1,
+      } = request.params.arguments as any;
+
+      // Create the generation through the EverArt v1 API
+      const generation = await client.v1.generations.create(
+        model,
+        prompt,
+        "txt2img",
+        {
+          imageCount: image_count,
+          height: 1024,
+          width: 1024,
+        },
+      );
+
+      // Poll until the generation completes
+      const completedGen = await client.v1.generations.fetchWithPolling(
+        generation[0].id,
+      );
+
+      const imgUrl = completedGen.image_url;
+      if (!imgUrl) throw new Error("No image URL");
+
+      // Automatically open the image URL in the default browser
+      await open(imgUrl);
+
+      // Return a formatted message with the clickable link
+      return {
+        content: [
+          {
+            type: "text",
+            text: `Image generated successfully!\nThe image has been opened in your default browser.\n\nGeneration details:\n- Model: ${model}\n- Prompt: "${prompt}"\n- Image URL: ${imgUrl}\n\nYou can also click the URL above to view the image again.`,
+          },
+        ],
+      };
+    } catch (error: unknown) {
+      console.error("Detailed error:", error);
+      const errorMessage =
+        error instanceof Error ? error.message : "Unknown error";
diff --git a/src/everart/package.json b/src/everart/package.json
new file mode 100644
index 00000000..771c85a4
--- /dev/null
+++ b/src/everart/package.json
@@ -0,0 +1,32 @@
+{
+  "name": "@modelcontextprotocol/server-everart",
+  "version": "0.1.0",
+  "description": "MCP server for EverArt API integration",
+  "license": "MIT",
+  "author": "Anthropic, PBC (https://anthropic.com)",
+  "homepage": "https://modelcontextprotocol.io",
+  "bugs": "https://github.com/modelcontextprotocol/servers/issues",
+  "type": "module",
+  "bin": {
+    "mcp-server-everart": "dist/index.js"
+  },
+  "files": [
+    "dist"
+  ],
+  "scripts": {
+    "build": "tsc && shx chmod +x dist/*.js",
+    "prepare": "npm run build",
+    "watch": "tsc --watch"
+  },
+  "dependencies": {
+    "@modelcontextprotocol/sdk": "0.5.0",
+    "everart": "^1.0.0",
+    "node-fetch": "^3.3.2",
+    "open": "^9.1.0"
+  },
+  "devDependencies": {
+    "@types/node": "^20.11.0",
+    "shx": "^0.3.4",
+    "typescript": "^5.3.3"
+  }
+}
diff --git a/src/everart/tsconfig.json b/src/everart/tsconfig.json
new file mode 100644
index 00000000..ec5da158
--- /dev/null
+++ b/src/everart/tsconfig.json
@@ -0,0 +1,10 @@
+{
+  "extends": "../../tsconfig.json",
+  "compilerOptions": {
+    "outDir": "./dist",
+    "rootDir": "."
+  },
+  "include": [
+    "./**/*.ts"
+  ]
+}
diff --git a/src/fetch/src/mcp_server_fetch/server.py b/src/fetch/src/mcp_server_fetch/server.py
index 3d35094b..c676c056 100644
--- a/src/fetch/src/mcp_server_fetch/server.py
+++ b/src/fetch/src/mcp_server_fetch/server.py
@@ -44,7 +44,7 @@ def extract_content_from_html(html: str) -> str:
     return content
 
 
-def get_robots_txt_url(url: AnyUrl | str) -> str:
+def get_robots_txt_url(url: str) -> str:
     """Get the robots.txt URL for a given website URL.
 
     Args:
@@ -54,7 +54,7 @@ def get_robots_txt_url(url: AnyUrl | str) -> str:
         URL of the robots.txt file
     """
     # Parse the URL into components
-    parsed = urlparse(str(url))
+    parsed = urlparse(url)
 
     # Reconstruct the base URL with just scheme, netloc, and /robots.txt path
     robots_url = urlunparse((parsed.scheme, parsed.netloc, "/robots.txt", "", "", ""))
@@ -62,7 +62,7 @@ def get_robots_txt_url(url: AnyUrl | str) -> str:
     return robots_url
 
 
-async def check_may_autonomously_fetch_url(url: AnyUrl | str, user_agent: str) -> None:
+async def check_may_autonomously_fetch_url(url: str, user_agent: str) -> None:
     """
     Check if the URL can be fetched by the user agent according to the robots.txt file.
     Raises a McpError if not.
@@ -106,7 +106,7 @@ async def check_may_autonomously_fetch_url(url: AnyUrl | str, user_agent: str) -
 
 
 async def fetch_url(
-    url: AnyUrl | str, user_agent: str, force_raw: bool = False
+    url: str, user_agent: str, force_raw: bool = False
 ) -> Tuple[str, str]:
     """
     Fetch the URL and return the content in a form ready for the LLM, as well as a prefix string with status information.
@@ -116,7 +116,7 @@ async def fetch_url(
     async with AsyncClient() as client:
         try:
             response = await client.get(
-                str(url),
+                url,
                 follow_redirects=True,
                 headers={"User-Agent": user_agent},
                 timeout=30,
@@ -221,7 +221,7 @@ async def call_tool(name, arguments: dict) -> list[TextContent]:
     except ValueError as e:
         raise McpError(INVALID_PARAMS, str(e))
 
-    url = args.url
+    url = str(args.url)
     if not url:
         raise McpError(INVALID_PARAMS, "URL is required")
 
diff --git a/src/sequentialthinking/README.md b/src/sequentialthinking/README.md
new file mode 100644
index 00000000..0b299c3f
--- /dev/null
+++ b/src/sequentialthinking/README.md
@@ -0,0 +1,63 @@
+
+# Sequential Thinking MCP Server
+
+An MCP server implementation that provides a tool for dynamic and reflective problem-solving through a structured thinking process.
+
+## Features
+
+- Break down complex problems into manageable steps
+- Revise and refine thoughts as understanding deepens
+- Branch into alternative paths of reasoning
+- Adjust the total number of thoughts dynamically
+- Generate and verify solution hypotheses
+
+## Tool
+
+### sequentialthinking
+
+Facilitates a detailed, step-by-step thinking process for problem-solving and analysis.
+
+**Inputs:**
+- `thought` (string): The current thinking step
+- `nextThoughtNeeded` (boolean): Whether another thought step is needed
+- `thoughtNumber` (integer): Current thought number
+- `totalThoughts` (integer): Estimated total thoughts needed
+- `isRevision` (boolean, optional): Whether this revises previous thinking
+- `revisesThought` (integer, optional): Which thought is being reconsidered
+- `branchFromThought` (integer, optional): Branching point thought number
+- `branchId` (string, optional): Branch identifier
+- `needsMoreThoughts` (boolean, optional): If more thoughts are needed
+
+## Usage
+
+The Sequential Thinking tool is designed for:
+- Breaking down complex problems into steps
+- Planning and design with room for revision
+- Analysis that might need course correction
+- Problems where the full scope might not be clear initially
+- Tasks that need to maintain context over multiple steps
+- Situations where irrelevant information needs to be filtered out
+
+## Configuration
+
+### Usage with Claude Desktop
+
+Add this to your `claude_desktop_config.json`:
+
+```json
+{
+  "mcpServers": {
+    "sequential-thinking": {
+      "command": "npx",
+      "args": [
+        "-y",
+        "@modelcontextprotocol/server-sequential-thinking"
+      ]
+    }
+  }
+}
+```
+
+## License
+
+This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.
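The README stops short of a sample call. Here is one in the style of the EverArt README, illustrative only and not part of this diff (`client` is assumed to be an MCP client connected as sketched earlier; the tool name matches the one registered in `index.ts` below):

```typescript
// Illustrative sketch; not part of this diff.
const result = await client.callTool({
  name: "sequentialthinking",
  arguments: {
    thought: "First, restate the problem and list the known constraints.",
    thoughtNumber: 1,
    totalThoughts: 3,
    nextThoughtNeeded: true,
  },
});
```

The reply is a single text content block whose body is JSON echoing `thoughtNumber`, `totalThoughts`, `nextThoughtNeeded`, the branch IDs seen so far, and the current history length.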
diff --git a/src/sequentialthinking/index.ts b/src/sequentialthinking/index.ts
new file mode 100644
index 00000000..c10301d7
--- /dev/null
+++ b/src/sequentialthinking/index.ts
@@ -0,0 +1,278 @@
+#!/usr/bin/env node
+
+import { Server } from "@modelcontextprotocol/sdk/server/index.js";
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import {
+  CallToolRequestSchema,
+  ListToolsRequestSchema,
+  Tool,
+} from "@modelcontextprotocol/sdk/types.js";
+// chalk v5 is ESM-only; a default import works because this package is ESM
+import chalk from 'chalk';
+
+interface ThoughtData {
+  thought: string;
+  thoughtNumber: number;
+  totalThoughts: number;
+  isRevision?: boolean;
+  revisesThought?: number;
+  branchFromThought?: number;
+  branchId?: string;
+  needsMoreThoughts?: boolean;
+  nextThoughtNeeded: boolean;
+}
+
+class SequentialThinkingServer {
+  private thoughtHistory: ThoughtData[] = [];
+  private branches: Record<string, ThoughtData[]> = {};
+
+  private validateThoughtData(input: unknown): ThoughtData {
+    const data = input as Record<string, unknown>;
+
+    if (!data.thought || typeof data.thought !== 'string') {
+      throw new Error('Invalid thought: must be a string');
+    }
+    if (!data.thoughtNumber || typeof data.thoughtNumber !== 'number') {
+      throw new Error('Invalid thoughtNumber: must be a number');
+    }
+    if (!data.totalThoughts || typeof data.totalThoughts !== 'number') {
+      throw new Error('Invalid totalThoughts: must be a number');
+    }
+    if (typeof data.nextThoughtNeeded !== 'boolean') {
+      throw new Error('Invalid nextThoughtNeeded: must be a boolean');
+    }
+
+    return {
+      thought: data.thought,
+      thoughtNumber: data.thoughtNumber,
+      totalThoughts: data.totalThoughts,
+      nextThoughtNeeded: data.nextThoughtNeeded,
+      isRevision: data.isRevision as boolean | undefined,
+      revisesThought: data.revisesThought as number | undefined,
+      branchFromThought: data.branchFromThought as number | undefined,
+      branchId: data.branchId as string | undefined,
+      needsMoreThoughts: data.needsMoreThoughts as boolean | undefined,
+    };
+  }
+
+  private formatThought(thoughtData: ThoughtData): string {
+    const { thoughtNumber, totalThoughts, thought, isRevision, revisesThought, branchFromThought, branchId } = thoughtData;
+
+    let prefix = '';
+    let context = '';
+
+    if (isRevision) {
+      prefix = chalk.yellow('🔄 Revision');
+      context = ` (revising thought ${revisesThought})`;
+    } else if (branchFromThought) {
+      prefix = chalk.green('🌿 Branch');
+      context = ` (from thought ${branchFromThought}, ID: ${branchId})`;
+    } else {
+      prefix = chalk.blue('💭 Thought');
+      context = '';
+    }
+
+    const header = `${prefix} ${thoughtNumber}/${totalThoughts}${context}`;
+    const border = '─'.repeat(Math.max(header.length, thought.length) + 4);
+
+    return `
+┌${border}┐
+│ ${header} │
+├${border}┤
+│ ${thought.padEnd(border.length - 2)} │
+└${border}┘`;
+  }
+
+  public processThought(input: unknown): { content: Array<{ type: string; text: string }>; isError?: boolean } {
+    try {
+      const validatedInput = this.validateThoughtData(input);
+
+      if (validatedInput.thoughtNumber > validatedInput.totalThoughts) {
+        validatedInput.totalThoughts = validatedInput.thoughtNumber;
+      }
+
+      this.thoughtHistory.push(validatedInput);
+
+      if (validatedInput.branchFromThought && validatedInput.branchId) {
+        if (!this.branches[validatedInput.branchId]) {
+          this.branches[validatedInput.branchId] = [];
+        }
+        this.branches[validatedInput.branchId].push(validatedInput);
+      }
+
+      const formattedThought = this.formatThought(validatedInput);
+      console.error(formattedThought); // stderr, so stdout stays free for MCP traffic
+
+      return {
+        content: [{
+          type: "text",
+          text: JSON.stringify({
+            thoughtNumber: validatedInput.thoughtNumber,
+            totalThoughts: validatedInput.totalThoughts,
+            nextThoughtNeeded: validatedInput.nextThoughtNeeded,
+            branches: Object.keys(this.branches),
+            thoughtHistoryLength: this.thoughtHistory.length
+          }, null, 2)
+        }]
+      };
+    } catch (error) {
+      return {
+        content: [{
+          type: "text",
+          text: JSON.stringify({
+            error: error instanceof Error ? error.message : String(error),
+            status: 'failed'
+          }, null, 2)
+        }],
+        isError: true
+      };
+    }
+  }
+}
+
+const SEQUENTIAL_THINKING_TOOL: Tool = {
+  name: "sequentialthinking",
+  description: `A detailed tool for dynamic and reflective problem-solving through thoughts.
+This tool helps analyze problems through a flexible thinking process that can adapt and evolve.
+Each thought can build on, question, or revise previous insights as understanding deepens.
+
+When to use this tool:
+- Breaking down complex problems into steps
+- Planning and design with room for revision
+- Analysis that might need course correction
+- Problems where the full scope might not be clear initially
+- Problems that require a multi-step solution
+- Tasks that need to maintain context over multiple steps
+- Situations where irrelevant information needs to be filtered out
+
+Key features:
+- You can adjust total_thoughts up or down as you progress
+- You can question or revise previous thoughts
+- You can add more thoughts even after reaching what seemed like the end
+- You can express uncertainty and explore alternative approaches
+- Not every thought needs to build linearly - you can branch or backtrack
+- Generates a solution hypothesis
+- Verifies the hypothesis based on the Chain of Thought steps
+- Repeats the process until satisfied
+- Provides a correct answer
+
+Parameters explained:
+- thought: Your current thinking step, which can include:
+* Regular analytical steps
+* Revisions of previous thoughts
+* Questions about previous decisions
+* Realizations about needing more analysis
+* Changes in approach
+* Hypothesis generation
+* Hypothesis verification
+- next_thought_needed: True if you need more thinking, even if at what seemed like the end
+- thought_number: Current number in sequence (can go beyond initial total if needed)
+- total_thoughts: Current estimate of thoughts needed (can be adjusted up/down)
+- is_revision: A boolean indicating if this thought revises previous thinking
+- revises_thought: If is_revision is true, which thought number is being reconsidered
+- branch_from_thought: If branching, which thought number is the branching point
+- branch_id: Identifier for the current branch (if any)
+- needs_more_thoughts: If reaching end but realizing more thoughts needed
+
+You should:
+1. Start with an initial estimate of needed thoughts, but be ready to adjust
+2. Feel free to question or revise previous thoughts
+3. Don't hesitate to add more thoughts if needed, even at the "end"
+4. Express uncertainty when present
+5. Mark thoughts that revise previous thinking or branch into new paths
+6. Ignore information that is irrelevant to the current step
+7. Generate a solution hypothesis when appropriate
+8. Verify the hypothesis based on the Chain of Thought steps
+9. Repeat the process until satisfied with the solution
+10. Provide a single, ideally correct answer as the final output
+11. Only set next_thought_needed to false when truly done and a satisfactory answer is reached`,
+  inputSchema: {
+    type: "object",
+    properties: {
+      thought: {
+        type: "string",
+        description: "Your current thinking step"
+      },
+      nextThoughtNeeded: {
+        type: "boolean",
+        description: "Whether another thought step is needed"
+      },
+      thoughtNumber: {
+        type: "integer",
+        description: "Current thought number",
+        minimum: 1
+      },
+      totalThoughts: {
+        type: "integer",
+        description: "Estimated total thoughts needed",
+        minimum: 1
+      },
+      isRevision: {
+        type: "boolean",
+        description: "Whether this revises previous thinking"
+      },
+      revisesThought: {
+        type: "integer",
+        description: "Which thought is being reconsidered",
+        minimum: 1
+      },
+      branchFromThought: {
+        type: "integer",
+        description: "Branching point thought number",
+        minimum: 1
+      },
+      branchId: {
+        type: "string",
+        description: "Branch identifier"
+      },
+      needsMoreThoughts: {
+        type: "boolean",
+        description: "If more thoughts are needed"
+      }
+    },
+    required: ["thought", "nextThoughtNeeded", "thoughtNumber", "totalThoughts"]
+  }
+};
+
+const server = new Server(
+  {
+    name: "sequential-thinking-server",
+    version: "0.2.0",
+  },
+  {
+    capabilities: {
+      tools: {},
+    },
+  }
+);
+
+const thinkingServer = new SequentialThinkingServer();
+
+server.setRequestHandler(ListToolsRequestSchema, async () => ({
+  tools: [SEQUENTIAL_THINKING_TOOL],
+}));
+
+server.setRequestHandler(CallToolRequestSchema, async (request) => {
+  if (request.params.name === "sequentialthinking") {
+    return thinkingServer.processThought(request.params.arguments);
+  }
+
+  return {
+    content: [{
+      type: "text",
+      text: `Unknown tool: ${request.params.name}`
+    }],
+    isError: true
+  };
+});
+
+async function runServer() {
+  const transport = new StdioServerTransport();
+  await server.connect(transport);
+  console.error("Sequential Thinking MCP Server running on stdio");
+}
+
+runServer().catch((error) => {
+  console.error("Fatal error running server:", error);
+  process.exit(1);
+});
diff --git a/src/sequentialthinking/package.json b/src/sequentialthinking/package.json
new file mode 100644
index 00000000..31110e1b
--- /dev/null
+++ b/src/sequentialthinking/package.json
@@ -0,0 +1,32 @@
+{
+  "name": "@modelcontextprotocol/server-sequential-thinking",
+  "version": "0.1.0",
+  "description": "MCP server for sequential thinking and problem solving",
+  "license": "MIT",
+  "author": "Anthropic, PBC (https://anthropic.com)",
+  "homepage": "https://modelcontextprotocol.io",
+  "bugs": "https://github.com/modelcontextprotocol/servers/issues",
+  "type": "module",
+  "bin": {
+    "mcp-server-sequential-thinking": "dist/index.js"
+  },
+  "files": [
+    "dist"
+  ],
+  "scripts": {
+    "build": "tsc && shx chmod +x dist/*.js",
+    "prepare": "npm run build",
+    "watch": "tsc --watch"
+  },
+  "dependencies": {
+    "@modelcontextprotocol/sdk": "0.5.0",
+    "chalk": "^5.3.0",
+    "yargs": "^17.7.2"
+  },
+  "devDependencies": {
+    "@types/node": "^20.11.0",
+    "@types/yargs": "^17.0.32",
+    "shx": "^0.3.4",
+    "typescript": "^5.3.3"
+  }
+}
diff --git a/src/sequentialthinking/tsconfig.json b/src/sequentialthinking/tsconfig.json
new file mode 100644
index 00000000..2ce5843e
--- /dev/null
+++ b/src/sequentialthinking/tsconfig.json
@@ -0,0 +1,10 @@
+{
+  "extends": "../../tsconfig.json",
+  "compilerOptions": {
+    "outDir": "./dist",
+    "rootDir": ".",
+    "moduleResolution": "NodeNext",
+    "module": "NodeNext"
+  },
+  "include": ["./**/*.ts"]
+}
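After `npm run build`, a quick smoke test for either new package is to spawn the built entry point with a minimal stdio client and list its tools. A sketch, with the caveat that the dist path and client API shown are assumptions rather than part of this diff:

```typescript
// Illustrative smoke test; not part of this diff.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import { ListToolsResultSchema } from "@modelcontextprotocol/sdk/types.js";

const transport = new StdioClientTransport({
  command: "node",
  args: ["src/sequentialthinking/dist/index.js"], // assumed build output path
});
const client = new Client(
  { name: "smoke-test", version: "0.1.0" },
  { capabilities: {} },
);
await client.connect(transport);

// Generic request() with a result schema; avoids newer convenience helpers.
const tools = await client.request(
  { method: "tools/list", params: {} },
  ListToolsResultSchema,
);
console.log(tools.tools.map((t) => t.name)); // expect: ["sequentialthinking"]
```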