diff --git a/setup.cfg b/setup.cfg
index d95117d..e0e98e2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,11 +1,11 @@
 [metadata]
 name = napari-chatgpt
-version = v2024.3.26
+version = v2024.3.26.3
 description = A napari plugin to process and analyse images with chatGPT.
 long_description = file: README.md
 long_description_content_type = text/markdown
 url = https://github.com/royerlab/napari-chatgpt
-author = Loic A. Royer
+author = Loic A. Royer and contributors
 author_email = royerloic@gmail.com
 license = BSD-3-Clause
 license_files = LICENSE
@@ -38,6 +38,7 @@ install_requires =
     QtAwesome
     langchain==0.1.11
     langchain-openai==0.0.8
+    langchain-anthropic==0.1.4
     openai==1.13.3
     anthropic
     fastapi
diff --git a/src/napari_chatgpt/_widget.py b/src/napari_chatgpt/_widget.py
index edeb3da..504aea7 100644
--- a/src/napari_chatgpt/_widget.py
+++ b/src/napari_chatgpt/_widget.py
@@ -126,8 +126,8 @@ def _model_selection(self):
             model_list.append('claude-2.1')
             model_list.append('claude-2.0')
             model_list.append('claude-instant-1.2')
-            #model_list.append('claude-3-sonnet-20240229')
-            #model_list.append('claude-3-opus-20240229')
+            model_list.append('claude-3-sonnet-20240229')
+            model_list.append('claude-3-opus-20240229')
 
 
         if is_ollama_running():
@@ -450,14 +450,14 @@ def _start_omega(self):
         main_llm_model_name = self.model_combo_box.currentText()
 
         # Warn users with a modal window that the selected model might be sub-optimal:
-        if 'gpt-4' not in main_llm_model_name:
+        if 'gpt-4' not in main_llm_model_name and 'claude-3-opus' not in main_llm_model_name:
             aprint("Warning: you did not select a gpt-4 level model. Omega's cognitive and coding abilities will be degraded.")
             show_warning_dialog(f"You have selected this model: '{main_llm_model_name}'. "
-                                f"This is not a GPT4-level model. "
+                                f"This is not a GPT4 or Claude-3-opus level model. "
                                 f"Omega's cognitive and coding abilities will be degraded. "
                                 f"It might even completely fail or be too slow. "
                                 f"Please visit our wiki "
-                                f"for information on how to gain access to GPT4.")
+                                f"for information on how to gain access to GPT4 (or Claude-3).")
 
         # Set tool LLM model name via configuration file.
         tool_llm_model_name = self.config.get('tool_llm_model_name', 'same')
diff --git a/src/napari_chatgpt/llm/llms.py b/src/napari_chatgpt/llm/llms.py
index fae7e99..aaad6d9 100644
--- a/src/napari_chatgpt/llm/llms.py
+++ b/src/napari_chatgpt/llm/llms.py
@@ -63,9 +63,16 @@ def _instantiate_single_llm(llm_model_name: str,
     elif 'claude' in llm_model_name:
 
         # Import Claude LLM:
-        from langchain.chat_models import ChatAnthropic
+        from langchain_anthropic import ChatAnthropic
 
-        max_token_limit = 8000
+        llm_model_name_lc = llm_model_name.lower()
+
+        if 'opus' in llm_model_name_lc or 'sonnet' in llm_model_name_lc or 'haiku' in llm_model_name_lc or '2.1' in llm_model_name_lc:
+            max_tokens_to_sample = 4096
+            max_token_limit = 200000
+        else:
+            max_tokens_to_sample = 4096
+            max_token_limit = 8000
 
         # Instantiates Main LLM:
         llm = ChatAnthropic(
@@ -73,7 +80,7 @@ def _instantiate_single_llm(llm_model_name: str,
             verbose=verbose,
             streaming=streaming,
             temperature=temperature,
-            max_tokens_to_sample=max_token_limit,
+            max_tokens_to_sample=max_tokens_to_sample,
             callbacks=[callback_handler])
 
         return llm, max_token_limit
@@ -103,7 +110,7 @@ def _instantiate_single_llm(llm_model_name: str,
         # Wait a bit:
         sleep(3)
-        # Make ure that Ollama is running
+        # Make sure that Ollama is running
         if not is_ollama_running(ollama_host, ollama_port):
             aprint(f"Ollama server is not running on '{ollama_host}'. "
                    f"Please start the Ollama server on this machine and make sure the port '{ollama_port}' is open. ")
             raise Exception("Ollama server is not running!")
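For reviewers: the snippet below is a minimal, self-contained sketch of the new Claude branch in `_instantiate_single_llm`, showing why `max_tokens_to_sample` (the per-completion cap sent to the API) is now decoupled from `max_token_limit` (the model's context-window size). It assumes `langchain-anthropic==0.1.4` is installed and `ANTHROPIC_API_KEY` is set in the environment; the hard-coded model name and the final `invoke` call are illustrative only, not part of the plugin's wiring.

```python
from langchain_anthropic import ChatAnthropic

llm_model_name = 'claude-3-opus-20240229'  # illustrative choice
llm_model_name_lc = llm_model_name.lower()

# Claude 3 (opus/sonnet/haiku) and Claude 2.1 offer a 200k-token context
# window; older Claude models top out around 8k. The completion cap is
# 4096 either way, which is why it can no longer double as the
# context-window size as it did before this change.
if any(key in llm_model_name_lc for key in ('opus', 'sonnet', 'haiku', '2.1')):
    max_token_limit = 200000
else:
    max_token_limit = 8000
max_tokens_to_sample = 4096

# max_tokens_to_sample is accepted by langchain_anthropic as an alias
# for its max_tokens field, so the existing keyword argument still works
# after switching away from langchain.chat_models.ChatAnthropic.
llm = ChatAnthropic(
    model=llm_model_name,
    temperature=0.0,
    streaming=True,
    max_tokens_to_sample=max_tokens_to_sample)

print(llm.invoke("Reply with a single word.").content)
```

In the plugin itself, `max_token_limit` is returned alongside the LLM so the agent can size its conversational memory to the model's context window, while `max_tokens_to_sample` only bounds each individual completion.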