docstrings experimental (langchain-ai#7969)
- added/changed docstring for `experimental`
- added/changed docstrings for different artifacts
@baskaryan
leo-gan authored Jul 24, 2023
1 parent 3eb4112 commit c580c81
Showing 31 changed files with 133 additions and 30 deletions.
@@ -7,14 +7,20 @@


class AutoGPTAction(NamedTuple):
"""Action for AutoGPT."""

name: str
"""Name of the action."""
args: Dict
"""Arguments for the action."""


class BaseAutoGPTOutputParser(BaseOutputParser):
"""Base class for AutoGPT output parsers."""

@abstractmethod
def parse(self, text: str) -> AutoGPTAction:
"""Return AutoGPTAction"""
"""Parse text and return AutoGPTAction"""


def preprocess_json_input(input_str: str) -> str:
@@ -36,6 +42,8 @@ def preprocess_json_input(input_str: str) -> str:


class AutoGPTOutputParser(BaseAutoGPTOutputParser):
"""Output parser for AutoGPT."""

def parse(self, text: str) -> AutoGPTAction:
try:
parsed = json.loads(text, strict=False)
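
For context, a minimal usage sketch of the parser documented above (the import path and the response format with a top-level `command` object are assumptions based on the surrounding AutoGPT prompt, not part of this diff):

```python
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
    AutoGPTOutputParser,
)

# Hypothetical model output; the real schema is dictated by the AutoGPT prompt.
llm_output = (
    '{"thoughts": {"text": "I should search."},'
    ' "command": {"name": "search", "args": {"query": "LangChain"}}}'
)

parser = AutoGPTOutputParser()
action = parser.parse(llm_output)
print(action.name, action.args)  # search {'query': 'LangChain'}
```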
@@ -123,7 +123,7 @@ def generate_prompt_string(self) -> str:


def get_prompt(tools: List[BaseTool]) -> str:
"""This function generates a prompt string.
"""Generate a prompt string.
It includes various constraints, commands, resources, and performance evaluations.
2 changes: 2 additions & 0 deletions libs/experimental/langchain_experimental/cpal/constants.py
@@ -2,6 +2,8 @@


class Constant(Enum):
"""Enum for constants used in the CPAL."""

narrative_input = "narrative_input"
chain_answer = "chain_answer" # natural language answer
chain_data = "chain_data" # pydantic instance
1 change: 1 addition & 0 deletions libs/langchain/langchain/callbacks/openai_info.py
@@ -55,6 +55,7 @@ def standardize_model_name(
) -> str:
"""
Standardize the model name to a format that can be used in the OpenAI API.
Args:
model_name: Model name to standardize.
is_completion: Whether the model is used for completion or not.
4 changes: 1 addition & 3 deletions libs/langchain/langchain/chains/query_constructor/parser.py
@@ -53,9 +53,7 @@ def v_args(*args: Any, **kwargs: Any) -> Any: # type: ignore

@v_args(inline=True)
class QueryTransformer(Transformer):
"""Transforms a query string into an IR representation
(intermediate representation).
"""
"""Transforms a query string into an intermediate representation."""

def __init__(
self,
7 changes: 7 additions & 0 deletions libs/langchain/langchain/chat_models/base.py
@@ -33,11 +33,16 @@ def _get_verbosity() -> bool:


class BaseChatModel(BaseLanguageModel, ABC):
"""Base class for chat models."""

cache: Optional[bool] = None
"""Whether to cache the response."""
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Callbacks to add to the run trace."""
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
"""Callback manager to add to the run trace."""
tags: Optional[List[str]] = Field(default=None, exclude=True)
"""Tags to add to the run trace."""
metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True)
@@ -441,6 +446,8 @@ def dict(self, **kwargs: Any) -> Dict:


class SimpleChatModel(BaseChatModel):
"""Simple Chat Model."""

def _generate(
self,
messages: List[BaseMessage],
1 change: 1 addition & 0 deletions libs/langchain/langchain/document_loaders/notiondb.py
@@ -15,6 +15,7 @@

class NotionDBLoader(BaseLoader):
"""Notion DB Loader.
Reads content from pages within a Notion Database.
Args:
integration_token (str): Notion integration token.
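
As a usage sketch for the loader above (only `integration_token` is visible in this hunk; the `database_id` argument and the placeholder values are assumptions):

```python
from langchain.document_loaders import NotionDBLoader

loader = NotionDBLoader(
    integration_token="secret_...",  # your Notion integration token
    database_id="0123456789abcdef0123456789abcdef",  # assumed second argument
)
docs = loader.load()  # one Document per page in the database
```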
2 changes: 2 additions & 0 deletions libs/langchain/langchain/document_loaders/parsers/grobid.py
@@ -8,6 +8,8 @@


class ServerUnavailableException(Exception):
"""Exception raised when the GROBID server is unavailable."""

pass


3 changes: 3 additions & 0 deletions libs/langchain/langchain/document_loaders/rocksetdb.py
@@ -5,10 +5,13 @@


def default_joiner(docs: List[Tuple[str, Any]]) -> str:
"""Default joiner for content columns."""
return "\n".join([doc[1] for doc in docs])


class ColumnNotFoundError(Exception):
"""Column not found error."""

def __init__(self, missing_key: str, query: str):
super().__init__(f'Column "{missing_key}" not selected in query:\n{query}')
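
A quick illustration of `default_joiner` as defined above: it concatenates only the values of the `(column, value)` tuples it receives.

```python
from langchain.document_loaders.rocksetdb import default_joiner

rows = [("title", "Release notes"), ("body", "Fixed a bug in the loader.")]
print(default_joiner(rows))
# Release notes
# Fixed a bug in the loader.
```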

@@ -36,6 +36,8 @@ class TrajectoryEval(NamedTuple):


class TrajectoryOutputParser(BaseOutputParser):
"""Trajectory output parser."""

@property
def _type(self) -> str:
return "agent_trajectory"
@@ -7,6 +7,8 @@


class AutoGPTMemory(BaseChatMemory):
"""Memory for AutoGPT."""

retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStoreRetriever object to connect to."""

@@ -3,7 +3,7 @@


class TaskCreationChain(LLMChain):
"""Chain to generates tasks."""
"""Chain generating tasks."""

@classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
@@ -11,11 +11,10 @@


class GenerativeAgent(BaseModel):
"""A character with memory and innate characteristics."""
"""An Agent as a character with memory and innate characteristics."""

name: str
"""The character's name."""

age: Optional[int] = None
"""The optional age of the character."""
traits: str = "N/A"
@@ -29,13 +28,10 @@ class GenerativeAgent(BaseModel):
verbose: bool = False
summary: str = "" #: :meta private:
"""Stateful self-summary generated via reflection on the character's memory."""

summary_refresh_seconds: int = 3600 #: :meta private:
"""How frequently to re-generate the summary."""

last_refreshed: datetime = Field(default_factory=datetime.now) # : :meta private:
"""The last time the character's summary was regenerated."""

daily_summaries: List[str] = Field(default_factory=list) # : :meta private:
"""Summary of the events in the plan that the agent took."""

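
A rough sketch of how the `GenerativeAgent` above and the `GenerativeAgentMemory` documented in the next hunk fit together (the FAISS/OpenAI wiring, the `status` field, and `get_summary()` are assumptions drawn from the wider generative-agents code, not from this diff):

```python
import faiss
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.generative_agents import GenerativeAgent, GenerativeAgentMemory
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # dimensionality of OpenAI embeddings
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, k=5)

llm = ChatOpenAI(temperature=0.7)
memory = GenerativeAgentMemory(
    llm=llm,
    memory_retriever=retriever,
    reflection_threshold=8.0,  # reflect once recent memories are important enough
)
tommie = GenerativeAgent(
    name="Tommie",
    age=25,
    traits="anxious, likes design",
    status="looking for a job",  # assumed field, not shown in this hunk
    llm=llm,
    memory=memory,
)
print(tommie.get_summary(force_refresh=True))
```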
@@ -14,24 +14,21 @@


class GenerativeAgentMemory(BaseMemory):
"""Memory for the generative agent."""

llm: BaseLanguageModel
"""The core language model."""

memory_retriever: TimeWeightedVectorStoreRetriever
"""The retriever to fetch related memories."""
verbose: bool = False

reflection_threshold: Optional[float] = None
"""When aggregate_importance exceeds reflection_threshold, stop to reflect."""

current_plan: List[str] = []
"""The current plan of the agent."""

# A weight of 0.15 makes this less important than it
# would be otherwise, relative to salience and time
importance_weight: float = 0.15
"""How much weight to assign the memory importance."""

aggregate_importance: float = 0.0 # : :meta private:
"""Track the sum of the 'importance' of recent memories.
@@ -18,14 +18,19 @@ def import_jsonformer() -> jsonformer:
try:
import jsonformer
except ImportError:
raise ValueError(
raise ImportError(
"Could not import jsonformer python package. "
"Please install it with `pip install jsonformer`."
)
return jsonformer


class JsonFormer(HuggingFacePipeline):
"""Jsonformer wrapped LLM using HuggingFace Pipeline API.
This pipeline is experimental and not yet stable.
"""

json_schema: dict = Field(..., description="The JSON Schema to complete.")
max_new_tokens: int = Field(
default=200, description="Maximum number of new tokens to generate."
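
A hedged sketch of constructing the experimental `JsonFormer` wrapper above (the transformers pipeline wiring and the `langchain.experimental.llms` import path are assumptions based on it subclassing `HuggingFacePipeline`):

```python
from transformers import pipeline

from langchain.experimental.llms import JsonFormer  # import path assumed

# Any causal LM pipeline exposing .model and .tokenizer should do here.
hf_pipeline = pipeline("text-generation", model="gpt2")

schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "number"},
    },
}

llm = JsonFormer(json_schema=schema, pipeline=hf_pipeline, max_new_tokens=200)
print(llm("Describe a fictional person as JSON:"))
```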
4 changes: 3 additions & 1 deletion libs/langchain/langchain/experimental/llms/rellm_decoder.py
@@ -24,14 +24,16 @@ def import_rellm() -> rellm:
try:
import rellm
except ImportError:
raise ValueError(
raise ImportError(
"Could not import rellm python package. "
"Please install it with `pip install rellm`."
)
return rellm


class RELLM(HuggingFacePipeline):
"""RELLM wrapped LLM using HuggingFace Pipeline API."""

regex: RegexPattern = Field(..., description="The structured format to complete.")
max_new_tokens: int = Field(
default=200, description="Maximum number of new tokens to generate."
@@ -13,9 +13,14 @@


class PlanAndExecute(Chain):
"""Plan and execute a chain of steps."""

planner: BasePlanner
"""The planner to use."""
executor: BaseExecutor
"""The executor to use."""
step_container: BaseStepContainer = Field(default_factory=ListStepContainer)
"""The step container to use."""
input_key: str = "input"
output_key: str = "output"
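
A minimal sketch of assembling the chain above, assuming the `load_chat_planner` / `load_agent_executor` helpers from the same `plan_and_execute` package (the empty tools list is a placeholder):

```python
from langchain.chat_models import ChatOpenAI
from langchain.experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)

llm = ChatOpenAI(temperature=0)
planner = load_chat_planner(llm)
executor = load_agent_executor(llm, tools=[], verbose=True)  # add real tools here

agent = PlanAndExecute(planner=planner, executor=executor)
agent.run("Plan a three-step approach to learning the basics of Rust.")
```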

@@ -9,6 +9,8 @@


class BaseExecutor(BaseModel):
"""Base executor."""

@abstractmethod
def step(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
@@ -19,11 +21,14 @@ def step(
async def astep(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> StepResponse:
"""Take step."""
"""Take async step."""


class ChainExecutor(BaseExecutor):
"""Chain executor."""

chain: Chain
"""The chain to use."""

def step(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
@@ -9,6 +9,8 @@


class BasePlanner(BaseModel):
"""Base planner."""

@abstractmethod
def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
"""Given input, decide what to do."""
@@ -17,13 +19,18 @@ def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan
async def aplan(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> Plan:
"""Given input, decide what to do."""
"""Given input, asynchronously decide what to do."""


class LLMPlanner(BasePlanner):
"""LLM planner."""

llm_chain: LLMChain
"""The LLM chain to use."""
output_parser: PlanOutputParser
"""The output parser to use."""
stop: Optional[List] = None
"""The stop list to use."""

def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
"""Given input, decide what to do."""
@@ -33,7 +40,7 @@ def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan
async def aplan(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> Plan:
"""Given input, decide what to do."""
"""Given input, asynchronously decide what to do."""
llm_response = await self.llm_chain.arun(
**inputs, stop=self.stop, callbacks=callbacks
)
@@ -24,6 +24,8 @@


class PlanningOutputParser(PlanOutputParser):
"""Planning output parser."""

def parse(self, text: str) -> Plan:
steps = [Step(value=v) for v in re.split("\n\s*\d+\. ", text)[1:]]
return Plan(steps=steps)
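
Given the regex above, the parser simply splits a numbered list into steps; for example (module path assumed):

```python
from langchain.experimental.plan_and_execute.planners.chat_planner import (
    PlanningOutputParser,
)

parser = PlanningOutputParser()
plan = parser.parse("Plan:\n1. Look up the weather.\n2. Summarize the forecast.")
print([step.value for step in plan.steps])
# ['Look up the weather.', 'Summarize the forecast.']
```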
@@ -34,6 +36,7 @@ def load_chat_planner(
) -> LLMPlanner:
"""
Load a chat planner.
Args:
llm: Language model.
system_prompt: System prompt.
16 changes: 16 additions & 0 deletions libs/langchain/langchain/experimental/plan_and_execute/schema.py
@@ -7,18 +7,29 @@


class Step(BaseModel):
"""Step."""

value: str
"""The value."""


class Plan(BaseModel):
"""Plan."""

steps: List[Step]
"""The steps."""


class StepResponse(BaseModel):
"""Step response."""

response: str
"""The response."""


class BaseStepContainer(BaseModel):
"""Base step container."""

@abstractmethod
def add_step(self, step: Step, step_response: StepResponse) -> None:
"""Add step and step response to the container."""
@@ -29,7 +40,10 @@ def get_final_response(self) -> str:


class ListStepContainer(BaseStepContainer):
"""List step container."""

steps: List[Tuple[Step, StepResponse]] = Field(default_factory=list)
"""The steps."""

def add_step(self, step: Step, step_response: StepResponse) -> None:
self.steps.append((step, step_response))
@@ -42,6 +56,8 @@ def get_final_response(self) -> str:


class PlanOutputParser(BaseOutputParser):
"""Plan output parser."""

@abstractmethod
def parse(self, text: str) -> Plan:
"""Parse into a plan."""
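To round out the schema hunk above, a small example of `ListStepContainer` collecting step responses (the module path comes from the file header; `get_final_response` is assumed to return the last recorded response):

```python
from langchain.experimental.plan_and_execute.schema import (
    ListStepContainer,
    Plan,
    Step,
    StepResponse,
)

plan = Plan(steps=[Step(value="Look up the weather"), Step(value="Write a summary")])

container = ListStepContainer()
for step in plan.steps:
    container.add_step(step, StepResponse(response=f"done: {step.value}"))

print(container.get_final_response())  # response recorded for the last step
```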
