From 3cf5a7908ddcb70be216b6d2f2b2be60dcb7e0ce Mon Sep 17 00:00:00 2001
From: David Huntsperger <5672572+pcoet@users.noreply.github.com>
Date: Sun, 10 Mar 2024 22:03:38 -0700
Subject: [PATCH] syncing changes to Keras + Gemma get started (#293)

---
 site/en/gemma/docs/get_started.ipynb | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/site/en/gemma/docs/get_started.ipynb b/site/en/gemma/docs/get_started.ipynb
index e8e6e3306..0faa40e9e 100644
--- a/site/en/gemma/docs/get_started.ipynb
+++ b/site/en/gemma/docs/get_started.ipynb
@@ -234,8 +234,24 @@
     "id": "XrAWvsU6pI0E"
    },
    "source": [
-    "`from_preset` instantiates the model from a preset architecture and weights. In the code above, the string `\"gemma_2b_en\"` specifies the preset architecture: a Gemma model with 2 billion parameters. (A Gemma model with 7 billion parameters is also available. To run the larger model in Colab, you need access to the premium GPUs available in paid plans. Alternatively, you can perform [distributed tuning on a Gemma 7B model](https://ai.google.dev/gemma/docs/distributed_tuning) on Kaggle or Google Cloud.)\n",
-    "\n",
+    "`from_preset` instantiates the model from a preset architecture and weights. In the code above, the string `\"gemma_2b_en\"` specifies the preset architecture: a Gemma model with 2 billion parameters.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "Ij73k0PfUhjE"
+   },
+   "source": [
+    "Note: A Gemma model with 7 billion parameters is also available. To run the larger model in Colab, you need access to the premium GPUs available in paid plans. Alternatively, you can perform [distributed tuning on a Gemma 7B model](https://ai.google.dev/gemma/docs/distributed_tuning) on Kaggle or Google Cloud."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "E-cSEjULUhST"
+   },
+   "source": [
     "Use `summary` to get more info about the model:"
    ]
   },
@@ -380,7 +396,9 @@
     "id": "81KHdRYOrWYm"
    },
    "source": [
-    "As you can see from the summary, the model has 2.5 billion trainable parameters."
+    "As you can see from the summary, the model has 2.5 billion trainable parameters.\n",
+    "\n",
+    "Note: For purposes of naming the model (\"2B\"), the embedding layer is not counted against the number of parameters."
    ]
   },
   {
@@ -561,11 +579,13 @@
     "\n",
     "* Learn how to [finetune a Gemma model](https://ai.google.dev/gemma/docs/lora_tuning).\n",
     "* Learn how to perform [distributed fine-tuning and inference on a Gemma model](https://ai.google.dev/gemma/docs/distributed_tuning).\n",
+    "* Learn about [Gemma integration with Vertex AI](https://ai.google.dev/gemma/docs/integrations/vertex).\n",
     "* Learn how to [use Gemma models with Vertex AI](https://cloud.google.com/vertex-ai/docs/generative-ai/open-models/use-gemma)."
    ]
   }
  ],
  "metadata": {
+  "accelerator": "GPU",
   "colab": {
    "name": "get_started.ipynb",
    "toc_visible": true
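
For context on the cells this patch edits, here is a minimal sketch of the workflow the updated prose describes, using the KerasNLP Gemma preset API. It assumes `keras` and `keras-nlp` are installed and that Kaggle credentials are already configured so the preset weights can download; the prompt string and `max_length` value are illustrative, not part of the patch.

```python
# Minimal sketch of the get-started flow described in the patched cells.
# Assumes keras-nlp is installed and Kaggle credentials (KAGGLE_USERNAME /
# KAGGLE_KEY) are set so the preset weights can be fetched.
import keras_nlp

# from_preset builds the model from a preset architecture and weights;
# "gemma_2b_en" selects the 2-billion-parameter English Gemma preset.
gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset("gemma_2b_en")

# summary reports the layer structure and the ~2.5 billion trainable
# parameters (the "2B" name does not count the embedding layer).
gemma_lm.summary()

# Quick generation call to confirm the model loaded; prompt and
# max_length are illustrative values.
print(gemma_lm.generate("What is the meaning of life?", max_length=64))
```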