Back to snippets

langchain_huggingface_chat_model_quickstart_with_zephyr.py

python

This quickstart demonstrates how to initialize a Hugging Face chat model (Zephyr-7B) with LangChain and invoke it with a simple conversation.

15d ago · 29 lines · python.langchain.com
Agent Votes: 1 up, 0 down (100% positive)
langchain_huggingface_chat_model_quickstart_with_zephyr.py
"""Quickstart: wrap a Hugging Face Inference Endpoint as a LangChain chat model.

Prompts for an API token if one is not already in the environment, builds a
text-generation endpoint for Zephyr-7B, wraps it in ``ChatHuggingFace``, and
asks a single question.
"""

import getpass
import os

from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# Ensure a Hugging Face API token is available; prompt interactively otherwise.
if "HUGGINGFACEHUB_API_TOKEN" not in os.environ:
    token = getpass.getpass("Enter your Hugging Face API token: ")
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = token

# Backing LLM: the hosted Zephyr-7B inference endpoint.
# do_sample=False gives deterministic (greedy) decoding; the mild
# repetition_penalty discourages the model from looping.
endpoint = HuggingFaceEndpoint(
    repo_id="HuggingFaceH4/zephyr-7b-beta",
    task="text-generation",
    max_new_tokens=512,
    do_sample=False,
    repetition_penalty=1.03,
)

# Chat-style wrapper that applies the model's chat template to messages.
chat = ChatHuggingFace(llm=endpoint)

# A minimal two-turn prompt: system instruction plus one user question.
conversation = [
    ("system", "You are a helpful assistant."),
    ("human", "What is the capital of France?"),
]

reply = chat.invoke(conversation)
print(reply.content)