from langchain.chat_models import ChatOpenAI
from openai import BadRequestError

# Demonstrates handling an OpenAI context-length error when the prompt is
# too large for the model's context window.

openai_llm = ChatOpenAI(model="gpt-4")

try:
    # NOTE(review): `text` is not defined in this snippet — it is presumably
    # a (large) document defined earlier in the surrounding tutorial/notebook.
    # Confirm it is in scope before running.
    print(openai_llm.invoke(f"次の文章を要約してください: {text}"))
except BadRequestError as error:
    # openai-python v1 errors carry the HTTP response; pull the API's
    # human-readable error message out of the JSON error envelope.
    message = error.response.json()["error"]["message"]
    print("Hit error: ", message)

# Example output when `text` exceeds the model's context window:
#   Hit error: This model's maximum context length is 8192 tokens.
#   However, your messages resulted in 36534 tokens