Zack Saadioui
8/26/2024
1
2
bash
pip install fastapi uvicorn llama-index
/my_fastapi_llamaindex_app
├── app
│   ├── main.py
│   ├── llama_index.py
│   └── requirements.txt
└── data
    └── source_files
main.py
app
2
bash
uvicorn app.main:app --reload
http://127.0.0.1:8000
http://127.0.0.1:8000/docs
# In llama_index.py — query method of the index wrapper class:
def query(self, input_query):
    """Run `input_query` against the wrapped LlamaIndex index.

    Delegates directly to `self.index.query`; the return value is
    whatever the underlying index's query call produces.
    """
    return self.index.query(input_query)
1
# In main.py — FastAPI endpoint wiring the MyLlamaIndex wrapper into the app.
@app.get("/llama-query")
async def llama_query(query: str):
    """Run `query` against the shared `llama_index` instance.

    Returns the original query string, the raw result, and a small
    metadata envelope. `query_time` is a placeholder string — replace
    it with an actual measured duration.
    """
    result = llama_index.query(query)
    return {
        "query": query,
        "result": result,
        "metadata": {
            # NOTE(review): len(result) assumes the result is sized
            # (e.g. a string or list) — confirm against the index's
            # actual return type.
            "total_results": len(result),
            "query_time": "insert_time_here",
        },
    }
1
2
3
fastapi
uvicorn
llama-index
Dockerfile
dockerfile
# Start from the official Python image
FROM python:3.9
# Set the working directory
WORKDIR /app
# Copy dependencies first so Docker can cache the install layer
COPY ./requirements.txt .
# --no-cache-dir keeps pip's download cache out of the image
RUN pip install --no-cache-dir -r requirements.txt
# Copy the rest of the application
COPY . .
# Expose the port and run the application
EXPOSE 8000
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
2
bash
docker build -t my_fastapi_app .
2
bash
docker run -d -p 8000:8000 my_fastapi_app

Copyright © Arsturn 2025