diff --git a/.cursorrules b/.cursorrules
new file mode 100644
index 0000000000000000000000000000000000000000..c40b1fb10d8687aba8468993ffe5149beae6c89a
--- /dev/null
+++ b/.cursorrules
@@ -0,0 +1 @@
+# Project Overview This project, named GPT-Researcher, LLM based autonomous agent that conducts local and web research on any topic and generates a comprehensive report with citations, is built using Next.js and TypeScript. It integrates various libraries for their strenghts. Your primary goal is to help with Next.js app router patterns, TypeScript type safety, Tailwind CSS best practices, code quality standards, and Python/FastAPI backend optimizations. # Key URLs - Project Home Page: https://gptr.dev/ - GitHub Repository: https://github.com/assafelovic/gpt-researcher - Documentation: https://docs.gptr.dev/ # Project Structure - Frontend user interface built with Next.js, TypeScript, and Tailwind CSS in `/frontend` - Static FastAPI version for lightweight deployments - Next.js version for production use with enhanced features - Multi-agent research system using LangChain and LangGraph in `/backend/multi_agents` - Browser, Editor, Researcher, Reviewer, Revisor, Writer, and Publisher agents - Task configuration and agent coordination - Document processing using Unstructured and PyMuPDF in `/backend/document_processing` - PDF, DOCX, and web content parsing - Text extraction and preprocessing - Report generation using LangChain and Jinja2 templates in `/backend/report_generation` - Template-based report structuring - Dynamic content formatting - Multiple output formats in `/backend/output_formats` - PDF via md2pdf - Markdown via mistune - DOCX via python-docx - Format conversion utilities - Export functionality - GPT Researcher core functionality in `/gpt_researcher` - Web scraping and content aggregation - Research planning and execution - Source validation and tracking - Query processing and response generation - Testing infrastructure in `/tests` - Unit tests for individual components - Integration tests for agent interactions - End-to-end research workflow tests - Mock data and fixtures for testing # Language Model Configuration - Default model: gpt-4-turbo - Alternative models: gpt-3.5-turbo, claude-3-opus - Temperature settings for different tasks - Context window management - Token limit handling - Cost optimization strategies # Error Handling - Research failure recovery - API rate limiting - Network timeout handling - Invalid input management - Source validation errors - Report generation failures # Performance - Parallel processing strategies - Caching mechanisms - Memory management - Response streaming - Resource allocation - Query optimization # Development Workflow - Branch naming conventions - Commit message format - PR review process - Testing requirements - Documentation updates - Version control guidelines # API Documentation - REST endpoints - WebSocket events - Request/Response formats - Authentication methods - Rate limits - Error codes # Monitoring - Performance metrics - Error tracking - Usage statistics - Cost monitoring - Research quality metrics - User feedback tracking # Frontend Components - Static FastAPI version for lightweight deployments - Next.js version for production use with enhanced features # Backend Components - Multi-agent system architecture - Document processing pipeline - Report generation system - Output format handlers # Core Research Components - Web scraping and aggregation - Research planning and execution - Source validation - Query processing # Testing - Unit tests - Integration tests - End-to-end tests - Performance testing # Rule Violation Monitoring - Alert developer when changes conflict with project structure - Warn about deviations from coding standards - Flag unauthorized framework or library additions - Monitor for security and performance anti-patterns - Track API usage patterns that may violate guidelines - Report TypeScript strict mode violations - Identify accessibility compliance issues # Development Guidelines - Use TypeScript with strict mode enabled - Follow ESLint and Prettier configurations - Ensure components are responsive and accessible - Use Tailwind CSS for styling, following the project's design system - Minimize AI-generated comments, prefer self-documenting code - Follow React best practices and hooks guidelines - Validate all user inputs and API responses - Use existing components as reference implementations # Important Scripts - `npm run dev`: Start development server - `npm run build`: Build for production - `npm run test`: Run test suite - `python -m pytest`: Run Python tests - `docker-compose up`: Start all services - `docker-compose run gpt-researcher-tests`: Run test suite in container - `python -m uvicorn backend.server.server:app --host=0.0.0.0 --port=8000`: Start FastAPI server - `python -m uvicorn backend.server.server:app --reload`: Start FastAPI server with auto-reload for development - `python main.py`: Run the main application directly # AI Integration Guidelines - Prioritize type safety in all AI interactions - Follow LangChain and LangGraph best practices - Implement proper error handling for AI responses - Maintain context window limits - Handle rate limiting and API quotas - Validate AI outputs before processing - Log AI interactions for debugging # Lexicon - **GPT Researcher**: Autonomous research agent system - **Multi-Agent System**: Coordinated AI agents for research tasks - **Research Pipeline**: End-to-end research workflow - **Agent Roles**: Browser, Editor, Researcher, Reviewer, Revisor, Writer, Publisher - **Source Validation**: Verification of research sources - **Report Generation**: Process of creating final research output # Additional Resources - [Next.js Documentation](https://nextjs.org/docs) - [TypeScript Handbook](https://www.typescriptlang.org/docs/) - [Tailwind CSS Documentation](https://tailwindcss.com/docs) - [LangChain Documentation](https://python.langchain.com/docs/) - [FastAPI Documentation](https://fastapi.tiangolo.com/) - [Project Documentation](https://docs.gptr.dev/) End all your comments with a :-) symbol.
\ No newline at end of file
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..e8f991d7a2f6a2b81cef8500f3f98b6b2c8323c9
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+.git
+output/
diff --git a/.env b/.env
new file mode 100644
index 0000000000000000000000000000000000000000..fff6fe6a7013ba439536710f5134e404123280a2
--- /dev/null
+++ b/.env
@@ -0,0 +1,8 @@
+GOOGLE_API_KEY=AIzaSyCISHY92IzU60M8Jf0qCWIRCyhGUAj_haU
+FAST_LLM="google_genai:gemini-1.5-flash"
+SMART_LLM="google_genai:gemini-1.5-pro"
+STRATEGIC_LLM="google_genai:gemini-1.5-pro"
+
+EMBEDDING="google_genai:models/text-embedding-004"
+
+TAVILY_API_KEY=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
\ No newline at end of file
diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..3805f41a617a034db2ef4402c166ae0ef75a1f10 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,24 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+docs/blog/2023-09-22-gpt-researcher/architecture.png filter=lfs diff=lfs merge=lfs -text
+docs/blog/2023-09-22-gpt-researcher/planner.jpeg filter=lfs diff=lfs merge=lfs -text
+docs/blog/2024-05-19-gptr-langgraph/blog-langgraph.jpeg filter=lfs diff=lfs merge=lfs -text
+docs/blog/2024-09-7-hybrid-research/gptr-hybrid.png filter=lfs diff=lfs merge=lfs -text
+docs/docs/gpt-researcher/context/gptr-hybrid.png filter=lfs diff=lfs merge=lfs -text
+docs/static/img/architecture.png filter=lfs diff=lfs merge=lfs -text
+docs/static/img/leaderboard.png filter=lfs diff=lfs merge=lfs -text
+frontend/nextjs/public/img/agents/academicResearchAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/nextjs/public/img/agents/businessAnalystAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/nextjs/public/img/agents/computerSecurityanalystAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/nextjs/public/img/agents/financeAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/nextjs/public/img/agents/mathAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/nextjs/public/img/agents/travelAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/nextjs/public/img/gptr-logo.png filter=lfs diff=lfs merge=lfs -text
+frontend/static/academicResearchAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/static/businessAnalystAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/static/computerSecurityanalystAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/static/financeAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/static/mathAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+frontend/static/travelAgentAvatar.png filter=lfs diff=lfs merge=lfs -text
+tests/docs/doc.pdf filter=lfs diff=lfs merge=lfs -text
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000000000000000000000000000000000..6867cf8d2f6c61215f32a329b4b28831ac65d94a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,38 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Browser [e.g. chrome, safari]
+ - Version [e.g. 22]
+
+**Smartphone (please complete the following information):**
+ - Device: [e.g. iPhone6]
+ - OS: [e.g. iOS8.1]
+ - Browser [e.g. stock browser, safari]
+ - Version [e.g. 22]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000000000000000000000000000000000..72718d5aa63a292159351ae852c305fec1880a93
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9b9ce933c5319f03b1d0b1b91ab59ca2705787e9
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,15 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+ - package-ecosystem: "pip" # See documentation for possible values
+ directory: "/" # Location of package manifests
+ schedule:
+ interval: "weekly"
+ - package-ecosystem: "docker"
+ directory: "/"
+ schedule:
+ interval: "weekly"
diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9ac322d211ec437d1110ffddfe077b764c2f5f80
--- /dev/null
+++ b/.github/workflows/docker-build.yml
@@ -0,0 +1,45 @@
+name: GPTR tests
+run-name: ${{ github.actor }} ran the GPTR tests flow
+permissions:
+ contents: read
+ pull-requests: write
+on:
+ workflow_dispatch: # Add this line to enable manual triggering
+ # pull_request:
+ # types: [opened, synchronize]
+
+jobs:
+ docker:
+ runs-on: ubuntu-latest
+ environment: tests # Specify the environment to use for this job
+ env:
+ # Ensure these environment variables are set for the entire job
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }}
+ LANGCHAIN_API_KEY: ${{ secrets.LANGCHAIN_API_KEY }}
+ steps:
+ - name: Git checkout
+ uses: actions/checkout@v3
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v2
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+ with:
+ driver: docker
+
+ # - name: Build Docker images
+ # uses: docker/build-push-action@v4
+ # with:
+ # push: false
+ # tags: gptresearcher/gpt-researcher:latest
+ # file: Dockerfile
+
+ - name: Set up Docker Compose
+ run: |
+ sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+ sudo chmod +x /usr/local/bin/docker-compose
+ - name: Run tests with Docker Compose
+ run: |
+ docker-compose --profile test run --rm gpt-researcher-tests
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..3e12fc8bd05b4bbc840131282e175b845aa59787
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,53 @@
+#Ignore env containing secrets
+.env
+.venv
+.envrc
+
+#Ignore Virtual Env
+env/
+venv/
+.venv/
+
+# Other Environments
+ENV/
+env.bak/
+venv.bak/
+
+#Ignore generated outputs
+outputs/
+*.lock
+dist/
+gpt_researcher.egg-info/
+
+#Ignore my local docs
+my-docs/
+
+#Ignore pycache
+**/__pycache__/
+
+#Ignore mypy cache
+.mypy_cache/
+node_modules
+.idea
+.DS_Store
+.docusaurus
+build
+docs/build
+
+.vscode/launch.json
+.langgraph-data/
+.next/
+package-lock.json
+
+#Vim swp files
+*.swp
+
+# Log files
+logs/
+*.orig
+*.log
+server_log.txt
+
+#Cursor Rules
+.cursorrules
+CURSOR_RULES.md
\ No newline at end of file
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000000000000000000000000000000000..a910a5ef95c183d29beac86da972d3d396af7658
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,123 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We, as members, contributors, and leaders, pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, sexual identity, or
+orientation.
+
+We commit to acting and interacting in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+- Demonstrating empathy and kindness toward others
+- Being respectful of differing opinions, viewpoints, and experiences
+- Giving and gracefully accepting constructive feedback
+- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
+- Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+- The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+- Trolling, insulting or derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or email address, without their explicit permission
+- Other conduct that could reasonably be considered inappropriate in a professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior deemed inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that do not
+align with this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies to all community spaces and also applies when
+an individual is officially representing the community in public spaces.
+Examples include using an official email address, posting via an official
+social media account, or acting as an appointed representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+[Assaf.elovic@gmail.com](mailto:Assaf.elovic@gmail.com).
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period. This includes
+avoiding interactions in community spaces and external channels like social media.
+Violating these terms may lead to a temporary or permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any interaction or public
+communication with the community for a specified period. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of groups of individuals.
+
+**Consequence**: A permanent ban from any public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..f63123e38edf69bc5773bdab226fe4710a6cfb37
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,42 @@
+# Contributing to GPT Researcher
+
+First off, we'd like to welcome you and thank you for your interest and effort in contributing to our open-source project ❤️. Contributions of all forms are welcome—from new features and bug fixes to documentation and more.
+
+We are on a mission to build the #1 AI agent for comprehensive, unbiased, and factual research online, and we need your support to achieve this grand vision.
+
+Please take a moment to review this document to make the contribution process easy and effective for everyone involved.
+
+## Reporting Issues
+
+If you come across any issue or have an idea for an improvement, don't hesitate to create an issue on GitHub. Describe your problem in sufficient detail, providing as much relevant information as possible. This way, we can reproduce the issue before attempting to fix it or respond appropriately.
+
+## Contributing Code
+
+1. **Fork the repository and create your branch from `master`.**
+ If it’s not an urgent bug fix, branch from `master` and work on the feature or fix there.
+
+2. **Make your changes.**
+ Implement your changes following best practices for coding in the project's language.
+
+3. **Test your changes.**
+ Ensure that your changes pass all tests if any exist. If the project doesn’t have automated tests, test your changes manually to confirm they behave as expected.
+
+4. **Follow the coding style.**
+ Ensure your code adheres to the coding conventions used throughout the project, including indentation, accurate comments, etc.
+
+5. **Commit your changes.**
+ Make your Git commits informative and concise. This is very helpful for others when they look at the Git log.
+
+6. **Push to your fork and submit a pull request.**
+ When your work is ready and passes tests, push your branch to your fork of the repository and submit a pull request from there.
+
+7. **Pat yourself on the back and wait for review.**
+ Your work is done, congratulations! Now sit tight. The project maintainers will review your submission as soon as possible. They might suggest changes or ask for improvements. Both constructive conversation and patience are key to the collaboration process.
+
+## Documentation
+
+If you would like to contribute to the project's documentation, please follow the same steps: fork the repository, make your changes, test them, and submit a pull request.
+
+Documentation is a vital part of any software. It's not just about having good code; ensuring that users and contributors understand what's going on, how to use the software, or how to contribute is crucial.
+
+We're grateful for all our contributors, and we look forward to building the world's leading AI research agent hand-in-hand with you. Let's harness the power of open source and AI to change the world together!
diff --git a/CURSOR_RULES.md b/CURSOR_RULES.md
new file mode 100644
index 0000000000000000000000000000000000000000..673567484a18c82f23e2c6787eeb493fa0913fd3
--- /dev/null
+++ b/CURSOR_RULES.md
@@ -0,0 +1,181 @@
+> **Note**: This is a readable copy of the `.cursorrules` file maintained for legibility. The actual rules are implemented from the `.cursorrules` file in the root directory.
+
+# GPT-Researcher Cursor Rules
+
+## Project Overview
+This project, named GPT-Researcher, is an LLM-based autonomous agent that conducts local and web research on any topic and generates a comprehensive report with citations. It is built using Next.js and TypeScript, integrating various libraries for their strengths.
+
+Your primary goal is to help with:
+- Next.js app router patterns
+- TypeScript type safety
+- Tailwind CSS best practices
+- Code quality standards
+- Python/FastAPI backend optimizations
+
+## Key URLs
+- Project Home Page: https://gptr.dev/
+- GitHub Repository: https://github.com/assafelovic/gpt-researcher
+- Documentation: https://docs.gptr.dev/
+
+## Project Structure
+- Frontend user interface built with Next.js, TypeScript, and Tailwind CSS in `/frontend`
+ - Static FastAPI version for lightweight deployments
+ - Next.js version for production use with enhanced features
+
+- Multi-agent research system using LangChain and LangGraph in `/backend/multi_agents`
+ - Browser, Editor, Researcher, Reviewer, Revisor, Writer, and Publisher agents
+ - Task configuration and agent coordination
+
+- Document processing using Unstructured and PyMuPDF in `/backend/document_processing`
+ - PDF, DOCX, and web content parsing
+ - Text extraction and preprocessing
+
+- Report generation using LangChain and Jinja2 templates in `/backend/report_generation`
+ - Template-based report structuring
+ - Dynamic content formatting
+
+- Multiple output formats in `/backend/output_formats`
+ - PDF via md2pdf
+ - Markdown via mistune
+ - DOCX via python-docx
+ - Format conversion utilities
+ - Export functionality
+
+- GPT Researcher core functionality in `/gpt_researcher`
+ - Web scraping and content aggregation
+ - Research planning and execution
+ - Source validation and tracking
+ - Query processing and response generation
+
+- Testing infrastructure in `/tests`
+ - Unit tests for individual components
+ - Integration tests for agent interactions
+ - End-to-end research workflow tests
+ - Mock data and fixtures for testing
+
+## Language Model Configuration
+- Default model: gpt-4-turbo
+- Alternative models: gpt-3.5-turbo, claude-3-opus
+- Temperature settings for different tasks
+- Context window management
+- Token limit handling
+- Cost optimization strategies
+
+## Error Handling
+- Research failure recovery
+- API rate limiting
+- Network timeout handling
+- Invalid input management
+- Source validation errors
+- Report generation failures
+
+## Performance
+- Parallel processing strategies
+- Caching mechanisms
+- Memory management
+- Response streaming
+- Resource allocation
+- Query optimization
+
+## Development Workflow
+- Branch naming conventions
+- Commit message format
+- PR review process
+- Testing requirements
+- Documentation updates
+- Version control guidelines
+
+## API Documentation
+- REST endpoints
+- WebSocket events
+- Request/Response formats
+- Authentication methods
+- Rate limits
+- Error codes
+
+## Monitoring
+- Performance metrics
+- Error tracking
+- Usage statistics
+- Cost monitoring
+- Research quality metrics
+- User feedback tracking
+
+## Frontend Components
+- Static FastAPI version for lightweight deployments
+- Next.js version for production use with enhanced features
+
+## Backend Components
+- Multi-agent system architecture
+- Document processing pipeline
+- Report generation system
+- Output format handlers
+
+## Core Research Components
+- Web scraping and aggregation
+- Research planning and execution
+- Source validation
+- Query processing
+
+## Testing
+- Unit tests
+- Integration tests
+- End-to-end tests
+- Performance testing
+
+## Rule Violation Monitoring
+- Alert developer when changes conflict with project structure
+- Warn about deviations from coding standards
+- Flag unauthorized framework or library additions
+- Monitor for security and performance anti-patterns
+- Track API usage patterns that may violate guidelines
+- Report TypeScript strict mode violations
+- Identify accessibility compliance issues
+
+## Development Guidelines
+- Use TypeScript with strict mode enabled
+- Follow ESLint and Prettier configurations
+- Ensure components are responsive and accessible
+- Use Tailwind CSS for styling, following the project's design system
+- Minimize AI-generated comments, prefer self-documenting code
+- Follow React best practices and hooks guidelines
+- Validate all user inputs and API responses
+- Use existing components as reference implementations
+
+## Important Scripts
+- `npm run dev`: Start development server
+- `npm run build`: Build for production
+- `npm run test`: Run test suite
+- `python -m pytest`: Run Python tests
+- `python -m uvicorn backend.server.server:app --host=0.0.0.0 --port=8000`: Start FastAPI server
+- `python -m uvicorn backend.server.server:app --reload`: Start FastAPI server with auto-reload for development
+- `python main.py`: Run the main application directly
+- `docker-compose up`: Start all services
+- `docker-compose run gpt-researcher-tests`: Run test suite in container
+
+## AI Integration Guidelines
+- Prioritize type safety in all AI interactions
+- Follow LangChain and LangGraph best practices
+- Implement proper error handling for AI responses
+- Maintain context window limits
+- Handle rate limiting and API quotas
+- Validate AI outputs before processing
+- Log AI interactions for debugging
+
+## Lexicon
+- **GPT Researcher**: Autonomous research agent system
+- **Multi-Agent System**: Coordinated AI agents for research tasks
+- **Research Pipeline**: End-to-end research workflow
+- **Agent Roles**: Browser, Editor, Researcher, Reviewer, Revisor, Writer, Publisher
+- **Source Validation**: Verification of research sources
+- **Report Generation**: Process of creating final research output
+
+## Additional Resources
+- [Next.js Documentation](https://nextjs.org/docs)
+- [TypeScript Handbook](https://www.typescriptlang.org/docs/)
+- [Tailwind CSS Documentation](https://tailwindcss.com/docs)
+- [LangChain Documentation](https://python.langchain.com/docs/)
+- [FastAPI Documentation](https://fastapi.tiangolo.com/)
+- [Project Documentation](https://docs.gptr.dev/)
+
+_Note: End all your comments with a :-) symbol._
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..637b185027a00fc2ed8bef793f05b531cf1da872
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,46 @@
+# Stage 1: Browser and build tools installation
+FROM python:3.11.4-slim-bullseye AS install-browser
+
+# Install Chromium, Chromedriver, Firefox, Geckodriver, and build tools in one layer
+RUN apt-get update && \
+ apt-get satisfy -y "chromium, chromium-driver (>= 115.0)" && \
+ apt-get install -y --no-install-recommends firefox-esr wget build-essential && \
+ wget https://github.com/mozilla/geckodriver/releases/download/v0.33.0/geckodriver-v0.33.0-linux64.tar.gz && \
+ tar -xvzf geckodriver-v0.33.0-linux64.tar.gz && \
+ chmod +x geckodriver && \
+ mv geckodriver /usr/local/bin/ && \
+ rm geckodriver-v0.33.0-linux64.tar.gz && \
+ chromium --version && chromedriver --version && \
+ rm -rf /var/lib/apt/lists/* # Clean up apt lists to reduce image size
+
+# Stage 2: Python dependencies installation
+FROM install-browser AS gpt-researcher-install
+
+ENV PIP_ROOT_USER_ACTION=ignore
+WORKDIR /usr/src/app
+
+# Copy and install Python dependencies in a single layer to optimize cache usage
+COPY ./requirements.txt ./requirements.txt
+COPY ./multi_agents/requirements.txt ./multi_agents/requirements.txt
+
+RUN pip install --no-cache-dir -r requirements.txt && \
+ pip install --no-cache-dir -r multi_agents/requirements.txt
+
+# Stage 3: Final stage with non-root user and app
+FROM gpt-researcher-install AS gpt-researcher
+
+# Create a non-root user for security
+RUN useradd -ms /bin/bash gpt-researcher && \
+ chown -R gpt-researcher:gpt-researcher /usr/src/app
+
+USER gpt-researcher
+WORKDIR /usr/src/app
+
+# Copy the rest of the application files with proper ownership
+COPY --chown=gpt-researcher:gpt-researcher ./ ./
+
+# Expose the application's port
+EXPOSE 8000
+
+# Define the default command to run the application
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..29f81d812f3e768fa89638d1f72920dbfd1413a8
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Procfile b/Procfile
new file mode 100644
index 0000000000000000000000000000000000000000..b0c6683b009a4a144de9af5bcfd4b280789be431
--- /dev/null
+++ b/Procfile
@@ -0,0 +1 @@
+web: python -m uvicorn backend.server.server:app --host=0.0.0.0 --port=${PORT}
\ No newline at end of file
diff --git a/README-ja_JP.md b/README-ja_JP.md
new file mode 100644
index 0000000000000000000000000000000000000000..e806861e0d5941f01176b8ff47110e16bfb6d3ef
--- /dev/null
+++ b/README-ja_JP.md
@@ -0,0 +1,159 @@
+
+
+
+
+
+####
+
+[](https://gptr.dev)
+[](https://docs.gptr.dev)
+[](https://discord.gg/QgZXvJAccX)
+
+[](https://badge.fury.io/py/gpt-researcher)
+
+[](https://colab.research.google.com/github/assafelovic/gpt-researcher/blob/master/docs/docs/examples/pip-run.ipynb)
+[](https://hub.docker.com/r/gptresearcher/gpt-researcher)
+[](https://twitter.com/assaf_elovic)
+
+[English](README.md) |
+[中文](README-zh_CN.md) |
+[日本語](README-ja_JP.md) |
+[한국어](README-ko_KR.md)
+
+
+# 🔎 GPT Researcher
+
+**GPT Researcher は、さまざまなタスクに対する包括的なオンラインリサーチのために設計された自律エージェントです。**
+
+このエージェントは、詳細で事実に基づいた偏りのない研究レポートを生成することができ、関連するリソース、アウトライン、およびレッスンに焦点を当てるためのカスタマイズオプションを提供します。最近の [Plan-and-Solve](https://arxiv.org/abs/2305.04091) および [RAG](https://arxiv.org/abs/2005.11401) 論文に触発され、GPT Researcher は速度、決定論、および信頼性の問題に対処し、同期操作ではなく並列化されたエージェント作業を通じてより安定したパフォーマンスと高速化を提供します。
+
+**私たちの使命は、AIの力を活用して、個人や組織に正確で偏りのない事実に基づいた情報を提供することです。**
+
+## なぜGPT Researcherなのか?
+
+- 手動の研究タスクで客観的な結論を形成するには時間がかかることがあり、適切なリソースと情報を見つけるのに数週間かかることもあります。
+- 現在のLLMは過去の情報に基づいて訓練されており、幻覚のリスクが高く、研究タスクにはほとんど役に立ちません。
+- 現在のLLMは短いトークン出力に制限されており、長く詳細な研究レポート(2,000語以上)には不十分です。
+- Web検索を可能にするサービス(ChatGPT + Webプラグインなど)は、限られたリソースとコンテンツのみを考慮し、場合によっては表面的で偏った回答をもたらします。
+- Webソースの選択のみを使用すると、研究タスクの正しい結論を導く際にバイアスが生じる可能性があります。
+
+## アーキテクチャ
+主なアイデアは、「プランナー」と「実行」エージェントを実行することであり、プランナーは研究する質問を生成し、実行エージェントは生成された各研究質問に基づいて最も関連性の高い情報を探します。最後に、プランナーはすべての関連情報をフィルタリングおよび集約し、研究レポートを作成します。
+エージェントは、研究タスクを完了するために gpt-4o-mini と gpt-4o(128K コンテキスト)の両方を活用します。必要に応じてそれぞれを使用することでコストを最適化します。**平均的な研究タスクは完了するのに約3分かかり、コストは約0.1ドルです**。
+
+
+
+
+
+
+詳細説明:
+* 研究クエリまたはタスクに基づいて特定のドメインエージェントを作成します。
+* 研究タスクに対する客観的な意見を形成する一連の研究質問を生成します。
+* 各研究質問に対して、与えられたタスクに関連する情報をオンラインリソースから収集するクローラーエージェントをトリガーします。
+* 各収集されたリソースについて、関連情報に基づいて要約し、そのソースを追跡します。
+* 最後に、すべての要約されたソースをフィルタリングおよび集約し、最終的な研究レポートを生成します。
+
+## デモ
+https://github.com/assafelovic/gpt-researcher/assets/13554167/a00c89a6-a295-4dd0-b58d-098a31c40fda
+
+## チュートリアル
+ - [動作原理](https://docs.gptr.dev/blog/building-gpt-researcher)
+ - [インストール方法](https://www.loom.com/share/04ebffb6ed2a4520a27c3e3addcdde20?sid=da1848e8-b1f1-42d1-93c3-5b0b9c3b24ea)
+ - [ライブデモ](https://www.loom.com/share/6a3385db4e8747a1913dd85a7834846f?sid=a740fd5b-2aa3-457e-8fb7-86976f59f9b8)
+
+## 特徴
+- 📝 研究、アウトライン、リソース、レッスンレポートを生成
+- 🌐 各研究で20以上のWebソースを集約し、客観的で事実に基づいた結論を形成
+- 🖥️ 使いやすいWebインターフェース(HTML/CSS/JS)を含む
+- 🔍 JavaScriptサポート付きのWebソースをスクレイピング
+- 📂 訪問および使用されたWebソースのコンテキストを追跡
+- 📄 研究レポートをPDF、Wordなどにエクスポート
+
+## 📖 ドキュメント
+
+完全なドキュメントについては、[こちら](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started)を参照してください:
+
+- 入門(インストール、環境設定、簡単な例)
+- 操作例(デモ、統合、dockerサポート)
+- 参考資料(API完全ドキュメント)
+- Tavilyアプリケーションインターフェースの統合(コア概念の高度な説明)
+
+## クイックスタート
+> **ステップ 0** - Python 3.11 以降をインストールします。[こちら](https://www.tutorialsteacher.com/python/install-python)を参照して、ステップバイステップのガイドを確認してください。
+
+
+
+> **ステップ 1** - プロジェクトをダウンロードします
+
+```bash
+$ git clone https://github.com/assafelovic/gpt-researcher.git
+$ cd gpt-researcher
+```
+
+
+
+> **ステップ2** - 依存関係をインストールします
+```bash
+$ pip install -r requirements.txt
+```
+
+
+> **ステップ 3** - OpenAI キーと Tavily API キーを使用して .env ファイルを作成するか、直接エクスポートします
+
+```bash
+$ export OPENAI_API_KEY={Your OpenAI API Key here}
+```
+```bash
+$ export TAVILY_API_KEY={Your Tavily API Key here}
+```
+
+- **LLMには、[OpenAI GPT](https://platform.openai.com/docs/guides/gpt) を使用することをお勧めします**が、[Langchain Adapter](https://python.langchain.com/docs/guides/adapters/openai) がサポートする他の LLM モデル(オープンソースを含む)を使用することもできます。llm モデルとプロバイダーを config/config.py で変更するだけです。[このガイド](https://python.langchain.com/docs/integrations/llms/) に従って、LLM を Langchain と統合する方法を学んでください。
+- **検索エンジンには、[Tavily Search API](https://app.tavily.com)(LLM 用に最適化されています)を使用することをお勧めします**が、他の検索エンジンを選択することもできます。config/config.py で検索プロバイダーを「duckduckgo」、「googleAPI」、「googleSerp」、「searchapi」、「searx」に変更するだけです。次に、config.py ファイルに対応する env API キーを追加します。
+- **最適なパフォーマンスを得るために、[OpenAI GPT](https://platform.openai.com/docs/guides/gpt) モデルと [Tavily Search API](https://app.tavily.com) を使用することを強くお勧めします。**
+
+
+> **ステップ 4** - FastAPI を使用してエージェントを実行します
+
+```bash
+$ uvicorn main:app --reload
+```
+
+
+> **ステップ 5** - 任意のブラウザで http://localhost:8000 にアクセスして、リサーチを楽しんでください!
+
+Docker の使い方や機能とサービスの詳細については、[ドキュメント](https://docs.gptr.dev) ページをご覧ください。
+
+## 🚀 貢献
+私たちは貢献を大歓迎します!興味がある場合は、[貢献](CONTRIBUTING.md) をご覧ください。
+
+私たちの[ロードマップ](https://trello.com/b/3O7KBePw/gpt-researcher-roadmap) ページを確認し、私たちの使命に参加することに興味がある場合は、[Discord コミュニティ](https://discord.gg/QgZXvJAccX) を通じてお問い合わせください。
+
+## ✉️ サポート / お問い合わせ
+- [コミュニティディスカッション](https://discord.gg/spBgZmm3Xe)
+- 私たちのメール: support@tavily.com
+
+## 🛡 免責事項
+
+このプロジェクト「GPT Researcher」は実験的なアプリケーションであり、明示または黙示のいかなる保証もなく「現状のまま」提供されます。私たちは学術目的のためにMITライセンスの下でコードを共有しています。ここに記載されている内容は学術的なアドバイスではなく、学術論文や研究論文での使用を推奨するものではありません。
+
+私たちの客観的な研究主張に対する見解:
+1. 私たちのスクレイピングシステムの主な目的は、不正確な事実を減らすことです。どうやって解決するのか?私たちがスクレイピングするサイトが多ければ多いほど、誤ったデータの可能性は低くなります。各研究で20の情報を収集し、それらがすべて間違っている可能性は非常に低いです。
+2. 私たちの目標はバイアスを排除することではなく、可能な限りバイアスを減らすことです。**私たちはここでコミュニティとして最も効果的な人間と機械の相互作用を探求しています**。
+3. 研究プロセスでは、人々も自分が研究しているトピックに対してすでに意見を持っているため、バイアスがかかりやすいです。このツールは多くの意見を収集し、偏った人が決して読まないであろう多様な見解を均等に説明します。
+
+**GPT-4 言語モデルの使用は、トークンの使用により高額な費用がかかる可能性があることに注意してください**。このプロジェクトを利用することで、トークンの使用状況と関連する費用を監視および管理する責任があることを認めたことになります。OpenAI API の使用状況を定期的に確認し、予期しない料金が発生しないように必要な制限やアラートを設定することを強くお勧めします。
+
+---
+
+
+
+
+
+
+
+
+
+
diff --git a/README-ko_KR.md b/README-ko_KR.md
new file mode 100644
index 0000000000000000000000000000000000000000..e8adfc52669dd4154da1dce4c686c28ec6fff619
--- /dev/null
+++ b/README-ko_KR.md
@@ -0,0 +1,242 @@
+
+
+
+
+
+####
+
+[](https://gptr.dev)
+[](https://docs.gptr.dev)
+[](https://discord.gg/QgZXvJAccX)
+
+[](https://badge.fury.io/py/gpt-researcher)
+
+[](https://colab.research.google.com/github/assafelovic/gpt-researcher/blob/master/docs/docs/examples/pip-run.ipynb)
+[](https://hub.docker.com/r/gptresearcher/gpt-researcher)
+[](https://twitter.com/assaf_elovic)
+
+[English](README.md) |
+[中文](README-zh_CN.md) |
+[日本語](README-ja_JP.md) |
+[한국어](README-ko_KR.md)
+
+
+# 🔎 GPT Researcher
+
+**GPT Researcher는 다양한 작업을 대해 포괄적인 온라인 연구를 수행하도록 설계된 자율 에이전트입니다.**
+
+이 에이전트는 세부적이고 사실에 기반하며 편견 없는 연구 보고서를 생성할 수 있으며, 관련 리소스와 개요에 초점을 맞춘 맞춤형 옵션을 제공합니다. 최근 발표된 [Plan-and-Solve](https://arxiv.org/abs/2305.04091) 및 [RAG](https://arxiv.org/abs/2005.11401) 논문에서 영감을 받아 GPT Researcher는 잘못된 정보, 속도, 결정론적 접근 방식, 신뢰성 문제를 해결하고, 동기화 작업이 아닌 병렬 에이전트 작업을 통해 더 안정적이고 빠른 성능을 제공합니다.
+
+**우리의 목표는 AI의 힘을 활용하여 개인과 조직에게 정확하고 편향 없는 사실에 기반한 정보를 제공하는 것입니다.**
+
+## 왜 GPT Researcher인가?
+
+- 직접 수행하는 연구 과정은 객관적인 결론을 도출하는 데 시간이 오래 걸리며, 적절한 리소스와 정보를 찾는 데 몇 주가 걸릴 수 있습니다.
+- 현재의 대규모 언어 모델(LLM)은 과거 정보에 기반해 훈련되었으며, 환각 현상이 발생할 위험이 높아 연구 작업에는 적합하지 않습니다.
+- 현재 LLM은 짧은 토큰 출력으로 제한되며, 2,000단어 이상의 길고 자세한 연구 보고서를 작성하는 데는 충분하지 않습니다.
+- 웹 검색을 지원하는 서비스(예: ChatGPT 또는 Perplexity)는 제한된 리소스와 콘텐츠만을 고려하여 경우에 따라 피상적이고 편향된 답변을 제공합니다.
+- 웹 소스만을 사용하면 연구 작업에서 올바른 결론을 도출할 때 편향이 발생할 수 있습니다.
+
+## 데모
+https://github.com/user-attachments/assets/092e9e71-7e27-475d-8c4f-9dddd28934a3
+
+## 아키텍처
+주요 아이디어는 "플래너"와 "실행" 에이전트를 실행하는 것으로, 플래너는 연구할 질문을 생성하고, 실행 에이전트는 생성된 각 연구 질문에 따라 가장 관련성 높은 정보를 찾습니다. 마지막으로 플래너는 모든 관련 정보를 필터링하고 집계하여 연구 보고서를 작성합니다.
+
+에이전트는 `gpt-4o-mini`와 `gpt-4o`(128K 컨텍스트)를 활용하여 연구 작업을 완료합니다. 필요에 따라 각각을 사용하여 비용을 최적화합니다. **평균 연구 작업은 약 2분이 소요되며, 비용은 약 $0.005입니다.**.
+
+
+
+
+
+구체적으로:
+* 연구 쿼리 또는 작업을 기반으로 도메인별 에이전트를 생성합니다.
+* 주어진 작업에 대해 객관적인 의견을 형성할 수 있는 일련의 연구 질문을 생성합니다.
+* 각 연구 질문에 대해 크롤러 에이전트를 실행하여 작업과 관련된 정보를 온라인 리소스에서 수집합니다.
+* 수집된 각 리소스에서 관련 정보를 요약하고 출처를 기록합니다.
+* 마지막으로, 요약된 모든 정보를 필터링하고 집계하여 최종 연구 보고서를 생성합니다.
+
+## 튜토리얼
+ - [동작원리](https://docs.gptr.dev/blog/building-gpt-researcher)
+ - [설치방법](https://www.loom.com/share/04ebffb6ed2a4520a27c3e3addcdde20?sid=da1848e8-b1f1-42d1-93c3-5b0b9c3b24ea)
+ - [라이브 데모](https://www.loom.com/share/6a3385db4e8747a1913dd85a7834846f?sid=a740fd5b-2aa3-457e-8fb7-86976f59f9b8)
+
+
+## 기능
+- 📝 로컬 문서 및 웹 소스를 사용하여 연구, 개요, 리소스 및 학습 보고서 생성
+- 📜 2,000단어 이상의 길고 상세한 연구 보고서 생성 가능
+- 🌐 연구당 20개 이상의 웹 소스를 집계하여 객관적이고 사실에 기반한 결론 도출
+- 🖥️ 경량 HTML/CSS/JS와 프로덕션용 (NextJS + Tailwind) UX/UI 포함
+- 🔍 자바스크립트 지원 웹 소스 스크래핑 기능
+- 📂 연구 과정에서 맥락과 메모리 추적 및 유지
+- 📄 연구 보고서를 PDF, Word 등으로 내보내기 지원
+
+## 📖 문서
+
+전체 문서(설치, 환경 설정, 간단한 예시)를 보려면 [여기](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started)를 참조하세요.
+
+- 시작하기 (설치, 환경 설정, 간단한 예시)
+- 맞춤 설정 및 구성
+- 사용 방법 예시 (데모, 통합, 도커 지원)
+- 참고자료 (전체 API 문서)
+
+## ⚙️ 시작하기
+### 설치
+> **1단계** - Python 3.11 또는 그 이상의 버전을 설치하세요. [여기](https://www.tutorialsteacher.com/python/install-python)를 참조하여 단계별 가이드를 확인하세요.
+
+> **2단계** - 프로젝트를 다운로드하고 해당 디렉토리로 이동하세요.
+
+```bash
+git clone https://github.com/assafelovic/gpt-researcher.git
+cd gpt-researcher
+```
+
+> **3단계** - 두 가지 방법으로 API 키를 설정하세요: 직접 export하거나 `.env` 파일에 저장하세요.
+
+Linux/Windows에서 임시 설정을 하려면 export 방법을 사용하세요:
+
+```bash
+export OPENAI_API_KEY={OpenAI API 키 입력}
+export TAVILY_API_KEY={Tavily API 키 입력}
+```
+
+더 영구적인 설정을 원한다면, 현재의 `gpt-researcher` 디렉토리에 `.env` 파일을 생성하고 환경 변수를 입력하세요 (export 없이).
+
+- 기본 LLM은 [GPT](https://platform.openai.com/docs/guides/gpt)이지만, `claude`, `ollama3`, `gemini`, `mistral` 등 다른 LLM도 사용할 수 있습니다. LLM 제공자를 변경하는 방법은 [LLMs 문서](https://docs.gptr.dev/docs/gpt-researcher/llms/llms)를 참조하세요. 이 프로젝트는 OpenAI GPT 모델에 최적화되어 있습니다.
+- 기본 검색기는 [Tavily](https://app.tavily.com)이지만, `duckduckgo`, `google`, `bing`, `searchapi`, `serper`, `searx`, `arxiv`, `exa` 등의 검색기를 사용할 수 있습니다. 검색 제공자를 변경하는 방법은 [검색기 문서](https://docs.gptr.dev/docs/gpt-researcher/retrievers)를 참조하세요.
+
+### 빠른 시작
+
+> **1단계** - 필요한 종속성 설치
+
+```bash
+pip install -r requirements.txt
+```
+
+> **2단계** - FastAPI로 에이전트 실행
+
+```bash
+python -m uvicorn main:app --reload
+```
+
+> **3단계** - 브라우저에서 http://localhost:8000 으로 이동하여 연구를 시작하세요!
+
+
+
+**[Poetry](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started#poetry) 또는 [가상 환경](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started#virtual-environment)에 대해 배우고 싶다면, [문서](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started)를 참조하세요.**
+
+### PIP 패키지로 실행하기
+```bash
+pip install gpt-researcher
+```
+
+```python
+...
+from gpt_researcher import GPTResearcher
+
+query = "왜 Nvidia 주식이 오르고 있나요?"
+researcher = GPTResearcher(query=query, report_type="research_report")
+# 주어진 질문에 대한 연구 수행
+research_result = await researcher.conduct_research()
+# 보고서 작성
+report = await researcher.write_report()
+...
+```
+
+**더 많은 예제와 구성 옵션은 [PIP 문서](https://docs.gptr.dev/docs/gpt-researcher/gptr/pip-package)를 참조하세요.**
+
+## Docker로 실행
+
+> **1단계** - [Docker 설치](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started-with-docker)
+
+> **2단계** - `.env.example` 파일을 복사하고 API 키를 추가한 후, 파일을 `.env`로 저장하세요.
+
+> **3단계** - docker-compose 파일에서 실행하고 싶지 않은 서비스를 주석 처리하세요.
+
+```bash
+$ docker-compose up --build
+```
+
+> **4단계** - docker-compose 파일에서 아무 것도 주석 처리하지 않았다면, 기본적으로 두 가지 프로세스가 시작됩니다:
+ - localhost:8000에서 실행 중인 Python 서버
+ - localhost:3000에서 실행 중인 React 앱
+
+브라우저에서 localhost:3000으로 이동하여 연구를 시작하세요!
+
+## 📄 로컬 문서로 연구하기
+
+GPT Researcher를 사용하여 로컬 문서를 기반으로 연구 작업을 수행할 수 있습니다. 현재 지원되는 파일 형식은 PDF, 일반 텍스트, CSV, Excel, Markdown, PowerPoint, Word 문서입니다.
+
+1단계: `DOC_PATH` 환경 변수를 설정하여 문서가 있는 폴더를 지정하세요.
+
+```bash
+export DOC_PATH="./my-docs"
+```
+
+2단계:
+ - 프론트엔드 앱을 localhost:8000에서 실행 중이라면, "Report Source" 드롭다운 옵션에서 "My Documents"를 선택하세요.
+ - GPT Researcher를 [PIP 패키지](https://docs.tavily.com/docs/gpt-researcher/pip-package)로 실행 중이라면, `report_source` 인수를 "local"로 설정하여 `GPTResearcher` 클래스를 인스턴스화하세요. [코드 예제](https://docs.gptr.dev/docs/gpt-researcher/context/tailored-research)를 참조하세요.
+
+## 👪 다중 에이전트 어시스턴트
+
+AI가 프롬프트 엔지니어링 및 RAG에서 다중 에이전트 시스템으로 발전함에 따라, 우리는 [LangGraph](https://python.langchain.com/v0.1/docs/langgraph/)로 구축된 새로운 다중 에이전트 어시스턴트를 소개합니다.
+
+LangGraph를 사용하면 여러 에이전트의 전문 기술을 활용하여 연구 과정의 깊이와 질을 크게 향상시킬 수 있습니다. 최근 [STORM](https://arxiv.org/abs/2402.14207) 논문에서 영감을 받아, 이 프로젝트는 AI 에이전트 팀이 주제에 대한 연구를 계획에서 출판까지 함께 수행하는 방법을 보여줍니다.
+
+평균 실행은 5-6 페이지 분량의 연구 보고서를 PDF, Docx, Markdown 형식으로 생성합니다.
+
+[여기](https://github.com/assafelovic/gpt-researcher/tree/master/multi_agents)에서 확인하거나 [문서](https://docs.gptr.dev/docs/gpt-researcher/multi_agents/langgraph)에서 자세한 내용을 참조하세요.
+
+## 🖥️ 프론트엔드 애플리케이션
+
+GPT-Researcher는 사용자 경험을 개선하고 연구 프로세스를 간소화하기 위해 향상된 프론트엔드를 제공합니다. 프론트엔드는 다음과 같은 기능을 제공합니다:
+
+- 연구 쿼리를 입력할 수 있는 직관적인 인터페이스
+- 연구 작업의 실시간 진행 상황 추적
+- 연구 결과의 대화형 디스플레이
+- 맞춤형 연구 경험을 위한 설정 가능
+
+두 가지 배포 옵션이 있습니다:
+1. FastAPI로 제공되는 경량 정적 프론트엔드
+2. 고급 기능을 제공하는 NextJS 애플리케이션
+
+프론트엔드 기능에 대한 자세한 설치 방법 및 정보를 원하시면 [문서 페이지](https://docs.gptr.dev/docs/gpt-researcher/frontend/frontend)를 참조하세요.
+
+## 🚀 기여하기
+우리는 기여를 적극 환영합니다! 관심이 있다면 [기여 가이드](https://github.com/assafelovic/gpt-researcher/blob/master/CONTRIBUTING.md)를 확인해 주세요.
+
+[로드맵](https://trello.com/b/3O7KBePw/gpt-researcher-roadmap) 페이지를 확인하고, 우리 [Discord 커뮤니티](https://discord.gg/QgZXvJAccX)에 가입하여 우리의 목표에 함께 참여해 주세요.
+
+
+
+
+## ✉️ 지원 / 문의
+- [커뮤니티 Discord](https://discord.gg/spBgZmm3Xe)
+- 저자 이메일: assaf.elovic@gmail.com
+
+## 🛡️ 면책 조항
+
+이 프로젝트인 GPT Researcher는 실험적인 응용 프로그램이며, 명시적이거나 묵시적인 보증 없이 "있는 그대로" 제공됩니다. 우리는 이 코드를 학술적 목적으로 Apache 2 라이선스 하에 공유하고 있습니다. 여기에 있는 것은 학술적 조언이 아니며, 학술 또는 연구 논문에 사용하는 것을 권장하지 않습니다.
+
+편향되지 않은 연구 주장에 대한 우리의 견해:
+1. GPT Researcher의 주요 목표는 잘못된 정보와 편향된 사실을 줄이는 것입니다. 그 방법은 무엇일까요? 우리는 더 많은 사이트를 스크래핑할수록 잘못된 데이터의 가능성이 줄어든다고 가정합니다. 여러 사이트에서 정보를 스크래핑하고 가장 빈번한 정보를 선택하면, 모든 정보가 틀릴 확률은 매우 낮습니다.
+2. 우리는 편향을 완전히 제거하려고 하지는 않지만, 가능한 한 줄이는 것을 목표로 합니다. **우리는 인간과 LLM의 가장 효과적인 상호작용을 찾기 위한 커뮤니티입니다.**
+3. 연구에서 사람들도 이미 자신이 연구하는 주제에 대해 의견을 가지고 있기 때문에 편향되는 경향이 있습니다. 이 도구는 많은 의견을 스크래핑하며, 편향된 사람이라면 결코 읽지 않았을 다양한 견해를 고르게 설명합니다.
+
+**GPT-4 모델을 사용할 경우, 토큰 사용량 때문에 비용이 많이 들 수 있습니다.** 이 프로젝트를 사용하는 경우, 자신의 토큰 사용량 및 관련 비용을 모니터링하고 관리하는 것은 본인의 책임입니다. OpenAI API 사용량을 정기적으로 확인하고, 예상치 못한 비용을 방지하기 위해 필요한 한도를 설정하거나 알림을 설정하는 것이 좋습니다.
+
+
+---
+
+
+
+
+
+
+
+
+
+
diff --git a/README-zh_CN.md b/README-zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..b0f950f349fae29b7fe55809ed844815149768cd
--- /dev/null
+++ b/README-zh_CN.md
@@ -0,0 +1,158 @@
+
+
+
+
+
+####
+
+[](https://gptr.dev)
+[](https://docs.gptr.dev)
+[](https://discord.gg/QgZXvJAccX)
+
+[](https://badge.fury.io/py/gpt-researcher)
+
+[](https://colab.research.google.com/github/assafelovic/gpt-researcher/blob/master/docs/docs/examples/pip-run.ipynb)
+[](https://hub.docker.com/r/gptresearcher/gpt-researcher)
+[](https://twitter.com/assaf_elovic)
+
+[English](README.md) |
+[中文](README-zh_CN.md) |
+[日本語](README-ja_JP.md) |
+[한국어](README-ko_KR.md)
+
+
+# 🔎 GPT Researcher
+
+**GPT Researcher 是一个智能体代理,专为各种任务的综合在线研究而设计。**
+
+代理可以生成详细、正式且客观的研究报告,并提供自定义选项,专注于相关资源、结构框架和经验报告。受最近发表的[Plan-and-Solve](https://arxiv.org/abs/2305.04091) 和[RAG](https://arxiv.org/abs/2005.11401) 论文的启发,GPT Researcher 解决了速度、确定性和可靠性等问题,通过并行化的代理运行,而不是同步操作,提供了更稳定的性能和更高的速度。
+
+**我们的使命是利用人工智能的力量,为个人和组织提供准确、客观和事实的信息。**
+
+## 为什么选择GPT Researcher?
+
+- 因为人工研究任务形成客观结论可能需要时间和经历,有时甚至需要数周才能找到正确的资源和信息。
+- 目前的LLM是根据历史和过时的信息进行训练的,存在严重的幻觉风险,因此几乎无法胜任研究任务。
+- 网络搜索的解决方案(例如 ChatGPT + Web 插件)仅考虑有限的资源和内容,在某些情况下会导致肤浅的结论或不客观的答案。
+- 只使用部分资源可能会在确定研究问题或任务的正确结论时产生偏差。
+
+## 架构
+主要思想是运行“**计划者**”和“**执行**”代理,而**计划者**生成问题进行研究,“**执行**”代理根据每个生成的研究问题寻找最相关的信息。最后,“**计划者**”过滤和聚合所有相关信息并创建研究报告。
+代理同时利用 gpt-40-mini 和 gpt-4o(128K 上下文)来完成一项研究任务。我们仅在必要时使用这两种方法对成本进行优化。**研究任务平均耗时约 3 分钟,成本约为 ~0.1 美元**。
+
+
+
+
+
+
+详细说明:
+* 根据研究搜索或任务创建特定领域的代理。
+* 生成一组研究问题,这些问题共同形成答案对任何给定任务的客观意见。
+* 针对每个研究问题,触发一个爬虫代理,从在线资源中搜索与给定任务相关的信息。
+* 对于每一个抓取的资源,根据相关信息进行汇总,并跟踪其来源。
+* 最后,对所有汇总的资料来源进行过滤和汇总,并生成最终研究报告。
+
+## 演示
+https://github.com/assafelovic/gpt-researcher/assets/13554167/a00c89a6-a295-4dd0-b58d-098a31c40fda
+
+## 教程
+ - [运行原理](https://docs.gptr.dev/blog/building-gpt-researcher)
+ - [如何安装](https://www.loom.com/share/04ebffb6ed2a4520a27c3e3addcdde20?sid=da1848e8-b1f1-42d1-93c3-5b0b9c3b24ea)
+ - [现场演示](https://www.loom.com/share/6a3385db4e8747a1913dd85a7834846f?sid=a740fd5b-2aa3-457e-8fb7-86976f59f9b8)
+
+## 特性
+- 📝 生成研究问题、大纲、资源和课题报告
+- 🌐 每项研究汇总超过20个网络资源,形成客观和真实的结论
+- 🖥️ 包括易于使用的web界面 (HTML/CSS/JS)
+- 🔍 支持JavaScript网络资源抓取功能
+- 📂 追踪访问过和使用过的网络资源和来源
+- 📄 将研究报告导出为PDF或其他格式...
+
+## 📖 文档
+
+请参阅[此处](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started),了解完整文档:
+
+- 入门(安装、设置环境、简单示例)
+- 操作示例(演示、集成、docker 支持)
+- 参考资料(API完整文档)
+- Tavily 应用程序接口集成(核心概念的高级解释)
+
+## 快速开始
+> **步骤 0** - 安装 Python 3.11 或更高版本。[参见此处](https://www.tutorialsteacher.com/python/install-python) 获取详细指南。
+
+
+
+> **步骤 1** - 下载项目
+
+```bash
+$ git clone https://github.com/assafelovic/gpt-researcher.git
+$ cd gpt-researcher
+```
+
+
+
+> **步骤2** -安装依赖项
+```bash
+$ pip install -r requirements.txt
+```
+
+
+> **第 3 步** - 使用 OpenAI 密钥和 Tavily API 密钥创建 .env 文件,或直接导出该文件
+
+```bash
+$ export OPENAI_API_KEY={Your OpenAI API Key here}
+```
+```bash
+$ export TAVILY_API_KEY={Your Tavily API Key here}
+```
+
+- **LLM,我们推荐使用 [OpenAI GPT](https://platform.openai.com/docs/guides/gpt)**,但您也可以使用 [Langchain Adapter](https://python.langchain.com/docs/guides/adapters/openai) 支持的任何其他 LLM 模型(包括开源),只需在 config/config.py 中更改 llm 模型和提供者即可。请按照 [这份指南](https://python.langchain.com/docs/integrations/llms/) 学习如何将 LLM 与 Langchain 集成。
+- **对于搜索引擎,我们推荐使用 [Tavily Search API](https://app.tavily.com)(已针对 LLM 进行优化)**,但您也可以选择其他搜索引擎,只需将 config/config.py 中的搜索提供程序更改为 "duckduckgo"、"googleAPI"、"searchapi"、"googleSerp "或 "searx "即可。然后在 config.py 文件中添加相应的 env API 密钥。
+- **我们强烈建议使用 [OpenAI GPT](https://platform.openai.com/docs/guides/gpt) 模型和 [Tavily Search API](https://app.tavily.com) 以获得最佳性能。**
+
+
+> **第 4 步** - 使用 FastAPI 运行代理
+
+```bash
+$ uvicorn main:app --reload
+```
+
+
+> **第 5 步** - 在任何浏览器上访问 http://localhost:8000,享受研究乐趣!
+
+要了解如何开始使用 Docker 或了解有关功能和服务的更多信息,请访问 [documentation](https://docs.gptr.dev) 页面。
+
+## 🚀 贡献
+我们非常欢迎您的贡献!如果您感兴趣,请查看 [contributing](CONTRIBUTING.md)。
+
+如果您有兴趣加入我们的任务,请查看我们的 [路线图](https://trello.com/b/3O7KBePw/gpt-researcher-roadmap) 页面,并通过我们的 [Discord 社区](https://discord.gg/QgZXvJAccX) 联系我们。
+
+## ✉️ 支持 / 联系我们
+- [社区讨论区](https://discord.gg/spBgZmm3Xe)
+- 我们的邮箱: support@tavily.com
+
+## 🛡 免责声明
+
+本项目 "GPT Researcher "是一个实验性应用程序,按 "现状 "提供,不做任何明示或暗示的保证。我们根据 MIT 许可分享用于学术目的的代码。本文不提供任何学术建议,也不建议在学术或研究论文中使用。
+
+我们对客观研究主张的看法:
+1. 我们抓取系统的全部目的是减少不正确的事实。如何解决?我们抓取的网站越多,错误数据的可能性就越小。我们每项研究都会收集20条信息,它们全部错误的可能性极低。
+2. 我们的目标不是消除偏见,而是尽可能减少偏见。**作为一个社区,我们在这里探索最有效的人机互动**。
+3. 在研究过程中,人们也容易产生偏见,因为大多数人对自己研究的课题都有自己的看法。这个工具可以搜罗到许多观点,并均匀地解释各种不同的观点,而有偏见的人是绝对读不到这些观点的。
+
+**请注意,使用 GPT-4 语言模型可能会因使用令牌而产生高昂费用**。使用本项目即表示您承认有责任监控和管理自己的令牌使用情况及相关费用。强烈建议您定期检查 OpenAI API 的使用情况,并设置任何必要的限制或警报,以防止发生意外费用。
+
+---
+
+
+
+
+
+
+
+
+
+
diff --git a/README.md b/README.md
index d7ffae005dee34bb6f27fadcb8842d12a2461592..b7493cf6278d3678ea398c18ce41be8ba43a7007 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,231 @@
----
-title: GPT Researcher
-emoji: 👁
-colorFrom: indigo
-colorTo: red
-sdk: static
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+
+
+####
+
+[](https://gptr.dev)
+[](https://docs.gptr.dev)
+[](https://discord.gg/QgZXvJAccX)
+
+[](https://badge.fury.io/py/gpt-researcher)
+
+[](https://colab.research.google.com/github/assafelovic/gpt-researcher/blob/master/docs/docs/examples/pip-run.ipynb)
+[](https://hub.docker.com/r/gptresearcher/gpt-researcher)
+[](https://twitter.com/assaf_elovic)
+
+[English](README.md) | [中文](README-zh_CN.md) | [日本語](README-ja_JP.md) | [한국어](README-ko_KR.md)
+
+
+
+# 🔎 GPT Researcher
+
+**GPT Researcher is an autonomous agent designed for comprehensive web and local research on any given task.**
+
+The agent produces detailed, factual, and unbiased research reports with citations. GPT Researcher provides a full suite of customization options to create tailor made and domain specific research agents. Inspired by the recent [Plan-and-Solve](https://arxiv.org/abs/2305.04091) and [RAG](https://arxiv.org/abs/2005.11401) papers, GPT Researcher addresses misinformation, speed, determinism, and reliability by offering stable performance and increased speed through parallelized agent work.
+
+**Our mission is to empower individuals and organizations with accurate, unbiased, and factual information through AI.**
+
+## Why GPT Researcher?
+
+- Objective conclusions for manual research can take weeks, requiring vast resources and time.
+- LLMs trained on outdated information can hallucinate, becoming irrelevant for current research tasks.
+- Current LLMs have token limitations, insufficient for generating long research reports.
+- Limited web sources in existing services lead to misinformation and shallow results.
+- Selective web sources can introduce bias into research tasks.
+
+## Demo
+https://github.com/user-attachments/assets/2cc38f6a-9f66-4644-9e69-a46c40e296d4
+
+## Architecture
+
+The core idea is to utilize 'planner' and 'execution' agents. The planner generates research questions, while the execution agents gather relevant information. The publisher then aggregates all findings into a comprehensive report.
+
+
+
+
+
+Steps:
+* Create a task-specific agent based on a research query.
+* Generate questions that collectively form an objective opinion on the task.
+* Use a crawler agent for gathering information for each question.
+* Summarize and source-track each resource.
+* Filter and aggregate summaries into a final research report.
+
+## Tutorials
+ - [How it Works](https://docs.gptr.dev/blog/building-gpt-researcher)
+ - [How to Install](https://www.loom.com/share/04ebffb6ed2a4520a27c3e3addcdde20?sid=da1848e8-b1f1-42d1-93c3-5b0b9c3b24ea)
+ - [Live Demo](https://www.loom.com/share/6a3385db4e8747a1913dd85a7834846f?sid=a740fd5b-2aa3-457e-8fb7-86976f59f9b8)
+
+## Features
+
+- 📝 Generate detailed research reports using web and local documents.
+- 🖼️ Smart image scraping and filtering for reports.
+- 📜 Generate detailed reports exceeding 2,000 words.
+- 🌐 Aggregate over 20 sources for objective conclusions.
+- 🖥️ Frontend available in lightweight (HTML/CSS/JS) and production-ready (NextJS + Tailwind) versions.
+- 🔍 JavaScript-enabled web scraping.
+- 📂 Maintains memory and context throughout research.
+- 📄 Export reports to PDF, Word, and other formats.
+
+## 📖 Documentation
+
+See the [Documentation](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started) for:
+- Installation and setup guides
+- Configuration and customization options
+- How-To examples
+- Full API references
+
+## ⚙️ Getting Started
+
+### Installation
+
+1. Install Python 3.11 or later. [Guide](https://www.tutorialsteacher.com/python/install-python).
+2. Clone the project and navigate to the directory:
+
+ ```bash
+ git clone https://github.com/assafelovic/gpt-researcher.git
+ cd gpt-researcher
+ ```
+
+3. Set up API keys by exporting them or storing them in a `.env` file.
+
+ ```bash
+ export OPENAI_API_KEY={Your OpenAI API Key here}
+ export TAVILY_API_KEY={Your Tavily API Key here}
+ ```
+
+4. Install dependencies and start the server:
+
+ ```bash
+ pip install -r requirements.txt
+ python -m uvicorn main:app --reload
+ ```
+
+Visit [http://localhost:8000](http://localhost:8000) to start.
+
+For other setups (e.g., Poetry or virtual environments), check the [Getting Started page](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started).
+
+## Run as PIP package
+```bash
+pip install gpt-researcher
+
+```
+### Example Usage:
+```python
+...
+from gpt_researcher import GPTResearcher
+
+query = "why is Nvidia stock going up?"
+researcher = GPTResearcher(query=query, report_type="research_report")
+# Conduct research on the given query
+research_result = await researcher.conduct_research()
+# Write the report
+report = await researcher.write_report()
+...
+```
+
+**For more examples and configurations, please refer to the [PIP documentation](https://docs.gptr.dev/docs/gpt-researcher/gptr/pip-package) page.**
+
+
+## Run with Docker
+
+> **Step 1** - [Install Docker](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started-with-docker)
+
+> **Step 2** - Clone the '.env.example' file, add your API Keys to the cloned file and save the file as '.env'
+
+> **Step 3** - Within the docker-compose file comment out services that you don't want to run with Docker.
+
+```bash
+docker-compose up --build
+```
+
+If that doesn't work, try running it without the dash:
+```bash
+docker compose up --build
+```
+
+
+> **Step 4** - By default, if you haven't uncommented anything in your docker-compose file, this flow will start 2 processes:
+ - the Python server running on localhost:8000
+ - the React app running on localhost:3000
+
+Visit localhost:3000 on any browser and enjoy researching!
+
+
+
+## 📄 Research on Local Documents
+
+You can instruct the GPT Researcher to run research tasks based on your local documents. Currently supported file formats are: PDF, plain text, CSV, Excel, Markdown, PowerPoint, and Word documents.
+
+Step 1: Add the env variable `DOC_PATH` pointing to the folder where your documents are located.
+
+```bash
+export DOC_PATH="./my-docs"
+```
+
+Step 2:
+ - If you're running the frontend app on localhost:8000, simply select "My Documents" from the "Report Source" Dropdown Options.
+ - If you're running GPT Researcher with the [PIP package](https://docs.tavily.com/docs/gpt-researcher/pip-package), pass the `report_source` argument as "local" when you instantiate the `GPTResearcher` class [code sample here](https://docs.gptr.dev/docs/gpt-researcher/context/tailored-research).
+
+
+## 👪 Multi-Agent Assistant
+As AI evolves from prompt engineering and RAG to multi-agent systems, we're excited to introduce our new multi-agent assistant built with [LangGraph](https://python.langchain.com/v0.1/docs/langgraph/).
+
+By using LangGraph, the research process can be significantly improved in depth and quality by leveraging multiple agents with specialized skills. Inspired by the recent [STORM](https://arxiv.org/abs/2402.14207) paper, this project showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication.
+
+An average run generates a 5-6 page research report in multiple formats such as PDF, Docx and Markdown.
+
+Check it out [here](https://github.com/assafelovic/gpt-researcher/tree/master/multi_agents) or head over to our [documentation](https://docs.gptr.dev/docs/gpt-researcher/multi_agents/langgraph) for more information.
+
+## 🖥️ Frontend Applications
+
+GPT-Researcher now features an enhanced frontend to improve the user experience and streamline the research process. The frontend offers:
+
+- An intuitive interface for inputting research queries
+- Real-time progress tracking of research tasks
+- Interactive display of research findings
+- Customizable settings for tailored research experiences
+
+Two deployment options are available:
+1. A lightweight static frontend served by FastAPI
+2. A feature-rich NextJS application for advanced functionality
+
+For detailed setup instructions and more information about the frontend features, please visit our [documentation page](https://docs.gptr.dev/docs/gpt-researcher/frontend/frontend).
+
+## 🚀 Contributing
+We highly welcome contributions! Please check out [contributing](https://github.com/assafelovic/gpt-researcher/blob/master/CONTRIBUTING.md) if you're interested.
+
+Please check out our [roadmap](https://trello.com/b/3O7KBePw/gpt-researcher-roadmap) page and reach out to us via our [Discord community](https://discord.gg/QgZXvJAccX) if you're interested in joining our mission.
+
+
+
+## ✉️ Support / Contact us
+- [Community Discord](https://discord.gg/spBgZmm3Xe)
+- Author Email: assaf.elovic@gmail.com
+
+## 🛡 Disclaimer
+
+This project, GPT Researcher, is an experimental application and is provided "as-is" without any warranty, express or implied. We are sharing codes for academic purposes under the Apache 2 license. Nothing herein is academic advice, and NOT a recommendation to use in academic or research papers.
+
+Our view on unbiased research claims:
+1. The main goal of GPT Researcher is to reduce incorrect and biased facts. How? We assume that the more sites we scrape the less chances of incorrect data. By scraping multiple sites per research, and choosing the most frequent information, the chances that they are all wrong is extremely low.
+2. We do not aim to eliminate biases; we aim to reduce it as much as possible. **We are here as a community to figure out the most effective human/llm interactions.**
+3. In research, people also tend towards biases as most have already opinions on the topics they research about. This tool scrapes many opinions and will evenly explain diverse views that a biased person would never have read.
+
+---
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ⬆️ Back to Top
+
diff --git a/__pycache__/main.cpython-312.pyc b/__pycache__/main.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bb8858398bb9345ef8947017430d289ef170795
Binary files /dev/null and b/__pycache__/main.cpython-312.pyc differ
diff --git a/backend/__init__.py b/backend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..870e9568c7d9a67bb88f952133124804fb322e7b
--- /dev/null
+++ b/backend/__init__.py
@@ -0,0 +1 @@
+from multi_agents import agents
\ No newline at end of file
diff --git a/backend/__pycache__/__init__.cpython-312.pyc b/backend/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c928be3f62fca1fa9dc21204233a3d01ad8f02a9
Binary files /dev/null and b/backend/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/__pycache__/utils.cpython-312.pyc b/backend/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c732e3f3b1c06c50cc1664551fdb94d579813e3
Binary files /dev/null and b/backend/__pycache__/utils.cpython-312.pyc differ
diff --git a/backend/chat/__init__.py b/backend/chat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..00c51a25e6c0bfc2d1bc6e1261f4cf21b69fc239
--- /dev/null
+++ b/backend/chat/__init__.py
@@ -0,0 +1 @@
+from .chat import ChatAgentWithMemory
\ No newline at end of file
diff --git a/backend/chat/__pycache__/__init__.cpython-312.pyc b/backend/chat/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69516bef55195d81a8e84e40561aa23d50af16f7
Binary files /dev/null and b/backend/chat/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/chat/__pycache__/chat.cpython-312.pyc b/backend/chat/__pycache__/chat.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59291f07703bbd5d27539748b8ede6c69af12c70
Binary files /dev/null and b/backend/chat/__pycache__/chat.cpython-312.pyc differ
diff --git a/backend/chat/chat.py b/backend/chat/chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb7a53abedbee0d3978d9503a6630887ac91cda4
--- /dev/null
+++ b/backend/chat/chat.py
@@ -0,0 +1,106 @@
+from fastapi import WebSocket
+import uuid
+
+from gpt_researcher.utils.llm import get_llm
+from gpt_researcher.memory import Memory
+from gpt_researcher.config.config import Config
+
+from langgraph.prebuilt import create_react_agent
+from langgraph.checkpoint.memory import MemorySaver
+
+from langchain_community.vectorstores import InMemoryVectorStore
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.tools import Tool, tool
+
+class ChatAgentWithMemory:
+ def __init__(
+ self,
+ report: str,
+ config_path,
+ headers,
+ vector_store = None
+ ):
+ self.report = report
+ self.headers = headers
+ self.config = Config(config_path)
+ self.vector_store = vector_store
+ self.graph = self.create_agent()
+
+ def create_agent(self):
+ """Create React Agent Graph"""
+ cfg = Config()
+
+ # Retrieve LLM using get_llm with settings from config
+ provider = get_llm(
+ llm_provider=cfg.smart_llm_provider,
+ model=cfg.smart_llm_model,
+ temperature=0.35,
+ max_tokens=cfg.smart_token_limit,
+ **self.config.llm_kwargs
+ ).llm
+
+ # If vector_store is not initialized, process documents and add to vector_store
+ if not self.vector_store:
+ documents = self._process_document(self.report)
+ self.chat_config = {"configurable": {"thread_id": str(uuid.uuid4())}}
+ self.embedding = Memory(
+ cfg.embedding_provider,
+ cfg.embedding_model,
+ **cfg.embedding_kwargs
+ ).get_embeddings()
+ self.vector_store = InMemoryVectorStore(self.embedding)
+ self.vector_store.add_texts(documents)
+
+ # Create the React Agent Graph with the configured provider
+ graph = create_react_agent(
+ provider,
+ tools=[self.vector_store_tool(self.vector_store)],
+ checkpointer=MemorySaver()
+ )
+
+ return graph
+
+ def vector_store_tool(self, vector_store) -> Tool:
+ """Create Vector Store Tool"""
+ @tool
+ def retrieve_info(query):
+ """
+ Consult the report for relevant contexts whenever you don't know something
+ """
+ retriever = vector_store.as_retriever(k = 4)
+ return retriever.invoke(query)
+ return retrieve_info
+
+ def _process_document(self, report):
+ """Split Report into Chunks"""
+ text_splitter = RecursiveCharacterTextSplitter(
+ chunk_size=1024,
+ chunk_overlap=20,
+ length_function=len,
+ is_separator_regex=False,
+ )
+ documents = text_splitter.split_text(report)
+ return documents
+
+ async def chat(self, message, websocket):
+ """Chat with React Agent"""
+ message = f"""
+ You are GPT Researcher, a autonomous research agent created by an open source community at https://github.com/assafelovic/gpt-researcher, homepage: https://gptr.dev.
+ To learn more about GPT Researcher you can suggest to check out: https://docs.gptr.dev.
+
+ This is a chat message between the user and you: GPT Researcher.
+ The chat is about a research reports that you created. Answer based on the given context and report.
+ You must include citations to your answer based on the report.
+
+ Report: {self.report}
+ User Message: {message}
+ """
+ inputs = {"messages": [("user", message)]}
+ response = await self.graph.ainvoke(inputs, config=self.chat_config)
+ ai_message = response["messages"][-1].content
+ if websocket is not None:
+ await websocket.send_json({"type": "chat", "content": ai_message})
+
+ def get_context(self):
+ """return the current context of the chat"""
+ return self.report
diff --git a/backend/memory/__init__.py b/backend/memory/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/backend/memory/draft.py b/backend/memory/draft.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5e6862146d56672857e837a91b31c6b0e23b962
--- /dev/null
+++ b/backend/memory/draft.py
@@ -0,0 +1,10 @@
+from typing import TypedDict, List, Annotated
+import operator
+
+
+class DraftState(TypedDict):
+ task: dict
+ topic: str
+ draft: dict
+ review: str
+ revision_notes: str
\ No newline at end of file
diff --git a/backend/memory/research.py b/backend/memory/research.py
new file mode 100644
index 0000000000000000000000000000000000000000..337e1f17705e8eb44e99e3fa37a390fa2e36f423
--- /dev/null
+++ b/backend/memory/research.py
@@ -0,0 +1,20 @@
+from typing import TypedDict, List, Annotated
+import operator
+
+
+class ResearchState(TypedDict):
+ task: dict
+ initial_research: str
+ sections: List[str]
+ research_data: List[dict]
+ # Report layout
+ title: str
+ headers: dict
+ date: str
+ table_of_contents: str
+ introduction: str
+ conclusion: str
+ sources: List[str]
+ report: str
+
+
diff --git a/backend/report_type/__init__.py b/backend/report_type/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f1e0faef859d32df2a48fbf67c99bc7b66aa1db
--- /dev/null
+++ b/backend/report_type/__init__.py
@@ -0,0 +1,7 @@
+from .basic_report.basic_report import BasicReport
+from .detailed_report.detailed_report import DetailedReport
+
+__all__ = [
+ "BasicReport",
+ "DetailedReport"
+]
\ No newline at end of file
diff --git a/backend/report_type/__pycache__/__init__.cpython-312.pyc b/backend/report_type/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7c096c84c1dff1a9618079809c4243f29139634
Binary files /dev/null and b/backend/report_type/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/report_type/basic_report/__init__.py b/backend/report_type/basic_report/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/backend/report_type/basic_report/__pycache__/__init__.cpython-312.pyc b/backend/report_type/basic_report/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81e7cb3045a3f9ed56d2afcef731a0bdfdfbe4a4
Binary files /dev/null and b/backend/report_type/basic_report/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/report_type/basic_report/__pycache__/basic_report.cpython-312.pyc b/backend/report_type/basic_report/__pycache__/basic_report.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80106abafb9f41bf84ec5d940b6dcaa0f23fa87d
Binary files /dev/null and b/backend/report_type/basic_report/__pycache__/basic_report.cpython-312.pyc differ
diff --git a/backend/report_type/basic_report/basic_report.py b/backend/report_type/basic_report/basic_report.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd29c732d0b6d887869984ad8da8fcf0493c5c1c
--- /dev/null
+++ b/backend/report_type/basic_report/basic_report.py
@@ -0,0 +1,46 @@
+from fastapi import WebSocket
+from typing import Any
+
+from gpt_researcher import GPTResearcher
+
+
+class BasicReport:
+ def __init__(
+ self,
+ query: str,
+ report_type: str,
+ report_source: str,
+ source_urls,
+ document_urls,
+ tone: Any,
+ config_path: str,
+ websocket: WebSocket,
+ headers=None
+ ):
+ self.query = query
+ self.report_type = report_type
+ self.report_source = report_source
+ self.source_urls = source_urls
+ self.document_urls = document_urls
+ self.tone = tone
+ self.config_path = config_path
+ self.websocket = websocket
+ self.headers = headers or {}
+
+ async def run(self):
+ # Initialize researcher
+ researcher = GPTResearcher(
+ query=self.query,
+ report_type=self.report_type,
+ report_source=self.report_source,
+ source_urls=self.source_urls,
+ document_urls=self.document_urls,
+ tone=self.tone,
+ config_path=self.config_path,
+ websocket=self.websocket,
+ headers=self.headers
+ )
+
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report
diff --git a/backend/report_type/detailed_report/README.md b/backend/report_type/detailed_report/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1348d7a22f4df4304990e4b57e406ae16c16a660
--- /dev/null
+++ b/backend/report_type/detailed_report/README.md
@@ -0,0 +1,12 @@
+## Detailed Reports
+
+Introducing long and detailed reports, with a completely new architecture inspired by the latest [STORM](https://arxiv.org/abs/2402.14207) paper.
+
+In this method we do the following:
+
+1. Trigger Initial GPT Researcher report based on task
+2. Generate subtopics from research summary
+3. For each subtopic the headers of the subtopic report are extracted and accumulated
+4. For each subtopic a report is generated making sure that any information about the headers accumulated until now are not re-generated.
+5. An additional introduction section is written along with a table of contents constructed from the entire report.
+6. The final report is constructed by appending these : Intro + Table of contents + Subsection reports
\ No newline at end of file
diff --git a/backend/report_type/detailed_report/__init__.py b/backend/report_type/detailed_report/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/backend/report_type/detailed_report/__pycache__/__init__.cpython-312.pyc b/backend/report_type/detailed_report/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e75aba000b4992347fd191acd8f398d8007fdf2c
Binary files /dev/null and b/backend/report_type/detailed_report/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/report_type/detailed_report/__pycache__/detailed_report.cpython-312.pyc b/backend/report_type/detailed_report/__pycache__/detailed_report.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f74a5e6146f32fa3b8c1149e4b8705c6588c8a9
Binary files /dev/null and b/backend/report_type/detailed_report/__pycache__/detailed_report.cpython-312.pyc differ
diff --git a/backend/report_type/detailed_report/detailed_report.py b/backend/report_type/detailed_report/detailed_report.py
new file mode 100644
index 0000000000000000000000000000000000000000..290f2215b8e8f808501ff1fba84a7e94b31e1b30
--- /dev/null
+++ b/backend/report_type/detailed_report/detailed_report.py
@@ -0,0 +1,139 @@
+import asyncio
+from typing import List, Dict, Set, Optional, Any
+from fastapi import WebSocket
+
+from gpt_researcher import GPTResearcher
+
+
+class DetailedReport:
+ def __init__(
+ self,
+ query: str,
+ report_type: str,
+ report_source: str,
+ source_urls: List[str] = [],
+ document_urls: List[str] = [],
+ config_path: str = None,
+ tone: Any = "",
+ websocket: WebSocket = None,
+ subtopics: List[Dict] = [],
+ headers: Optional[Dict] = None
+ ):
+ self.query = query
+ self.report_type = report_type
+ self.report_source = report_source
+ self.source_urls = source_urls
+ self.document_urls = document_urls
+ self.config_path = config_path
+ self.tone = tone
+ self.websocket = websocket
+ self.subtopics = subtopics
+ self.headers = headers or {}
+
+ self.gpt_researcher = GPTResearcher(
+ query=self.query,
+ report_type="research_report",
+ report_source=self.report_source,
+ source_urls=self.source_urls,
+ document_urls=self.document_urls,
+ config_path=self.config_path,
+ tone=self.tone,
+ websocket=self.websocket,
+ headers=self.headers
+ )
+ self.existing_headers: List[Dict] = []
+ self.global_context: List[str] = []
+ self.global_written_sections: List[str] = []
+ self.global_urls: Set[str] = set(
+ self.source_urls) if self.source_urls else set()
+
+ async def run(self) -> str:
+ await self._initial_research()
+ subtopics = await self._get_all_subtopics()
+ report_introduction = await self.gpt_researcher.write_introduction()
+ _, report_body = await self._generate_subtopic_reports(subtopics)
+ self.gpt_researcher.visited_urls.update(self.global_urls)
+ report = await self._construct_detailed_report(report_introduction, report_body)
+ return report
+
+ async def _initial_research(self) -> None:
+ await self.gpt_researcher.conduct_research()
+ self.global_context = self.gpt_researcher.context
+ self.global_urls = self.gpt_researcher.visited_urls
+
+ async def _get_all_subtopics(self) -> List[Dict]:
+ subtopics_data = await self.gpt_researcher.get_subtopics()
+
+ all_subtopics = []
+ if subtopics_data and subtopics_data.subtopics:
+ for subtopic in subtopics_data.subtopics:
+ all_subtopics.append({"task": subtopic.task})
+ else:
+ print(f"Unexpected subtopics data format: {subtopics_data}")
+
+ return all_subtopics
+
+ async def _generate_subtopic_reports(self, subtopics: List[Dict]) -> tuple:
+ subtopic_reports = []
+ subtopics_report_body = ""
+
+ for subtopic in subtopics:
+ result = await self._get_subtopic_report(subtopic)
+ if result["report"]:
+ subtopic_reports.append(result)
+ subtopics_report_body += f"\n\n\n{result['report']}"
+
+ return subtopic_reports, subtopics_report_body
+
+ async def _get_subtopic_report(self, subtopic: Dict) -> Dict[str, str]:
+ current_subtopic_task = subtopic.get("task")
+ subtopic_assistant = GPTResearcher(
+ query=current_subtopic_task,
+ report_type="subtopic_report",
+ report_source=self.report_source,
+ websocket=self.websocket,
+ headers=self.headers,
+ parent_query=self.query,
+ subtopics=self.subtopics,
+ visited_urls=self.global_urls,
+ agent=self.gpt_researcher.agent,
+ role=self.gpt_researcher.role,
+ tone=self.tone,
+ )
+
+ subtopic_assistant.context = list(set(self.global_context))
+ await subtopic_assistant.conduct_research()
+
+ draft_section_titles = await subtopic_assistant.get_draft_section_titles(current_subtopic_task)
+
+ if not isinstance(draft_section_titles, str):
+ draft_section_titles = str(draft_section_titles)
+
+ parse_draft_section_titles = self.gpt_researcher.extract_headers(draft_section_titles)
+ parse_draft_section_titles_text = [header.get(
+ "text", "") for header in parse_draft_section_titles]
+
+ relevant_contents = await subtopic_assistant.get_similar_written_contents_by_draft_section_titles(
+ current_subtopic_task, parse_draft_section_titles_text, self.global_written_sections
+ )
+
+ subtopic_report = await subtopic_assistant.write_report(self.existing_headers, relevant_contents)
+
+ self.global_written_sections.extend(self.gpt_researcher.extract_sections(subtopic_report))
+ self.global_context = list(set(subtopic_assistant.context))
+ self.global_urls.update(subtopic_assistant.visited_urls)
+
+ self.existing_headers.append({
+ "subtopic task": current_subtopic_task,
+ "headers": self.gpt_researcher.extract_headers(subtopic_report),
+ })
+
+ return {"topic": subtopic, "report": subtopic_report}
+
+ async def _construct_detailed_report(self, introduction: str, report_body: str) -> str:
+ toc = self.gpt_researcher.table_of_contents(report_body)
+ conclusion = await self.gpt_researcher.write_report_conclusion(report_body)
+ conclusion_with_references = self.gpt_researcher.add_references(
+ conclusion, self.gpt_researcher.visited_urls)
+ report = f"{introduction}\n\n{toc}\n\n{report_body}\n\n{conclusion_with_references}"
+ return report
diff --git a/backend/server/__init__.py b/backend/server/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/backend/server/__pycache__/__init__.cpython-312.pyc b/backend/server/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..672366d9480e7ab9c2d808a48498f8447e52ca8a
Binary files /dev/null and b/backend/server/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/server/__pycache__/server.cpython-312.pyc b/backend/server/__pycache__/server.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02aa29ab278e5ae5828332acfe20b437c6b99ee2
Binary files /dev/null and b/backend/server/__pycache__/server.cpython-312.pyc differ
diff --git a/backend/server/__pycache__/server_utils.cpython-312.pyc b/backend/server/__pycache__/server_utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f78d761e038a1f84d00da4e8f6bd331d102f716f
Binary files /dev/null and b/backend/server/__pycache__/server_utils.cpython-312.pyc differ
diff --git a/backend/server/__pycache__/websocket_manager.cpython-312.pyc b/backend/server/__pycache__/websocket_manager.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74fd54ea92a6cf3adc771d8fd96af3127c740704
Binary files /dev/null and b/backend/server/__pycache__/websocket_manager.cpython-312.pyc differ
diff --git a/backend/server/app.py b/backend/server/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..e15d43a8cbd0c4a4400525c291f5a8dca96e6ab0
--- /dev/null
+++ b/backend/server/app.py
@@ -0,0 +1,16 @@
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+import logging
+
+logger = logging.getLogger(__name__)
+
+app = FastAPI()
+
+# Add CORS middleware
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"], # In production, replace with your frontend domain
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
\ No newline at end of file
diff --git a/backend/server/logging_config.py b/backend/server/logging_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3821273bcfe71721a707e2de56dd6e5b62d8c99
--- /dev/null
+++ b/backend/server/logging_config.py
@@ -0,0 +1,83 @@
+import logging
+import json
+import os
+from datetime import datetime
+from pathlib import Path
+
+class JSONResearchHandler:
+ def __init__(self, json_file):
+ self.json_file = json_file
+ self.research_data = {
+ "timestamp": datetime.now().isoformat(),
+ "events": [],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0
+ }
+ }
+
+ def log_event(self, event_type: str, data: dict):
+ self.research_data["events"].append({
+ "timestamp": datetime.now().isoformat(),
+ "type": event_type,
+ "data": data
+ })
+ self._save_json()
+
+ def update_content(self, key: str, value):
+ self.research_data["content"][key] = value
+ self._save_json()
+
+ def _save_json(self):
+ with open(self.json_file, 'w') as f:
+ json.dump(self.research_data, f, indent=2)
+
+def setup_research_logging():
+ # Create logs directory if it doesn't exist
+ logs_dir = Path("logs")
+ logs_dir.mkdir(exist_ok=True)
+
+ # Generate timestamp for log files
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+ # Create log file paths
+ log_file = logs_dir / f"research_{timestamp}.log"
+ json_file = logs_dir / f"research_{timestamp}.json"
+
+ # Configure file handler for research logs
+ file_handler = logging.FileHandler(log_file)
+ file_handler.setLevel(logging.INFO)
+ file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+
+ # Get research logger and configure it
+ research_logger = logging.getLogger('research')
+ research_logger.setLevel(logging.INFO)
+
+ # Remove any existing handlers to avoid duplicates
+ research_logger.handlers.clear()
+
+ # Add file handler
+ research_logger.addHandler(file_handler)
+
+ # Add stream handler for console output
+ console_handler = logging.StreamHandler()
+ console_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+ research_logger.addHandler(console_handler)
+
+ # Prevent propagation to root logger to avoid duplicate logs
+ research_logger.propagate = False
+
+ # Create JSON handler
+ json_handler = JSONResearchHandler(json_file)
+
+ return str(log_file), str(json_file), research_logger, json_handler
+
+# Create a function to get the logger and JSON handler
+def get_research_logger():
+ return logging.getLogger('research')
+
+def get_json_handler():
+ return getattr(logging.getLogger('research'), 'json_handler', None)
\ No newline at end of file
diff --git a/backend/server/server.py b/backend/server/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d2f3ee297771eba6e696dca81cb88c41ccd669d
--- /dev/null
+++ b/backend/server/server.py
@@ -0,0 +1,134 @@
+import json
+import os
+from typing import Dict, List
+
+from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect, File, UploadFile, Header
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.staticfiles import StaticFiles
+from fastapi.templating import Jinja2Templates
+from pydantic import BaseModel
+
+from backend.server.websocket_manager import WebSocketManager
+from backend.server.server_utils import (
+ get_config_dict,
+ update_environment_variables, handle_file_upload, handle_file_deletion,
+ execute_multi_agents, handle_websocket_communication
+)
+
+
+from gpt_researcher.utils.logging_config import setup_research_logging
+
+import logging
+
+# Get logger instance
+logger = logging.getLogger(__name__)
+
+# Don't override parent logger settings
+logger.propagate = True
+
+logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s - %(levelname)s - %(message)s",
+ handlers=[
+ logging.StreamHandler() # Only log to console
+ ]
+)
+
+# Models
+
+
+class ResearchRequest(BaseModel):
+ task: str
+ report_type: str
+ agent: str
+
+
+class ConfigRequest(BaseModel):
+ ANTHROPIC_API_KEY: str
+ TAVILY_API_KEY: str
+ LANGCHAIN_TRACING_V2: str
+ LANGCHAIN_API_KEY: str
+ OPENAI_API_KEY: str
+ DOC_PATH: str
+ RETRIEVER: str
+ GOOGLE_API_KEY: str = ''
+ GOOGLE_CX_KEY: str = ''
+ BING_API_KEY: str = ''
+ SEARCHAPI_API_KEY: str = ''
+ SERPAPI_API_KEY: str = ''
+ SERPER_API_KEY: str = ''
+ SEARX_URL: str = ''
+ XAI_API_KEY: str
+ DEEPSEEK_API_KEY: str
+
+
+# App initialization
+app = FastAPI()
+
+# Static files and templates
+app.mount("/site", StaticFiles(directory="./frontend"), name="site")
+app.mount("/static", StaticFiles(directory="./frontend/static"), name="static")
+templates = Jinja2Templates(directory="./frontend")
+
+# WebSocket manager
+manager = WebSocketManager()
+
+# Middleware
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["http://localhost:3000"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+# Constants
+DOC_PATH = os.getenv("DOC_PATH", "./my-docs")
+
+# Startup event
+
+
+@app.on_event("startup")
+def startup_event():
+ os.makedirs("outputs", exist_ok=True)
+ app.mount("/outputs", StaticFiles(directory="outputs"), name="outputs")
+ os.makedirs(DOC_PATH, exist_ok=True)
+
+
+# Routes
+
+
+@app.get("/")
+async def read_root(request: Request):
+ return templates.TemplateResponse("index.html", {"request": request, "report": None})
+
+
+@app.get("/files/")
+async def list_files():
+ files = os.listdir(DOC_PATH)
+ print(f"Files in {DOC_PATH}: {files}")
+ return {"files": files}
+
+
+@app.post("/api/multi_agents")
+async def run_multi_agents():
+ return await execute_multi_agents(manager)
+
+
+@app.post("/upload/")
+async def upload_file(file: UploadFile = File(...)):
+ return await handle_file_upload(file, DOC_PATH)
+
+
+@app.delete("/files/{filename}")
+async def delete_file(filename: str):
+ return await handle_file_deletion(filename, DOC_PATH)
+
+
+@app.websocket("/ws")
+async def websocket_endpoint(websocket: WebSocket):
+ await manager.connect(websocket)
+ try:
+ await handle_websocket_communication(websocket, manager)
+ except WebSocketDisconnect:
+ await manager.disconnect(websocket)
diff --git a/backend/server/server_utils.py b/backend/server/server_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b06eceb18ff794bc376ebd4533b98c64786b9e84
--- /dev/null
+++ b/backend/server/server_utils.py
@@ -0,0 +1,259 @@
+import json
+import os
+import re
+import time
+import shutil
+from typing import Dict, List, Any
+from fastapi.responses import JSONResponse, FileResponse
+from gpt_researcher.document.document import DocumentLoader
+from backend.utils import write_md_to_pdf, write_md_to_word, write_text_to_md
+from pathlib import Path
+from datetime import datetime
+from fastapi import HTTPException
+import logging
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+class CustomLogsHandler:
+ """Custom handler to capture streaming logs from the research process"""
+ def __init__(self, websocket, task: str):
+ self.logs = []
+ self.websocket = websocket
+ sanitized_filename = sanitize_filename(f"task_{int(time.time())}_{task}")
+ self.log_file = os.path.join("outputs", f"{sanitized_filename}.json")
+ self.timestamp = datetime.now().isoformat()
+ # Initialize log file with metadata
+ os.makedirs("outputs", exist_ok=True)
+ with open(self.log_file, 'w') as f:
+ json.dump({
+ "timestamp": self.timestamp,
+ "events": [],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0
+ }
+ }, f, indent=2)
+
+ async def send_json(self, data: Dict[str, Any]) -> None:
+ """Store log data and send to websocket"""
+ # Send to websocket for real-time display
+ if self.websocket:
+ await self.websocket.send_json(data)
+
+ # Read current log file
+ with open(self.log_file, 'r') as f:
+ log_data = json.load(f)
+
+ # Update appropriate section based on data type
+ if data.get('type') == 'logs':
+ log_data['events'].append({
+ "timestamp": datetime.now().isoformat(),
+ "type": "event",
+ "data": data
+ })
+ else:
+ # Update content section for other types of data
+ log_data['content'].update(data)
+
+ # Save updated log file
+ with open(self.log_file, 'w') as f:
+ json.dump(log_data, f, indent=2)
+ logger.debug(f"Log entry written to: {self.log_file}")
+
+
+class Researcher:
+ def __init__(self, query: str, report_type: str = "research_report"):
+ self.query = query
+ self.report_type = report_type
+ # Generate unique ID for this research task
+ self.research_id = f"{datetime.now().strftime('%Y%m%d_%H%M%S')}_{hash(query)}"
+ # Initialize logs handler with research ID
+ self.logs_handler = CustomLogsHandler(self.research_id)
+ self.researcher = GPTResearcher(
+ query=query,
+ report_type=report_type,
+ websocket=self.logs_handler
+ )
+
+ async def research(self) -> dict:
+ """Conduct research and return paths to generated files"""
+ await self.researcher.conduct_research()
+ report = await self.researcher.write_report()
+
+ # Generate the files
+ sanitized_filename = sanitize_filename(f"task_{int(time.time())}_{self.query}")
+ file_paths = await generate_report_files(report, sanitized_filename)
+
+ # Get the JSON log path that was created by CustomLogsHandler
+ json_relative_path = os.path.relpath(self.logs_handler.log_file)
+
+ return {
+ "output": {
+ **file_paths, # Include PDF, DOCX, and MD paths
+ "json": json_relative_path
+ }
+ }
+
+def sanitize_filename(filename: str) -> str:
+ # Split into components
+ prefix, timestamp, *task_parts = filename.split('_')
+ task = '_'.join(task_parts)
+
+ # Calculate max length for task portion
+ # 255 - len("outputs/") - len("task_") - len(timestamp) - len("_.json") - safety_margin
+ max_task_length = 255 - 8 - 5 - 10 - 6 - 10 # ~216 chars for task
+
+ # Truncate task if needed
+ truncated_task = task[:max_task_length] if len(task) > max_task_length else task
+
+ # Reassemble and clean the filename
+ sanitized = f"{prefix}_{timestamp}_{truncated_task}"
+ return re.sub(r"[^\w\s-]", "", sanitized).strip()
+
+
+async def handle_start_command(websocket, data: str, manager):
+ json_data = json.loads(data[6:])
+ task, report_type, source_urls, document_urls, tone, headers, report_source = extract_command_data(
+ json_data)
+
+ if not task or not report_type:
+ print("Error: Missing task or report_type")
+ return
+
+ # Create logs handler with websocket and task
+ logs_handler = CustomLogsHandler(websocket, task)
+ # Initialize log content with query
+ await logs_handler.send_json({
+ "query": task,
+ "sources": [],
+ "context": [],
+ "report": ""
+ })
+
+ sanitized_filename = sanitize_filename(f"task_{int(time.time())}_{task}")
+
+ report = await manager.start_streaming(
+ task,
+ report_type,
+ report_source,
+ source_urls,
+ document_urls,
+ tone,
+ websocket,
+ headers
+ )
+ report = str(report)
+ file_paths = await generate_report_files(report, sanitized_filename)
+ # Add JSON log path to file_paths
+ file_paths["json"] = os.path.relpath(logs_handler.log_file)
+ await send_file_paths(websocket, file_paths)
+
+
+async def handle_human_feedback(data: str):
+ feedback_data = json.loads(data[14:]) # Remove "human_feedback" prefix
+ print(f"Received human feedback: {feedback_data}")
+ # TODO: Add logic to forward the feedback to the appropriate agent or update the research state
+
+async def handle_chat(websocket, data: str, manager):
+ json_data = json.loads(data[4:])
+ print(f"Received chat message: {json_data.get('message')}")
+ await manager.chat(json_data.get("message"), websocket)
+
+async def generate_report_files(report: str, filename: str) -> Dict[str, str]:
+ pdf_path = await write_md_to_pdf(report, filename)
+ docx_path = await write_md_to_word(report, filename)
+ md_path = await write_text_to_md(report, filename)
+ return {"pdf": pdf_path, "docx": docx_path, "md": md_path}
+
+
+async def send_file_paths(websocket, file_paths: Dict[str, str]):
+ await websocket.send_json({"type": "path", "output": file_paths})
+
+
+def get_config_dict(
+ langchain_api_key: str, openai_api_key: str, tavily_api_key: str,
+ google_api_key: str, google_cx_key: str, bing_api_key: str,
+ searchapi_api_key: str, serpapi_api_key: str, serper_api_key: str, searx_url: str
+) -> Dict[str, str]:
+ return {
+ "LANGCHAIN_API_KEY": langchain_api_key or os.getenv("LANGCHAIN_API_KEY", ""),
+ "OPENAI_API_KEY": openai_api_key or os.getenv("OPENAI_API_KEY", ""),
+ "TAVILY_API_KEY": tavily_api_key or os.getenv("TAVILY_API_KEY", ""),
+ "GOOGLE_API_KEY": google_api_key or os.getenv("GOOGLE_API_KEY", ""),
+ "GOOGLE_CX_KEY": google_cx_key or os.getenv("GOOGLE_CX_KEY", ""),
+ "BING_API_KEY": bing_api_key or os.getenv("BING_API_KEY", ""),
+ "SEARCHAPI_API_KEY": searchapi_api_key or os.getenv("SEARCHAPI_API_KEY", ""),
+ "SERPAPI_API_KEY": serpapi_api_key or os.getenv("SERPAPI_API_KEY", ""),
+ "SERPER_API_KEY": serper_api_key or os.getenv("SERPER_API_KEY", ""),
+ "SEARX_URL": searx_url or os.getenv("SEARX_URL", ""),
+ "LANGCHAIN_TRACING_V2": os.getenv("LANGCHAIN_TRACING_V2", "true"),
+ "DOC_PATH": os.getenv("DOC_PATH", "./my-docs"),
+ "RETRIEVER": os.getenv("RETRIEVER", ""),
+ "EMBEDDING_MODEL": os.getenv("OPENAI_EMBEDDING_MODEL", "")
+ }
+
+
+def update_environment_variables(config: Dict[str, str]):
+ for key, value in config.items():
+ os.environ[key] = value
+
+
+async def handle_file_upload(file, DOC_PATH: str) -> Dict[str, str]:
+ file_path = os.path.join(DOC_PATH, os.path.basename(file.filename))
+ with open(file_path, "wb") as buffer:
+ shutil.copyfileobj(file.file, buffer)
+ print(f"File uploaded to {file_path}")
+
+ document_loader = DocumentLoader(DOC_PATH)
+ await document_loader.load()
+
+ return {"filename": file.filename, "path": file_path}
+
+
+async def handle_file_deletion(filename: str, DOC_PATH: str) -> JSONResponse:
+ file_path = os.path.join(DOC_PATH, os.path.basename(filename))
+ if os.path.exists(file_path):
+ os.remove(file_path)
+ print(f"File deleted: {file_path}")
+ return JSONResponse(content={"message": "File deleted successfully"})
+ else:
+ print(f"File not found: {file_path}")
+ return JSONResponse(status_code=404, content={"message": "File not found"})
+
+
+async def execute_multi_agents(manager) -> Any:
+ websocket = manager.active_connections[0] if manager.active_connections else None
+ if websocket:
+ report = await run_research_task("Is AI in a hype cycle?", websocket, stream_output)
+ return {"report": report}
+ else:
+ return JSONResponse(status_code=400, content={"message": "No active WebSocket connection"})
+
+
+async def handle_websocket_communication(websocket, manager):
+ while True:
+ data = await websocket.receive_text()
+ if data.startswith("start"):
+ await handle_start_command(websocket, data, manager)
+ elif data.startswith("human_feedback"):
+ await handle_human_feedback(data)
+ elif data.startswith("chat"):
+ await handle_chat(websocket, data, manager)
+ else:
+ print("Error: Unknown command or not enough parameters provided.")
+
+
+def extract_command_data(json_data: Dict) -> tuple:
+ return (
+ json_data.get("task"),
+ json_data.get("report_type"),
+ json_data.get("source_urls"),
+ json_data.get("document_urls"),
+ json_data.get("tone"),
+ json_data.get("headers", {}),
+ json_data.get("report_source")
+ )
diff --git a/backend/server/websocket_manager.py b/backend/server/websocket_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a9b4c6e4844173aab30785297b8bf5b3db2a048
--- /dev/null
+++ b/backend/server/websocket_manager.py
@@ -0,0 +1,125 @@
+import asyncio
+import datetime
+from typing import Dict, List
+
+from fastapi import WebSocket
+
+from backend.report_type import BasicReport, DetailedReport
+from backend.chat import ChatAgentWithMemory
+
+from gpt_researcher.utils.enum import ReportType, Tone
+from multi_agents.main import run_research_task
+from gpt_researcher.actions import stream_output # Import stream_output
+from backend.server.server_utils import CustomLogsHandler
+
+
+class WebSocketManager:
+ """Manage websockets"""
+
+ def __init__(self):
+ """Initialize the WebSocketManager class."""
+ self.active_connections: List[WebSocket] = []
+ self.sender_tasks: Dict[WebSocket, asyncio.Task] = {}
+ self.message_queues: Dict[WebSocket, asyncio.Queue] = {}
+ self.chat_agent = None
+
+ async def start_sender(self, websocket: WebSocket):
+ """Start the sender task."""
+ queue = self.message_queues.get(websocket)
+ if not queue:
+ return
+
+ while True:
+ message = await queue.get()
+ if websocket in self.active_connections:
+ try:
+ if message == "ping":
+ await websocket.send_text("pong")
+ else:
+ await websocket.send_text(message)
+ except:
+ break
+ else:
+ break
+
+ async def connect(self, websocket: WebSocket):
+ """Connect a websocket."""
+ await websocket.accept()
+ self.active_connections.append(websocket)
+ self.message_queues[websocket] = asyncio.Queue()
+ self.sender_tasks[websocket] = asyncio.create_task(
+ self.start_sender(websocket))
+
+ async def disconnect(self, websocket: WebSocket):
+ """Disconnect a websocket."""
+ if websocket in self.active_connections:
+ self.active_connections.remove(websocket)
+ self.sender_tasks[websocket].cancel()
+ await self.message_queues[websocket].put(None)
+ del self.sender_tasks[websocket]
+ del self.message_queues[websocket]
+
+ async def start_streaming(self, task, report_type, report_source, source_urls, document_urls, tone, websocket, headers=None):
+ """Start streaming the output."""
+ tone = Tone[tone]
+ # add customized JSON config file path here
+ config_path = "default"
+ report = await run_agent(task, report_type, report_source, source_urls, document_urls, tone, websocket, headers = headers, config_path = config_path)
+ #Create new Chat Agent whenever a new report is written
+ self.chat_agent = ChatAgentWithMemory(report, config_path, headers)
+ return report
+
+ async def chat(self, message, websocket):
+ """Chat with the agent based message diff"""
+ if self.chat_agent:
+ await self.chat_agent.chat(message, websocket)
+ else:
+ await websocket.send_json({"type": "chat", "content": "Knowledge empty, please run the research first to obtain knowledge"})
+
+async def run_agent(task, report_type, report_source, source_urls, document_urls, tone: Tone, websocket, headers=None, config_path=""):
+ """Run the agent."""
+ start_time = datetime.datetime.now()
+
+ # Create logs handler for this research task
+ logs_handler = CustomLogsHandler(websocket, task)
+
+ # Initialize researcher based on report type
+ if report_type == "multi_agents":
+ report = await run_research_task(
+ query=task,
+ websocket=logs_handler, # Use logs_handler instead of raw websocket
+ stream_output=stream_output,
+ tone=tone,
+ headers=headers
+ )
+ report = report.get("report", "")
+
+ elif report_type == ReportType.DetailedReport.value:
+ researcher = DetailedReport(
+ query=task,
+ report_type=report_type,
+ report_source=report_source,
+ source_urls=source_urls,
+ document_urls=document_urls,
+ tone=tone,
+ config_path=config_path,
+ websocket=logs_handler, # Use logs_handler instead of raw websocket
+ headers=headers
+ )
+ report = await researcher.run()
+
+ else:
+ researcher = BasicReport(
+ query=task,
+ report_type=report_type,
+ report_source=report_source,
+ source_urls=source_urls,
+ document_urls=document_urls,
+ tone=tone,
+ config_path=config_path,
+ websocket=logs_handler, # Use logs_handler instead of raw websocket
+ headers=headers
+ )
+ report = await researcher.run()
+
+ return report
diff --git a/backend/utils.py b/backend/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5870747fdcdeb25b83937581f8a4a77e3d972b6a
--- /dev/null
+++ b/backend/utils.py
@@ -0,0 +1,92 @@
+import aiofiles
+import urllib
+import mistune
+
+async def write_to_file(filename: str, text: str) -> None:
+ """Asynchronously write text to a file in UTF-8 encoding.
+
+ Args:
+ filename (str): The filename to write to.
+ text (str): The text to write.
+ """
+ # Ensure text is a string
+ if not isinstance(text, str):
+ text = str(text)
+
+ # Convert text to UTF-8, replacing any problematic characters
+ text_utf8 = text.encode('utf-8', errors='replace').decode('utf-8')
+
+ async with aiofiles.open(filename, "w", encoding='utf-8') as file:
+ await file.write(text_utf8)
+
+async def write_text_to_md(text: str, filename: str = "") -> str:
+ """Writes text to a Markdown file and returns the file path.
+
+ Args:
+ text (str): Text to write to the Markdown file.
+
+ Returns:
+ str: The file path of the generated Markdown file.
+ """
+ file_path = f"outputs/{filename[:60]}.md"
+ await write_to_file(file_path, text)
+ return urllib.parse.quote(file_path)
+
+async def write_md_to_pdf(text: str, filename: str = "") -> str:
+ """Converts Markdown text to a PDF file and returns the file path.
+
+ Args:
+ text (str): Markdown text to convert.
+
+ Returns:
+ str: The encoded file path of the generated PDF.
+ """
+ file_path = f"outputs/{filename[:60]}.pdf"
+
+ try:
+ from md2pdf.core import md2pdf
+ md2pdf(file_path,
+ md_content=text,
+ # md_file_path=f"{file_path}.md",
+ css_file_path="./frontend/pdf_styles.css",
+ base_url=None)
+ print(f"Report written to {file_path}")
+ except Exception as e:
+ print(f"Error in converting Markdown to PDF: {e}")
+ return ""
+
+ encoded_file_path = urllib.parse.quote(file_path)
+ return encoded_file_path
+
+async def write_md_to_word(text: str, filename: str = "") -> str:
+ """Converts Markdown text to a DOCX file and returns the file path.
+
+ Args:
+ text (str): Markdown text to convert.
+
+ Returns:
+ str: The encoded file path of the generated DOCX.
+ """
+ file_path = f"outputs/{filename[:60]}.docx"
+
+ try:
+ from docx import Document
+ from htmldocx import HtmlToDocx
+ # Convert report markdown to HTML
+ html = mistune.html(text)
+ # Create a document object
+ doc = Document()
+ # Convert the html generated from the report to document format
+ HtmlToDocx().add_html_to_document(html, doc)
+
+ # Saving the docx document to file_path
+ doc.save(file_path)
+
+ print(f"Report written to {file_path}")
+
+ encoded_file_path = urllib.parse.quote(file_path)
+ return encoded_file_path
+
+ except Exception as e:
+ print(f"Error in converting Markdown to DOCX: {e}")
+ return ""
\ No newline at end of file
diff --git a/citation.cff b/citation.cff
new file mode 100644
index 0000000000000000000000000000000000000000..f87c4afe9a3b837df9173109532ba5f5820814e8
--- /dev/null
+++ b/citation.cff
@@ -0,0 +1,10 @@
+cff-version: 1.0.0
+message: "If you use this software, please cite it as below."
+authors:
+ - family-names: Elovic
+ given-names: Assaf
+title: gpt-researcher
+version: 0.5.4
+date-released: 2023-07-23
+repository-code: https://github.com/assafelovic/gpt-researcher
+url: https://gptr.dev
\ No newline at end of file
diff --git a/cli.py b/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..8543e38ebabd953b59fbcb1533152451832b85d0
--- /dev/null
+++ b/cli.py
@@ -0,0 +1,139 @@
+"""
+Provides a command line interface for the GPTResearcher class.
+
+Usage:
+
+```shell
+python cli.py "" --report_type
+```
+
+"""
+import asyncio
+import argparse
+from argparse import RawTextHelpFormatter
+from uuid import uuid4
+import os
+
+from dotenv import load_dotenv
+
+from gpt_researcher import GPTResearcher
+from gpt_researcher.utils.enum import ReportType, Tone
+from backend.report_type import DetailedReport
+
+# =============================================================================
+# CLI
+# =============================================================================
+
+cli = argparse.ArgumentParser(
+ description="Generate a research report.",
+ # Enables the use of newlines in the help message
+ formatter_class=RawTextHelpFormatter)
+
+# =====================================
+# Arg: Query
+# =====================================
+
+cli.add_argument(
+ # Position 0 argument
+ "query",
+ type=str,
+ help="The query to conduct research on.")
+
+# =====================================
+# Arg: Report Type
+# =====================================
+
+choices = [report_type.value for report_type in ReportType]
+
+report_type_descriptions = {
+ ReportType.ResearchReport.value: "Summary - Short and fast (~2 min)",
+ ReportType.DetailedReport.value: "Detailed - In depth and longer (~5 min)",
+ ReportType.ResourceReport.value: "",
+ ReportType.OutlineReport.value: "",
+ ReportType.CustomReport.value: "",
+ ReportType.SubtopicReport.value: ""
+}
+
+cli.add_argument(
+ "--report_type",
+ type=str,
+ help="The type of report to generate. Options:\n" + "\n".join(
+ f" {choice}: {report_type_descriptions[choice]}" for choice in choices
+ ),
+ # Deserialize ReportType as a List of strings:
+ choices=choices,
+ required=True)
+
+# First, let's see what values are actually in the Tone enum
+print([t.value for t in Tone])
+
+cli.add_argument(
+ "--tone",
+ type=str,
+ help="The tone of the report (optional).",
+ choices=["objective", "formal", "analytical", "persuasive", "informative",
+ "explanatory", "descriptive", "critical", "comparative", "speculative",
+ "reflective", "narrative", "humorous", "optimistic", "pessimistic"],
+ default="objective"
+)
+
+# =============================================================================
+# Main
+# =============================================================================
+
+
+async def main(args):
+ """
+ Conduct research on the given query, generate the report, and write
+ it as a markdown file to the output directory.
+ """
+ if args.report_type == 'detailed_report':
+ detailed_report = DetailedReport(
+ query=args.query,
+ report_type="research_report",
+ report_source="web_search",
+ )
+
+ report = await detailed_report.run()
+ else:
+ # Convert the simple keyword to the full Tone enum value
+ tone_map = {
+ "objective": Tone.Objective,
+ "formal": Tone.Formal,
+ "analytical": Tone.Analytical,
+ "persuasive": Tone.Persuasive,
+ "informative": Tone.Informative,
+ "explanatory": Tone.Explanatory,
+ "descriptive": Tone.Descriptive,
+ "critical": Tone.Critical,
+ "comparative": Tone.Comparative,
+ "speculative": Tone.Speculative,
+ "reflective": Tone.Reflective,
+ "narrative": Tone.Narrative,
+ "humorous": Tone.Humorous,
+ "optimistic": Tone.Optimistic,
+ "pessimistic": Tone.Pessimistic
+ }
+
+ researcher = GPTResearcher(
+ query=args.query,
+ report_type=args.report_type,
+ tone=tone_map[args.tone]
+ )
+
+ await researcher.conduct_research()
+
+ report = await researcher.write_report()
+
+ # Write the report to a file
+ artifact_filepath = f"outputs/{uuid4()}.md"
+ os.makedirs("outputs", exist_ok=True)
+ with open(artifact_filepath, "w") as f:
+ f.write(report)
+
+ print(f"Report written to '{artifact_filepath}'")
+
+if __name__ == "__main__":
+ load_dotenv()
+ args = cli.parse_args()
+ asyncio.run(main(args))
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..62a416c8645d756bddfe5d4e52a1b3ac0892c2a1
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,48 @@
+services:
+ gpt-researcher:
+ pull_policy: build
+ image: gptresearcher/gpt-researcher
+ build: ./
+ environment:
+ OPENAI_API_KEY: ${OPENAI_API_KEY}
+ TAVILY_API_KEY: ${TAVILY_API_KEY}
+ LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
+ LOGGING_LEVEL: INFO
+ volumes:
+ - ./outputs:/usr/src/app/outputs
+ restart: always
+ ports:
+ - 8000:8000
+ gptr-nextjs:
+ pull_policy: build
+ image: gptresearcher/gptr-nextjs
+ stdin_open: true
+ environment:
+ CHOKIDAR_USEPOLLING: true
+ LOGGING_LEVEL: INFO
+ build:
+ dockerfile: Dockerfile.dev
+ context: frontend/nextjs
+ volumes:
+ - /app/node_modules
+ - ./frontend/nextjs:/app
+ - ./outputs:/app/outputs
+ restart: always
+ ports:
+ - 3000:3000
+
+ gpt-researcher-tests:
+ image: gptresearcher/gpt-researcher-tests
+ build: ./
+ environment:
+ OPENAI_API_KEY: ${OPENAI_API_KEY}
+ TAVILY_API_KEY: ${TAVILY_API_KEY}
+ LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
+ LOGGING_LEVEL: INFO
+ profiles: ["test"]
+ command: >
+ /bin/sh -c "
+ pip install pytest pytest-asyncio faiss-cpu &&
+ python -m pytest tests/report-types.py &&
+ python -m pytest tests/vector-store.py
+ "
diff --git a/docs/CNAME b/docs/CNAME
new file mode 100644
index 0000000000000000000000000000000000000000..c5661873f63f93d752bab52acbd9296443f05926
--- /dev/null
+++ b/docs/CNAME
@@ -0,0 +1 @@
+docs.gptr.dev
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6cb2e952509f7258ad64bf2c7bfcfeb321d7a793
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,31 @@
+# Website
+
+This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
+
+## Prerequisites
+
+To build and test documentation locally, begin by downloading and installing [Node.js](https://nodejs.org/en/download/), and then installing [Yarn](https://classic.yarnpkg.com/en/).
+On Windows, you can install via the npm package manager (npm) which comes bundled with Node.js:
+
+```console
+npm install --global yarn
+```
+
+## Installation
+
+```console
+pip install pydoc-markdown
+cd website
+yarn install
+```
+
+## Local Development
+
+Navigate to the website folder and run:
+
+```console
+pydoc-markdown
+yarn start
+```
+
+This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
diff --git a/docs/babel.config.js b/docs/babel.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..92d391e31ebcc2f53ea66a6f2d2b1ec4737c11b7
--- /dev/null
+++ b/docs/babel.config.js
@@ -0,0 +1,3 @@
+module.exports = {
+ presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
+};
diff --git a/docs/blog/2023-09-22-gpt-researcher/architecture.png b/docs/blog/2023-09-22-gpt-researcher/architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..0ad8847db3f74a4e9b792d8221ea7d4a9e6399fc
--- /dev/null
+++ b/docs/blog/2023-09-22-gpt-researcher/architecture.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93f7c083722105b00dc714d372a1075f4d5770b46fa19551dc2b772738f82d89
+size 143143
diff --git a/docs/blog/2023-09-22-gpt-researcher/index.md b/docs/blog/2023-09-22-gpt-researcher/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..ebd004ec6f6e2fa442e6940ae36457131dec3d36
--- /dev/null
+++ b/docs/blog/2023-09-22-gpt-researcher/index.md
@@ -0,0 +1,88 @@
+---
+slug: building-gpt-researcher
+title: How we built GPT Researcher
+authors: [assafe]
+tags: [gpt-researcher, autonomous-agent, opensource, github]
+---
+
+After [AutoGPT](https://github.com/Significant-Gravitas/AutoGPT) was published, we immediately took it for a spin. The first use case that came to mind was autonomous online research. Forming objective conclusions for manual research tasks can take time, sometimes weeks, to find the right resources and information. Seeing how well AutoGPT created tasks and executed them got me thinking about the great potential of using AI to conduct comprehensive research and what it meant for the future of online research.
+
+But the problem with AutoGPT was that it usually ran into never-ending loops, required human interference for almost every step, constantly lost track of its progress, and almost never actually completed the task.
+
+Nonetheless, the information and context gathered during the research task were lost (such as keeping track of sources), and sometimes hallucinated.
+
+The passion for leveraging AI for online research and the limitations I found put me on a mission to try and solve it while sharing my work with the world. This is when I created [GPT Researcher](https://github.com/assafelovic/gpt-researcher) — an open source autonomous agent for online comprehensive research.
+
+In this article, we will share the steps that guided me toward the proposed solution.
+
+### Moving from infinite loops to deterministic results
+The first step in solving these issues was to seek a more deterministic solution that could ultimately guarantee completing any research task within a fixed time frame, without human interference.
+
+This is when we stumbled upon the recent paper [Plan and Solve](https://arxiv.org/abs/2305.04091). The paper aims to provide a better solution for the challenges stated above. The idea is quite simple and consists of two components: first, devising a plan to divide the entire task into smaller subtasks and then carrying out the subtasks according to the plan.
+
+
+
+As it relates to research, first create an outline of questions to research related to the task, and then deterministically execute an agent for every outline item. This approach eliminates the uncertainty in task completion by breaking the agent steps into a deterministic finite set of tasks. Once all tasks are completed, the agent concludes the research.
+
+Following this strategy has improved the reliability of completing research tasks to 100%. Now the challenge is, how to improve quality and speed?
+
+### Aiming for objective and unbiased results
+The biggest challenge with LLMs is the lack of factuality and unbiased responses caused by hallucinations and out-of-date training sets (GPT is currently trained on datasets from 2021). But the irony is that for research tasks, it is crucial to optimize for these exact two criteria: factuality and bias.
+
+To tackle this challenges, we assumed the following:
+
+- Law of large numbers — More content will lead to less biased results. Especially if gathered properly.
+- Leveraging LLMs for the summarization of factual information can significantly improve the overall better factuality of results.
+
+After experimenting with LLMs for quite some time, we can say that the areas where foundation models excel are in the summarization and rewriting of given content. So, in theory, if LLMs only review given content and summarize and rewrite it, potentially it would reduce hallucinations significantly.
+
+In addition, assuming the given content is unbiased, or at least holds opinions and information from all sides of a topic, the rewritten result would also be unbiased. So how can content be unbiased? The [law of large numbers](https://en.wikipedia.org/wiki/Law_of_large_numbers). In other words, if enough sites that hold relevant information are scraped, the possibility of biased information reduces greatly. So the idea would be to scrape just enough sites together to form an objective opinion on any topic.
+
+Great! Sounds like, for now, we have an idea for how to create both deterministic, factual, and unbiased results. But what about the speed problem?
+
+### Speeding up the research process
+Another issue with AutoGPT is that it works synchronously. The main idea of it is to create a list of tasks and then execute them one by one. So if, let’s say, a research task requires visiting 20 sites, and each site takes around one minute to scrape and summarize, the overall research task would take a minimum of +20 minutes. That’s assuming it ever stops. But what if we could parallelize agent work?
+
+By levering Python libraries such as asyncio, the agent tasks have been optimized to work in parallel, thus significantly reducing the time to research.
+
+```python
+# Create a list to hold the coroutine agent tasks
+tasks = [async_browse(url, query, self.websocket) for url in await new_search_urls]
+
+# Gather the results as they become available
+responses = await asyncio.gather(*tasks, return_exceptions=True)
+```
+
+In the example above, we trigger scraping for all URLs in parallel, and only once all is done, continue with the task. Based on many tests, an average research task takes around three minutes (!!). That’s 85% faster than AutoGPT.
+
+### Finalizing the research report
+Finally, after aggregating as much information as possible about a given research task, the challenge is to write a comprehensive report about it.
+
+After experimenting with several OpenAI models and even open source, I’ve concluded that the best results are currently achieved with GPT-4. The task is straightforward — provide GPT-4 as context with all the aggregated information, and ask it to write a detailed report about it given the original research task.
+
+The prompt is as follows:
+```commandline
+"{research_summary}" Using the above information, answer the following question or topic: "{question}" in a detailed report — The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. Write all source urls at the end of the report in apa format. You should write your report only based on the given information and nothing else.
+```
+
+The results are quite impressive, with some minor hallucinations in very few samples, but it’s fair to assume that as GPT improves over time, results will only get better.
+
+### The final architecture
+Now that we’ve reviewed the necessary steps of GPT Researcher, let’s break down the final architecture, as shown below:
+
+
+
+
+
+More specifically:
+- Generate an outline of research questions that form an objective opinion on any given task.
+- For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task.
+- For each scraped resource, keep track, filter, and summarize only if it includes relevant information.
+- Finally, aggregate all summarized sources and generate a final research report.
+
+### Going forward
+The future of online research automation is heading toward a major disruption. As AI continues to improve, it is only a matter of time before AI agents can perform comprehensive research tasks for any of our day-to-day needs. AI research can disrupt areas of finance, legal, academia, health, and retail, reducing our time for each research by 95% while optimizing for factual and unbiased reports within an influx and overload of ever-growing online information.
+
+Imagine if an AI can eventually understand and analyze any form of online content — videos, images, graphs, tables, reviews, text, audio. And imagine if it could support and analyze hundreds of thousands of words of aggregated information within a single prompt. Even imagine that AI can eventually improve in reasoning and analysis, making it much more suitable for reaching new and innovative research conclusions. And that it can do all that in minutes, if not seconds.
+
+It’s all a matter of time and what [GPT Researcher](https://github.com/assafelovic/gpt-researcher) is all about.
diff --git a/docs/blog/2023-09-22-gpt-researcher/planner.jpeg b/docs/blog/2023-09-22-gpt-researcher/planner.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..2a5d3892ed06e95753562ff652173b7a9bd85163
--- /dev/null
+++ b/docs/blog/2023-09-22-gpt-researcher/planner.jpeg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8686560147e72d08dbdf97fd16fe2934d1621a08c881ceb6bf8fd3652f81a31b
+size 135677
diff --git a/docs/blog/2023-11-12-openai-assistant/diagram-1.png b/docs/blog/2023-11-12-openai-assistant/diagram-1.png
new file mode 100644
index 0000000000000000000000000000000000000000..31a46a5f9bcafed1360350e1a2a855baac0582f2
Binary files /dev/null and b/docs/blog/2023-11-12-openai-assistant/diagram-1.png differ
diff --git a/docs/blog/2023-11-12-openai-assistant/diagram-assistant.jpeg b/docs/blog/2023-11-12-openai-assistant/diagram-assistant.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..f467f5bc75ef46b81a6d4cf804cbd2eb227872f6
Binary files /dev/null and b/docs/blog/2023-11-12-openai-assistant/diagram-assistant.jpeg differ
diff --git a/docs/blog/2023-11-12-openai-assistant/index.md b/docs/blog/2023-11-12-openai-assistant/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..85d48ccda303b3a0b4adbf4e38373b42948ba251
--- /dev/null
+++ b/docs/blog/2023-11-12-openai-assistant/index.md
@@ -0,0 +1,259 @@
+---
+slug: building-openai-assistant
+title: How to build an OpenAI Assistant with Internet access
+authors: [assafe]
+tags: [tavily, search-api, openai, assistant-api]
+---
+
+OpenAI has done it again with a [groundbreaking DevDay](https://openai.com/blog/new-models-and-developer-products-announced-at-devday) showcasing some of the latest improvements to the OpenAI suite of tools, products and services. One major release was the new [Assistants API](https://platform.openai.com/docs/assistants/overview) that makes it easier for developers to build their own assistive AI apps that have goals and can call models and tools.
+
+The new Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling. Although you might expect the Retrieval tool to support online information retrieval (such as search APIs or as ChatGPT plugins), it only supports raw data for now such as text or CSV files.
+
+This blog will demonstrate how to leverage the latest Assistants API with online information using the function calling tool.
+
+To skip the tutorial below, feel free to check out the full [Github Gist here](https://gist.github.com/assafelovic/579822cd42d52d80db1e1c1ff82ffffd).
+
+At a high level, a typical integration of the Assistants API has the following steps:
+
+- Create an [Assistant](https://platform.openai.com/docs/api-reference/assistants/createAssistant) in the API by defining its custom instructions and picking a model. If helpful, enable tools like Code Interpreter, Retrieval, and Function calling.
+- Create a [Thread](https://platform.openai.com/docs/api-reference/threads) when a user starts a conversation.
+- Add [Messages](https://platform.openai.com/docs/api-reference/messages) to the Thread as the user ask questions.
+- [Run](https://platform.openai.com/docs/api-reference/runs) the Assistant on the Thread to trigger responses. This automatically calls the relevant tools.
+
+As you can see below, an Assistant object includes Threads for storing and handling conversation sessions between the assistant and users, and Run for invocation of an Assistant on a Thread.
+
+
+
+Let’s go ahead and implement these steps one by one! For the example, we will build a finance GPT that can provide insights about financial questions. We will use the [OpenAI Python SDK v1.2](https://github.com/openai/openai-python/tree/main#installation) and [Tavily Search API](https://tavily.com).
+
+First things first, let’s define the assistant’s instructions:
+
+```python
+assistant_prompt_instruction = """You are a finance expert.
+Your goal is to provide answers based on information from the internet.
+You must use the provided Tavily search API function to find relevant online information.
+You should never use your own knowledge to answer questions.
+Please include relevant url sources in the end of your answers.
+"""
+```
+Next, let’s finalize step 1 and create an assistant using the latest [GPT-4 Turbo model](https://github.com/openai/openai-python/tree/main#installation) (128K context), and the call function using the [Tavily web search API](https://tavily.com/):
+
+```python
+# Create an assistant
+assistant = client.beta.assistants.create(
+ instructions=assistant_prompt_instruction,
+ model="gpt-4-1106-preview",
+ tools=[{
+ "type": "function",
+ "function": {
+ "name": "tavily_search",
+ "description": "Get information on recent events from the web.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "query": {"type": "string", "description": "The search query to use. For example: 'Latest news on Nvidia stock performance'"},
+ },
+ "required": ["query"]
+ }
+ }
+ }]
+)
+```
+
+Step 2+3 are quite straight forward, we’ll initiate a new thread and update it with a user message:
+
+```python
+thread = client.beta.threads.create()
+user_input = input("You: ")
+message = client.beta.threads.messages.create(
+ thread_id=thread.id,
+ role="user",
+ content=user_input,
+)
+```
+
+Finally, we’ll run the assistant on the thread to trigger the function call and get the response:
+
+```python
+run = client.beta.threads.runs.create(
+ thread_id=thread.id,
+ assistant_id=assistant_id,
+)
+```
+
+So far so good! But this is where it gets a bit messy. Unlike with the regular GPT APIs, the Assistants API doesn’t return a synchronous response, but returns a status. This allows for asynchronous operations across assistants, but requires more overhead for fetching statuses and dealing with each manually.
+
+
+
+To manage this status lifecycle, let’s build a function that can be reused and handles waiting for various statuses (such as ‘requires_action’):
+
+```python
+# Function to wait for a run to complete
+def wait_for_run_completion(thread_id, run_id):
+ while True:
+ time.sleep(1)
+ run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
+ print(f"Current run status: {run.status}")
+ if run.status in ['completed', 'failed', 'requires_action']:
+ return run
+```
+
+This function will sleep as long as the run has not been finalized such as in cases where it’s completed or requires an action from a function call.
+
+We’re almost there! Lastly, let’s take care of when the assistant wants to call the web search API:
+
+```python
+# Function to handle tool output submission
+def submit_tool_outputs(thread_id, run_id, tools_to_call):
+ tool_output_array = []
+ for tool in tools_to_call:
+ output = None
+ tool_call_id = tool.id
+ function_name = tool.function.name
+ function_args = tool.function.arguments
+
+ if function_name == "tavily_search":
+ output = tavily_search(query=json.loads(function_args)["query"])
+
+ if output:
+ tool_output_array.append({"tool_call_id": tool_call_id, "output": output})
+
+ return client.beta.threads.runs.submit_tool_outputs(
+ thread_id=thread_id,
+ run_id=run_id,
+ tool_outputs=tool_output_array
+ )
+```
+
+As seen above, if the assistant has reasoned that a function call should trigger, we extract the given required function params and pass back to the runnable thread. We catch this status and call our functions as seen below:
+
+```python
+if run.status == 'requires_action':
+ run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
+ run = wait_for_run_completion(thread.id, run.id)
+```
+
+That’s it! We now have a working OpenAI Assistant that can be used to answer financial questions using real time online information. Below is the full runnable code:
+
+```python
+import os
+import json
+import time
+from openai import OpenAI
+from tavily import TavilyClient
+
+# Initialize clients with API keys
+client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
+
+assistant_prompt_instruction = """You are a finance expert.
+Your goal is to provide answers based on information from the internet.
+You must use the provided Tavily search API function to find relevant online information.
+You should never use your own knowledge to answer questions.
+Please include relevant url sources in the end of your answers.
+"""
+
+# Function to perform a Tavily search
+def tavily_search(query):
+ search_result = tavily_client.get_search_context(query, search_depth="advanced", max_tokens=8000)
+ return search_result
+
+# Function to wait for a run to complete
+def wait_for_run_completion(thread_id, run_id):
+ while True:
+ time.sleep(1)
+ run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
+ print(f"Current run status: {run.status}")
+ if run.status in ['completed', 'failed', 'requires_action']:
+ return run
+
+# Function to handle tool output submission
+def submit_tool_outputs(thread_id, run_id, tools_to_call):
+ tool_output_array = []
+ for tool in tools_to_call:
+ output = None
+ tool_call_id = tool.id
+ function_name = tool.function.name
+ function_args = tool.function.arguments
+
+ if function_name == "tavily_search":
+ output = tavily_search(query=json.loads(function_args)["query"])
+
+ if output:
+ tool_output_array.append({"tool_call_id": tool_call_id, "output": output})
+
+ return client.beta.threads.runs.submit_tool_outputs(
+ thread_id=thread_id,
+ run_id=run_id,
+ tool_outputs=tool_output_array
+ )
+
+# Function to print messages from a thread
+def print_messages_from_thread(thread_id):
+ messages = client.beta.threads.messages.list(thread_id=thread_id)
+ for msg in messages:
+ print(f"{msg.role}: {msg.content[0].text.value}")
+
+# Create an assistant
+assistant = client.beta.assistants.create(
+ instructions=assistant_prompt_instruction,
+ model="gpt-4-1106-preview",
+ tools=[{
+ "type": "function",
+ "function": {
+ "name": "tavily_search",
+ "description": "Get information on recent events from the web.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "query": {"type": "string", "description": "The search query to use. For example: 'Latest news on Nvidia stock performance'"},
+ },
+ "required": ["query"]
+ }
+ }
+ }]
+)
+assistant_id = assistant.id
+print(f"Assistant ID: {assistant_id}")
+
+# Create a thread
+thread = client.beta.threads.create()
+print(f"Thread: {thread}")
+
+# Ongoing conversation loop
+while True:
+ user_input = input("You: ")
+ if user_input.lower() == 'exit':
+ break
+
+ # Create a message
+ message = client.beta.threads.messages.create(
+ thread_id=thread.id,
+ role="user",
+ content=user_input,
+ )
+
+ # Create a run
+ run = client.beta.threads.runs.create(
+ thread_id=thread.id,
+ assistant_id=assistant_id,
+ )
+ print(f"Run ID: {run.id}")
+
+ # Wait for run to complete
+ run = wait_for_run_completion(thread.id, run.id)
+
+ if run.status == 'failed':
+ print(run.error)
+ continue
+ elif run.status == 'requires_action':
+ run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
+ run = wait_for_run_completion(thread.id, run.id)
+
+ # Print messages from the thread
+ print_messages_from_thread(thread.id)
+```
+
+The assistant can be further customized and improved using additional retrieval information, OpenAI’s coding interpreter and more. Also, you can go ahead and add more function tools to make the assistant even smarter.
+
+Feel free to drop a comment below if you have any further questions!
diff --git a/docs/blog/2024-05-19-gptr-langgraph/architecture.jpeg b/docs/blog/2024-05-19-gptr-langgraph/architecture.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..76f70d482d3bd064d715c5f9133036ba4f5e3d31
Binary files /dev/null and b/docs/blog/2024-05-19-gptr-langgraph/architecture.jpeg differ
diff --git a/docs/blog/2024-05-19-gptr-langgraph/blog-langgraph.jpeg b/docs/blog/2024-05-19-gptr-langgraph/blog-langgraph.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..e07f5f8b627824237297f1e3142268e634263c07
--- /dev/null
+++ b/docs/blog/2024-05-19-gptr-langgraph/blog-langgraph.jpeg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58f12de290d61f39177ef2e9c6f284f7153f6860d2e97136036ff0b94df3b2ad
+size 391989
diff --git a/docs/blog/2024-05-19-gptr-langgraph/index.md b/docs/blog/2024-05-19-gptr-langgraph/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..aa29e196fab96ab9f773000aae4ddeed3f8a1276
--- /dev/null
+++ b/docs/blog/2024-05-19-gptr-langgraph/index.md
@@ -0,0 +1,223 @@
+---
+slug: gptr-langgraph
+title: How to Build the Ultimate Research Multi-Agent Assistant
+authors: [assafe]
+tags: [multi-skills, gpt-researcher, langchain, langgraph]
+---
+
+# Introducing the GPT Researcher Multi-Agent Assistant
+### Learn how to build an autonomous research assistant using LangGraph with a team of specialized AI agents
+
+It has only been a year since the initial release of GPT Researcher, but methods for building, testing, and deploying AI agents have already evolved significantly. That’s just the nature and speed of the current AI progress. What started as simple zero-shot or few-shot prompting, has quickly evolved to agent function calling, RAG and now finally agentic workflows (aka “flow engineering”).
+
+Andrew Ng has [recently stated](https://www.deeplearning.ai/the-batch/how-agents-can-improve-llm-performance/), “I think AI agent workflows will drive massive AI progress this year — perhaps even more than the next generation of foundation models. This is an important trend, and I urge everyone who works in AI to pay attention to it.”
+
+In this article you will learn why multi-agent workflows are the current best standard and how to build the optimal autonomous research multi-agent assistant using LangGraph.
+
+To skip this tutorial, feel free to check out the Github repo of [GPT Researcher x LangGraph](https://github.com/assafelovic/gpt-researcher/tree/master/multi_agents).
+
+## Introducing LangGraph
+LangGraph is an extension of LangChain aimed at creating agent and multi-agent flows. It adds in the ability to create cyclical flows and comes with memory built in — both important attributes for creating agents.
+
+LangGraph provides developers with a high degree of controllability and is important for creating custom agents and flows. Nearly all agents in production are customized towards the specific use case they are trying solve. LangGraph gives you the flexibility to create arbitrary customized agents, while providing an intuitive developer experience for doing so.
+
+Enough with the smalltalk, let’s start building!
+
+## Building the Ultimate Autonomous Research Agent
+By leveraging LangGraph, the research process can be significantly improved in depth and quality by leveraging multiple agents with specialized skills. Having every agent focus and specialize only a specific skill, allows for better separation of concerns, customizability, and further development at scale as the project grows.
+
+Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication. This example will also leverage the leading autonomous research agent GPT Researcher.
+
+### The Research Agent Team
+The research team consists of seven LLM agents:
+
+* **Chief Editor** — Oversees the research process and manages the team. This is the “master” agent that coordinates the other agents using LangGraph. This agent acts as the main LangGraph interface.
+* **GPT Researcher** — A specialized autonomous agent that conducts in depth research on a given topic.
+* **Editor** — Responsible for planning the research outline and structure.
+* **Reviewer** — Validates the correctness of the research results given a set of criteria.
+* **Reviser** — Revises the research results based on the feedback from the reviewer.
+* **Writer** — Responsible for compiling and writing the final report.
+* **Publisher** — Responsible for publishing the final report in various formats.
+
+### Architecture
+As seen below, the automation process is based on the following stages: Planning the research, data collection and analysis, review and revision, writing the report and finally publication:
+
+
+
+More specifically the process is as follows:
+
+* **Browser (gpt-researcher)** — Browses the internet for initial research based on the given research task. This step is crucial for LLMs to plan the research process based on up to date and relevant information, and not rely solely on pre-trained data for a given task or topic.
+* **Editor** — Plans the report outline and structure based on the initial research. The Editor is also responsible for triggering the parallel research tasks based on the planned outline.
+* For each outline topic (in parallel):
+ * **Researcher (gpt-researcher)** — Runs an in depth research on the subtopics and writes a draft. This agent leverages the GPT Researcher Python package under the hood, for optimized, in depth and factual research report.
+ * **Reviewer** — Validates the correctness of the draft given a set of guidelines and provides feedback to the reviser (if any).
+ * **Reviser** — Revises the draft until it is satisfactory based on the reviewer feedback.
+* **Writer** — Compiles and writes the final report including an introduction, conclusion and references section from the given research findings.
+* **Publisher** — Publishes the final report to multi formats such as PDF, Docx, Markdown, etc.
+
+* We will not dive into all the code since there’s a lot of it, but focus mostly on the interesting parts I’ve found valuable to share.
+
+## Define the Graph State
+One of my favorite features with LangGraph is state management. States in LangGraph are facilitated through a structured approach where developers define a GraphState that encapsulates the entire state of the application. Each node in the graph can modify this state, allowing for dynamic responses based on the evolving context of the interaction.
+
+Like in every start of a technical design, considering the data schema throughout the application is key. In this case we’ll define a ResearchState like so:
+
+```python
+class ResearchState(TypedDict):
+ task: dict
+ initial_research: str
+ sections: List[str]
+ research_data: List[dict]
+ # Report layout
+ title: str
+ headers: dict
+ date: str
+ table_of_contents: str
+ introduction: str
+ conclusion: str
+ sources: List[str]
+ report: str
+```
+
+As seen above, the state is divided into two main areas: the research task and the report layout content. As data circulates through the graph agents, each agent will, in turn, generate new data based on the existing state and update it for subsequent processing further down the graph with other agents.
+
+We can then initialize the graph with the following:
+
+
+```python
+from langgraph.graph import StateGraph
+workflow = StateGraph(ResearchState)
+```
+
+Initializing the graph with LangGraph
+As stated above, one of the great things about multi-agent development is building each agent to have specialized and scoped skills. Let’s take an example of the Researcher agent using GPT Researcher python package:
+
+```python
+from gpt_researcher import GPTResearcher
+
+class ResearchAgent:
+ def __init__(self):
+ pass
+
+ async def research(self, query: str):
+ # Initialize the researcher
+ researcher = GPTResearcher(parent_query=parent_query, query=query, report_type=research_report, config_path=None)
+ # Conduct research on the given query
+ await researcher.conduct_research()
+ # Write the report
+ report = await researcher.write_report()
+
+ return report
+```
+
+As you can see above, we’ve created an instance of the Research agent. Now let’s assume we’ve done the same for each of the team’s agent. After creating all of the agents, we’d initialize the graph with LangGraph:
+
+```python
+def init_research_team(self):
+ # Initialize skills
+ editor_agent = EditorAgent(self.task)
+ research_agent = ResearchAgent()
+ writer_agent = WriterAgent()
+ publisher_agent = PublisherAgent(self.output_dir)
+
+ # Define a Langchain StateGraph with the ResearchState
+ workflow = StateGraph(ResearchState)
+
+ # Add nodes for each agent
+ workflow.add_node("browser", research_agent.run_initial_research)
+ workflow.add_node("planner", editor_agent.plan_research)
+ workflow.add_node("researcher", editor_agent.run_parallel_research)
+ workflow.add_node("writer", writer_agent.run)
+ workflow.add_node("publisher", publisher_agent.run)
+
+ workflow.add_edge('browser', 'planner')
+ workflow.add_edge('planner', 'researcher')
+ workflow.add_edge('researcher', 'writer')
+ workflow.add_edge('writer', 'publisher')
+
+ # set up start and end nodes
+ workflow.set_entry_point("browser")
+ workflow.add_edge('publisher', END)
+
+ return workflow
+```
+
+As seen above, creating the LangGraph graph is very straight forward and consists of three main functions: add_node, add_edge and set_entry_point. With these main functions you can first add the nodes to the graph, connect the edges and finally set the starting point.
+
+Focus check: If you’ve been following the code and architecture properly, you’ll notice that the Reviewer and Reviser agents are missing in the initialization above. Let’s dive into it!
+
+## A Graph within a Graph to support stateful Parallelization
+This was the most exciting part of my experience working with LangGraph! One exciting feature of this autonomous assistant is having a parallel run for each research task, that would be reviewed and revised based on a set of predefined guidelines.
+
+Knowing how to leverage parallel work within a process is key for optimizing speed. But how would you trigger parallel agent work if all agents report to the same state? This can cause race conditions and inconsistencies in the final data report. To solve this, you can create a sub graph, that would be triggered from the main LangGraph instance. This sub graph would hold its own state for each parallel run, and that would solve the issues that were raised.
+
+As we’ve done before, let’s define the LangGraph state and its agents. Since this sub graph basically reviews and revises a research draft, we’ll define the state with draft information:
+
+```python
+class DraftState(TypedDict):
+ task: dict
+ topic: str
+ draft: dict
+ review: str
+ revision_notes: str
+```
+
+As seen in the DraftState, we mostly care about the topic discussed, and the reviewer and revision notes as they communicate between each other to finalize the subtopic research report. To create the circular condition we’ll take advantage of the last important piece of LangGraph which is conditional edges:
+
+```python
+async def run_parallel_research(self, research_state: dict):
+ workflow = StateGraph(DraftState)
+
+ workflow.add_node("researcher", research_agent.run_depth_research)
+ workflow.add_node("reviewer", reviewer_agent.run)
+ workflow.add_node("reviser", reviser_agent.run)
+
+ # set up edges researcher->reviewer->reviser->reviewer...
+ workflow.set_entry_point("researcher")
+ workflow.add_edge('researcher', 'reviewer')
+ workflow.add_edge('reviser', 'reviewer')
+ workflow.add_conditional_edges('reviewer',
+ (lambda draft: "accept" if draft['review'] is None else "revise"),
+ {"accept": END, "revise": "reviser"})
+```
+
+By defining the conditional edges, the graph would direct to reviser if there exists review notes by the reviewer, or the cycle would end with the final draft. If you go back to the main graph we’ve built, you’ll see that this parallel work is under a node named “researcher” called by ChiefEditor agent.
+
+Running the Research Assistant
+After finalizing the agents, states and graphs, it’s time to run our research assistant! To make it easier to customize, the assistant runs with a given task.json file:
+
+```json
+{
+ "query": "Is AI in a hype cycle?",
+ "max_sections": 3,
+ "publish_formats": {
+ "markdown": true,
+ "pdf": true,
+ "docx": true
+ },
+ "follow_guidelines": false,
+ "model": "gpt-4-turbo",
+ "guidelines": [
+ "The report MUST be written in APA format",
+ "Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section",
+ "The report MUST be written in spanish"
+ ]
+}
+```
+
+The task object is pretty self explanatory, however please notice that follow_guidelines if false would cause the graph to ignore the revision step and defined guidelines. Also, the max_sections field defines how many subheaders to research for. Having less will generate a shorter report.
+
+Running the assistant will result in a final research report in formats such as Markdown, PDF and Docx.
+
+To download and run the example check out the GPT Researcher x LangGraph [open source page](https://github.com/assafelovic/gpt-researcher/tree/master/multi_agents).
+
+## What’s Next?
+Going forward, there are super exciting things to think about. Human in the loop is key for optimized AI experiences. Having a human help the assistant revise and focus on just the right research plan, topics and outline, would enhance the overall quality and experience. Also generally, aiming for relying on human intervention throughout the AI flow ensures correctness, sense of control and deterministic results. Happy to see that LangGraph already supports this out of the box as seen here.
+
+In addition, having support for research about both web and local data would be key for many types of business and personal use cases.
+
+Lastly, more efforts can be done to improve the quality of retrieved sources and making sure the final report is built in the optimal storyline.
+
+A step forward in LangGraph and multi-agent collaboration in a whole would be where assistants can plan and generate graphs dynamically based on given tasks. This vision would allow assistants to choose only a subset of agents for a given task and plan their strategy based on the graph fundamentals as presented in this article and open a whole new world of possibilities. Given the pace of innovation in the AI space, it won’t be long before a new disruptive version of GPT Researcher is launched. Looking forward to what the future brings!
+
+To keep track of this project’s ongoing progress and updates please join our Discord community. And as always, if you have any feedback or further questions, please comment below!
\ No newline at end of file
diff --git a/docs/blog/2024-09-7-hybrid-research/gptr-hybrid.png b/docs/blog/2024-09-7-hybrid-research/gptr-hybrid.png
new file mode 100644
index 0000000000000000000000000000000000000000..6f61df534085d6b1c284b90837e1839566928321
--- /dev/null
+++ b/docs/blog/2024-09-7-hybrid-research/gptr-hybrid.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5aa8651fbac012c82ac12d1009ef10a170ee1a356b2a7898858f920a2f6b17a
+size 194577
diff --git a/docs/blog/2024-09-7-hybrid-research/index.md b/docs/blog/2024-09-7-hybrid-research/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..6374d50b1bec0ef59dd424ccdb246636dcabe984
--- /dev/null
+++ b/docs/blog/2024-09-7-hybrid-research/index.md
@@ -0,0 +1,182 @@
+---
+slug: gptr-hybrid
+title: The Future of Research is Hybrid
+authors: [assafe]
+tags: [hybrid-research, gpt-researcher, langchain, langgraph, tavily]
+image: https://miro.medium.com/v2/resize:fit:1400/1*NgVIlZVSePqrK5EkB1wu4Q.png
+---
+
+
+Over the past few years, we've seen an explosion of new AI tools designed to disrupt research. Some, like [ChatPDF](https://www.chatpdf.com/) and [Consensus](https://consensus.app), focus on extracting insights from documents. Others, such as [Perplexity](https://www.perplexity.ai/), excel at scouring the web for information. But here's the thing: none of these tools combine both web and local document search within a single contextual research pipeline.
+
+This is why I'm excited to introduce the latest advancements of **[GPT Researcher](https://gptr.dev)** — now able to conduct hybrid research on any given task and documents.
+
+Web driven research often lacks specific context, risks information overload, and may include outdated or unreliable data. On the flip side, local driven research is limited to historical data and existing knowledge, potentially creating organizational echo chambers and missing out on crucial market trends or competitor moves. Both approaches, when used in isolation, can lead to incomplete or biased insights, hampering your ability to make fully informed decisions.
+
+Today, we're going to change the game. By the end of this guide, you'll learn how to conduct hybrid research that combines the best of both worlds — web and local — enabling you to conduct more thorough, relevant, and insightful research.
+
+## Why Hybrid Research Works Better
+
+By combining web and local sources, hybrid research addresses these limitations and offers several key advantages:
+
+1. **Grounded context**: Local documents provide a foundation of verified, organization specific information. This grounds the research in established knowledge, reducing the risk of straying from core concepts or misinterpreting industry specific terminology.
+
+ *Example*: A pharmaceutical company researching a new drug development opportunity can use its internal research papers and clinical trial data as a base, then supplement this with the latest published studies and regulatory updates from the web.
+
+2. **Enhanced accuracy**: Web sources offer up-to-date information, while local documents provide historical context. This combination allows for more accurate trend analysis and decision-making.
+
+ *Example*: A financial services firm analyzing market trends can combine their historical trading data with real-time market news and social media sentiment analysis to make more informed investment decisions.
+
+3. **Reduced bias**: By drawing from both web and local sources, we mitigate the risk of bias that might be present in either source alone.
+
+ *Example*: A tech company evaluating its product roadmap can balance internal feature requests and usage data with external customer reviews and competitor analysis, ensuring a well-rounded perspective.
+
+4. **Improved planning and reasoning**: LLMs can leverage the context from local documents to better plan their web research strategies and reason about the information they find online.
+
+ *Example*: An AI-powered market research tool can use a company's past campaign data to guide its web search for current marketing trends, resulting in more relevant and actionable insights.
+
+5. **Customized insights**: Hybrid research allows for the integration of proprietary information with public data, leading to unique, organization-specific insights.
+
+ *Example*: A retail chain can combine its sales data with web-scraped competitor pricing and economic indicators to optimize its pricing strategy in different regions.
+
+These are just a few examples for business use cases that can leverage hybrid research, but enough with the small talk — let's build!
+
+## Building the Hybrid Research Assistant
+
+Before we dive into the details, it's worth noting that GPT Researcher has the capability to conduct hybrid research out of the box! However, to truly appreciate how this works and to give you a deeper understanding of the process, we're going to take a look under the hood.
+
+
+
+GPT Researcher conducts web research based on an auto-generated plan from local documents, as seen in the architecture above. It then retrieves relevant information from both local and web data for the final research report.
+
+We'll explore how local documents are processed using LangChain, which is a key component of GPT Researcher's document handling. Then, we'll show you how to leverage GPT Researcher to conduct hybrid research, combining the advantages of web search with your local document knowledge base.
+
+### Processing Local Documents with Langchain
+
+LangChain provides a variety of document loaders that allow us to process different file types. This flexibility is crucial when dealing with diverse local documents. Here's how to set it up:
+
+```python
+from langchain_community.document_loaders import (
+ PyMuPDFLoader,
+ TextLoader,
+ UnstructuredCSVLoader,
+ UnstructuredExcelLoader,
+ UnstructuredMarkdownLoader,
+ UnstructuredPowerPointLoader,
+ UnstructuredWordDocumentLoader
+)
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.embeddings import OpenAIEmbeddings
+from langchain.vectorstores import Chroma
+
+def load_local_documents(file_paths):
+ documents = []
+ for file_path in file_paths:
+ if file_path.endswith('.pdf'):
+ loader = PyMuPDFLoader(file_path)
+ elif file_path.endswith('.txt'):
+ loader = TextLoader(file_path)
+ elif file_path.endswith('.csv'):
+ loader = UnstructuredCSVLoader(file_path)
+ elif file_path.endswith('.xlsx'):
+ loader = UnstructuredExcelLoader(file_path)
+ elif file_path.endswith('.md'):
+ loader = UnstructuredMarkdownLoader(file_path)
+ elif file_path.endswith('.pptx'):
+ loader = UnstructuredPowerPointLoader(file_path)
+ elif file_path.endswith('.docx'):
+ loader = UnstructuredWordDocumentLoader(file_path)
+ else:
+ raise ValueError(f"Unsupported file type: {file_path}")
+
+ documents.extend(loader.load())
+
+ return documents
+
+# Use the function to load your local documents
+local_docs = load_local_documents(['company_report.pdf', 'meeting_notes.docx', 'data.csv'])
+
+# Split the documents into smaller chunks for more efficient processing
+text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+splits = text_splitter.split_documents(local_docs)
+
+# Create embeddings and store them in a vector database for quick retrieval
+embeddings = OpenAIEmbeddings()
+vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
+
+# Example of how to perform a similarity search
+query = "What were the key points from our last strategy meeting?"
+relevant_docs = vectorstore.similarity_search(query, k=3)
+
+for doc in relevant_docs:
+ print(doc.page_content)
+```
+
+### Conducting Web Research with GPT Researcher
+
+Now that we've learned how to work with local documents, let's take a quick look at how GPT Researcher works under the hood:
+
+
+
+As seen above, GPT Researcher creates a research plan based on the given task by generating potential research queries that can collectively provide an objective and broad overview of the topic. Once these queries are generated, GPT Researcher uses a search engine like Tavily to find relevant results. Each scraped result is then saved in a vector database. Finally, the top k chunks most related to the research task are retrieved to generate a final research report.
+
+GPT Researcher supports hybrid research, which involves an additional step of chunking local documents (implemented using Langchain) before retrieving the most related information. After numerous evaluations conducted by the community, we've found that hybrid research improved the correctness of final results by over 40%!
+
+### Running the Hybrid Research with GPT Researcher
+
+Now that you have a better understanding of how hybrid research works, let's demonstrate how easy this can be achieved with GPT Researcher.
+
+#### Step 1: Install GPT Researcher with PIP
+
+```bash
+pip install gpt-researcher
+```
+
+#### Step 2: Setting up the environment
+
+We will run GPT Researcher with OpenAI as the LLM vendor and Tavily as the search engine. You'll need to obtain API keys for both before moving forward. Then, export the environment variables in your CLI as follows:
+
+```bash
+export OPENAI_API_KEY={your-openai-key}
+export TAVILY_API_KEY={your-tavily-key}
+```
+
+#### Step 3: Initialize GPT Researcher with hybrid research configuration
+
+GPT Researcher can be easily initialized with params that signal it to run a hybrid research. You can conduct many forms of research, head to the documentation page to learn more.
+
+To get GPT Researcher to run a hybrid research, you need to include all relevant files in my-docs directory (create it if it doesn't exist), and set the instance report_source to "hybrid" as seen below. Once the report source is set to hybrid, GPT Researcher will look for existing documents in the my-docs directory and include them in the research. If no documents exist, it will ignore it.
+
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+async def get_research_report(query: str, report_type: str, report_source: str) -> str:
+ researcher = GPTResearcher(query=query, report_type=report_type, report_source=report_source)
+ research = await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report
+
+if __name__ == "__main__":
+ query = "How does our product roadmap compare to emerging market trends in our industry?"
+ report_source = "hybrid"
+
+ report = asyncio.run(get_research_report(query=query, report_type="research_report", report_source=report_source))
+ print(report)
+```
+
+As seen above, we can run the research on the following example:
+
+- Research task: "How does our product roadmap compare to emerging market trends in our industry?"
+- Web: Current market trends, competitor announcements, and industry forecasts
+- Local: Internal product roadmap documents and feature prioritization lists
+
+After various community evaluations we've found that the results of this research improve quality and correctness of research by over 40% and remove hallucinations by 50%. Moreover as stated above, local information helps the LLM improve planning reasoning allowing it to make better decisions and researching more relevant web sources.
+
+But wait, there's more! GPT Researcher also includes a sleek front-end app using NextJS and Tailwind. To learn how to get it running check out the documentation page. You can easily use drag and drop for documents to run hybrid research.
+
+## Conclusion
+
+Hybrid research represents a significant advancement in data gathering and decision making. By leveraging tools like [GPT Researcher](https://gptr.dev), teams can now conduct more comprehensive, context-aware, and actionable research. This approach addresses the limitations of using web or local sources in isolation, offering benefits such as grounded context, enhanced accuracy, reduced bias, improved planning and reasoning, and customized insights.
+
+The automation of hybrid research can enable teams to make faster, more data-driven decisions, ultimately enhancing productivity and offering a competitive advantage in analyzing an expanding pool of unstructured and dynamic information.
\ No newline at end of file
diff --git a/docs/blog/authors.yml b/docs/blog/authors.yml
new file mode 100644
index 0000000000000000000000000000000000000000..846ba2f00962f641fd09c6e389c8a7ee684888f7
--- /dev/null
+++ b/docs/blog/authors.yml
@@ -0,0 +1,5 @@
+assafe:
+ name: Assaf Elovic
+ title: Creator @ GPT Researcher and Tavily
+ url: https://github.com/assafelovic
+ image_url: https://lh3.googleusercontent.com/a/ACg8ocJtrLku69VG_2Y0sJa5mt66gIGNaEBX5r_mgE6CRPEb7A=s96-c
diff --git a/docs/docs/contribute.md b/docs/docs/contribute.md
new file mode 100644
index 0000000000000000000000000000000000000000..fa9c9f14049ce646b76ad750a0c3e3196ff1b5f0
--- /dev/null
+++ b/docs/docs/contribute.md
@@ -0,0 +1,5 @@
+# Contribute
+
+We highly welcome contributions! Please check out [contributing](https://github.com/assafelovic/gpt-researcher/blob/master/CONTRIBUTING.md) if you're interested.
+
+Please check out our [roadmap](https://trello.com/b/3O7KBePw/gpt-researcher-roadmap) page and reach out to us via our [Discord community](https://discord.gg/QgZXvJAccX) if you're interested in joining our mission.
\ No newline at end of file
diff --git a/docs/docs/examples/detailed_report.md b/docs/docs/examples/detailed_report.md
new file mode 100644
index 0000000000000000000000000000000000000000..e4e86eced0337c5c00f86c9112a1f236dc3d5d73
--- /dev/null
+++ b/docs/docs/examples/detailed_report.md
@@ -0,0 +1,82 @@
+# Detailed Report
+
+## Overview
+
+The `DetailedReport` class inspired by the recent STORM paper, is a powerful component of GPT Researcher, designed to generate comprehensive reports on complex topics. It's particularly useful for creating long-form content that exceeds the typical limits of LLM outputs. This class orchestrates the research process, breaking down the main query into subtopics, conducting in-depth research on each, and combining the results into a cohesive, detailed report.
+
+Located in `backend/report_types/detailed_report.py` in the [GPT Researcher GitHub repository](https://github.com/assafelovic/gpt-researcher), this class leverages the capabilities of the `GPTResearcher` agent to perform targeted research and generate content.
+
+## Key Features
+
+- Breaks down complex topics into manageable subtopics
+- Conducts in-depth research on each subtopic
+- Generates a comprehensive report with introduction, table of contents, and body
+- Avoids redundancy by tracking previously written content
+- Supports asynchronous operations for improved performance
+
+## Class Structure
+
+### Initialization
+
+The `DetailedReport` class is initialized with the following parameters:
+
+- `query`: The main research query
+- `report_type`: Type of the report
+- `report_source`: Source of the report
+- `source_urls`: Initial list of source URLs
+- `config_path`: Path to the configuration file
+- `tone`: Tone of the report (using the `Tone` enum)
+- `websocket`: WebSocket for real-time communication
+- `subtopics`: Optional list of predefined subtopics
+- `headers`: Optional headers for HTTP requests
+
+## How It Works
+
+1. The `DetailedReport` class starts by conducting initial research on the main query.
+2. It then breaks down the topic into subtopics.
+3. For each subtopic, it:
+ - Conducts focused research
+ - Generates draft section titles
+ - Retrieves relevant previously written content to avoid redundancy
+ - Writes a report section
+4. Finally, it combines all subtopic reports, adds a table of contents, and includes source references to create the final detailed report.
+
+## Usage Example
+
+Here's how you can use the `DetailedReport` class in your project:
+
+```python
+import asyncio
+from fastapi import WebSocket
+from gpt_researcher.utils.enum import Tone
+from backend.report_type import DetailedReport
+
+async def generate_report(websocket: WebSocket):
+ detailed_report = DetailedReport(
+ query="The impact of artificial intelligence on modern healthcare",
+ report_type="research_report",
+ report_source="web_search",
+ source_urls=[], # You can provide initial source URLs if available
+ config_path="path/to/config.yaml",
+ tone=Tone.FORMAL,
+ websocket=websocket,
+ subtopics=[], # You can provide predefined subtopics if desired
+ headers={} # Add any necessary HTTP headers
+ )
+
+ final_report = await detailed_report.run()
+ return final_report
+
+# In your FastAPI app
+@app.websocket("/generate_report")
+async def websocket_endpoint(websocket: WebSocket):
+ await websocket.accept()
+ report = await generate_report(websocket)
+ await websocket.send_text(report)
+```
+
+This example demonstrates how to create a `DetailedReport` instance and run it to generate a comprehensive report on the impact of AI on healthcare.
+
+## Conclusion
+
+The `DetailedReport` class is a sophisticated tool for generating in-depth, well-structured reports on complex topics. By breaking down the main query into subtopics and leveraging the power of GPT Researcher, it can produce content that goes beyond the typical limitations of LLM outputs. This makes it an invaluable asset for researchers, content creators, and anyone needing detailed, well-researched information on a given topic.
\ No newline at end of file
diff --git a/docs/docs/examples/examples.ipynb b/docs/docs/examples/examples.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..fdcd33a48e2fba6b305ea42e8a7b6007b69bf907
--- /dev/null
+++ b/docs/docs/examples/examples.ipynb
@@ -0,0 +1,261 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "6ab73899",
+ "metadata": {},
+ "source": [
+ "# Tavily Samples"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "013eda36",
+ "metadata": {},
+ "source": [
+ "## Setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8ad25551",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2023-11-08T15:57:13.339729Z",
+ "start_time": "2023-11-08T15:57:11.156595Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# install tavily\n",
+ "!pip install tavily-python"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "c0722950",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2023-11-08T16:01:01.318977Z",
+ "start_time": "2023-11-08T16:01:01.314688Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# import and connect\n",
+ "from tavily import TavilyClient\n",
+ "client = TavilyClient(api_key=\"\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "9328a188",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2023-11-08T16:02:25.587726Z",
+ "start_time": "2023-11-08T16:02:18.663961Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'query': 'What happend in the latest burning man floods?',\n",
+ " 'follow_up_questions': ['How severe were the floods at Burning Man?',\n",
+ " 'What were the impacts of the floods?',\n",
+ " 'How did the organizers handle the floods at Burning Man?'],\n",
+ " 'answer': None,\n",
+ " 'images': None,\n",
+ " 'results': [{'content': \"This year’s rains opened the floodgates for Burning Man criticism Give Newsletters Site search Vox main menu Filed under: The Burning Man flameout, explained Climate change — and schadenfreude\\xa0— finally caught up to the survivalist cosplayers. Share this story Share Has Burning Man finally lost its glamour? September 1, after most of the scheduled events and live performances were canceled due to the weather, Burning Man organizers closed routes in and out of the area, forcing attendees to stay behindShare Attendees look at a rainbow over flooding on a desert plain on September 1, 2023, after heavy rains turned the annual Burning Man festival site in Nevada's Black Rock desert into a mud...\",\n",
+ " 'url': 'https://www.vox.com/culture/2023/9/6/23861675/burning-man-2023-mud-stranded-climate-change-playa-foot',\n",
+ " 'score': 0.9797,\n",
+ " 'raw_content': None},\n",
+ " {'content': 'Tens of thousands of Burning Man festivalgoers are slowly making their way home from the Nevada desert after muddy conditions from heavy rains made it nearly impossible to leave over the weekend. according to burningman.org. Though the death at this year\\'s Burning Man is still being investigated, a social media hoax was blamed for spreading rumors that it\\'s due to a breakout of Ebola. \"Thank goodness this community knows how to take care of each other,\" the Instagram page for Burning Man Information Radio wrote on a post predicting more rain.News Burning Man attendees make mass exodus after being stranded in the mud at festival A caravan of festivalgoers were backed up as much as eight hours when they were finally allowed to leave...',\n",
+ " 'url': 'https://www.today.com/news/what-is-burning-man-flood-death-rcna103231',\n",
+ " 'score': 0.9691,\n",
+ " 'raw_content': None},\n",
+ " {'content': '“It was a perfect, typical Burning Man weather until Friday — then the rain started coming down hard,\" said Phillip Martin, 37. \"Then it turned into Mud Fest.\" After more than a half-inch (1.3 centimeters) of rain fell Friday, flooding turned the playa to foot-deep mud — closing roads and forcing burners to lean on each other for help. ABC News Video Live Shows Election 2024 538 Stream on No longer stranded, tens of thousands clean up and head home after Burning Man floods Mark Fromson, 54, who goes by the name “Stuffy” on the playa, had been staying in an RV, but the rains forced him to find shelter at another camp, where fellow burners provided him food and cover.RENO, Nev. -- The traffic jam leaving the Burning Man festival eased up considerably Tuesday as the exodus from the mud-caked Nevada desert entered another day following massive rain that left tens of thousands of partygoers stranded for days.',\n",
+ " 'url': 'https://abcnews.go.com/US/wireStory/wait-times-exit-burning-man-drop-after-flooding-102936473',\n",
+ " 'score': 0.9648,\n",
+ " 'raw_content': None},\n",
+ " {'content': 'Burning Man hit by heavy rains, now mud soaked.People there told to conserve food and water as they shelter in place.(Video: Josh Keppel) pic.twitter.com/DuBj0Ejtb8 More on this story Burning Man revelers begin exodus from festival after road reopens Officials investigate death at Burning Man as thousands stranded by floods Burning Man festival-goers trapped in desert as rain turns site to mud Tens of thousands of ‘burners’ urged to conserve food and water as rain and flash floods sweep Nevada Burning Man festivalgoers surrounded by mud in Nevada desert – video Burning Man attendees roadblocked by climate activists: ‘They have a privileged mindset’Last year, Burning Man drew approximately 80,000 people. This year, only about 60,000 were expected - with many citing the usual heat and dust and eight-hour traffic jams when they tried to leave.',\n",
+ " 'url': 'https://www.theguardian.com/culture/2023/sep/02/burning-man-festival-mud-trapped-shelter-in-place',\n",
+ " 'score': 0.9618,\n",
+ " 'raw_content': None},\n",
+ " {'content': 'Skip links Live Navigation menu Live Death at Burning Man investigated in US, thousands stranded by flooding Attendees trudged through mud, many barefoot or wearing plastic bags on their feet. The revellers were urged to shelter in place and conserve food, water and other supplies. Thousands of festivalgoers remain stranded as organisers close vehicular traffic to the festival site following storm flooding in Nevada’s desert. Authorities in Nevada are investigating a death at the site of the Burning Man festival, where thousands of attendees remained stranded after flooding from storms swept through the Nevada desert in3 Sep 2023. Authorities in Nevada are investigating a death at the site of the Burning Man festival, where thousands of attendees remained stranded after flooding from storms swept through the ...',\n",
+ " 'url': 'https://www.aljazeera.com/news/2023/9/3/death-under-investigation-after-storm-flooding-at-burning-man-festival',\n",
+ " 'score': 0.9612,\n",
+ " 'raw_content': None}],\n",
+ " 'response_time': 6.23}"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# simple query using tavily's advanced search\n",
+ "client.search(\"What happend in the latest burning man floods?\", search_depth=\"advanced\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e98ea835",
+ "metadata": {},
+ "source": [
+ "## Sample 1: Reseach Report using Tavily and GPT-4 with Langchain"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b7b05128",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# install lanchain\n",
+ "!pip install langchain"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "b2246f61",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2023-11-08T16:57:59.797466Z",
+ "start_time": "2023-11-08T16:57:59.793194Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# set up openai api key\n",
+ "openai_api_key = \"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "c574f1b8",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2023-11-08T16:59:03.572367Z",
+ "start_time": "2023-11-08T16:58:01.823114Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "# The Burning Man Festival 2023: A Festival Turned Mud Fest\n",
+ "\n",
+ "**Abstract:** The Burning Man Festival of 2023 in Nevada’s Black Rock desert will be remembered for a significant event: a heavy rainfall that turned the festival site into a muddy mess, testing the community spirit of the annual event attendees and stranding tens of thousands of festival-goers. \n",
+ "\n",
+ "**Keywords:** Burning Man Festival, flooding, rainfall, mud, community spirit, Nevada, Black Rock desert, stranded attendees, shelter\n",
+ "\n",
+ "---\n",
+ "## 1. Introduction\n",
+ "\n",
+ "The Burning Man Festival, an annual event known for its art installations, free spirit, and community ethos, faced an unprecedented challenge in 2023 due to heavy rains that flooded the festival site, turning it into a foot-deep mud pit[^1^][^2^]. The festival, held in Nevada's Black Rock desert, is known for its harsh weather conditions, including heat and dust, but this was the first time the event was affected to such an extent by rainfall[^4^].\n",
+ "\n",
+ "## 2. Impact of the Rain\n",
+ "\n",
+ "The heavy rains started on Friday, and more than a half-inch of rain fell, leading to flooding that turned the playa into a foot-deep mud pit[^2^]. The roads were closed due to the muddy conditions, stranding tens of thousands of festival-goers[^2^][^5^]. The burners, as the attendees are known, were forced to lean on each other for help[^2^].\n",
+ "\n",
+ "## 3. Community Spirit Tested\n",
+ "\n",
+ "The unexpected weather conditions put the Burning Man community spirit to the test[^1^]. Festival-goers found themselves sheltering in place, conserving food and water, and helping each other out[^3^]. For instance, Mark Fromson, who had been staying in an RV, was forced to find shelter at another camp due to the rains, where fellow burners provided him with food and cover[^2^].\n",
+ "\n",
+ "## 4. Exodus After Rain\n",
+ "\n",
+ "Despite the challenges, the festival-goers made the best of the situation. Once the rain stopped and things dried up a bit, the party quickly resumed[^3^]. A day later than scheduled, the massive wooden effigy known as the Man was set ablaze[^5^]. As the situation improved, thousands of Burning Man attendees began their mass exodus from the festival site[^5^].\n",
+ "\n",
+ "## 5. Conclusion\n",
+ "\n",
+ "The Burning Man Festival of 2023 will be remembered for the community spirit shown by the attendees in the face of heavy rainfall and flooding. Although the event was marred by the weather, the festival-goers managed to make the best of the situation, demonstrating the resilience and camaraderie that the Burning Man Festival is known for.\n",
+ "\n",
+ "---\n",
+ "**References**\n",
+ "\n",
+ "[^1^]: \"Attendees walk through a muddy desert plain...\" NPR. 2023. https://www.npr.org/2023/09/02/1197441202/burning-man-festival-rains-floods-stranded-nevada.\n",
+ "\n",
+ "[^2^]: “'It was a perfect, typical Burning Man weather until Friday...'\" ABC News. 2023. https://abcnews.go.com/US/wireStory/wait-times-exit-burning-man-drop-after-flooding-102936473.\n",
+ "\n",
+ "[^3^]: \"The latest on the Burning Man flooding...\" WUNC. 2023. https://www.wunc.org/2023-09-03/the-latest-on-the-burning-man-flooding.\n",
+ "\n",
+ "[^4^]: \"Burning Man hit by heavy rains, now mud soaked...\" The Guardian. 2023. https://www.theguardian.com/culture/2023/sep/02/burning-man-festival-mud-trapped-shelter-in-place.\n",
+ "\n",
+ "[^5^]: \"One day later than scheduled, the massive wooden effigy known as the Man was set ablaze...\" CNN. 2023. https://www.cnn.com/2023/09/05/us/burning-man-storms-shelter-exodus-tuesday/index.html.\n"
+ ]
+ }
+ ],
+ "source": [
+ "# libraries\n",
+ "from langchain.adapters.openai import convert_openai_messages\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
+ "\n",
+ "# setup query\n",
+ "query = \"What happend in the latest burning man floods?\"\n",
+ "\n",
+ "# run tavily search\n",
+ "content = client.search(query, search_depth=\"advanced\")[\"results\"]\n",
+ "\n",
+ "# setup prompt\n",
+ "prompt = [{\n",
+ " \"role\": \"system\",\n",
+ " \"content\": f'You are an AI critical thinker research assistant. '\\\n",
+ " f'Your sole purpose is to write well written, critically acclaimed,'\\\n",
+ " f'objective and structured reports on given text.'\n",
+ "}, {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": f'Information: \"\"\"{content}\"\"\"\\n\\n' \\\n",
+ " f'Using the above information, answer the following'\\\n",
+ " f'query: \"{query}\" in a detailed report --'\\\n",
+ " f'Please use MLA format and markdown syntax.'\n",
+ "}]\n",
+ "\n",
+ "# run gpt-4\n",
+ "lc_messages = convert_openai_messages(prompt)\n",
+ "report = ChatOpenAI(model='gpt-4',openai_api_key=openai_api_key).invoke(lc_messages).content\n",
+ "\n",
+ "# print report\n",
+ "print(report)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c679fbfe",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/examples/examples.md b/docs/docs/examples/examples.md
new file mode 100644
index 0000000000000000000000000000000000000000..dfca2e92ce930c0575c933bb37024ea02f49ccfb
--- /dev/null
+++ b/docs/docs/examples/examples.md
@@ -0,0 +1,31 @@
+# Simple Run
+
+### Run PIP Package
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+
+async def main():
+ """
+ This is a sample script that shows how to run a research report.
+ """
+ # Query
+ query = "What happened in the latest burning man floods?"
+
+ # Report Type
+ report_type = "research_report"
+
+ # Initialize the researcher
+ researcher = GPTResearcher(query=query, report_type=report_type, config_path=None)
+ # Conduct research on the given query
+ await researcher.conduct_research()
+ # Write the report
+ report = await researcher.write_report()
+
+ return report
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
+```
\ No newline at end of file
diff --git a/docs/docs/examples/hybrid_research.md b/docs/docs/examples/hybrid_research.md
new file mode 100644
index 0000000000000000000000000000000000000000..82519762e0fbdcd1a860f8adc3e735df5af49acd
--- /dev/null
+++ b/docs/docs/examples/hybrid_research.md
@@ -0,0 +1,125 @@
+# Hybrid Research
+
+## Introduction
+
+GPT Researcher can combine web search capabilities with local document analysis to provide comprehensive, context-aware research results.
+
+This guide will walk you through the process of setting up and running hybrid research using GPT Researcher.
+
+## Prerequisites
+
+Before you begin, ensure you have the following:
+
+- Python 3.10 or higher installed on your system
+- pip (Python package installer)
+- An OpenAI API key (you can also choose other supported [LLMs](../gpt-researcher/llms/llms.md))
+- A Tavily API key (you can also choose other supported [Retrievers](../gpt-researcher/search-engines/retrievers.md))
+
+## Installation
+
+```bash
+pip install gpt-researcher
+```
+
+## Setting Up the Environment
+
+Export your API keys as environment variables:
+
+```bash
+export OPENAI_API_KEY=your_openai_api_key_here
+export TAVILY_API_KEY=your_tavily_api_key_here
+```
+
+Alternatively, you can set these in your Python script:
+
+```python
+import os
+os.environ['OPENAI_API_KEY'] = 'your_openai_api_key_here'
+os.environ['TAVILY_API_KEY'] = 'your_tavily_api_key_here'
+```
+Set the environment variable REPORT_SOURCE to an empty string "" in default.py
+## Preparing Documents
+
+### 1. Local Documents
+1. Create a directory named `my-docs` in your project folder.
+2. Place all relevant local documents (PDFs, TXTs, DOCXs, etc.) in this directory.
+
+### 2. Online Documents
+1. Here is an example of your online document URL example: https://xxxx.xxx.pdf (supports file formats like PDFs, TXTs, DOCXs, etc.)
+
+
+## Running Hybrid Research By "Local Documents"
+
+Here's a basic script to run hybrid research:
+
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+async def get_research_report(query: str, report_type: str, report_source: str) -> str:
+ researcher = GPTResearcher(query=query, report_type=report_type, report_source=report_source)
+ research = await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report
+
+if __name__ == "__main__":
+ query = "How does our product roadmap compare to emerging market trends in our industry?"
+ report_source = "hybrid"
+
+ report = asyncio.run(get_research_report(query=query, report_type="research_report", report_source=report_source))
+ print(report)
+```
+
+## Running Hybrid Research By "Online Documents"
+
+Here's a basic script to run hybrid research:
+
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+async def get_research_report(query: str, report_type: str, report_source: str) -> str:
+ researcher = GPTResearcher(query=query, report_type=report_type, document_urls=document_urls, report_source=report_source)
+ research = await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report
+
+if __name__ == "__main__":
+ query = "How does our product roadmap compare to emerging market trends in our industry?"
+ report_source = "hybrid"
+ document_urls = ["https://xxxx.xxx.pdf", "https://xxxx.xxx.doc"]
+
+ report = asyncio.run(get_research_report(query=query, report_type="research_report", document_urls=document_urls, report_source=report_source))
+ print(report)
+```
+
+To run the script:
+
+1. Save it as `run_research.py`
+2. Execute it with: `python run_research.py`
+
+## Understanding the Results
+
+The output will be a comprehensive research report that combines insights from both web sources and your local documents. The report typically includes an executive summary, key findings, detailed analysis, comparisons between your internal data and external trends, and recommendations based on the combined insights.
+
+## Troubleshooting
+
+1. **API Key Issues**: Ensure your API keys are correctly set and have the necessary permissions.
+2. **Document Loading Errors**: Check that your local documents are in supported formats and are not corrupted.
+3. **Memory Issues**: For large documents or extensive research, you may need to increase your system's available memory or adjust the `chunk_size` in the document processing step.
+
+## FAQ
+
+**Q: How long does a typical research session take?**
+A: The duration varies based on the complexity of the query and the amount of data to process. It can range from 1-5 minutes for very comprehensive research.
+
+**Q: Can I use GPT Researcher with other language models?**
+A: Currently, GPT Researcher is optimized for OpenAI's models. Support for other models can be found [here](../gpt-researcher/llms/llms.md).
+
+**Q: How does GPT Researcher handle conflicting information between local and web sources?**
+A: The system attempts to reconcile differences by providing context and noting discrepancies in the final report. It prioritizes more recent or authoritative sources when conflicts arise.
+
+**Q: Is my local data sent to external servers during the research process?**
+A: No, your local documents are processed on your machine. Only the generated queries and synthesized information (not raw data) are sent to external services for web research.
+
+For more information and updates, please visit the [GPT Researcher GitHub repository](https://github.com/assafelovic/gpt-researcher).
diff --git a/docs/docs/examples/pip-run.ipynb b/docs/docs/examples/pip-run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..b4f6f9cadac38bb0e9db316679613b75412f260a
--- /dev/null
+++ b/docs/docs/examples/pip-run.ipynb
@@ -0,0 +1,85 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "id": "byPgKYhAE6gn"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.environ['OPENAI_API_KEY'] = 'your_openai_api_key'\n",
+ "os.environ['TAVILY_API_KEY'] = 'your_tavily_api_key' # Get a free key here: https://app.tavily.com"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!pip install -U gpt-researcher nest_asyncio"
+ ],
+ "metadata": {
+ "id": "-rXET3OZLxwH"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import nest_asyncio # required for notebooks\n",
+ "nest_asyncio.apply()\n",
+ "\n",
+ "from gpt_researcher import GPTResearcher\n",
+ "import asyncio\n",
+ "\n",
+ "async def get_report(query: str, report_type: str) -> str:\n",
+ " researcher = GPTResearcher(query, report_type)\n",
+ " research_result = await researcher.conduct_research()\n",
+ " report = await researcher.write_report()\n",
+ " \n",
+ " # Get additional information\n",
+ " research_context = researcher.get_research_context()\n",
+ " research_costs = researcher.get_costs()\n",
+ " research_images = researcher.get_research_images()\n",
+ " research_sources = researcher.get_research_sources()\n",
+ " \n",
+ " return report, research_context, research_costs, research_images, research_sources\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ " query = \"Should I invest in Nvidia?\"\n",
+ " report_type = \"research_report\"\n",
+ "\n",
+ " report, context, costs, images, sources = asyncio.run(get_report(query, report_type))\n",
+ " \n",
+ " print(\"Report:\")\n",
+ " print(report)\n",
+ " print(\"\\nResearch Costs:\")\n",
+ " print(costs)\n",
+ " print(\"\\nResearch Images:\")\n",
+ " print(images)\n",
+ " print(\"\\nResearch Sources:\")\n",
+ " print(sources)"
+ ],
+ "metadata": {
+ "id": "KWZe2InrL0ji"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+}
\ No newline at end of file
diff --git a/docs/docs/examples/sample_report.py b/docs/docs/examples/sample_report.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bd23f8c58e6be9a8703fa431b5b5be796d79ca4
--- /dev/null
+++ b/docs/docs/examples/sample_report.py
@@ -0,0 +1,36 @@
+import nest_asyncio # required for notebooks
+
+nest_asyncio.apply()
+
+from gpt_researcher import GPTResearcher
+import asyncio
+
+
+async def get_report(query: str, report_type: str):
+ researcher = GPTResearcher(query, report_type)
+ research_result = await researcher.conduct_research()
+ report = await researcher.write_report()
+
+ # Get additional information
+ research_context = researcher.get_research_context()
+ research_costs = researcher.get_costs()
+ research_images = researcher.get_research_images()
+ research_sources = researcher.get_research_sources()
+
+ return report, research_context, research_costs, research_images, research_sources
+
+
+if __name__ == "__main__":
+ query = "Should I invest in Nvidia?"
+ report_type = "research_report"
+
+ report, context, costs, images, sources = asyncio.run(get_report(query, report_type))
+
+ print("Report:")
+ print(report)
+ print("\nResearch Costs:")
+ print(costs)
+ print("\nResearch Images:")
+ print(images)
+ print("\nResearch Sources:")
+ print(sources)
\ No newline at end of file
diff --git a/docs/docs/examples/sample_sources_only.py b/docs/docs/examples/sample_sources_only.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfef394254f2b09ae1b45cc6e4a9e6a803840182
--- /dev/null
+++ b/docs/docs/examples/sample_sources_only.py
@@ -0,0 +1,20 @@
+from gpt_researcher import GPTResearcher
+import asyncio
+
+
+async def get_report(query: str, report_source: str, sources: list) -> str:
+ researcher = GPTResearcher(query=query, report_source=report_source, source_urls=sources)
+ research_context = await researcher.conduct_research()
+ return await researcher.write_report()
+
+if __name__ == "__main__":
+ query = "What are the biggest trends in AI lately?"
+ report_source = "static"
+ sources = [
+ "https://en.wikipedia.org/wiki/Artificial_intelligence",
+ "https://www.ibm.com/think/insights/artificial-intelligence-trends",
+ "https://www.forbes.com/advisor/business/ai-statistics"
+ ]
+
+ report = asyncio.run(get_report(query=query, report_source=report_source, sources=sources))
+ print(report)
diff --git a/docs/docs/faq.md b/docs/docs/faq.md
new file mode 100644
index 0000000000000000000000000000000000000000..ab97090206504bb1c2ca3e5ee93118c266cdb7fb
--- /dev/null
+++ b/docs/docs/faq.md
@@ -0,0 +1,34 @@
+# FAQ
+
+### How do I get started?
+It really depends on what you're aiming for.
+
+If you're looking to connect your AI application to the internet with Tavily tailored API, check out the [Tavily API](https://docs.tavily.com/docs/tavily-api/introductionn) documentation.
+If you're looking to build and deploy our open source autonomous research agent GPT Researcher, please see [GPT Researcher](/docs/gpt-researcher/getting-started/introduction) documentation.
+You can also check out demos and examples for inspiration [here](/docs/examples/examples).
+
+### What is GPT Researcher?
+
+GPT Researcher is a popular open source autonomous research agent that takes care of the tedious task of research for you, by scraping, filtering and aggregating over 20+ web sources per a single research task.
+
+GPT Researcher is built with best practices for leveraging LLMs (prompt engineering, RAG, chains, embeddings, etc), and is optimized for quick and efficient research. It is also fully customizable and can be tailored to your specific needs.
+
+To learn more about GPT Researcher, check out the [documentation page](/docs/gpt-researcher/getting-started/introduction).
+
+### How much does each research run cost?
+
+A research task using GPT Researcher costs around $0.01 per a single run (for GPT-4 usage). We're constantly optimizing LLM calls to reduce costs and improve performance.
+
+### How do you ensure the report is factual and accurate?
+
+we do our best to ensure that the information we provide is factual and accurate. We do this by using multiple sources, and by using proprietary AI to score and rank the most relevant and accurate information. We also use proprietary AI to filter out irrelevant information and sources.
+
+Lastly, by using RAG and other techniques, we ensure that the information is relevant to the context of the research task, leading to more accurate generative AI content and reduced hallucinations.
+
+### What are your plans for the future?
+
+We're constantly working on improving our products and services. We're currently working on improving our search API together with design partners, and adding more data sources to our search engine. We're also working on improving our research agent GPT Researcher, and adding more features to it while growing our amazing open source community.
+
+If you're interested in our roadmap or looking to collaborate, check out our [roadmap page](https://trello.com/b/3O7KBePw/gpt-researcher-roadmap).
+
+Feel free to [contact us](mailto:assafelovic@gmail.com) if you have any further questions or suggestions!
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/context/filtering-by-domain.md b/docs/docs/gpt-researcher/context/filtering-by-domain.md
new file mode 100644
index 0000000000000000000000000000000000000000..07fe00c2258139b5c1ff32675d4436ef3d2d0dbb
--- /dev/null
+++ b/docs/docs/gpt-researcher/context/filtering-by-domain.md
@@ -0,0 +1,24 @@
+# Filtering by Domain
+
+If you set Google as a Retriever, you can filter web results by site.
+
+For example, set in the query param you pass the GPTResearcher class instance: `query="site:linkedin.com a python web developer to implement my custom gpt-researcher flow"` will limit the results to linkedin.com
+
+> **Step 1** - Set these environment variables with a .env file in the root folder
+
+```bash
+TAVILY_API_KEY=
+LANGCHAIN_TRACING_V2=true
+LANGCHAIN_API_KEY=
+OPENAI_API_KEY=
+DOC_PATH=./my-docs
+RETRIEVER=google
+GOOGLE_API_KEY=
+GOOGLE_CX_KEY=
+```
+
+> **Step 2** - from the root project run:
+
+docker-compose up -- build
+
+> **Step 3** - from the frontend input box in localhost:3000, you can append any google search filter (such as filtering by domain names)
diff --git a/docs/docs/gpt-researcher/context/gptr-hybrid.png b/docs/docs/gpt-researcher/context/gptr-hybrid.png
new file mode 100644
index 0000000000000000000000000000000000000000..c3a3027ec7ae0d6e17b342e2322b1a832875154d
--- /dev/null
+++ b/docs/docs/gpt-researcher/context/gptr-hybrid.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:684394fa4ee4a17caab40113029ebc4d7a3f0ebf240d4b5a50d768e7b947a678
+size 198033
diff --git a/docs/docs/gpt-researcher/context/local-docs.md b/docs/docs/gpt-researcher/context/local-docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..60cb3423d04adaa8b8bcd5d4a2da77fa92f341c7
--- /dev/null
+++ b/docs/docs/gpt-researcher/context/local-docs.md
@@ -0,0 +1,22 @@
+# Research on Local Documents
+
+## Just Local Docs
+
+You can instruct the GPT Researcher to run research tasks based on your local documents. Currently supported file formats are: PDF, plain text, CSV, Excel, Markdown, PowerPoint, and Word documents.
+
+Step 1: Add the env variable `DOC_PATH` pointing to the folder where your documents are located.
+
+```bash
+export DOC_PATH="./my-docs"
+```
+
+Step 2:
+ - If you're running the frontend app on localhost:8000, simply select "My Documents" from the "Report Source" Dropdown Options.
+ - If you're running GPT Researcher with the [PIP package](https://docs.tavily.com/docs/gpt-researcher/gptr/pip-package), pass the `report_source` argument as "local" when you instantiate the `GPTResearcher` class [code sample here](https://docs.gptr.dev/docs/gpt-researcher/context/tailored-research).
+
+## Local Docs + Web (Hybrid)
+
+
+
+Check out the blog post on [Hybrid Research](https://docs.gptr.dev/blog/gptr-hybrid) to learn more about how to combine local documents with web research.
+```
diff --git a/docs/docs/gpt-researcher/context/tailored-research.md b/docs/docs/gpt-researcher/context/tailored-research.md
new file mode 100644
index 0000000000000000000000000000000000000000..4efbff027d3a4cf97afb070216d8cb726415e4bb
--- /dev/null
+++ b/docs/docs/gpt-researcher/context/tailored-research.md
@@ -0,0 +1,147 @@
+# Tailored Research
+
+The GPT Researcher package allows you to tailor the research to your needs such as researching on specific sources (URLs) or local documents, and even specify the agent prompt instruction upon which the research is conducted.
+
+### Research on Specific Sources 📚
+
+You can specify the sources you want the GPT Researcher to research on by providing a list of URLs. The GPT Researcher will then conduct research on the provided sources via `source_urls`.
+
+If you want GPT Researcher to perform additional research outside of the URLs you provided, i.e., conduct research on various other websites that it finds suitable for the query/sub-query, you can set the parameter `complement_source_urls` as `True`. Default value of `False` will only scour the websites you provide via `source_urls`.
+
+
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+async def get_report(query: str, report_type: str, sources: list) -> str:
+ researcher = GPTResearcher(query=query, report_type=report_type, source_urls=sources, complement_source_urls=False)
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report
+
+if __name__ == "__main__":
+ query = "What are the biggest trends in AI lately?"
+ report_source = "static"
+ sources = [
+ "https://en.wikipedia.org/wiki/Artificial_intelligence",
+ "https://www.ibm.com/think/insights/artificial-intelligence-trends",
+ "https://www.forbes.com/advisor/business/ai-statistics"
+ ]
+ report = asyncio.run(get_report(query=query, report_source=report_source, sources=sources))
+ print(report)
+```
+
+### Specify Agent Prompt 📝
+
+You can specify the agent prompt instruction upon which the research is conducted. This allows you to guide the research in a specific direction and tailor the report layout.
+Simply pass the prompt as the `query` argument to the `GPTResearcher` class and the "custom_report" `report_type`.
+
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+async def get_report(prompt: str, report_type: str) -> str:
+ researcher = GPTResearcher(query=prompt, report_type=report_type)
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report
+
+if __name__ == "__main__":
+ report_type = "custom_report"
+ prompt = "Research the latest advancements in AI and provide a detailed report in APA format including sources."
+
+ report = asyncio.run(get_report(prompt=prompt, report_type=report_type))
+ print(report)
+```
+
+### Research on Local Documents 📄
+You can instruct the GPT Researcher to research on local documents by providing the path to those documents. Currently supported file formats are: PDF, plain text, CSV, Excel, Markdown, PowerPoint, and Word documents.
+
+*Step 1*: Add the env variable `DOC_PATH` pointing to the folder where your documents are located.
+
+For example:
+
+```bash
+export DOC_PATH="./my-docs"
+```
+
+*Step 2*: When you create an instance of the `GPTResearcher` class, pass the `report_source` argument as `"local"`.
+
+GPT Researcher will then conduct research on the provided documents.
+
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+async def get_report(query: str, report_source: str) -> str:
+ researcher = GPTResearcher(query=query, report_source=report_source)
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report
+
+if __name__ == "__main__":
+ query = "What can you tell me about myself based on my documents?"
+ report_source = "local" # "local" or "web"
+
+ report = asyncio.run(get_report(query=query, report_source=report_source))
+ print(report)
+```
+
+### Hybrid Research 🔄
+You can combine the above methods to conduct hybrid research. For example, you can instruct the GPT Researcher to research on both web sources and local documents.
+Simply provide the sources and set the `report_source` argument as `"hybrid"` and watch the magic happen.
+
+Please note! You should set the proper retrievers for the web sources and doc path for local documents for this to work.
+To learn more about retrievers check out the [Retrievers](https://docs.gptr.dev/docs/gpt-researcher/search-engines/retrievers) documentation.
+
+
+### Research on LangChain Documents 🦜️🔗
+You can instruct the GPT Researcher to research on a list of langchain document instances.
+
+For example:
+
+```python
+from langchain_core.documents import Document
+from typing import List, Dict
+from gpt_researcher import GPTResearcher
+from langchain_postgres.vectorstores import PGVector
+from langchain_openai import OpenAIEmbeddings
+from sqlalchemy import create_engine
+import asyncio
+
+
+
+CONNECTION_STRING = 'postgresql://someuser:somepass@localhost:5432/somedatabase'
+
+def get_retriever(collection_name: str, search_kwargs: Dict[str, str]):
+ engine = create_engine(CONNECTION_STRING)
+ embeddings = OpenAIEmbeddings()
+
+ index = PGVector.from_existing_index(
+ use_jsonb=True,
+ embedding=embeddings,
+ collection_name=collection_name,
+ connection=engine,
+ )
+
+ return index.as_retriever(search_kwargs=search_kwargs)
+
+
+async def get_report(query: str, report_type: str, report_source: str, documents: List[Document]) -> str:
+ researcher = GPTResearcher(query=query, report_type=report_type, report_source=report_source, documents=documents)
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report
+
+if __name__ == "__main__":
+ query = "What can you tell me about blue cheese based on my documents?"
+ report_type = "research_report"
+ report_source = "langchain_documents"
+
+ # using a LangChain retriever to get all the documents regarding cheese
+ # https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html#langchain_core.retrievers.BaseRetriever.invoke
+ langchain_retriever = get_retriever("cheese_collection", { "k": 3 })
+ documents = langchain_retriever.invoke("All the documents about cheese")
+ report = asyncio.run(get_report(query=query, report_type=report_type, report_source=report_source, documents=documents))
+ print(report)
+```
diff --git a/docs/docs/gpt-researcher/context/vector-stores.md b/docs/docs/gpt-researcher/context/vector-stores.md
new file mode 100644
index 0000000000000000000000000000000000000000..3e4725f06d38ec2474a43e6ecd9bb1813f551cda
--- /dev/null
+++ b/docs/docs/gpt-researcher/context/vector-stores.md
@@ -0,0 +1,155 @@
+# Vector Stores
+
+The GPT Researcher package allows you to integrate with existing langchain vector stores that have been populated.
+For a complete list of supported langchain vector stores, please refer to this [link](https://python.langchain.com/v0.2/docs/integrations/vectorstores/).
+
+You can create a set of embeddings and langchain documents and store them in any supported vector store of your choosing.
+GPT-Researcher will work with any langchain vector store that implements the `asimilarity_search` method.
+
+**If you want to use the existing knowledge in your vector store, make sure to set `report_source="langchain_vectorstore"`. Any other settings will add additional information from scraped data and might contaminate your vectordb (See _How to add scraped data to your vector store_ for more context)**
+
+## Faiss
+```python
+from gpt_researcher import GPTResearcher
+
+from langchain.text_splitter import CharacterTextSplitter
+from langchain_openai import OpenAIEmbeddings
+from langchain_community.vectorstores import FAISS
+from langchain_core.documents import Document
+
+# exerpt taken from - https://paulgraham.com/wealth.html
+essay = """
+May 2004
+
+(This essay was originally published in Hackers & Painters.)
+
+If you wanted to get rich, how would you do it? I think your best bet would be to start or join a startup.
+That's been a reliable way to get rich for hundreds of years. The word "startup" dates from the 1960s,
+but what happens in one is very similar to the venture-backed trading voyages of the Middle Ages.
+
+Startups usually involve technology, so much so that the phrase "high-tech startup" is almost redundant.
+A startup is a small company that takes on a hard technical problem.
+
+Lots of people get rich knowing nothing more than that. You don't have to know physics to be a good pitcher.
+But I think it could give you an edge to understand the underlying principles. Why do startups have to be small?
+Will a startup inevitably stop being a startup as it grows larger?
+And why do they so often work on developing new technology? Why are there so many startups selling new drugs or computer software,
+and none selling corn oil or laundry detergent?
+
+
+The Proposition
+
+Economically, you can think of a startup as a way to compress your whole working life into a few years.
+Instead of working at a low intensity for forty years, you work as hard as you possibly can for four.
+This pays especially well in technology, where you earn a premium for working fast.
+
+Here is a brief sketch of the economic proposition. If you're a good hacker in your mid twenties,
+you can get a job paying about $80,000 per year. So on average such a hacker must be able to do at
+least $80,000 worth of work per year for the company just to break even. You could probably work twice
+as many hours as a corporate employee, and if you focus you can probably get three times as much done in an hour.[1]
+You should get another multiple of two, at least, by eliminating the drag of the pointy-haired middle manager who
+would be your boss in a big company. Then there is one more multiple: how much smarter are you than your job
+description expects you to be? Suppose another multiple of three. Combine all these multipliers,
+and I'm claiming you could be 36 times more productive than you're expected to be in a random corporate job.[2]
+If a fairly good hacker is worth $80,000 a year at a big company, then a smart hacker working very hard without
+any corporate bullshit to slow him down should be able to do work worth about $3 million a year.
+...
+...
+...
+"""
+
+document = [Document(page_content=essay)]
+text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=30, separator="\n")
+docs = text_splitter.split_documents(documents=document)
+
+vector_store = FAISS.from_documents(documents, OpenAIEmbeddings())
+
+query = """
+ Summarize the essay into 3 or 4 succinct sections.
+ Make sure to include key points regarding wealth creation.
+
+ Include some recommendations for entrepreneurs in the conclusion.
+"""
+
+
+# Create an instance of GPTResearcher
+researcher = GPTResearcher(
+ query=query,
+ report_type="research_report",
+ report_source="langchain_vectorstore",
+ vector_store=vector_store,
+)
+
+# Conduct research and write the report
+await researcher.conduct_research()
+report = await researcher.write_report()
+```
+
+
+## PGVector
+```python
+from gpt_researcher import GPTResearcher
+from langchain_postgres.vectorstores import PGVector
+from langchain_openai import OpenAIEmbeddings
+
+CONNECTION_STRING = 'postgresql://someuser:somepass@localhost:5432/somedatabase'
+
+
+# assuming the vector store exists and contains the relevent documents
+# also assuming embeddings have been or will be generated
+vector_store = PGVector.from_existing_index(
+ use_jsonb=True,
+ embedding=OpenAIEmbeddings(),
+ collection_name='some collection name',
+ connection=CONNECTION_STRING,
+ async_mode=True,
+)
+
+query = """
+ Create a short report about apples.
+ Include a section about which apples are considered best
+ during each season.
+"""
+
+# Create an instance of GPTResearcher
+researcher = GPTResearcher(
+ query=query,
+ report_type="research_report",
+ report_source="langchain_vectorstore",
+ vector_store=vector_store,
+)
+
+# Conduct research and write the report
+await researcher.conduct_research()
+report = await researcher.write_report()
+```
+## Adding Scraped Data to your vector store
+
+In some cases in which you want to store the scraped data and documents into your own vector store for future usages, GPT-Researcher also allows you to do so seamlessly just by inputting your vector store (make sure to set `report_source` value to something other than `langchain_vectorstore`)
+
+```python
+from gpt_researcher import GPTResearcher
+
+from langchain_community.vectorstores import InMemoryVectorStore
+from langchain_openai import OpenAIEmbeddings
+
+vector_store = InMemoryVectorStore(embedding=OpenAIEmbeddings())
+
+query = "The best LLM"
+
+# Create an instance of GPTResearcher
+researcher = GPTResearcher(
+ query=query,
+ report_type="research_report",
+ report_source="web",
+ vector_store=vector_store,
+)
+
+# Conduct research, the context will be chunked and stored in the vector_store
+await researcher.conduct_research()
+
+# Query the 5 most relevant context in our vector store
+related_contexts = await vector_store.asimilarity_search("GPT-4", k = 5)
+print(related_contexts)
+print(len(related_contexts)) #Should be 5
+```
diff --git a/docs/docs/gpt-researcher/frontend/frontend.md b/docs/docs/gpt-researcher/frontend/frontend.md
new file mode 100644
index 0000000000000000000000000000000000000000..afbd79b294674cb7d259eab3001fef3e4bded4d5
--- /dev/null
+++ b/docs/docs/gpt-researcher/frontend/frontend.md
@@ -0,0 +1,124 @@
+# Frontend Application
+
+This frontend project aims to enhance the user experience of GPT Researcher, providing an intuitive and efficient interface for automated research. It offers two deployment options to suit different needs and environments.
+
+View a Product Tutorial here: [GPT-Researcher Frontend Tutorial](https://www.youtube.com/watch?v=hIZqA6lPusk)
+
+
+## NextJS Frontend App
+
+The React app (located in the `frontend` directory) is our Frontend 2.0 which we hope will enable us to display the robustness of the backend on the frontend, as well.
+
+It comes with loads of added features, such as:
+ - a drag-n-drop user interface for uploading and deleting files to be used as local documents by GPTResearcher.
+ - a GUI for setting your GPTR environment variables.
+ - the ability to trigger the multi_agents flow via the Backend Module or Langgraph Cloud Host (currently in closed beta).
+ - stability fixes
+ - and more coming soon!
+
+### Run the NextJS React App with Docker
+
+> **Step 1** - [Install Docker](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started-with-docker)
+
+> **Step 2** - Clone the '.env.example' file, add your API Keys to the cloned file and save the file as '.env'
+
+> **Step 3** - Within the docker-compose file comment out services that you don't want to run with Docker.
+
+```bash
+docker compose up --build
+```
+
+If that doesn't work, try running it without the dash:
+```bash
+docker compose up --build
+```
+
+> **Step 4** - By default, if you haven't uncommented anything in your docker-compose file, this flow will start 2 processes:
+ - the Python server running on localhost:8000
+ - the React app running on localhost:3000
+
+Visit localhost:3000 on any browser and enjoy researching!
+
+
+## Other Options: 1: Static Frontend (FastAPI)
+
+A lightweight solution using FastAPI to serve static files.
+
+#### Prerequisites
+- Python 3.11+
+- pip
+
+#### Setup and Running
+
+1. Install required packages:
+ ```
+ pip install -r requirements.txt
+ ```
+
+2. Start the server:
+ ```
+ python -m uvicorn main:app
+ ```
+
+3. Access at `http://localhost:8000`
+
+#### Demo
+
+
+
+## Yet Another Option: Running NextJS Frontend via CLI
+
+A more robust solution with enhanced features and performance.
+
+#### Prerequisites
+- Node.js (v18.17.0 recommended)
+- npm
+
+#### Setup and Running
+
+1. Navigate to NextJS directory:
+ ```
+ cd nextjs
+ ```
+
+2. Set up Node.js:
+ ```
+ nvm install 18.17.0
+ nvm use v18.17.0
+ ```
+
+3. Install dependencies:
+ ```
+ npm install --legacy-peer-deps
+ ```
+
+4. Start development server:
+ ```
+ npm run dev
+ ```
+
+5. Access at `http://localhost:3000`
+
+Note: Requires backend server on `localhost:8000` as detailed in option 1.
+
+#### Demo
+
+
+## Choosing an Option
+
+- Static Frontend: Quick setup, lightweight deployment.
+- NextJS Frontend: Feature-rich, scalable, better performance and SEO.
+
+For production, NextJS is recommended.
+
+## Frontend Features
+
+Our frontend enhances GPT-Researcher by providing:
+
+1. Intuitive Research Interface: Streamlined input for research queries.
+2. Real-time Progress Tracking: Visual feedback on ongoing research tasks.
+3. Interactive Results Display: Easy-to-navigate presentation of findings.
+4. Customizable Settings: Adjust research parameters to suit specific needs.
+5. Responsive Design: Optimal experience across various devices.
+
+These features aim to make the research process more efficient and user-friendly, complementing GPT-Researcher's powerful agent capabilities.
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/frontend/logs.md b/docs/docs/gpt-researcher/frontend/logs.md
new file mode 100644
index 0000000000000000000000000000000000000000..146ba07861bbb6758154d10d883e98046e000e08
--- /dev/null
+++ b/docs/docs/gpt-researcher/frontend/logs.md
@@ -0,0 +1,170 @@
+# Log Files
+
+This document explains how to interpret the log files generated for each report. These logs provide a detailed record of the research process, from initial task planning to the gathering of information, and finally, the report writing process. Reports may change over time as new features are developed.
+
+## Log File Overview
+
+The log file is a JSON file that contains a list of events that happened during the research process. Each event is an object with a timestamp, type, and data. The data contains the specific information about the event.
+
+You can find the log file in the `outputs` folder.
+
+Or you can access the log file from the report page itself by clicking the "Download Logs" button.
+
+For developers, there is an additional `logs` folder that may be useful. See description below for more details.
+
+## Key Components:
+
+* `timestamp`: The timestamp is in the format `YYYY-MM-DDTHH:MM:SS.ffffff` which is an ISO format. The main timestamp is for the generation of the file itself. The timestamps for the events are when each specific event happened during the research process.
+* `events`: This is an array containing all the logged events during the research task. Each event object has the following structure.
+* `timestamp`: The specific time when the event occurred, allowing you to follow the sequence of actions.
+* `type`: This will always be "event" for now.
+* `data`: Contains specific information about the event. Includes:
+* `type`: This indicates the general kind of event (e.g., "logs").
+* `content`: A descriptor of what the tool is doing (e.g., "starting\_research", "running\_subquery\_research", "scraping\_content").
+* `output`: A more detailed message, which often includes visual indicators (emojis), that is sent to the user when the tool performs the task
+* `metadata`: Additional data related to the event. This can be `null` or contain an array of relevant information like URLs.
+
+## Types of Events & Their Significance
+Here's a complete breakdown of all the unique `content` types and what they mean. This is a comprehensive list of all the different actions the research tool will perform.
+1. **`starting_research`**:
+* Indicates that the research process has begun for a given task.
+* `output`: Includes the text of the research query.
+2. **`agent_generated`**:
+* This is an indicator of what the agent is used for this task
+* `output`: Will show the name of the agent
+3. **`planning_research`**:
+* Shows the tool is initially browsing to understand the scope of the request and start planning.
+* The `output` indicates the tool is either browsing or doing initial planning.
+4. **`subqueries`**:
+* Indicates that the tool has created subqueries that it will use for research
+* `output`: Lists out all of the subqueries that the tool will be running to perform the research
+* `metadata`: An array of strings that contain the subqueries to be run
+5. **`running_subquery_research`**:
+* Indicates that a specific subquery research is being performed.
+* `output`: Shows the specific subquery being run.
+6. **`added_source_url`**:
+* Signifies a URL that was identified as a relevant source of information.
+* `output`: Provides the URL with a checkmark emoji to indicate success.
+* `metadata`: Contains the actual URL added.
+7. **`researching`**:
+* Indicates the tool is actively searching across multiple sources for information.
+* `output`: A general message indicating research across multiple sources is happening.
+8. **`scraping_urls`**:
+* Shows the tool is beginning to scrape content from a group of URLs.
+* `output`: Indicates how many URLs the tool will be scraping from.
+9. **`scraping_content`**:
+* Indicates the tool successfully scraped the content from the URLs.
+* `output`: Shows the number of pages that have been successfully scraped.
+10. **`scraping_images`**:
+* Signifies that images were identified and selected during the scraping process.
+* `output`: Shows the number of new images selected and the total images found
+* `metadata`: An array containing URLs of the selected images.
+11. **`scraping_complete`**:
+* Indicates that the scraping process is complete for the URLs.
+* `output`: A message stating that the scraping process is complete
+12. **`fetching_query_content`**:
+* Indicates that the tool is fetching content based on a specific query.
+* `output`: The specific query for which content is being fetched
+13. **`subquery_context_window`**:
+* Indicates the tool is creating a context window for a given subquery to help with more detailed research.
+* `output`: A message stating the context window for the subquery is created.
+14. **`research_step_finalized`**:
+* Indicates that the research portion of a step is finalized.
+* `output`: A message stating that the research is complete.
+15. **`generating_subtopics`**:
+* Signifies that the tool is generating subtopics to guide the report.
+* `output`: A message indicating that the tool is generating subtopics.
+16. **`subtopics_generated`**:
+* Indicates that subtopics have been generated.
+* `output`: A message that subtopics have been generated.
+17. **`writing_introduction`**:
+* Indicates the tool is beginning to write the introduction to the report.
+* `output`: A message to the user that the introduction writing has started.
+18. **`introduction_written`**:
+* Indicates the introduction to the report is finished
+* `output`: A message to the user that the introduction writing is complete
+19. **`generating_draft_sections`**:
+* Shows that the tool is generating draft sections for the report.
+* `output`: A message that the report is generating draft sections.
+20. **`draft_sections_generated`**:
+* Indicates the draft sections of the report are generated.
+* `output`: A message to the user that the draft sections have been generated.
+21. **`fetching_relevant_written_content`**:
+* Indicates the tool is fetching relevant written content for the report.
+* `output`: A message to the user that relevant content is being fetched
+22. **`writing_report`**:
+* Indicates that the tool is starting to compile the research into a report.
+* `output`: A message to the user that the report generation has started.
+23. **`report_written`**:
+* Signifies that the report generation is complete.
+* `output`: A message that the report generation is finished.
+24. **`relevant_contents_context`**:
+* Indicates that a context window for relevant content has been created.
+* `output`: A message indicating a context window for relevant content has been created.
+25. **`writing_conclusion`**:
+* Indicates the tool has started writing the conclusion for the report
+* `output`: A message to the user that the conclusion is being written
+26. **`conclusion_written`**:
+* Indicates the conclusion of the report has been written
+* `output`: A message to the user that the conclusion has been written
+
+## How to Use the Logs
+
+* **Troubleshooting:** If the research results are unexpected, the log files can help you understand the exact steps the tool took, including the queries used, the sources it visited, and how the report was generated.
+* **Transparency:** The logs provide transparency into the research process. You can see exactly which URLs were visited, which images were selected, and how the report was built.
+* **Understanding the Process**: The logs will provide an overview of what the tool does and what each of the steps look like.
+* **Reproducibility:** The log files allow users to trace the exact process.
+
+## Example Usage
+By looking at the timestamps, you can see the flow of the research task. The logs will show you the subqueries used by the tool to approach the main query, all the URLs used, if images were selected for the research, and all the steps the tool took to generate the report.
+
+## Logs for Developers
+In addition to the user-facing log files (detailed and summary reports), the application also generates two types of log files specifically for developers:
+1. A `.log` file which is a basic log file format for logging events as they occur
+2. A `.json` file which is more structured
+Find the logs in the `logs` folder.
+
+### Basic Log File (.log)
+
+* **Format:** Plain text format. Each line represents a log entry.
+* **Content:**
+ * Timestamps with millisecond precision.
+ * Log level: Usually `INFO`, but could include `DEBUG`, `WARNING`, or `ERROR` in a more complex setup.
+ * Module name (e.g., "research").
+ * Descriptive messages about various processes.
+ * Includes data about:
+ * Start and end of research tasks
+ * Web searches being performed
+ * Planning of the research
+ * Subqueries generated and their results
+ * The sizes of scraped data
+ * The size of content found from subqueries
+ * The final combined size of all context found
+* **Use Cases for Developers:**
+ * **Real-time Monitoring:** Can be used to monitor the tool's activity in real time.
+ * **Debugging:** Helpful for pinpointing issues by seeing the chronological flow of operations, the size of content collected, etc.
+ * **Performance Analysis:** Timestamps can help in identifying bottlenecks by measuring how long certain operations take.
+ * **High-level overview**: Allows developers to easily see which steps of the tool were performed, and some basic information like sizes of collected content.
+* **Key Differences from User Logs:**
+ * Less structured, more for developers to review in real-time.
+ * Contains technical information not usually relevant to a non-developer user.
+ * Does not have emojis or simplified language.
+ * No information on the images collected
+
+### JSON Log File (.json)
+
+* **Format**: Structured JSON format
+* **Content**:
+ * Timestamps, as in all log files
+ * `type` field that can be:
+ * `sub_query`: which contains the subquery string along with `scraped_data_size`
+ * `content_found`: which includes the `sub_query` and the `content_size`
+ * A `content` field which gives a snapshot of the overall research and can contain the final context and sources found from the research for that task
+* **Use Cases for Developers**:
+ * **Detailed Analysis**: Allows developers to view specific details of how the tool is running, particularly related to the subqueries and the results of the research.
+ * **Process Understanding**: Developers can see the different subqueries run and how much content each generated which can lead to better debugging and understanding of the tool.
+ * **Data Inspection**: Can be useful for reviewing the generated queries and content sizes.
+* **Key Differences from User Logs**:
+ * Highly structured and focused on subquery execution, and the results of this process, specifically the sizes of collected information.
+ * Does not contain simplified language, emojis, or high-level explanations.
+ * Does not contain information on the overall context or the images collected, it mainly focuses on the subquery process.
diff --git a/docs/docs/gpt-researcher/frontend/playing-with-webhooks.md b/docs/docs/gpt-researcher/frontend/playing-with-webhooks.md
new file mode 100644
index 0000000000000000000000000000000000000000..f532f49fba5f1b24f73d1ce55439147fc7eb2582
--- /dev/null
+++ b/docs/docs/gpt-researcher/frontend/playing-with-webhooks.md
@@ -0,0 +1,23 @@
+# Playing with Webhooks
+
+The GPTR Frontend is powered by Webhooks streaming back from the Backend. This allows for real-time updates on the status of your research tasks, as well as the ability to interact with the Backend directly from the Frontend.
+
+
+## Inspecting Webhooks
+
+When running reports via the frontend, you can inspect the websocket messages in the Network Tab.
+
+Here's how:
+
+
+
+
+### Am I polling the right URL?
+
+If you're concerned that your frontend isn't hitting the right API Endpoint, you can check the URL in the Network Tab.
+
+Click into the WS request & go to the "Headers" tab
+
+
+
+For debugging, have a look at the getHost function.
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/getting-started/cli.md b/docs/docs/gpt-researcher/getting-started/cli.md
new file mode 100644
index 0000000000000000000000000000000000000000..4e9451936c5e9173cfc1db119a4d5f028e936eea
--- /dev/null
+++ b/docs/docs/gpt-researcher/getting-started/cli.md
@@ -0,0 +1,81 @@
+# Run with CLI
+
+This command-line interface (CLI) tool allows you to generate research reports using the GPTResearcher class. It provides an easy way to conduct research on various topics and generate different types of reports.
+
+## Installation
+
+1. Clone the repository:
+ ```
+ git clone https://github.com/yourusername/gpt-researcher.git
+ cd gpt-researcher
+ ```
+
+2. Install the required dependencies:
+ ```
+ pip install -r requirements.txt
+ ```
+
+3. Set up your environment variables:
+ Create a `.env` file in the project root and add your API keys or other necessary configurations.
+
+## Usage
+
+The basic syntax for using the CLI is:
+
+```
+python cli.py "" --report_type [--tone ]
+```
+
+### Arguments
+
+- `query` (required): The research query you want to investigate.
+- `--report_type` (required): The type of report to generate. Options include:
+ - `research_report`: Summary - Short and fast (~2 min)
+ - `detailed_report`: Detailed - In depth and longer (~5 min)
+ - `resource_report`
+ - `outline_report`
+ - `custom_report`
+ - `subtopic_report`
+- `--tone` (optional): The tone of the report. Defaults to 'objective'. Options include:
+ - `objective`: Impartial and unbiased presentation
+ - `formal`: Academic standards with sophisticated language
+ - `analytical`: Critical evaluation and examination
+ - `persuasive`: Convincing viewpoint
+ - `informative`: Clear and comprehensive information
+ - `explanatory`: Clarifying complex concepts
+ - `descriptive`: Detailed depiction
+ - `critical`: Judging validity and relevance
+ - `comparative`: Juxtaposing different theories
+ - `speculative`: Exploring hypotheses
+ - `reflective`: Personal insights
+ - `narrative`: Story-based presentation
+ - `humorous`: Light-hearted and engaging
+ - `optimistic`: Highlighting positive aspects
+ - `pessimistic`: Focusing on challenges
+
+## Examples
+
+1. Generate a quick research report on climate change:
+ ```
+ python cli.py "What are the main causes of climate change?" --report_type research_report
+ ```
+
+2. Create a detailed report on artificial intelligence with an analytical tone:
+ ```
+ python cli.py "The impact of artificial intelligence on job markets" --report_type detailed_report --tone analytical
+ ```
+
+3. Generate an outline report on renewable energy with a persuasive tone:
+ ```
+ python cli.py "Renewable energy sources and their potential" --report_type outline_report --tone persuasive
+ ```
+
+## Output
+
+The generated report will be saved as a Markdown file in the `outputs` directory. The filename will be a unique UUID.
+
+## Note
+
+- The execution time may vary depending on the complexity of the query and the type of report requested.
+- Make sure you have the necessary API keys and permissions set up in your `.env` file for the tool to function correctly.
+- All tone options should be provided in lowercase.
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/getting-started/getting-started-with-docker.md b/docs/docs/gpt-researcher/getting-started/getting-started-with-docker.md
new file mode 100644
index 0000000000000000000000000000000000000000..4e26043a231a14cf39b48d46bb063c8ab0aa6c1d
--- /dev/null
+++ b/docs/docs/gpt-researcher/getting-started/getting-started-with-docker.md
@@ -0,0 +1,28 @@
+# Docker: Quickstart
+
+> **Step 1** - Install & Open Docker Desktop
+
+Follow instructions at https://www.docker.com/products/docker-desktop/
+
+
+> **Step 2** - [Follow this flow](https://www.youtube.com/watch?v=x1gKFt_6Us4)
+
+This mainly includes cloning the '.env.example' file, adding your API Keys to the cloned file and saving the file as '.env'
+
+> **Step 3** - Within root, run with Docker.
+
+```bash
+docker-compose up --build
+```
+
+If that doesn't work, try running it without the dash:
+```bash
+docker compose up --build
+```
+
+> **Step 4** - By default, if you haven't uncommented anything in your docker-compose file, this flow will start 2 processes:
+ - the Python server running on localhost:8000
+ - the React app running on localhost:3000
+
+Visit localhost:3000 on any browser and enjoy researching!
+
diff --git a/docs/docs/gpt-researcher/getting-started/getting-started.md b/docs/docs/gpt-researcher/getting-started/getting-started.md
new file mode 100644
index 0000000000000000000000000000000000000000..a77847819e31cac3ef2a7fa59b73446745327a00
--- /dev/null
+++ b/docs/docs/gpt-researcher/getting-started/getting-started.md
@@ -0,0 +1,104 @@
+# Getting Started
+
+> **Step 0** - Install Python 3.11 or later. [See here](https://www.tutorialsteacher.com/python/install-python) for a step-by-step guide.
+
+> **Step 1** - Download the project and navigate to its directory
+
+```bash
+$ git clone https://github.com/assafelovic/gpt-researcher.git
+$ cd gpt-researcher
+```
+
+> **Step 3** - Set up API keys using two methods: exporting them directly or storing them in a `.env` file.
+
+For Linux/Temporary Windows Setup, use the export method:
+
+```bash
+export OPENAI_API_KEY={Your OpenAI API Key here}
+export TAVILY_API_KEY={Your Tavily API Key here}
+```
+
+For a more permanent setup, create a `.env` file in the current `gpt-researcher` directory and input the env vars (without `export`).
+
+- For LLM provider, we recommend **[OpenAI GPT](https://platform.openai.com/docs/guides/gpt)**, but you can use any other LLM model (including open sources). To learn how to change the LLM model, please refer to the [documentation](https://docs.gptr.dev/docs/gpt-researcher/llms/llms) page.
+- For web search API, we recommend **[Tavily Search API](https://app.tavily.com)**, but you can also refer to other search APIs of your choice by changing the search provider in config/config.py to `duckduckgo`, `google`, `bing`, `searchapi`, `serper`, `searx` and more. Then add the corresponding env API key.
+
+## Quickstart
+
+> **Step 1** - Install dependencies
+
+```bash
+$ pip install -r requirements.txt
+```
+
+> **Step 2** - Run the agent with FastAPI
+
+```bash
+$ uvicorn main:app --reload
+```
+
+> **Step 3** - Go to http://localhost:8000 on any browser and enjoy researching!
+
+## Using Virtual Environment or Poetry
+Select either based on your familiarity with each:
+
+### Virtual Environment
+
+#### *Establishing the Virtual Environment with Activate/Deactivate configuration*
+
+Create a virtual environment using the `venv` package with the environment name ``, for example, `env`. Execute the following command in the PowerShell/CMD terminal:
+
+```bash
+python -m venv env
+```
+
+To activate the virtual environment, use the following activation script in PowerShell/CMD terminal:
+
+```bash
+.\env\Scripts\activate
+```
+
+To deactivate the virtual environment, run the following deactivation script in PowerShell/CMD terminal:
+
+```bash
+deactivate
+```
+
+#### *Install the dependencies for a Virtual environment*
+
+After activating the `env` environment, install dependencies using the `requirements.txt` file with the following command:
+
+```bash
+python -m pip install -r requirements.txt
+```
+
+
+
+### Poetry
+
+#### *Establishing the Poetry dependencies and virtual environment with Poetry version `~1.7.1`*
+
+Install project dependencies and simultaneously create a virtual environment for the specified project. By executing this command, Poetry reads the project's "pyproject.toml" file to determine the required dependencies and their versions, ensuring a consistent and isolated development environment. The virtual environment allows for a clean separation of project-specific dependencies, preventing conflicts with system-wide packages and enabling more straightforward dependency management throughout the project's lifecycle.
+
+```bash
+poetry install
+```
+
+#### *Activate the virtual environment associated with a Poetry project*
+
+By running this command, the user enters a shell session within the isolated environment associated with the project, providing a dedicated space for development and execution. This virtual environment ensures that the project dependencies are encapsulated, avoiding conflicts with system-wide packages. Activating the Poetry shell is essential for seamlessly working on a project, as it ensures that the correct versions of dependencies are used and provides a controlled environment conducive to efficient development and testing.
+
+```bash
+poetry shell
+```
+
+### *Run the app*
+> Launch the FastAPI application agent on a *Virtual Environment or Poetry* setup by executing the following command:
+```bash
+python -m uvicorn main:app --reload
+```
+> Visit http://localhost:8000 in any web browser and explore your research!
+
+
+
+
diff --git a/docs/docs/gpt-researcher/getting-started/how-to-choose.md b/docs/docs/gpt-researcher/getting-started/how-to-choose.md
new file mode 100644
index 0000000000000000000000000000000000000000..d3079766997d6d1b7ee2975d25a1ea26d85830d7
--- /dev/null
+++ b/docs/docs/gpt-researcher/getting-started/how-to-choose.md
@@ -0,0 +1,128 @@
+# How to Choose
+
+GPT Researcher is a powerful autonomous research agent designed to enhance and streamline your research processes. Whether you're a developer looking to integrate research capabilities into your project or an end-user seeking a comprehensive research solution, GPT Researcher offers flexible options to meet your needs.
+
+We envision a future where AI agents collaborate to complete complex tasks, with research being a critical step in the process. GPT Researcher aims to be your go-to agent for any research task, regardless of complexity. It can be easily integrated into existing agent workflows, eliminating the need to create your own research agent from scratch.
+
+## Options
+
+GPT Researcher offers multiple ways to leverage its capabilities:
+
+
+
+
+1. **GPT Researcher PIP agent**: Ideal for integrating GPT Researcher into your existing projects and workflows.
+2. **Backend**: A backend service to interact with the frontend user interfaces, offering advanced features like detailed reports.
+3. **Multi Agent System**: An advanced setup using LangGraph, offering the most comprehensive research capabilities.
+4. **Frontend**: Several front-end solutions depending on your needs, including a simple HTML/JS version and a more advanced NextJS version.
+
+## Usage Options
+
+### 1. PIP Package
+
+The PIP package is ideal for leveraging GPT Researcher as an agent in your preferred environment and code.
+
+**Pros:**
+- Easy integration into existing projects
+- Flexible usage in multi-agent systems, chains, or workflows
+- Optimized for production performance
+
+**Cons:**
+- Requires some coding knowledge
+- May need additional setup for advanced features
+
+**Installation:**
+```
+pip install gpt-researcher
+```
+
+**System Requirements:**
+- Python 3.10+
+- pip package manager
+
+**Learn More:** [PIP Documentation](https://docs.gptr.dev/docs/gpt-researcher/gptr/pip-package)
+
+### 2. End-to-End Application
+
+For a complete out-of-the-box experience, including a sleek frontend, you can clone our repository.
+
+**Pros:**
+- Ready-to-use frontend and backend services
+- Includes advanced use cases like detailed report generation
+- Optimal user experience
+
+**Cons:**
+- Less flexible than the PIP package for custom integrations
+- Requires setting up the entire application
+
+**Getting Started:**
+1. Clone the repository: `git clone https://github.com/assafelovic/gpt-researcher.git`
+2. Follow the [installation instructions](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started)
+
+**System Requirements:**
+- Git
+- Python 3.10+
+- Node.js and npm (for frontend)
+
+**Advanced Usage Example:** [Detailed Report Implementation](https://github.com/assafelovic/gpt-researcher/tree/master/backend/report_type/detailed_report)
+
+### 3. Multi Agent System with LangGraph
+
+We've collaborated with LangChain to support multi-agents with LangGraph and GPT Researcher, offering the most complex and comprehensive version of GPT Researcher.
+
+**Pros:**
+- Very detailed, customized research reports
+- Inner AI agent loops and reasoning
+
+**Cons:**
+- More expensive and time-consuming
+- Heavyweight for production use
+
+This version is recommended for local, experimental, and educational use. We're working on providing a lighter version soon!
+
+**System Requirements:**
+- Python 3.10+
+- LangGraph library
+
+**Learn More:** [GPT Researcher x LangGraph](https://docs.gptr.dev/docs/gpt-researcher/multi_agents/langgraph)
+
+## Comparison Table
+
+| Feature | PIP Package | End-to-End Application | Multi Agent System |
+|---------|-------------|------------------------|---------------------|
+| Ease of Integration | High | Medium | Low |
+| Customization | High | Medium | High |
+| Out-of-the-box UI | No | Yes | No |
+| Complexity | Low | Medium | High |
+| Best for | Developers | End-users | Researchers/Experimenters |
+
+Please note that all options have been optimized and refined for production use.
+
+## Deep Dive
+
+To learn more about each of the options, check out these docs and code snippets:
+
+1. **PIP Package**:
+ - Install: `pip install gpt-researcher`
+ - [Integration guide](https://docs.gptr.dev/docs/gpt-researcher/gptr/pip-package)
+
+2. **End-to-End Application**:
+ - Clone the repository: `git clone https://github.com/assafelovic/gpt-researcher.git`
+ - [Installation instructions](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started)
+
+3. **Multi-Agent System**:
+ - [Multi-Agents code](https://github.com/assafelovic/gpt-researcher/tree/master/multi_agents)
+ - [LangGraph documentation](https://docs.gptr.dev/docs/gpt-researcher/multi_agents/langgraph)
+ - [Blog](https://docs.gptr.dev/blog/gptr-langgraph)
+
+## Versioning and Updates
+
+GPT Researcher is actively maintained and updated. To ensure you're using the latest version:
+
+- For the PIP package: `pip install --upgrade gpt-researcher`
+- For the End-to-End Application: Pull the latest changes from the GitHub repository
+- For the Multi-Agent System: Check the documentation for compatibility with the latest LangChain and LangGraph versions
+
+## Troubleshooting and FAQs
+
+For common issues and questions, please refer to our [FAQ section](https://docs.gptr.dev/docs/faq) in the documentation.
diff --git a/docs/docs/gpt-researcher/getting-started/introduction.md b/docs/docs/gpt-researcher/getting-started/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..24ecf6505badcb2402e267fa5940d4c827459d9a
--- /dev/null
+++ b/docs/docs/gpt-researcher/getting-started/introduction.md
@@ -0,0 +1,58 @@
+# Introduction
+
+[](https://gptr.dev)
+[](https://discord.gg/QgZXvJAccX)
+
+[](https://github.com/assafelovic/gpt-researcher)
+[](https://twitter.com/assaf_elovic)
+[](https://badge.fury.io/py/gpt-researcher)
+[](https://colab.research.google.com/github/assafelovic/gpt-researcher/blob/master/docs/docs/examples/pip-run.ipynb)
+
+**[GPT Researcher](https://gptr.dev) is an autonomous agent designed for comprehensive online research on a variety of tasks.**
+
+The agent can produce detailed, factual and unbiased research reports, with customization options for focusing on relevant resources, outlines, and lessons. Inspired by the recent [Plan-and-Solve](https://arxiv.org/abs/2305.04091) and [RAG](https://arxiv.org/abs/2005.11401) papers, GPT Researcher addresses issues of speed, determinism and reliability, offering a more stable performance and increased speed through parallelized agent work, as opposed to synchronous operations.
+
+## Why GPT Researcher?
+
+- To form objective conclusions for manual research tasks can take time, sometimes weeks to find the right resources and information.
+- Current LLMs are trained on past and outdated information, with heavy risks of hallucinations, making them almost irrelevant for research tasks.
+- Current LLMs are limited to short token outputs which are not sufficient for long detailed research reports (2k+ words).
+- Solutions that enable web search (such as ChatGPT + Web Plugin), only consider limited resources and content that in some cases result in superficial conclusions or biased answers.
+- Using only a selection of resources can create bias in determining the right conclusions for research questions or tasks.
+
+## Architecture
+The main idea is to run "planner" and "execution" agents, whereas the planner generates questions to research, and the execution agents seek the most related information based on each generated research question. Finally, the planner filters and aggregates all related information and creates a research report.
+The agents leverage both gpt-4o-mini and gpt-4o (128K context) to complete a research task. We optimize for costs using each only when necessary. **The average research task takes around 3 minutes to complete, and costs ~$0.1.**
+
+
+
+
+
+
+More specifically:
+* Create a domain specific agent based on research query or task.
+* Generate a set of research questions that together form an objective opinion on any given task.
+* For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task.
+* For each scraped resources, summarize based on relevant information and keep track of its sources.
+* Finally, filter and aggregate all summarized sources and generate a final research report.
+
+## Demo
+
+
+## Tutorials
+ - [Full Introduction Playlist](https://www.youtube.com/playlist?list=PLUGOUZPIB0F-qv6MvKq3HGr0M_b3U2ATv)
+ - [How it Works](https://medium.com/better-programming/how-i-built-an-autonomous-ai-agent-for-online-research-93435a97c6c)
+ - [How to Install](https://www.loom.com/share/04ebffb6ed2a4520a27c3e3addcdde20?sid=da1848e8-b1f1-42d1-93c3-5b0b9c3b24ea)
+ - [Live Demo](https://www.loom.com/share/6a3385db4e8747a1913dd85a7834846f?sid=a740fd5b-2aa3-457e-8fb7-86976f59f9b8)
+ - [Homepage](https://gptr.dev)
+
+## Features
+- 📝 Generate research, outlines, resources and lessons reports
+- 📜 Can generate long and detailed research reports (over 2K words)
+- 🌐 Aggregates over 20 web sources per research to form objective and factual conclusions
+- 🖥️ Includes an easy-to-use web interface (HTML/CSS/JS)
+- 🔍 Scrapes web sources with javascript support
+- 📂 Keeps track and context of visited and used web sources
+- 📄 Export research reports to PDF, Word and more...
+
+Let's get started [here](/docs/gpt-researcher/getting-started/getting-started)!
diff --git a/docs/docs/gpt-researcher/getting-started/linux-deployment.md b/docs/docs/gpt-researcher/getting-started/linux-deployment.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb0f19f0ccdd50020b89cdde20c94e19600e6961
--- /dev/null
+++ b/docs/docs/gpt-researcher/getting-started/linux-deployment.md
@@ -0,0 +1,167 @@
+# Running on Linux
+
+This guide will walk you through the process of deploying GPT Researcher on a Linux server.
+
+## Server Requirements
+
+The default Ubuntu droplet option on [DigitalOcean](https://m.do.co/c/1a2af257efba) works well, but this setup should work on any hosting service with similar specifications:
+
+- 2 GB RAM
+- 1 vCPU
+- 50 GB SSD Storage
+
+Here's a screenshot of the recommended Ubuntu machine specifications:
+
+
+
+## Deployment Steps
+
+After setting up your server, follow these steps to install Docker, Docker Compose, and Nginx.
+
+
+Some more commands to achieve that:
+
+### Step 1: Update the System
+### First, ensure your package index is up-to-date:
+
+```bash
+sudo apt update
+### Step 2: Install Git
+### Git is a version control system. Install it using:
+
+sudo apt install git -y
+
+### Verify the installation by checking the Git version:
+git --version
+### Step 3: Install Docker
+### Docker is a platform for developing, shipping, and running applications inside containers.
+
+### Install prerequisites:
+
+sudo apt install apt-transport-https ca-certificates curl software-properties-common -y
+### Add Docker’s official GPG key:
+
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+### Set up the stable repository:
+
+echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+### Update the package index again and install Docker:
+
+sudo apt update
+sudo apt install docker-ce -y
+### Verify Docker installation:
+
+sudo systemctl status docker
+### Optionally, add your user to the docker group to run Docker without sudo:
+
+sudo usermod -aG docker ${USER}
+### Log out and back in for the group change to take effect.
+
+Step 4: Install Nginx
+### Nginx is a high-performance web server.
+
+### Install Nginx:
+
+sudo apt install nginx -y
+### Start and enable Nginx:
+
+sudo systemctl start nginx
+sudo systemctl enable nginx
+### Verify Nginx installation:
+
+sudo systemctl status nginx
+```
+
+Here's your nginx config file:
+
+```bash
+events {}
+
+http {
+ server {
+ listen 80;
+ server_name name.example;
+
+ client_max_body_size 64M;
+
+ location / {
+ proxy_pass http://localhost:3000;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+
+ location ~ ^/(ws|upload|files|outputs|getConfig|setConfig) {
+ proxy_pass http://localhost:8000;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "Upgrade";
+ proxy_set_header Host $host;
+ }
+ }
+}
+```
+
+And if you're using SSL:
+
+```nginx
+server {
+ server_name name.example;
+
+ client_max_body_size 64M;
+
+ location / {
+ proxy_pass http://localhost:3000;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+
+ location ~ ^/(ws|upload|files|outputs|getConfig|setConfig) {
+ proxy_pass http://localhost:8000;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "Upgrade";
+ proxy_set_header Host $host;
+ }
+
+ listen 443 ssl; # managed by Certbot
+ ssl_certificate /etc/letsencrypt/live/name.example/fullchain.pem; # managed by Certbot
+ ssl_certificate_key /etc/letsencrypt/live/name.example/privkey.pem; # managed by Certbot
+ include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
+ ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
+}
+
+server {
+ if ($host = name.example) {
+ return 301 https://$host$request_uri;
+ } # managed by Certbot
+
+ listen 80;
+ server_name name.example;
+ return 404; # managed by Certbot
+}
+```
+
+And the relevant commands:
+
+
+```bash
+vim /etc/nginx/nginx.conf
+### Edit it to reflect above. Then verify all is good with:
+
+sudo nginx -t
+# If there are no errors:
+
+sudo systemctl restart nginx
+
+# Clone .env.example as .env
+# Run from root:
+
+docker-compose up --build
+
+```
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/gptr/automated-tests.md b/docs/docs/gpt-researcher/gptr/automated-tests.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f6f3d08f14f9bf0c0650a072213111865eff047
--- /dev/null
+++ b/docs/docs/gpt-researcher/gptr/automated-tests.md
@@ -0,0 +1,43 @@
+# Automated Tests
+
+## Automated Testing with Github Actions
+
+This repository contains the code for the automated testing of the GPT-Researcher Repo using Github Actions.
+
+The tests are triggered in a docker container which runs the tests via the `pytest` module.
+
+## Running the Tests
+
+You can run the tests:
+
+### Via a docker command
+
+```bash
+docker-compose --profile test run --rm gpt-researcher-tests
+```
+
+### Via a Github Action
+
+
+
+Attaching here the required settings & screenshots on the github repo level:
+
+Step 1: Within the repo, press the "Settings" tab
+
+Step 2: Create a new environment named "tests" (all lowercase)
+
+Step 3: Click into the "tests" environment & add environment secrets of ```OPENAI_API_KEY``` & ```TAVILY_API_KEY```
+
+Get the keys from here:
+
+https://app.tavily.com/sign-in
+
+https://platform.openai.com/api-keys
+
+
+
+
+
+If configured correctly, here's what the Github action should look like when opening a new PR or committing to an open PR:
+
+
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/gptr/config.md b/docs/docs/gpt-researcher/gptr/config.md
new file mode 100644
index 0000000000000000000000000000000000000000..3203cc4123a4a68471b0e83ef27c98c916794329
--- /dev/null
+++ b/docs/docs/gpt-researcher/gptr/config.md
@@ -0,0 +1,80 @@
+# Configuration
+
+The config.py enables you to customize GPT Researcher to your specific needs and preferences.
+
+Thanks to our amazing community and contributions, GPT Researcher supports multiple LLMs and Retrievers.
+In addition, GPT Researcher can be tailored to various report formats (such as APA), word count, research iterations depth, etc.
+
+GPT Researcher defaults to our recommended suite of integrations: [OpenAI](https://platform.openai.com/docs/overview) for LLM calls and [Tavily API](https://app.tavily.com) for retrieving real-time web information.
+
+As seen below, OpenAI still stands as the superior LLM. We assume it will stay this way for some time, and that prices will only continue to decrease, while performance and speed increase over time.
+
+
+
+
+
+The default config.py file can be found in `/gpt_researcher/config/`. It supports various options for customizing GPT Researcher to your needs.
+You can also include your own external JSON file `config.json` by adding the path in the `config_file` param. **Please follow the config.py file for additional future support**.
+
+Below is a list of current supported options:
+
+- **`RETRIEVER`**: Web search engine used for retrieving sources. Defaults to `tavily`. Options: `duckduckgo`, `bing`, `google`, `searchapi`, `serper`, `searx`. [Check here](https://github.com/assafelovic/gpt-researcher/tree/master/gpt_researcher/retrievers) for supported retrievers
+- **`EMBEDDING`**: Embedding model. Defaults to `openai:text-embedding-3-small`. Options: `ollama`, `huggingface`, `azure_openai`, `custom`.
+- **`FAST_LLM`**: Model name for fast LLM operations such summaries. Defaults to `openai:gpt-4o-mini`.
+- **`SMART_LLM`**: Model name for smart operations like generating research reports and reasoning. Defaults to `openai:gpt-4o`.
+- **`STRATEGIC_LLM`**: Model name for strategic operations like generating research plans and strategies. Defaults to `openai:o1-preview`.
+- **`LANGUAGE`**: Language to be used for the final research report. Defaults to `english`.
+- **`CURATE_SOURCES`**: Whether to curate sources for research. This step adds an LLM run which may increase costs and total run time but improves quality of source selection. Defaults to `True`.
+- **`FAST_TOKEN_LIMIT`**: Maximum token limit for fast LLM responses. Defaults to `2000`.
+- **`SMART_TOKEN_LIMIT`**: Maximum token limit for smart LLM responses. Defaults to `4000`.
+- **`STRATEGIC_TOKEN_LIMIT`**: Maximum token limit for strategic LLM responses. Defaults to `4000`.
+- **`BROWSE_CHUNK_MAX_LENGTH`**: Maximum length of text chunks to browse in web sources. Defaults to `8192`.
+- **`SUMMARY_TOKEN_LIMIT`**: Maximum token limit for generating summaries. Defaults to `700`.
+- **`TEMPERATURE`**: Sampling temperature for LLM responses, typically between 0 and 1. A higher value results in more randomness and creativity, while a lower value results in more focused and deterministic responses. Defaults to `0.55`.
+- **`TOTAL_WORDS`**: Total word count limit for document generation or processing tasks. Defaults to `800`.
+- **`REPORT_FORMAT`**: Preferred format for report generation. Defaults to `APA`. Consider formats like `MLA`, `CMS`, `Harvard style`, `IEEE`, etc.
+- **`MAX_ITERATIONS`**: Maximum number of iterations for processes like query expansion or search refinement. Defaults to `3`.
+- **`AGENT_ROLE`**: Role of the agent. This might be used to customize the behavior of the agent based on its assigned roles. No default value.
+- **`MAX_SUBTOPICS`**: Maximum number of subtopics to generate or consider. Defaults to `3`.
+- **`SCRAPER`**: Web scraper to use for gathering information. Defaults to `bs` (BeautifulSoup). You can also use [newspaper](https://github.com/codelucas/newspaper).
+- **`DOC_PATH`**: Path to read and research local documents. Defaults to an empty string indicating no path specified.
+- **`USER_AGENT`**: Custom User-Agent string for web crawling and web requests.
+- **`MEMORY_BACKEND`**: Backend used for memory operations, such as local storage of temporary data. Defaults to `local`.
+
+To change the default configurations, you can simply add env variables to your `.env` file as named above or export manually in your local project directory.
+
+For example, to manually change the search engine and report format:
+```bash
+export RETRIEVER=bing
+export REPORT_FORMAT=IEEE
+```
+Please note that you might need to export additional env vars and obtain API keys for other supported search retrievers and LLM providers. Please follow your console logs for further assistance.
+To learn more about additional LLM support you can check out the docs [here](/docs/gpt-researcher/llms/llms).
+
+You can also include your own external JSON file `config.json` by adding the path in the `config_file` param.
+
+## Example: Azure OpenAI Configuration
+
+If you are not using OpenAI's models, but other model providers, besides the general configuration above, also additional environment variables are required.
+Check the [langchain documentation](https://python.langchain.com/v0.2/docs/integrations/platforms/) about your model for the exact configuration of the API keys and endpoints.
+
+Here is an example for [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models) configuration:
+
+```bash
+
+OPENAI_API_VERSION="2024-05-01-preview" # or whatever you are using
+AZURE_OPENAI_ENDPOINT="https://CHANGEMEN.openai.azure.com/" # change to the name of your deployment
+AZURE_OPENAI_API_KEY="[Your Key]" # change to your API key
+
+EMBEDDING="azure_openai:text-embedding-ada-002" # change to the deployment of your embedding model
+
+FAST_LLM="azure_openai:gpt-4o-mini" # change to the name of your deployment (not model-name)
+FAST_TOKEN_LIMIT=4000
+
+SMART_LLM="azure_openai:gpt-4o" # change to the name of your deployment (not model-name)
+SMART_TOKEN_LIMIT=4000
+
+RETRIEVER="bing" # if you are using Bing as your search engine (which is likely if you use Azure)
+BING_API_KEY="[Your Key]"
+
+```
diff --git a/docs/docs/gpt-researcher/gptr/example.md b/docs/docs/gpt-researcher/gptr/example.md
new file mode 100644
index 0000000000000000000000000000000000000000..cc38e3f7d98334f644c0f4d1b89d68f94dc75113
--- /dev/null
+++ b/docs/docs/gpt-researcher/gptr/example.md
@@ -0,0 +1,32 @@
+# Agent Example
+
+If you're interested in using GPT Researcher as a standalone agent, you can easily import it into any existing Python project. Below, is an example of calling the agent to generate a research report:
+
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+async def fetch_report(query):
+ """
+ Fetch a research report based on the provided query and report type.
+ """
+ researcher = GPTResearcher(query=query)
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report
+
+async def generate_research_report(query):
+ """
+ This is a sample script that executes an async main function to run a research report.
+ """
+ report = await fetch_report(query)
+ print(report)
+
+if __name__ == "__main__":
+ QUERY = "What happened in the latest burning man floods?"
+ asyncio.run(generate_research_report(query=QUERY))
+```
+
+You can further enhance this example to use the returned report as context for generating valuable content such as news article, marketing content, email templates, newsletters, etc.
+
+You can also use GPT Researcher to gather information about code documentation, business analysis, financial information and more. All of which can be used to complete much more complex tasks that require factual and high quality realtime information.
diff --git a/docs/docs/gpt-researcher/gptr/handling-logs-as-they-stream.md b/docs/docs/gpt-researcher/gptr/handling-logs-as-they-stream.md
new file mode 100644
index 0000000000000000000000000000000000000000..f013c041d89fe7a58fc8d9dc9231096aff0b6d01
--- /dev/null
+++ b/docs/docs/gpt-researcher/gptr/handling-logs-as-they-stream.md
@@ -0,0 +1,64 @@
+# Handling Logs
+
+Here is a snippet of code to help you handle the streaming logs of your Research tasks.
+
+```python
+from typing import Dict, Any
+import asyncio
+from gpt_researcher import GPTResearcher
+
+class CustomLogsHandler:
+ """A custom Logs handler class to handle JSON data."""
+ def __init__(self):
+ self.logs = [] # Initialize logs to store data
+
+ async def send_json(self, data: Dict[str, Any]) -> None:
+ """Send JSON data and log it."""
+ self.logs.append(data) # Append data to logs
+ print(f"My custom Log: {data}") # For demonstration, print the log
+
+async def run():
+ # Define the necessary parameters with sample values
+
+ query = "What happened in the latest burning man floods?"
+ report_type = "research_report" # Type of report to generate
+ report_source = "online" # Could specify source like 'online', 'books', etc.
+ tone = "informative" # Tone of the report ('informative', 'casual', etc.)
+ config_path = None # Path to a config file, if needed
+
+ # Initialize researcher with a custom WebSocket
+ custom_logs_handler = CustomLogsHandler()
+
+ researcher = GPTResearcher(
+ query=query,
+ report_type=report_type,
+ report_source=report_source,
+ tone=tone,
+ config_path=config_path,
+ websocket=custom_logs_handler
+ )
+
+ await researcher.conduct_research() # Conduct the research
+ report = await researcher.write_report() # Write the research report
+
+ return report
+
+# Run the asynchronous function using asyncio
+if __name__ == "__main__":
+ asyncio.run(run())
+```
+
+The data from the research process will be logged and stored in the `CustomLogsHandler` instance. You can customize the logging behavior as needed for your application.
+
+Here's a sample of the output:
+
+```
+{
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "✅ Added source url to research: https://www.npr.org/2023/09/28/1202110410/how-rumors-and-conspiracy-theories-got-in-the-way-of-mauis-fire-recovery\n",
+ "metadata": "https://www.npr.org/2023/09/28/1202110410/how-rumors-and-conspiracy-theories-got-in-the-way-of-mauis-fire-recovery"
+}
+```
+
+The `metadata` field will include whatever metadata is relevant to the log entry. Let the script above run to completion for the full logs output of a given research task.
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/gptr/pip-package.md b/docs/docs/gpt-researcher/gptr/pip-package.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e9d60bcb28bd1d1ed80465ee86ee3e06d032369
--- /dev/null
+++ b/docs/docs/gpt-researcher/gptr/pip-package.md
@@ -0,0 +1,272 @@
+# PIP Package
+[](https://badge.fury.io/py/gpt-researcher)
+[](https://colab.research.google.com/github/assafelovic/gpt-researcher/blob/master/docs/docs/examples/pip-run.ipynb)
+
+🌟 **Exciting News!** Now, you can integrate `gpt-researcher` with your apps seamlessly!
+
+## Steps to Install GPT Researcher
+
+Follow these easy steps to get started:
+
+0. **Pre-requisite**: Ensure Python 3.10+ is installed on your machine 💻
+1. **Install gpt-researcher**: Grab the official package from [PyPi](https://pypi.org/project/gpt-researcher/).
+
+```bash
+pip install gpt-researcher
+```
+
+2. **Environment Variables:** Create a .env file with your OpenAI API key or simply export it
+
+```bash
+export OPENAI_API_KEY={Your OpenAI API Key here}
+```
+
+```bash
+export TAVILY_API_KEY={Your Tavily API Key here}
+```
+
+3. **Start using GPT Researcher in your own codebase**
+
+## Example Usage
+
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+async def get_report(query: str, report_type: str):
+ researcher = GPTResearcher(query, report_type)
+ research_result = await researcher.conduct_research()
+ report = await researcher.write_report()
+
+ # Get additional information
+ research_context = researcher.get_research_context()
+ research_costs = researcher.get_costs()
+ research_images = researcher.get_research_images()
+ research_sources = researcher.get_research_sources()
+
+ return report, research_context, research_costs, research_images, research_sources
+
+if __name__ == "__main__":
+ query = "what team may win the NBA finals?"
+ report_type = "research_report"
+
+ report, context, costs, images, sources = asyncio.run(get_report(query, report_type))
+
+ print("Report:")
+ print(report)
+ print("\nResearch Costs:")
+ print(costs)
+ print("\nNumber of Research Images:")
+ print(len(images))
+ print("\nNumber of Research Sources:")
+ print(len(sources))
+```
+
+## Specific Examples
+
+### Example 1: Research Report
+
+```python
+query = "Latest developments in renewable energy technologies"
+report_type = "research_report"
+```
+
+### Example 2: Resource Report
+
+```python
+query = "List of top AI conferences in 2023"
+report_type = "resource_report"
+```
+
+### Example 3: Outline Report
+
+```python
+query = "Outline for an article on the impact of AI in education"
+report_type = "outline_report"
+```
+
+## Integration with Web Frameworks
+
+### FastAPI Example
+
+```python
+from fastapi import FastAPI
+from gpt_researcher import GPTResearcher
+import asyncio
+
+app = FastAPI()
+
+@app.get("/report/{report_type}")
+async def get_report(query: str, report_type: str) -> dict:
+ researcher = GPTResearcher(query, report_type)
+ research_result = await researcher.conduct_research()
+ report = await researcher.write_report()
+
+ source_urls = researcher.get_source_urls()
+ research_costs = researcher.get_costs()
+ research_images = researcher.get_research_images()
+ research_sources = researcher.get_research_sources()
+
+ return {
+ "report": report,
+ "source_urls": source_urls,
+ "research_costs": research_costs,
+ "num_images": len(research_images),
+ "num_sources": len(research_sources)
+ }
+
+# Run the server
+# uvicorn main:app --reload
+```
+
+### Flask Example
+
+**Pre-requisite**: Install flask with the async extra.
+
+```bash
+pip install 'flask[async]'
+```
+
+```python
+from flask import Flask, request, jsonify
+from gpt_researcher import GPTResearcher
+
+app = Flask(__name__)
+
+@app.route('/report/', methods=['GET'])
+async def get_report(report_type):
+ query = request.args.get('query')
+ researcher = GPTResearcher(query, report_type)
+ research_result = await researcher.conduct_research()
+ report = await researcher.write_report()
+
+ source_urls = researcher.get_source_urls()
+ research_costs = researcher.get_costs()
+ research_images = researcher.get_research_images()
+ research_sources = researcher.get_research_sources()
+
+ return jsonify({
+ "report": report,
+ "source_urls": source_urls,
+ "research_costs": research_costs,
+ "num_images": len(research_images),
+ "num_sources": len(research_sources)
+ })
+
+# Run the server
+# flask run
+```
+
+**Run the server**
+
+```bash
+flask run
+```
+
+**Example Request**
+
+```bash
+curl -X GET "http://localhost:5000/report/research_report?query=what team may win the nba finals?"
+```
+
+## Getters and Setters
+GPT Researcher provides several methods to retrieve additional information about the research process:
+
+### Get Research Sources
+Sources are the URLs that were used to gather information for the research.
+```python
+source_urls = researcher.get_source_urls()
+```
+
+### Get Research Context
+Context is all the retrieved information from the research. It includes the sources and their corresponding content.
+```python
+research_context = researcher.get_research_context()
+```
+
+### Get Research Costs
+Costs are the number of tokens consumed during the research process.
+```python
+research_costs = researcher.get_costs()
+```
+
+### Get Research Images
+Retrieves a list of images found during the research process.
+```python
+research_images = researcher.get_research_images()
+```
+
+### Get Research Sources
+Retrieves a list of research sources, including title, content, and images.
+```python
+research_sources = researcher.get_research_sources()
+```
+
+### Set Verbose
+You can set the verbose mode to get more detailed logs.
+```python
+researcher.set_verbose(True)
+```
+
+### Add Costs
+You can also add costs to the research process if you want to track the costs from external usage.
+```python
+researcher.add_costs(0.22)
+```
+
+## Advanced Usage
+
+### Customizing the Research Process
+
+You can customize various aspects of the research process by passing additional parameters when initializing the GPTResearcher:
+
+```python
+researcher = GPTResearcher(
+ query="Your research query",
+ report_type="research_report",
+ report_format="APA",
+ tone="formal and objective",
+ max_subtopics=5,
+ verbose=True
+)
+```
+
+### Handling Research Results
+
+After conducting research, you can process the results in various ways:
+
+```python
+# Conduct research
+research_result = await researcher.conduct_research()
+
+# Generate a report
+report = await researcher.write_report()
+
+# Generate a conclusion
+conclusion = await researcher.write_report_conclusion(report)
+
+# Get subtopics
+subtopics = await researcher.get_subtopics()
+
+# Get draft section titles for a subtopic
+draft_titles = await researcher.get_draft_section_titles("Subtopic name")
+```
+
+### Working with Research Context
+
+You can use the research context for further processing or analysis:
+
+```python
+# Get the full research context
+context = researcher.get_research_context()
+
+# Get similar written contents based on draft section titles
+similar_contents = await researcher.get_similar_written_contents_by_draft_section_titles(
+ current_subtopic="Subtopic name",
+ draft_section_titles=["Title 1", "Title 2"],
+ written_contents=some_written_contents,
+ max_results=10
+)
+```
+
+This comprehensive documentation should help users understand and utilize the full capabilities of the GPT Researcher package.
diff --git a/docs/docs/gpt-researcher/gptr/querying-the-backend.md b/docs/docs/gpt-researcher/gptr/querying-the-backend.md
new file mode 100644
index 0000000000000000000000000000000000000000..993289a760960a7e391acf9a7f6c1da37b868305
--- /dev/null
+++ b/docs/docs/gpt-researcher/gptr/querying-the-backend.md
@@ -0,0 +1,106 @@
+# Querying the Backend
+
+## Introduction
+
+In this section, we will discuss how to query the GPTR backend server. The GPTR backend server is a Python server that runs the GPTR Python package. The server listens for WebSocket connections and processes incoming messages to generate reports, streaming back logs and results to the client.
+
+An example WebSocket client is implemented in the `gptr-webhook.js` file below.
+
+This function sends a Webhook Message to the GPTR Python backend running on localhost:8000, but this example can also be modified to query a [GPTR Server hosted on Linux](https://docs.gptr.dev/docs/gpt-researcher/getting-started/linux-deployment).
+
+// gptr-webhook.js
+
+```javascript
+
+const WebSocket = require('ws');
+
+let socket = null;
+let responseCallback = null;
+
+async function initializeWebSocket() {
+ if (!socket) {
+ const host = 'localhost:8000';
+ const ws_uri = `ws://${host}/ws`;
+
+ socket = new WebSocket(ws_uri);
+
+ socket.onopen = () => {
+ console.log('WebSocket connection established');
+ };
+
+ socket.onmessage = (event) => {
+ const data = JSON.parse(event.data);
+ console.log('WebSocket data received:', data);
+
+ if (data.content === 'dev_team_result'
+ && data.output.rubber_ducker_thoughts != undefined
+ && data.output.tech_lead_review != undefined) {
+ if (responseCallback) {
+ responseCallback(data.output);
+ responseCallback = null; // Clear callback after use
+ }
+ } else {
+ console.log('Received data:', data);
+ }
+ };
+
+ socket.onclose = () => {
+ console.log('WebSocket connection closed');
+ socket = null;
+ };
+
+ socket.onerror = (error) => {
+ console.error('WebSocket error:', error);
+ };
+ }
+}
+
+async function sendWebhookMessage(message) {
+ return new Promise((resolve, reject) => {
+ if (!socket || socket.readyState !== WebSocket.OPEN) {
+ initializeWebSocket();
+ }
+
+ const data = {
+ task: message,
+ report_type: 'dev_team',
+ report_source: 'web',
+ tone: 'Objective',
+ headers: {},
+ repo_name: 'elishakay/gpt-researcher'
+ };
+
+ const payload = "start " + JSON.stringify(data);
+
+ responseCallback = (response) => {
+ resolve(response); // Resolve the promise with the WebSocket response
+ };
+
+ if (socket.readyState === WebSocket.OPEN) {
+ socket.send(payload);
+ console.log('Message sent:', payload);
+ } else {
+ socket.onopen = () => {
+ socket.send(payload);
+ console.log('Message sent after connection:', payload);
+ };
+ }
+ });
+}
+
+module.exports = {
+ sendWebhookMessage
+};
+```
+
+And here's how you can leverage this helper function:
+
+```javascript
+const { sendWebhookMessage } = require('./gptr-webhook');
+
+async function main() {
+ const message = 'How do I get started with GPT-Researcher Websockets?';
+ const response = await sendWebhookMessage(message);
+ console.log('Response:', response);
+}
+```
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/gptr/scraping.md b/docs/docs/gpt-researcher/gptr/scraping.md
new file mode 100644
index 0000000000000000000000000000000000000000..cca9e4b3beae6c6b2e62077bfb571c5c506987dc
--- /dev/null
+++ b/docs/docs/gpt-researcher/gptr/scraping.md
@@ -0,0 +1,133 @@
+# Scraping Options
+
+GPT Researcher now offers various methods for web scraping: static scraping with BeautifulSoup, dynamic scraping with Selenium, and High scale scraping with Tavily Extract. This document explains how to switch between these methods and the benefits of each approach.
+
+## Configuring Scraping Method
+
+You can choose your preferred scraping method by setting the `SCRAPER` environment variable:
+
+1. For BeautifulSoup (static scraping):
+ ```
+ export SCRAPER="bs"
+ ```
+
+2. For Selenium (dynamic browser scraping):
+ ```
+ export SCRAPER="browser"
+ ```
+
+3. For **production** use cases, you can set the Scraper to `tavily_extract`. [Tavily](https://tavily.com) allows you to scrape sites at scale without the hassle of setting up proxies, managing cookies, or dealing with CAPTCHAs. Please note that you need to have a Tavily account and [API key](https://app.tavily.com) to use this option. To learn more about Tavily Extract [see here](https://docs.tavily.com/docs/python-sdk/tavily-extract/getting-started).
+ Make sure to first install the pip package `tavily-python`. Then:
+ ```
+ export SCRAPER="tavily_extract"
+ ```
+
+Note: If not set, GPT Researcher will default to BeautifulSoup for scraping.
+
+## Scraping Methods Explained
+
+### BeautifulSoup (Static Scraping)
+
+When `SCRAPER="bs"`, GPT Researcher uses BeautifulSoup for static scraping. This method:
+
+- Sends a single HTTP request to fetch the page content
+- Parses the static HTML content
+- Extracts text and data from the parsed HTML
+
+Benefits:
+- Faster and more lightweight
+- Doesn't require additional setup
+- Works well for simple, static websites
+
+Limitations:
+- Cannot handle dynamic content loaded by JavaScript
+- May miss content that requires user interaction to display
+
+### Selenium (Browser Scraping)
+
+When `SCRAPER="browser"`, GPT Researcher uses Selenium for dynamic scraping. This method:
+
+- Opens a real browser instance (Chrome by default)
+- Loads the page and executes JavaScript
+- Waits for dynamic content to load
+- Extracts text and data from the fully rendered page
+
+Benefits:
+- Can scrape dynamically loaded content
+- Simulates real user interactions (scrolling, clicking, etc.)
+- Works well for complex, JavaScript-heavy websites
+
+Limitations:
+- Slower than static scraping
+- Requires more system resources
+- Requires additional setup (Selenium and WebDriver installation)
+
+### Tavily Extract (Recommended for Production)
+
+When `SCRAPER="tavily_extract"`, GPT Researcher uses Tavily's Extract API for web scraping. This method:
+
+- Uses Tavily's robust infrastructure to handle web scraping at scale
+- Automatically handles CAPTCHAs, JavaScript rendering, and anti-bot measures
+- Provides clean, structured content extraction
+
+Benefits:
+- Production-ready and highly reliable
+- No need to manage proxies or handle rate limiting
+- Excellent success rate on most websites
+- Handles both static and dynamic content
+- Built-in content cleaning and formatting
+- Fast response times through Tavily's distributed infrastructure
+
+Setup:
+1. Create a Tavily account at [app.tavily.com](https://app.tavily.com)
+2. Get your API key from the dashboard
+3. Install the Tavily Python SDK:
+ ```bash
+ pip install tavily-python
+ ```
+4. Set your Tavily API key:
+ ```bash
+ export TAVILY_API_KEY="your-api-key"
+ ```
+
+Usage Considerations:
+- Requires a Tavily API key and account
+- API calls are metered based on your Tavily plan
+- Best for production environments where reliability is crucial
+- Ideal for businesses and applications that need consistent scraping results
+
+## Additional Setup for Selenium
+
+If you choose to use Selenium (SCRAPER="browser"), you'll need to:
+
+1. Install the Selenium package:
+ ```
+ pip install selenium
+ ```
+
+2. Download the appropriate WebDriver for your browser:
+ - For Chrome: [ChromeDriver](https://sites.google.com/a/chromium.org/chromedriver/downloads)
+ - For Firefox: [GeckoDriver](https://github.com/mozilla/geckodriver/releases)
+ - For Safari: Built-in, no download required
+
+ Ensure the WebDriver is in your system's PATH.
+
+## Choosing the Right Method
+
+- Use BeautifulSoup (static) for:
+ - Simple websites with mostly static content
+ - Scenarios where speed is a priority
+ - When you don't need to interact with the page
+
+- Use Selenium (dynamic) for:
+ - Websites with content loaded via JavaScript
+ - Sites that require scrolling or clicking to load more content
+ - When you need to simulate user interactions
+
+## Troubleshooting
+
+- If Selenium fails to start, ensure you have the correct WebDriver installed and it's in your system's PATH.
+- If you encounter an `ImportError` related to Selenium, make sure you've installed the Selenium package.
+- If the scraper misses expected content, try switching between static and dynamic scraping to see which works better for your target website.
+
+Remember, the choice between static and dynamic scraping can significantly impact the quality and completeness of the data GPT Researcher can gather. Choose the method that best suits your research needs and the websites you're targeting.
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/gptr/troubleshooting.md b/docs/docs/gpt-researcher/gptr/troubleshooting.md
new file mode 100644
index 0000000000000000000000000000000000000000..4eb444adb505b0f5289b51a963f557a1c21bd3a3
--- /dev/null
+++ b/docs/docs/gpt-researcher/gptr/troubleshooting.md
@@ -0,0 +1,56 @@
+# Troubleshooting
+
+We're constantly working to provide a more stable version. If you're running into any issues, please first check out the resolved issues or ask us via our [Discord community](https://discord.gg/QgZXvJAccX).
+
+### model: gpt-4 does not exist
+This relates to not having permission to use gpt-4 yet. Based on OpenAI, it will be [widely available for all by end of July](https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4).
+
+### cannot load library 'gobject-2.0-0'
+
+The issue relates to the library WeasyPrint (which is used to generate PDFs from the research report). Please follow this guide to resolve it: https://doc.courtbouillon.org/weasyprint/stable/first_steps.html
+
+Or you can install this package manually
+
+In case of MacOS you can install this lib using
+`brew install glib pango`
+If you face an issue with linking afterward, you can try running `brew link glib`
+
+In case of Linux you can install this lib using
+`sudo apt install libglib2.0-dev`
+
+### cannot load library 'pango'
+
+In case of MacOS you can install this lib using
+`brew install pango`
+
+In case of Linux you can install this lib using
+`sudo apt install libpango-1.0-0`
+
+**Workaround for Mac M chip users**
+
+If the above solutions don't work, you can try the following:
+- Install a fresh version of Python 3.11 pointed to brew:
+`brew install python@3.11`
+- Install the required libraries:
+`brew install pango glib gobject-introspection`
+- Install the required GPT Researcher Python packages:
+`pip3.11 install -r requirements.txt`
+- Run the app with Python 3.11 (using brew):
+`python3.11 -m uvicorn main:app --reload`
+
+### Error processing the url
+
+We're using [Selenium](https://www.selenium.dev) for site scraping. Some sites fail to be scraped. In these cases, restart and try running again.
+
+
+### Chrome version issues
+
+Many users have an issue with their chromedriver because the latest chrome browser version doesn't have a compatible chrome driver yet.
+
+To downgrade your Chrome web browser using [slimjet](https://www.slimjet.com/chrome/google-chrome-old-version.php), follow these steps. First, visit the website and scroll down to find the list of available older Chrome versions. Choose the version you wish to install
+making sure it's compatible with your operating system.
+Once you've selected the desired version, click on the corresponding link to download the installer. Before proceeding with the installation, it's crucial to uninstall your current version of Chrome to avoid conflicts.
+
+It's important to check if the version you downgrade to, has a chromedriver available in the official [chrome driver website](https://chromedriver.chromium.org/downloads)
+
+**If none of the above work, you can [try out our hosted beta](https://app.tavily.com)**
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/llms/llms.md b/docs/docs/gpt-researcher/llms/llms.md
new file mode 100644
index 0000000000000000000000000000000000000000..c32451f8535f4c9f3c4f76e2721097829096694a
--- /dev/null
+++ b/docs/docs/gpt-researcher/llms/llms.md
@@ -0,0 +1,285 @@
+# Configure LLM
+
+As described in the [introduction](/docs/gpt-researcher/gptr/config), the default LLM and embedding is OpenAI due to its superior performance and speed.
+With that said, GPT Researcher supports various open/closed source LLMs and embeddings, and you can easily switch between them by updating the `SMART_LLM`, `FAST_LLM` and `EMBEDDING` env variables. You might also need to include the provider API key and corresponding configuration params.
+
+Current supported LLMs are `openai`, `anthropic`, `azure_openai`, `cohere`, `google_vertexai`, `google_genai`, `fireworks`, `ollama`, `together`, `mistralai`, `huggingface`, `groq`, `bedrock` and `litellm`.
+
+Current supported embeddings are `openai`, `azure_openai`, `cohere`, `google_vertexai`, `google_genai`, `fireworks`, `ollama`, `together`, `mistralai`, `huggingface`, `nomic` ,`voyageai` and `bedrock`.
+
+To learn more about support customization options see [here](/gpt-researcher/config).
+
+**Please note**: GPT Researcher is optimized and heavily tested on GPT models. Some other models might run into context limit errors, and unexpected responses.
+Please provide any feedback in our [Discord community](https://discord.gg/DUmbTebB) channel, so we can better improve the experience and performance.
+
+Below you can find examples for how to configure the various supported LLMs.
+
+## OpenAI
+
+```bash
+# set the custom OpenAI API key
+OPENAI_API_KEY=[Your Key]
+
+# specify llms
+FAST_LLM="openai:gpt-4o-mini"
+SMART_LLM="openai:gpt-4o"
+STRATEGIC_LLM="openai:o1-preview"
+
+# specify embedding
+EMBEDDING="openai:text-embedding-3-small"
+```
+
+
+## Custom LLM
+
+Create a local OpenAI API using [llama.cpp Server](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md#quick-start).
+
+For custom LLM, specify "openai:{your-llm}"
+```bash
+# set the custom OpenAI API url
+OPENAI_BASE_URL="http://localhost:1234/v1"
+# set the custom OpenAI API key
+OPENAI_API_KEY="dummy_key"
+
+# specify custom llms
+FAST_LLM="openai:your_fast_llm"
+SMART_LLM="openai:your_smart_llm"
+STRATEGIC_LLM="openai:your_strategic_llm"
+```
+
+For custom embedding, set "custom:{your-embedding}"
+```bash
+# set the custom OpenAI API url
+OPENAI_BASE_URL="http://localhost:1234/v1"
+# set the custom OpenAI API key
+OPENAI_API_KEY="dummy_key"
+
+# specify the custom embedding model
+EMBEDDING="custom:your_embedding"
+```
+
+
+## Azure OpenAI
+
+See also the documentation in the Langchain [Azure OpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html) page.
+
+On Azure OpenAI you will need to create deployments for each model you want to use. Please also specify the model names/deployment names in your `.env` file:
+
+Required Embedding Model:
+To ensure optimal performance, GPT Researcher requires the text-embedding-3-large model. Please deploy this specific model to your Azure Endpoint.
+
+```bash
+AZURE_OPENAI_API_KEY=[Your Key]
+AZURE_OPENAI_ENDPOINT=https://{your-endpoint}.openai.azure.com/
+OPENAI_API_VERSION=2024-05-01-preview
+
+# note that the deployment name must be the same as the model name
+FAST_LLM=azure_openai:gpt-4o-mini
+SMART_LLM=azure_openai:gpt-4o
+STRATEGIC_LLM=azure_openai:o1-preview
+
+
+```
+
+
+## Ollama
+
+GPT Researcher supports both Ollama LLMs and embeddings. You can choose each or both.
+To use [Ollama](http://www.ollama.com) you can set the following environment variables
+
+```bash
+OLLAMA_BASE_URL=http://localhost:11434
+FAST_LLM="ollama:llama3"
+SMART_LLM="ollama:llama3"
+STRATEGIC_LLM="ollama:llama3"
+
+EMBEDDING="ollama:nomic-embed-text"
+```
+
+## Groq
+
+GroqCloud provides advanced AI hardware and software solutions designed to deliver amazingly fast AI inference performance.
+To leverage Groq in GPT-Researcher, you will need a GroqCloud account and an API Key. (__NOTE:__ Groq has a very _generous free tier_.)
+
+### Sign up
+- You can signup here: [https://console.groq.com/login](https://console.groq.com/login)
+- Once you are logged in, you can get an API Key here: [https://console.groq.com/keys](https://console.groq.com/keys)
+
+- Once you have an API key, you will need to add it to your `systems environment` using the variable name:
+`GROQ_API_KEY="*********************"`
+
+### Update env vars
+And finally, you will need to configure the GPT-Researcher Provider and Model variables:
+
+```bash
+GROQ_API_KEY=[Your Key]
+
+# Set one of the LLM models supported by Groq
+FAST_LLM="groq:Mixtral-8x7b-32768"
+SMART_LLM="groq:Mixtral-8x7b-32768"
+STRATEGIC_LLM="groq:Mixtral-8x7b-32768"
+```
+
+__NOTE:__ As of the writing of this Doc (May 2024), the available Language Models from Groq are:
+
+* Llama3-70b-8192
+* Llama3-8b-8192
+* Mixtral-8x7b-32768
+* Gemma-7b-it
+
+
+## Anthropic
+
+Refer to Anthropic [Getting started page](https://docs.anthropic.com/en/api/getting-started) to obtain Anthropic API key. Update the corresponding env vars, for example:
+```bash
+ANTHROPIC_API_KEY=[Your key]
+FAST_LLM="anthropic:claude-2.1"
+SMART_LLM="anthropic:claude-3-opus-20240229"
+STRATEGIC_LLM="anthropic:claude-3-opus-20240229"
+```
+
+Anthropic does not offer its own embedding model.
+
+
+## Mistral AI
+
+Sign up for a [Mistral API key](https://console.mistral.ai/users/api-keys/).
+Then update the corresponding env vars, for example:
+```bash
+MISTRAL_API_KEY=[Your key]
+FAST_LLM="mistralai:open-mistral-7b"
+SMART_LLM="mistralai:mistral-large-latest"
+STRATEGIC_LLM="mistralai:mistral-large-latest"
+
+EMBEDDING="mistralai:mistral-embed"
+```
+
+
+## Together AI
+[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/inference-models) in a couple lines of code.
+Then update corresponding env vars, for example:
+```bash
+TOGETHER_API_KEY=[Your key]
+FAST_LLM="together:meta-llama/Llama-3-8b-chat-hf"
+SMART_LLM="together:meta-llama/Llama-3-70b-chat-hf"
+STRATEGIC_LLM="together:meta-llama/Llama-3-70b-chat-hf"
+
+EMBEDDING="mistralai:nomic-ai/nomic-embed-text-v1.5"
+```
+
+
+## HuggingFace
+
+This integration requires a bit of extra work. Follow [this guide](https://python.langchain.com/v0.1/docs/integrations/chat/huggingface/) to learn more.
+After you've followed the tutorial above, update the env vars:
+```bash
+HUGGINGFACE_API_KEY=[Your key]
+FAST_LLM="huggingface:HuggingFaceH4/zephyr-7b-beta"
+SMART_LLM="huggingface:HuggingFaceH4/zephyr-7b-beta"
+STRATEGIC_LLM="huggingface:HuggingFaceH4/zephyr-7b-beta"
+
+EMBEDDING="sentence-transformers/all-MiniLM-L6-v2"
+```
+
+
+## Google Gemini
+
+Sign up [here](https://ai.google.dev/gemini-api/docs/api-key) for obtaining a Google Gemini API Key and update the following env vars:
+```bash
+GOOGLE_API_KEY=[Your key]
+FAST_LLM="google_genai:gemini-1.5-flash"
+SMART_LLM="google_genai:gemini-1.5-pro"
+STRATEGIC_LLM="google_genai:gemini-1.5-pro"
+
+EMBEDDING="google_genai:models/text-embedding-004"
+```
+
+
+## Google VertexAI
+
+```bash
+FAST_LLM="google_vertexai:gemini-1.5-flash-001"
+SMART_LLM="google_vertexai:gemini-1.5-pro-001"
+STRATEGIC_LLM="google_vertexai:gemini-1.5-pro-001"
+
+EMBEDDING="google_vertexai:text-embedding-004"
+```
+
+
+## Cohere
+
+```bash
+COHERE_API_KEY=[Your key]
+FAST_LLM="cohere:command"
+SMART_LLM="cohere:command-nightly"
+STRATEGIC_LLM="cohere:command-nightly"
+
+EMBEDDING="cohere:embed-english-v3.0"
+```
+
+
+## Fireworks
+
+```bash
+FIREWORKS_API_KEY=[Your key]
+base_url="https://api.fireworks.ai/inference/v1/completions"
+FAST_LLM="fireworks:accounts/fireworks/models/mixtral-8x7b-instruct"
+SMART_LLM="fireworks:accounts/fireworks/models/mixtral-8x7b-instruct"
+STRATEGIC_LLM="fireworks:accounts/fireworks/models/mixtral-8x7b-instruct"
+
+EMBEDDING="fireworks:nomic-ai/nomic-embed-text-v1.5"
+```
+
+
+## Bedrock
+
+```bash
+FAST_LLM="bedrock:anthropic.claude-3-sonnet-20240229-v1:0"
+SMART_LLM="bedrock:anthropic.claude-3-sonnet-20240229-v1:0"
+STRATEGIC_LLM="bedrock:anthropic.claude-3-sonnet-20240229-v1:0"
+
+EMBEDDING="bedrock:amazon.titan-embed-text-v2:0"
+```
+
+
+## LiteLLM
+
+```bash
+FAST_LLM="litellm:perplexity/pplx-7b-chat"
+SMART_LLM="litellm:perplexity/pplx-70b-chat"
+STRATEGIC_LLM="litellm:perplexity/pplx-70b-chat"
+```
+
+
+## xAI
+
+```bash
+FAST_LLM="xai:grok-beta"
+SMART_LLM="xai:grok-beta"
+STRATEGIC_LLM="xai:grok-beta"
+```
+
+
+## DeepSeek
+```bash
+DEEPSEEK_API_KEY=[Your key]
+FAST_LLM="deepseek:deepseek-chat"
+SMART_LLM="deepseek:deepseek-chat"
+STRATEGIC_LLM="deepseek:deepseek-chat"
+```
+
+
+## Other Embedding Models
+
+### Nomic
+
+```bash
+EMBEDDING="nomic:nomic-embed-text-v1.5"
+```
+
+### VoyageAI
+
+```bash
+VOYAGE_API_KEY=[Your Key]
+EMBEDDING="voyageai:voyage-law-2"
+```
diff --git a/docs/docs/gpt-researcher/llms/running-with-ollama.md b/docs/docs/gpt-researcher/llms/running-with-ollama.md
new file mode 100644
index 0000000000000000000000000000000000000000..6ecea528f5fdee03ee0709b18aebb66faa0e9aef
--- /dev/null
+++ b/docs/docs/gpt-researcher/llms/running-with-ollama.md
@@ -0,0 +1,112 @@
+# Running with Ollama
+
+Ollama is a platform that allows you to deploy and manage custom language models. This guide will walk you through deploying a custom language model on Ollama.
+
+Read on to understand how to install a Custom LLM with the Ollama WebUI, and how to query it with GPT-Researcher.
+
+
+## Fetching the Desired LLM Models
+
+After deploying Ollama WebUI, you'll want to enter the [Open WebUI Admin App](https://github.com/open-webui/open-webui/tree/main) & download a custom LLM.
+
+Choose a model from [Ollama's Library of LLM's](https://ollama.com/library?sort=popular)
+
+Paste the model name & size into the Web UI:
+
+
+
+For our example, let's choose to download the `qwen2:1.5b` model.
+
+This model now automatically becomes available via your Server's out-of-the-box API - we'll leverage it within our GPT-Researcher .env file in the next step.
+
+
+## Querying your Custom LLM with GPT-Researcher
+
+If you deploy ollama locally, a .env like so, should enable powering GPT-Researcher with Ollama:
+
+```bash
+OPENAI_API_KEY="123"
+OPENAI_API_BASE="http://127.0.0.1:11434/v1"
+OLLAMA_BASE_URL="http://127.0.0.1:11434/"
+FAST_LLM="ollama:qwen2:1.5b"
+SMART_LLM="ollama:qwen2:1.5b"
+EMBEDDING="ollama:all-minilm:22m"
+```
+
+Replace `FAST_LLM` & `SMART_LLM` with the model you downloaded from the Elestio Web UI in the previous step.
+
+
+## Run LLM Test Script for GPTR
+
+And here's a custom python script you can use to query your custom LLM:
+
+```python
+
+import os
+import asyncio
+import logging
+logging.basicConfig(level=logging.DEBUG)
+from gpt_researcher.llm_provider.generic import GenericLLMProvider
+from gpt_researcher.utils.llm import get_llm
+
+OLLAMA_BASE_URL = "https://ollama-ug3qr-u21899.vm.elestio.app:57987"
+LLM_MODEL = "llama3.1"
+
+# Create the GenericLLMProvider instance
+llm_provider = get_llm(
+ "ollama",
+ base_url=OLLAMA_BASE_URL,
+ model=LLM_MODEL,
+ temperature=0.7,
+ max_tokens=2000,
+ verify_ssl=False # Add this line
+)
+
+# Test the connection with a simple query
+messages = [{"role": "user", "content": "sup?"}]
+
+async def test_ollama():
+ try:
+ response = await llm_provider.get_chat_response(messages, stream=False)
+ print("Ollama response:", response)
+ except Exception as e:
+ print(f"Error: {e}")
+
+# Run the async function
+asyncio.run(test_ollama())
+
+```
+
+Replace `OLLAMA_BASE_URL` with the URL of your Ollama instance, and `LLM_MODEL` with the model you downloaded from the Ollama Web UI.
+
+Run the script to test the connection with your custom LLM.
+
+
+## Deploy Ollama on Elestio
+
+Elestio is a platform that allows you to deploy and manage custom language models. This guide will walk you through deploying a custom language model on Elestio.
+
+You can deploy an [Open WebUI](https://github.com/open-webui/open-webui/tree/main) server with [Elestio](https://elest.io/open-source/ollama)
+
+Here's an example .env file that will enable powering GPT-Researcher with Elestio:
+
+```bash
+OPENAI_API_KEY="123"
+OPENAI_API_BASE="https://.vm.elestio.app:57987/v1"
+OLLAMA_BASE_URL="https://.vm.elestio.app:57987/"
+FAST_LLM="openai:qwen2:1.5b"
+SMART_LLM="openai:qwen2:1.5b"
+EMBEDDING="ollama:all-minilm:22m"
+```
+
+#### Disable Elestio Authentication or Add Auth Headers
+
+To remove the basic auth you have to follow the below steps:
+Go to your service -> Security -> at last Nginx -> in that find the below code:
+
+```bash
+auth_basic "Authentication";
+auth_basic_user_file /etc/nginx/conf.d/.htpasswd;
+```
+
+Comment these both these lines out and click the button "Update & Restart" to reflect the changes.
diff --git a/docs/docs/gpt-researcher/llms/testing-your-llm.md b/docs/docs/gpt-researcher/llms/testing-your-llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..ae6e67046fbdcfc0ab6d85c2e6bd2543c7e542e8
--- /dev/null
+++ b/docs/docs/gpt-researcher/llms/testing-your-llm.md
@@ -0,0 +1,30 @@
+# Testing your LLM
+
+Here is a snippet of code to help you verify that your LLM-related environment variables are set up correctly.
+
+```python
+from gpt_researcher.config.config import Config
+from gpt_researcher.utils.llm import create_chat_completion
+import asyncio
+from dotenv import load_dotenv
+load_dotenv()
+
+async def main():
+ cfg = Config()
+
+ try:
+ report = await create_chat_completion(
+ model=cfg.smart_llm_model,
+ messages = [{"role": "user", "content": "sup?"}],
+ temperature=0.35,
+ llm_provider=cfg.smart_llm_provider,
+ stream=True,
+ max_tokens=cfg.smart_token_limit,
+ llm_kwargs=cfg.llm_kwargs
+ )
+ except Exception as e:
+ print(f"Error in calling LLM: {e}")
+
+# Run the async function
+asyncio.run(main())
+```
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/multi_agents/langgraph.md b/docs/docs/gpt-researcher/multi_agents/langgraph.md
new file mode 100644
index 0000000000000000000000000000000000000000..b01badc154c11f55020211312ece0da776917b2f
--- /dev/null
+++ b/docs/docs/gpt-researcher/multi_agents/langgraph.md
@@ -0,0 +1,148 @@
+# LangGraph
+
+[LangGraph](https://python.langchain.com/docs/langgraph) is a library for building stateful, multi-actor applications with LLMs.
+This example uses Langgraph to automate the process of an in depth research on any given topic.
+
+## Use case
+By using Langgraph, the research process can be significantly improved in depth and quality by leveraging multiple agents with specialized skills.
+Inspired by the recent [STORM](https://arxiv.org/abs/2402.14207) paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication.
+
+An average run generates a 5-6 page research report in multiple formats such as PDF, Docx and Markdown.
+
+Please note: This example uses the OpenAI API only for optimized performance.
+
+## The Multi Agent Team
+The research team is made up of 7 AI agents:
+- **Human** - The human in the loop that oversees the process and provides feedback to the agents.
+- **Chief Editor** - Oversees the research process and manages the team. This is the "master" agent that coordinates the other agents using Langgraph.
+- **Researcher** (gpt-researcher) - A specialized autonomous agent that conducts in depth research on a given topic.
+- **Editor** - Responsible for planning the research outline and structure.
+- **Reviewer** - Validates the correctness of the research results given a set of criteria.
+- **Revisor** - Revises the research results based on the feedback from the reviewer.
+- **Writer** - Responsible for compiling and writing the final report.
+- **Publisher** - Responsible for publishing the final report in various formats.
+
+## How it works
+Generally, the process is based on the following stages:
+1. Planning stage
+2. Data collection and analysis
+3. Review and revision
+4. Writing and submission
+5. Publication
+
+### Architecture
+
+
+
+
+
+### Steps
+More specifically (as seen in the architecture diagram) the process is as follows:
+- Browser (gpt-researcher) - Browses the internet for initial research based on the given research task.
+- Editor - Plans the report outline and structure based on the initial research.
+- For each outline topic (in parallel):
+ - Researcher (gpt-researcher) - Runs an in depth research on the subtopics and writes a draft.
+ - Reviewer - Validates the correctness of the draft given a set of criteria and provides feedback.
+ - Revisor - Revises the draft until it is satisfactory based on the reviewer feedback.
+- Writer - Compiles and writes the final report including an introduction, conclusion and references section from the given research findings.
+- Publisher - Publishes the final report to multi formats such as PDF, Docx, Markdown, etc.
+
+## How to run
+1. Install required packages:
+ ```bash
+ pip install -r requirements.txt
+ ```
+3. Update env variables
+ ```bash
+ export OPENAI_API_KEY={Your OpenAI API Key here}
+ export TAVILY_API_KEY={Your Tavily API Key here}
+ ```
+2. Run the application:
+ ```bash
+ python main.py
+ ```
+
+## Usage
+To change the research query and customize the report, edit the `task.json` file in the main directory.
+#### Task.json contains the following fields:
+- `query` - The research query or task.
+- `model` - The OpenAI LLM to use for the agents.
+- `max_sections` - The maximum number of sections in the report. Each section is a subtopic of the research query.
+- `include_human_feedback` - If true, the user can provide feedback to the agents. If false, the agents will work autonomously.
+- `publish_formats` - The formats to publish the report in. The reports will be written in the `output` directory.
+- `source` - The location from which to conduct the research. Options: `web` or `local`. For local, please add `DOC_PATH` env var.
+- `follow_guidelines` - If true, the research report will follow the guidelines below. It will take longer to complete. If false, the report will be generated faster but may not follow the guidelines.
+- `guidelines` - A list of guidelines that the report must follow.
+- `verbose` - If true, the application will print detailed logs to the console.
+
+#### For example:
+```json
+{
+ "query": "Is AI in a hype cycle?",
+ "model": "gpt-4o",
+ "max_sections": 3,
+ "publish_formats": {
+ "markdown": true,
+ "pdf": true,
+ "docx": true
+ },
+ "include_human_feedback": false,
+ "source": "web",
+ "follow_guidelines": true,
+ "guidelines": [
+ "The report MUST fully answer the original question",
+ "The report MUST be written in apa format",
+ "The report MUST be written in english"
+ ],
+ "verbose": true
+}
+```
+
+## To Deploy
+
+```shell
+pip install langgraph-cli
+langgraph up
+```
+
+From there, see documentation [here](https://github.com/langchain-ai/langgraph-example) on how to use the streaming and async endpoints, as well as the playground.
+
+## NextJS Frontend App
+
+The React app (located in `frontend` directory) is our Frontend 2.0 which we hope will enable us to display the robustness of the backend on the frontend, as well.
+
+It comes with loads of added features, such as:
+ - a drag-n-drop user interface for uploading and deleting files to be used as local documents by GPTResearcher.
+ - a GUI for setting your GPTR environment variables.
+ - the ability to trigger the multi_agents flow via the Backend Module or Langgraph Cloud Host (currently in closed beta).
+ - stability fixes
+ - and more coming soon!
+
+### Run the NextJS React App with Docker
+
+> **Step 1** - [Install Docker](https://docs.gptr.dev/docs/gpt-researcher/getting-started/getting-started-with-docker)
+
+> **Step 2** - Clone the '.env.example' file, add your API Keys to the cloned file and save the file as '.env'
+
+> **Step 3** - Within the docker-compose file comment out services that you don't want to run with Docker.
+
+```bash
+$ docker-compose up --build
+```
+
+> **Step 4** - By default, if you haven't uncommented anything in your docker-compose file, this flow will start 2 processes:
+ - the Python server running on localhost:8000
+ - the React app running on localhost:3000
+
+Visit localhost:3000 on any browser and enjoy researching!
+
+
+### Run the NextJS React App with NPM
+
+```bash
+cd frontend
+nvm install 18.17.0
+nvm use v18.17.0
+npm install --legacy-peer-deps
+npm run dev
+```
\ No newline at end of file
diff --git a/docs/docs/gpt-researcher/search-engines/retrievers.md b/docs/docs/gpt-researcher/search-engines/retrievers.md
new file mode 100644
index 0000000000000000000000000000000000000000..2fe75ab09a25f2cedeea4cd33dd5dedb33342b48
--- /dev/null
+++ b/docs/docs/gpt-researcher/search-engines/retrievers.md
@@ -0,0 +1,75 @@
+# Retrievers
+
+Retrievers are search engines used to find the most relevant documents for a given research task.
+You can specify your preferred web search or use any custom retriever of your choice.
+
+## Web Search Engines
+
+GPT Researcher defaults to using the [Tavily](https://app.tavily.com) search engine for retrieving search results.
+But you can also use other search engines by specifying the `RETRIEVER` env var. Please note that each search engine has its own API Key requirements and usage limits.
+
+For example:
+
+```bash
+RETRIEVER=bing
+```
+
+You can also specify multiple retrievers by separating them with commas. The system will use each specified retriever in sequence.
+For example:
+
+```bash
+RETRIEVER=tavily, arxiv
+```
+
+Thanks to our community, we have integrated the following web search engines:
+
+- [Tavily](https://app.tavily.com) - Default
+- [Bing](https://www.microsoft.com/en-us/bing/apis/bing-web-search-api) - Env: `RETRIEVER=bing`
+- [Google](https://developers.google.com/custom-search/v1/overview) - Env: `RETRIEVER=google`
+- [SearchApi](https://www.searchapi.io/) - Env: `RETRIEVER=searchapi`
+- [Serp API](https://serpapi.com/) - Env: `RETRIEVER=serpapi`
+- [Serper](https://serper.dev/) - Env: `RETRIEVER=serper`
+- [Searx](https://searx.github.io/searx/) - Env: `RETRIEVER=searx`
+- [Duckduckgo](https://pypi.org/project/duckduckgo-search/) - Env: `RETRIEVER=duckduckgo`
+- [Arxiv](https://info.arxiv.org/help/api/index.html) - Env: `RETRIEVER=arxiv`
+- [Exa](https://docs.exa.ai/reference/getting-started) - Env: `RETRIEVER=exa`
+- [PubMedCentral](https://www.ncbi.nlm.nih.gov/home/develop/api/) - Env: `RETRIEVER=pubmed_central`
+
+## Custom Retrievers
+
+You can also use any custom retriever of your choice by specifying the `RETRIEVER=custom` env var.
+Custom retrievers allow you to use any search engine that provides an API to retrieve documents and is widely used for enterprise research tasks.
+
+In addition to setting the `RETRIEVER` env, you also need to set the following env vars:
+
+- `RETRIEVER_ENDPOINT`: The endpoint URL of the custom retriever.
+- Additional arguments required by the retriever should be prefixed with `RETRIEVER_ARG_` (e.g., RETRIEVER_ARG_API_KEY).
+
+### Example
+
+```bash
+RETRIEVER=custom
+RETRIEVER_ENDPOINT=https://api.myretriever.com
+RETRIEVER_ARG_API_KEY=YOUR_API_KEY
+```
+
+### Response Format
+
+For the custom retriever to work correctly, the response from the endpoint should be in the following format:
+
+```json
+[
+ {
+ "url": "http://example.com/page1",
+ "raw_content": "Content of page 1"
+ },
+ {
+ "url": "http://example.com/page2",
+ "raw_content": "Content of page 2"
+ }
+]
+```
+
+The system assumes this response format and processes the list of sources accordingly.
+
+Missing a retriever? Feel free to contribute to this project by submitting issues or pull requests on our [GitHub](https://github.com/assafelovic/gpt-researcher) page.
diff --git a/docs/docs/gpt-researcher/search-engines/test-your-retriever.md b/docs/docs/gpt-researcher/search-engines/test-your-retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..e9aff2b50b0eeaffb6c1400ae2b2ef14be77074e
--- /dev/null
+++ b/docs/docs/gpt-researcher/search-engines/test-your-retriever.md
@@ -0,0 +1,68 @@
+# Testing your Retriever
+
+To test your retriever, you can use the following code snippet. The script will search for a sub-query and display the search results.
+
+```python
+import asyncio
+from dotenv import load_dotenv
+from gpt_researcher.config.config import Config
+from gpt_researcher.actions.retriever import get_retrievers
+from gpt_researcher.skills.researcher import ResearchConductor
+import pprint
+# Load environment variables from .env file
+load_dotenv()
+
+async def test_scrape_data_by_query():
+ # Initialize the Config object
+ config = Config()
+
+ # Retrieve the retrievers based on the current configuration
+ retrievers = get_retrievers({}, config)
+ print("Retrievers:", retrievers)
+
+ # Create a mock researcher object with necessary attributes
+ class MockResearcher:
+ def init(self):
+ self.retrievers = retrievers
+ self.cfg = config
+ self.verbose = True
+ self.websocket = None
+ self.scraper_manager = None # Mock or implement scraper manager
+ self.vector_store = None # Mock or implement vector store
+
+ researcher = MockResearcher()
+ research_conductor = ResearchConductor(researcher)
+ # print('research_conductor',dir(research_conductor))
+ # print('MockResearcher',dir(researcher))
+ # Define a sub-query to test
+ sub_query = "design patterns for autonomous ai agents"
+
+ # Iterate through all retrievers
+ for retriever_class in retrievers:
+ # Instantiate the retriever with the sub-query
+ retriever = retriever_class(sub_query)
+
+ # Perform the search using the current retriever
+ search_results = await asyncio.to_thread(
+ retriever.search, max_results=10
+ )
+
+ print("\033[35mSearch results:\033[0m")
+ pprint.pprint(search_results, indent=4, width=80)
+
+if __name__ == "__main__":
+ asyncio.run(test_scrape_data_by_query())
+```
+
+The output of the search results will include the title, body, and href of each search result. For example:
+
+```json
+[{
+ "body": "Jun 5, 2024 ... Three AI Design Patterns of Autonomous "
+ "Agents. Overview of the Three Patterns. Three notable AI "
+ "design patterns for autonomous agents include:.",
+ "href": "https://accredianpublication.medium.com/building-smarter-systems-the-role-of-agentic-design-patterns-in-genai-13617492f5df",
+ "title": "Building Smarter Systems: The Role of Agentic Design "
+ "Patterns in ..."},
+ ...]
+```
\ No newline at end of file
diff --git a/docs/docs/reference/config/config.md b/docs/docs/reference/config/config.md
new file mode 100644
index 0000000000000000000000000000000000000000..69125fdf46f3f9239160f0308eb751543608d9cf
--- /dev/null
+++ b/docs/docs/reference/config/config.md
@@ -0,0 +1,127 @@
+---
+sidebar_label: config
+title: config.config
+---
+
+Configuration class to store the state of bools for different scripts access.
+
+## Config Objects
+
+```python
+class Config(metaclass=Singleton)
+```
+
+Configuration class to store the state of bools for different scripts access.
+
+#### \_\_init\_\_
+
+```python
+def __init__() -> None
+```
+
+Initialize the Config class
+
+#### set\_fast\_llm\_model
+
+```python
+def set_fast_llm_model(value: str) -> None
+```
+
+Set the fast LLM model value.
+
+#### set\_smart\_llm\_model
+
+```python
+def set_smart_llm_model(value: str) -> None
+```
+
+Set the smart LLM model value.
+
+#### set\_fast\_token\_limit
+
+```python
+def set_fast_token_limit(value: int) -> None
+```
+
+Set the fast token limit value.
+
+#### set\_smart\_token\_limit
+
+```python
+def set_smart_token_limit(value: int) -> None
+```
+
+Set the smart token limit value.
+
+#### set\_browse\_chunk\_max\_length
+
+```python
+def set_browse_chunk_max_length(value: int) -> None
+```
+
+Set the browse_website command chunk max length value.
+
+#### set\_openai\_api\_key
+
+```python
+def set_openai_api_key(value: str) -> None
+```
+
+Set the OpenAI API key value.
+
+#### set\_debug\_mode
+
+```python
+def set_debug_mode(value: bool) -> None
+```
+
+Set the debug mode value.
+
+## APIKeyError Objects
+
+```python
+class APIKeyError(Exception)
+```
+
+Exception raised when an API key is not set in config.py or as an environment variable.
+
+#### check\_openai\_api\_key
+
+```python
+def check_openai_api_key(cfg) -> None
+```
+
+Check if the OpenAI API key is set in config.py or as an environment variable.
+
+#### check\_tavily\_api\_key
+
+```python
+def check_tavily_api_key(cfg) -> None
+```
+
+Check if the Tavily Search API key is set in config.py or as an environment variable.
+
+#### check\_google\_api\_key
+
+```python
+def check_google_api_key(cfg) -> None
+```
+
+Check if the Google API key is set in config.py or as an environment variable.
+
+#### check\_serp\_api\_key
+
+```python
+def check_serp_api_key(cfg) -> None
+```
+
+Check if the SERP API key is set in config.py or as an environment variable.
+
+#### check\_searx\_url
+
+```python
+def check_searx_url(cfg) -> None
+```
+
+Check if the Searx URL is set in config.py or as an environment variable.
+
diff --git a/docs/docs/reference/config/singleton.md b/docs/docs/reference/config/singleton.md
new file mode 100644
index 0000000000000000000000000000000000000000..0b72bff7e8ff9eda45218db9d4eb5f3bb588ae8e
--- /dev/null
+++ b/docs/docs/reference/config/singleton.md
@@ -0,0 +1,31 @@
+---
+sidebar_label: singleton
+title: config.singleton
+---
+
+The singleton metaclass for ensuring only one instance of a class.
+
+## Singleton Objects
+
+```python
+class Singleton(abc.ABCMeta, type)
+```
+
+Singleton metaclass for ensuring only one instance of a class.
+
+#### \_\_call\_\_
+
+```python
+def __call__(cls, *args, **kwargs)
+```
+
+Call method for the singleton metaclass.
+
+## AbstractSingleton Objects
+
+```python
+class AbstractSingleton(abc.ABC, metaclass=Singleton)
+```
+
+Abstract singleton class for ensuring only one instance of a class.
+
diff --git a/docs/docs/reference/processing/html.md b/docs/docs/reference/processing/html.md
new file mode 100644
index 0000000000000000000000000000000000000000..44808d4ede27ce9f067a0bfe7a1a759290040015
--- /dev/null
+++ b/docs/docs/reference/processing/html.md
@@ -0,0 +1,43 @@
+---
+sidebar_label: html
+title: processing.html
+---
+
+HTML processing functions
+
+#### extract\_hyperlinks
+
+```python
+def extract_hyperlinks(soup: BeautifulSoup,
+ base_url: str) -> list[tuple[str, str]]
+```
+
+Extract hyperlinks from a BeautifulSoup object
+
+**Arguments**:
+
+- `soup` _BeautifulSoup_ - The BeautifulSoup object
+- `base_url` _str_ - The base URL
+
+
+**Returns**:
+
+ List[Tuple[str, str]]: The extracted hyperlinks
+
+#### format\_hyperlinks
+
+```python
+def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]
+```
+
+Format hyperlinks to be displayed to the user
+
+**Arguments**:
+
+- `hyperlinks` _List[Tuple[str, str]]_ - The hyperlinks to format
+
+
+**Returns**:
+
+- `List[str]` - The formatted hyperlinks
+
diff --git a/docs/docs/reference/processing/text.md b/docs/docs/reference/processing/text.md
new file mode 100644
index 0000000000000000000000000000000000000000..954718720eea662a2a1e5f1bab56ccb9cba4c97c
--- /dev/null
+++ b/docs/docs/reference/processing/text.md
@@ -0,0 +1,103 @@
+---
+sidebar_label: text
+title: processing.text
+---
+
+Text processing functions
+
+#### split\_text
+
+```python
+def split_text(text: str,
+ max_length: int = 8192) -> Generator[str, None, None]
+```
+
+Split text into chunks of a maximum length
+
+**Arguments**:
+
+- `text` _str_ - The text to split
+- `max_length` _int, optional_ - The maximum length of each chunk. Defaults to 8192.
+
+
+**Yields**:
+
+- `str` - The next chunk of text
+
+
+**Raises**:
+
+- `ValueError` - If the text is longer than the maximum length
+
+#### summarize\_text
+
+```python
+def summarize_text(url: str,
+ text: str,
+ question: str,
+ driver: Optional[WebDriver] = None) -> str
+```
+
+Summarize text using the OpenAI API
+
+**Arguments**:
+
+- `url` _str_ - The url of the text
+- `text` _str_ - The text to summarize
+- `question` _str_ - The question to ask the model
+- `driver` _WebDriver_ - The webdriver to use to scroll the page
+
+
+**Returns**:
+
+- `str` - The summary of the text
+
+#### scroll\_to\_percentage
+
+```python
+def scroll_to_percentage(driver: WebDriver, ratio: float) -> None
+```
+
+Scroll to a percentage of the page
+
+**Arguments**:
+
+- `driver` _WebDriver_ - The webdriver to use
+- `ratio` _float_ - The percentage to scroll to
+
+
+**Raises**:
+
+- `ValueError` - If the ratio is not between 0 and 1
+
+#### create\_message
+
+```python
+def create_message(chunk: str, question: str) -> Dict[str, str]
+```
+
+Create a message for the chat completion
+
+**Arguments**:
+
+- `chunk` _str_ - The chunk of text to summarize
+- `question` _str_ - The question to answer
+
+
+**Returns**:
+
+ Dict[str, str]: The message to send to the chat completion
+
+#### write\_to\_file
+
+```python
+def write_to_file(filename: str, text: str) -> None
+```
+
+Write text to a file
+
+**Arguments**:
+
+- `text` _str_ - The text to write
+- `filename` _str_ - The filename to write to
+
diff --git a/docs/docs/reference/sidebar.json b/docs/docs/reference/sidebar.json
new file mode 100644
index 0000000000000000000000000000000000000000..c9819d64dedbc73287c748cf517670cc0d70c59e
--- /dev/null
+++ b/docs/docs/reference/sidebar.json
@@ -0,0 +1,5 @@
+{
+ "items": [],
+ "label": "Reference",
+ "type": "category"
+}
\ No newline at end of file
diff --git a/docs/docs/roadmap.md b/docs/docs/roadmap.md
new file mode 100644
index 0000000000000000000000000000000000000000..649105f604821a0b44ceac95c135f0f663c76941
--- /dev/null
+++ b/docs/docs/roadmap.md
@@ -0,0 +1,9 @@
+# Roadmap
+
+We're constantly working on additional features and improvements to our products and services. We're also working on new products and services to help you build better AI applications using [GPT Researcher](https://gptr.dev).
+
+Our vision is to build the #1 autonomous research agent for AI developers and researchers, and we're excited to have you join us on this journey!
+
+The roadmap is prioritized based on the following goals: Performance, Quality, Modularity and Conversational flexibility. The roadmap is public and can be found [here](https://trello.com/b/3O7KBePw/gpt-researcher-roadmap).
+
+Interested in collaborating or contributing? Check out our [contributing page](/docs/contribute) for more information.
\ No newline at end of file
diff --git a/docs/docs/welcome.md b/docs/docs/welcome.md
new file mode 100644
index 0000000000000000000000000000000000000000..3037e55c66b95b677e04fd7bbeb102b0a0cb42b7
--- /dev/null
+++ b/docs/docs/welcome.md
@@ -0,0 +1,13 @@
+# Welcome
+
+Hey there! 👋
+
+We're a team of AI researchers and developers who are passionate about building the next generation of AI assistants.
+Our mission is to empower individuals and organizations with accurate, unbiased, and factual information.
+
+### GPT Researcher
+Quickly accessing relevant and trustworthy information is more crucial than ever. However, we've learned that none of today's search engines provide a suitable tool that provides factual, explicit and objective answers without the need to continuously click and explore multiple sites for a given research task.
+
+This is why we've built the trending open source **[GPT Researcher](https://github.com/assafelovic/gpt-researcher)**. GPT Researcher is an autonomous agent that takes care of the tedious task of research for you, by scraping, filtering and aggregating over 20+ web sources per a single research task.
+
+To learn more about GPT Researcher, check out the [documentation page](/docs/gpt-researcher/getting-started/introduction).
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..af7015cf93db401dd5a7a3d73754fc1e25f42694
--- /dev/null
+++ b/docs/docusaurus.config.js
@@ -0,0 +1,131 @@
+/** @type {import('@docusaurus/types').DocusaurusConfig} */
+const math = require('remark-math');
+const katex = require('rehype-katex');
+
+module.exports = {
+ title: 'GPT Researcher',
+ tagline: 'The leading autonomous AI research agent',
+ url: 'https://docs.gptr.dev',
+ baseUrl: '/',
+ onBrokenLinks: 'ignore',
+ //deploymentBranch: 'master',
+ onBrokenMarkdownLinks: 'warn',
+ favicon: 'img/gptr-logo.png',
+ organizationName: 'assafelovic',
+ trailingSlash: false,
+ projectName: 'gpt-researcher',
+ themeConfig: {
+ navbar: {
+ title: 'GPT Researcher',
+ logo: {
+ alt: 'GPT Researcher',
+ src: 'img/gptr-logo.png',
+ },
+ items: [
+ {
+ type: 'doc',
+ docId: 'welcome',
+ position: 'left',
+ label: 'Docs',
+ },
+
+ {to: 'blog', label: 'Blog', position: 'left'},
+ {
+ type: 'doc',
+ docId: 'faq',
+ position: 'left',
+ label: 'FAQ',
+ },
+ {
+ href: 'mailto:assaf.elovic@gmail.com',
+ position: 'left',
+ label: 'Contact',
+ },
+ {
+ href: 'https://github.com/assafelovic/gpt-researcher',
+ label: 'GitHub',
+ position: 'right',
+ },
+ ],
+ },
+ footer: {
+ style: 'dark',
+ links: [
+ {
+ title: 'Community',
+ items: [
+ {
+ label: 'Discord',
+ href: 'https://discord.gg/8YkBcCED5y',
+ },
+ {
+ label: 'Twitter',
+ href: 'https://twitter.com/assaf_elovic',
+ },
+ {
+ label: 'LinkedIn',
+ href: 'https://www.linkedin.com/in/assafe/',
+ },
+ ],
+ },
+ {
+ title: 'Company',
+ items: [
+ {
+ label: 'Homepage',
+ href: 'https://gptr.dev',
+ },
+ {
+ label: 'Contact',
+ href: 'mailto:assafelovic@gmail.com',
+ },
+ ],
+ },
+ ],
+ copyright: `Copyright © ${new Date().getFullYear()} GPT Researcher.`,
+ },
+ },
+ presets: [
+ [
+ '@docusaurus/preset-classic',
+ {
+ docs: {
+ sidebarPath: require.resolve('./sidebars.js'),
+ // Please change this to your repo.
+ editUrl:
+ 'https://github.com/assafelovic/gpt-researcher/tree/master/docs',
+ remarkPlugins: [math],
+ rehypePlugins: [katex],
+ },
+ theme: {
+ customCss: require.resolve('./src/css/custom.css'),
+ },
+ },
+ ],
+ ],
+ stylesheets: [
+ {
+ href: "https://cdn.jsdelivr.net/npm/katex@0.13.11/dist/katex.min.css",
+ integrity: "sha384-Um5gpz1odJg5Z4HAmzPtgZKdTBHZdw8S29IecapCSB31ligYPhHQZMIlWLYQGVoc",
+ crossorigin: "anonymous",
+ },
+ ],
+
+ plugins: [
+ // ... Your other plugins.
+ [
+ require.resolve("@easyops-cn/docusaurus-search-local"),
+ {
+ // ... Your options.
+ // `hashed` is recommended as long-term-cache of index file is possible.
+ hashed: true,
+ blogDir:"./blog/"
+ // For Docs using Chinese, The `language` is recommended to set to:
+ // ```
+ // language: ["en", "zh"],
+ // ```
+ // When applying `zh` in language, please install `nodejieba` in your project.
+ },
+ ],
+ ],
+};
diff --git a/docs/package.json b/docs/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..45614930cb289dda5e3c21eec4282fbd0da663b5
--- /dev/null
+++ b/docs/package.json
@@ -0,0 +1,56 @@
+{
+ "name": "website",
+ "version": "0.0.0",
+ "private": true,
+ "resolutions" :{
+ "nth-check":"2.0.1",
+ "trim":"0.0.3",
+ "got": "11.8.5",
+ "node-forge": "1.3.0",
+ "minimatch": "3.0.5",
+ "loader-utils": "2.0.4",
+ "eta": "2.0.0",
+ "@sideway/formula": "3.0.1",
+ "http-cache-semantics": "4.1.1"
+ },
+ "scripts": {
+ "docusaurus": "docusaurus",
+ "start": "docusaurus start",
+ "build": "docusaurus build",
+ "swizzle": "docusaurus swizzle",
+ "deploy": "docusaurus deploy",
+ "clear": "docusaurus clear",
+ "serve": "docusaurus serve",
+ "write-translations": "docusaurus write-translations",
+ "write-heading-ids": "docusaurus write-heading-ids"
+ },
+ "dependencies": {
+ "@docusaurus/core": "0.0.0-4193",
+ "@docusaurus/preset-classic": "0.0.0-4193",
+ "@easyops-cn/docusaurus-search-local": "^0.21.1",
+ "@mdx-js/react": "^1.6.21",
+ "@svgr/webpack": "^5.5.0",
+ "clsx": "^1.1.1",
+ "file-loader": "^6.2.0",
+ "hast-util-is-element": "1.1.0",
+ "react": "^17.0.1",
+ "react-dom": "^17.0.1",
+ "rehype-katex": "4",
+ "remark-math": "3",
+ "trim": "^0.0.3",
+ "url-loader": "^4.1.1",
+ "minimatch": "3.0.5"
+ },
+ "browserslist": {
+ "production": [
+ ">0.5%",
+ "not dead",
+ "not op_mini all"
+ ],
+ "development": [
+ "last 1 chrome version",
+ "last 1 firefox version",
+ "last 1 safari version"
+ ]
+ }
+}
diff --git a/docs/pydoc-markdown.yml b/docs/pydoc-markdown.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4c23389ed3a59493b29853b74a66f8eeb38643f6
--- /dev/null
+++ b/docs/pydoc-markdown.yml
@@ -0,0 +1,16 @@
+loaders:
+ - type: python
+ search_path: [../docs]
+processors:
+ - type: filter
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: docusaurus
+ docs_base_path: docs
+ relative_output_path: reference
+ relative_sidebar_path: sidebar.json
+ sidebar_top_level_label: Reference
+ markdown:
+ escape_html_in_docstring: false
diff --git a/docs/sidebars.js b/docs/sidebars.js
new file mode 100644
index 0000000000000000000000000000000000000000..f689e644dd59e1f20738ce16566c8983b23a0596
--- /dev/null
+++ b/docs/sidebars.js
@@ -0,0 +1,105 @@
+/**
+ * Creating a sidebar enables you to:
+ - create an ordered group of docs
+ - render a sidebar for each doc of that group
+ - provide next/previous navigation
+
+ The sidebars can be generated from the filesystem, or explicitly defined here.
+
+ Create as many sidebars as you want.
+ */
+
+ module.exports = {
+ docsSidebar: [
+ 'welcome',
+ {
+ type: 'category',
+ label: 'Getting Started',
+ collapsible: true,
+ collapsed: false,
+ items: [
+ 'gpt-researcher/getting-started/introduction',
+ 'gpt-researcher/getting-started/how-to-choose',
+ 'gpt-researcher/getting-started/getting-started',
+ 'gpt-researcher/getting-started/cli',
+ 'gpt-researcher/getting-started/getting-started-with-docker',
+ 'gpt-researcher/getting-started/linux-deployment',
+ ]
+ },
+ {
+ type: 'category',
+ label: 'GPT Researcher',
+ collapsible: true,
+ collapsed: true,
+ items: [
+ 'gpt-researcher/gptr/pip-package',
+ 'gpt-researcher/gptr/example',
+ 'gpt-researcher/gptr/config',
+ 'gpt-researcher/gptr/scraping',
+ 'gpt-researcher/gptr/handling-logs-as-they-stream',
+ 'gpt-researcher/gptr/querying-the-backend',
+ 'gpt-researcher/gptr/automated-tests',
+ 'gpt-researcher/gptr/troubleshooting',
+ ],
+ },
+ {
+ type: 'category',
+ label: 'Frontend',
+ collapsible: true,
+ collapsed: true,
+ items: [
+ 'gpt-researcher/frontend/frontend',
+ 'gpt-researcher/frontend/playing-with-webhooks',
+ 'gpt-researcher/frontend/logs',
+ ],
+ },
+ {
+ type: 'category',
+ label: 'Custom Context',
+ collapsible: true,
+ collapsed: true,
+ items: [
+ 'gpt-researcher/context/tailored-research',
+ 'gpt-researcher/context/local-docs',
+ 'gpt-researcher/context/filtering-by-domain',
+ 'gpt-researcher/context/vector-stores',
+ ]
+ },
+ {
+ type: 'category',
+ label: 'LLM Providers',
+ collapsible: true,
+ collapsed: true,
+ items: [
+ 'gpt-researcher/llms/llms',
+ 'gpt-researcher/llms/running-with-ollama',
+ 'gpt-researcher/llms/testing-your-llm'
+ ]
+ },
+ {
+ type: 'category',
+ label: 'Search Engines',
+ collapsible: true,
+ collapsed: true,
+ items: [
+ 'gpt-researcher/search-engines/retrievers',
+ 'gpt-researcher/search-engines/test-your-retriever'
+ ]
+ },
+ {
+ type: 'category',
+ label: 'Multi-Agent Frameworks',
+ collapsible: true,
+ collapsed: true,
+ items: [
+ 'gpt-researcher/multi_agents/langgraph',
+ ]
+ },
+ {'Examples': [{type: 'autogenerated', dirName: 'examples'}]},
+ 'contribute',
+ 'roadmap',
+ 'faq',
+ ],
+ // pydoc-markdown auto-generated markdowns from docstrings
+ referenceSideBar: [require("./docs/reference/sidebar.json")]
+};
diff --git a/docs/src/components/HomepageFeatures.js b/docs/src/components/HomepageFeatures.js
new file mode 100644
index 0000000000000000000000000000000000000000..6b1ed975ed8f87d211bad4fec701fc2c07e9fa05
--- /dev/null
+++ b/docs/src/components/HomepageFeatures.js
@@ -0,0 +1,78 @@
+import React from 'react';
+import clsx from 'clsx';
+import { Link } from 'react-router-dom';
+import styles from './HomepageFeatures.module.css';
+
+const FeatureList = [
+ {
+ title: 'GPT Researcher',
+ Svg: require('../../static/img/gptr-logo.png').default,
+ docLink: './docs/gpt-researcher/getting-started/getting-started',
+ description: (
+ <>
+ GPT Researcher is an open source autonomous agent designed for comprehensive online research on a variety of tasks.
+ >
+ ),
+ },
+ /*{
+ title: 'Tavily Search API',
+ Svg: require('../../static/img/tavily.png').default,
+ docLink: './docs/tavily-api/introduction',
+ description: (
+ <>
+ Tavily Search API is a search engine optimized for LLMs, optimized for a factual, efficient, and persistent search experience
+ >
+ ),
+ },*/
+ {
+ title: 'Multi-Agent Assistant',
+ Svg: require('../../static/img/multi-agent.png').default,
+ docLink: './docs/gpt-researcher/multi_agents/langgraph',
+ description: (
+ <>
+ Learn how a team of AI agents can work together to conduct research on a given topic, from planning to publication.
+ >
+ ),
+ },
+ {
+ title: 'Examples and Demos',
+ Svg: require('../../static/img/examples.png').default,
+ docLink: './docs/examples/examples',
+ description: (
+ <>
+ Check out GPT Researcher in action across multiple frameworks and use cases such as hybrid research and long detailed reports.
+ >
+ ),
+ },
+];
+
+function Feature({Svg, title, description, docLink}) {
+ return (
+
+
+ {/*
*/}
+
+
+
+
+
{title}
+
+
{description}
+
+
+ );
+}
+
+export default function HomepageFeatures() {
+ return (
+
+
+
+ {FeatureList.map((props, idx) => (
+
+ ))}
+
+
+
+ );
+}
diff --git a/docs/src/components/HomepageFeatures.module.css b/docs/src/components/HomepageFeatures.module.css
new file mode 100644
index 0000000000000000000000000000000000000000..6026dd666becd9cc19861785a11f6185aa17e33e
--- /dev/null
+++ b/docs/src/components/HomepageFeatures.module.css
@@ -0,0 +1,13 @@
+/* stylelint-disable docusaurus/copyright-header */
+
+.features {
+ display: flex;
+ align-items: center;
+ padding: 2rem 0;
+ width: 100%;
+}
+
+.featureSvg {
+ height: 120px;
+ width: 200px;
+}
diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css
new file mode 100644
index 0000000000000000000000000000000000000000..cf88f3b7152c3e7577923921b5fc938652c1fc00
--- /dev/null
+++ b/docs/src/css/custom.css
@@ -0,0 +1,100 @@
+:root {
+ --ifm-font-size-base: 16px;
+ --ifm-code-font-size: 90%;
+
+ --ifm-color-primary: #0c4da2;
+ --ifm-color-primary-dark: rgb(11, 69, 146);
+ --ifm-color-primary-darker: #0a418a;
+ --ifm-color-primary-darkest: #083671;
+ --ifm-color-primary-light: #0d55b2;
+ --ifm-color-primary-lighter: #0e59ba;
+ --ifm-color-primary-lightest: #1064d3;
+
+ --ifm-color-emphasis-300: #1064d3;
+ --ifm-link-color: #1064d3;
+ --ifm-menu-color-active: #1064d3;
+}
+
+.docusaurus-highlight-code-line {
+background-color: rgba(0, 0, 0, 0.1);
+display: block;
+margin: 0 calc(-1 * var(--ifm-pre-padding));
+padding: 0 var(--ifm-pre-padding);
+}
+html[data-theme='dark'] .docusaurus-highlight-code-line {
+background-color: rgb(0, 0, 0, 0.3);
+}
+
+.admonition-content a {
+text-decoration: underline;
+font-weight: 600;
+color: inherit;
+}
+
+a {
+font-weight: 600;
+}
+
+.markdown > p {
+ font-size: 16px;
+}
+
+.navbar {
+ font-size: 16px;
+}
+
+li {
+font-size: 16px;
+}
+
+blockquote {
+ /* samsung blue with lots of transparency */
+ background-color: #0c4da224;
+}
+@media (prefers-color-scheme: dark) {
+:root {
+ --ifm-hero-text-color: white;
+}
+}
+@media (prefers-color-scheme: dark) {
+.hero.hero--primary { --ifm-hero-text-color: white;}
+}
+
+@media (prefers-color-scheme: dark) {
+blockquote {
+ --ifm-color-emphasis-300: var(--ifm-color-primary);
+ /* border-left: 6px solid var(--ifm-color-emphasis-300); */
+}
+}
+@media (prefers-color-scheme: dark) {
+code {
+ /* background-color: rgb(41, 45, 62); */
+}
+}
+
+
+/* Docusaurus still defaults to their green! */
+@media (prefers-color-scheme: dark) {
+.react-toggle-thumb {
+ border-color: var(--ifm-color-primary) !important;
+}
+}
+
+
+.header-github-link:hover {
+opacity: 0.6;
+}
+
+.header-github-link:before {
+content: '';
+width: 24px;
+height: 24px;
+display: flex;
+background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E")
+ no-repeat;
+}
+
+html[data-theme='dark'] .header-github-link:before {
+background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='white' d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E")
+ no-repeat;
+}
diff --git a/docs/src/pages/index.js b/docs/src/pages/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..79d397bf8d97ccc8b29bfac5ff316e9b524361b7
--- /dev/null
+++ b/docs/src/pages/index.js
@@ -0,0 +1,40 @@
+import React from 'react';
+import clsx from 'clsx';
+import Layout from '@theme/Layout';
+import Link from '@docusaurus/Link';
+import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
+import styles from './index.module.css';
+import HomepageFeatures from '../components/HomepageFeatures';
+
+function HomepageHeader() {
+ const {siteConfig} = useDocusaurusContext();
+ return (
+
+ );
+}
+
+export default function Home() {
+ const {siteConfig} = useDocusaurusContext();
+ return (
+
+
+
+
+
+
+ );
+}
diff --git a/docs/src/pages/index.module.css b/docs/src/pages/index.module.css
new file mode 100644
index 0000000000000000000000000000000000000000..5e2483060b8798f1ce0ed95bd0d12cdbf6dddd6f
--- /dev/null
+++ b/docs/src/pages/index.module.css
@@ -0,0 +1,25 @@
+/* stylelint-disable docusaurus/copyright-header */
+
+/**
+ * CSS files with the .module.css suffix will be treated as CSS modules
+ * and scoped locally.
+ */
+
+.heroBanner {
+ padding: 5rem 0;
+ text-align: center;
+ position: relative;
+ overflow: hidden;
+}
+
+@media screen and (max-width: 966px) {
+ .heroBanner {
+ padding: 2rem;
+ }
+}
+
+.buttons {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+}
diff --git a/docs/static/.nojekyll b/docs/static/.nojekyll
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/static/CNAME b/docs/static/CNAME
new file mode 100644
index 0000000000000000000000000000000000000000..c5661873f63f93d752bab52acbd9296443f05926
--- /dev/null
+++ b/docs/static/CNAME
@@ -0,0 +1 @@
+docs.gptr.dev
\ No newline at end of file
diff --git a/docs/static/img/architecture.png b/docs/static/img/architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..0ad8847db3f74a4e9b792d8221ea7d4a9e6399fc
--- /dev/null
+++ b/docs/static/img/architecture.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93f7c083722105b00dc714d372a1075f4d5770b46fa19551dc2b772738f82d89
+size 143143
diff --git a/docs/static/img/banner1.jpg b/docs/static/img/banner1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..deb14833f10e2a37d981f87be324a7c86c871fc6
Binary files /dev/null and b/docs/static/img/banner1.jpg differ
diff --git a/docs/static/img/examples.png b/docs/static/img/examples.png
new file mode 100644
index 0000000000000000000000000000000000000000..e9062e46a08791e0a45e78f227a812cf15fb62c2
Binary files /dev/null and b/docs/static/img/examples.png differ
diff --git a/docs/static/img/gptr-logo.png b/docs/static/img/gptr-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..76ec0b5e92c20a788103416236cbf37bc7829be9
Binary files /dev/null and b/docs/static/img/gptr-logo.png differ
diff --git a/docs/static/img/leaderboard.png b/docs/static/img/leaderboard.png
new file mode 100644
index 0000000000000000000000000000000000000000..473ad777d8fa654f41d27e7e7039a34ef3be4a27
--- /dev/null
+++ b/docs/static/img/leaderboard.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:464154dc6743c09d800b8c7f234e511f173cd13072c58b05f5d639753f9dcf34
+size 235547
diff --git a/docs/static/img/multi-agent.png b/docs/static/img/multi-agent.png
new file mode 100644
index 0000000000000000000000000000000000000000..8a4c7ea67f8305c9243520ca47714c7ac62348ba
Binary files /dev/null and b/docs/static/img/multi-agent.png differ
diff --git a/docs/yarn.lock b/docs/yarn.lock
new file mode 100644
index 0000000000000000000000000000000000000000..a68a7223cf484d5b0bfeba6449c8d3990376bede
--- /dev/null
+++ b/docs/yarn.lock
@@ -0,0 +1,8114 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+"@algolia/autocomplete-core@1.7.2":
+ version "1.7.2"
+ resolved "https://registry.npmmirror.com/@algolia/autocomplete-core/-/autocomplete-core-1.7.2.tgz"
+ integrity sha512-eclwUDC6qfApNnEfu1uWcL/rudQsn59tjEoUYZYE2JSXZrHLRjBUGMxiCoknobU2Pva8ejb0eRxpIYDtVVqdsw==
+ dependencies:
+ "@algolia/autocomplete-shared" "1.7.2"
+
+"@algolia/autocomplete-preset-algolia@1.7.2":
+ version "1.7.2"
+ resolved "https://registry.npmmirror.com/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.7.2.tgz"
+ integrity sha512-+RYEG6B0QiGGfRb2G3MtPfyrl0dALF3cQNTWBzBX6p5o01vCCGTTinAm2UKG3tfc2CnOMAtnPLkzNZyJUpnVJw==
+ dependencies:
+ "@algolia/autocomplete-shared" "1.7.2"
+
+"@algolia/autocomplete-shared@1.7.2":
+ version "1.7.2"
+ resolved "https://registry.npmmirror.com/@algolia/autocomplete-shared/-/autocomplete-shared-1.7.2.tgz"
+ integrity sha512-QCckjiC7xXHIUaIL3ektBtjJ0w7tTA3iqKcAE/Hjn1lZ5omp7i3Y4e09rAr9ZybqirL7AbxCLLq0Ra5DDPKeug==
+
+"@algolia/cache-browser-local-storage@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.14.2.tgz"
+ integrity sha512-FRweBkK/ywO+GKYfAWbrepewQsPTIEirhi1BdykX9mxvBPtGNKccYAxvGdDCumU1jL4r3cayio4psfzKMejBlA==
+ dependencies:
+ "@algolia/cache-common" "4.14.2"
+
+"@algolia/cache-common@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/cache-common/-/cache-common-4.14.2.tgz"
+ integrity sha512-SbvAlG9VqNanCErr44q6lEKD2qoK4XtFNx9Qn8FK26ePCI8I9yU7pYB+eM/cZdS9SzQCRJBbHUumVr4bsQ4uxg==
+
+"@algolia/cache-in-memory@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/cache-in-memory/-/cache-in-memory-4.14.2.tgz"
+ integrity sha512-HrOukWoop9XB/VFojPv1R5SVXowgI56T9pmezd/djh2JnVN/vXswhXV51RKy4nCpqxyHt/aGFSq2qkDvj6KiuQ==
+ dependencies:
+ "@algolia/cache-common" "4.14.2"
+
+"@algolia/client-account@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/client-account/-/client-account-4.14.2.tgz"
+ integrity sha512-WHtriQqGyibbb/Rx71YY43T0cXqyelEU0lB2QMBRXvD2X0iyeGl4qMxocgEIcbHyK7uqE7hKgjT8aBrHqhgc1w==
+ dependencies:
+ "@algolia/client-common" "4.14.2"
+ "@algolia/client-search" "4.14.2"
+ "@algolia/transporter" "4.14.2"
+
+"@algolia/client-analytics@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/client-analytics/-/client-analytics-4.14.2.tgz"
+ integrity sha512-yBvBv2mw+HX5a+aeR0dkvUbFZsiC4FKSnfqk9rrfX+QrlNOKEhCG0tJzjiOggRW4EcNqRmaTULIYvIzQVL2KYQ==
+ dependencies:
+ "@algolia/client-common" "4.14.2"
+ "@algolia/client-search" "4.14.2"
+ "@algolia/requester-common" "4.14.2"
+ "@algolia/transporter" "4.14.2"
+
+"@algolia/client-common@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/client-common/-/client-common-4.14.2.tgz"
+ integrity sha512-43o4fslNLcktgtDMVaT5XwlzsDPzlqvqesRi4MjQz2x4/Sxm7zYg5LRYFol1BIhG6EwxKvSUq8HcC/KxJu3J0Q==
+ dependencies:
+ "@algolia/requester-common" "4.14.2"
+ "@algolia/transporter" "4.14.2"
+
+"@algolia/client-personalization@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/client-personalization/-/client-personalization-4.14.2.tgz"
+ integrity sha512-ACCoLi0cL8CBZ1W/2juehSltrw2iqsQBnfiu/Rbl9W2yE6o2ZUb97+sqN/jBqYNQBS+o0ekTMKNkQjHHAcEXNw==
+ dependencies:
+ "@algolia/client-common" "4.14.2"
+ "@algolia/requester-common" "4.14.2"
+ "@algolia/transporter" "4.14.2"
+
+"@algolia/client-search@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/client-search/-/client-search-4.14.2.tgz"
+ integrity sha512-L5zScdOmcZ6NGiVbLKTvP02UbxZ0njd5Vq9nJAmPFtjffUSOGEp11BmD2oMJ5QvARgx2XbX4KzTTNS5ECYIMWw==
+ dependencies:
+ "@algolia/client-common" "4.14.2"
+ "@algolia/requester-common" "4.14.2"
+ "@algolia/transporter" "4.14.2"
+
+"@algolia/events@^4.0.1":
+ version "4.0.1"
+ resolved "https://registry.npmmirror.com/@algolia/events/-/events-4.0.1.tgz"
+ integrity sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==
+
+"@algolia/logger-common@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/logger-common/-/logger-common-4.14.2.tgz"
+ integrity sha512-/JGlYvdV++IcMHBnVFsqEisTiOeEr6cUJtpjz8zc0A9c31JrtLm318Njc72p14Pnkw3A/5lHHh+QxpJ6WFTmsA==
+
+"@algolia/logger-console@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/logger-console/-/logger-console-4.14.2.tgz"
+ integrity sha512-8S2PlpdshbkwlLCSAB5f8c91xyc84VM9Ar9EdfE9UmX+NrKNYnWR1maXXVDQQoto07G1Ol/tYFnFVhUZq0xV/g==
+ dependencies:
+ "@algolia/logger-common" "4.14.2"
+
+"@algolia/requester-browser-xhr@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.14.2.tgz"
+ integrity sha512-CEh//xYz/WfxHFh7pcMjQNWgpl4wFB85lUMRyVwaDPibNzQRVcV33YS+63fShFWc2+42YEipFGH2iPzlpszmDw==
+ dependencies:
+ "@algolia/requester-common" "4.14.2"
+
+"@algolia/requester-common@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/requester-common/-/requester-common-4.14.2.tgz"
+ integrity sha512-73YQsBOKa5fvVV3My7iZHu1sUqmjjfs9TteFWwPwDmnad7T0VTCopttcsM3OjLxZFtBnX61Xxl2T2gmG2O4ehg==
+
+"@algolia/requester-node-http@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/requester-node-http/-/requester-node-http-4.14.2.tgz"
+ integrity sha512-oDbb02kd1o5GTEld4pETlPZLY0e+gOSWjWMJHWTgDXbv9rm/o2cF7japO6Vj1ENnrqWvLBmW1OzV9g6FUFhFXg==
+ dependencies:
+ "@algolia/requester-common" "4.14.2"
+
+"@algolia/transporter@4.14.2":
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/@algolia/transporter/-/transporter-4.14.2.tgz"
+ integrity sha512-t89dfQb2T9MFQHidjHcfhh6iGMNwvuKUvojAj+JsrHAGbuSy7yE4BylhLX6R0Q1xYRoC4Vvv+O5qIw/LdnQfsQ==
+ dependencies:
+ "@algolia/cache-common" "4.14.2"
+ "@algolia/logger-common" "4.14.2"
+ "@algolia/requester-common" "4.14.2"
+
+"@ampproject/remapping@^2.1.0":
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/@ampproject/remapping/-/remapping-2.2.0.tgz"
+ integrity sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==
+ dependencies:
+ "@jridgewell/gen-mapping" "^0.1.0"
+ "@jridgewell/trace-mapping" "^0.3.9"
+
+"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.18.6", "@babel/code-frame@^7.22.13", "@babel/code-frame@^7.8.3":
+ version "7.22.13"
+ resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz"
+ integrity sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==
+ dependencies:
+ "@babel/highlight" "^7.22.13"
+ chalk "^2.4.2"
+
+"@babel/compat-data@^7.17.7", "@babel/compat-data@^7.20.0", "@babel/compat-data@^7.20.1":
+ version "7.20.1"
+ resolved "https://registry.npmmirror.com/@babel/compat-data/-/compat-data-7.20.1.tgz"
+ integrity sha512-EWZ4mE2diW3QALKvDMiXnbZpRvlj+nayZ112nK93SnhqOtpdsbVD4W+2tEoT3YNBAG9RBR0ISY758ZkOgsn6pQ==
+
+"@babel/core@7.12.9":
+ version "7.12.9"
+ resolved "https://registry.npmmirror.com/@babel/core/-/core-7.12.9.tgz"
+ integrity sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==
+ dependencies:
+ "@babel/code-frame" "^7.10.4"
+ "@babel/generator" "^7.12.5"
+ "@babel/helper-module-transforms" "^7.12.1"
+ "@babel/helpers" "^7.12.5"
+ "@babel/parser" "^7.12.7"
+ "@babel/template" "^7.12.7"
+ "@babel/traverse" "^7.12.9"
+ "@babel/types" "^7.12.7"
+ convert-source-map "^1.7.0"
+ debug "^4.1.0"
+ gensync "^1.0.0-beta.1"
+ json5 "^2.1.2"
+ lodash "^4.17.19"
+ resolve "^1.3.2"
+ semver "^5.4.1"
+ source-map "^0.5.0"
+
+"@babel/core@^7.12.16", "@babel/core@^7.12.3", "@babel/core@^7.19.6":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/core/-/core-7.20.2.tgz"
+ integrity sha512-w7DbG8DtMrJcFOi4VrLm+8QM4az8Mo+PuLBKLp2zrYRCow8W/f9xiXm5sN53C8HksCyDQwCKha9JiDoIyPjT2g==
+ dependencies:
+ "@ampproject/remapping" "^2.1.0"
+ "@babel/code-frame" "^7.18.6"
+ "@babel/generator" "^7.20.2"
+ "@babel/helper-compilation-targets" "^7.20.0"
+ "@babel/helper-module-transforms" "^7.20.2"
+ "@babel/helpers" "^7.20.1"
+ "@babel/parser" "^7.20.2"
+ "@babel/template" "^7.18.10"
+ "@babel/traverse" "^7.20.1"
+ "@babel/types" "^7.20.2"
+ convert-source-map "^1.7.0"
+ debug "^4.1.0"
+ gensync "^1.0.0-beta.2"
+ json5 "^2.2.1"
+ semver "^6.3.0"
+
+"@babel/generator@^7.12.15", "@babel/generator@^7.12.5", "@babel/generator@^7.20.2", "@babel/generator@^7.23.0":
+ version "7.23.0"
+ resolved "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz"
+ integrity sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==
+ dependencies:
+ "@babel/types" "^7.23.0"
+ "@jridgewell/gen-mapping" "^0.3.2"
+ "@jridgewell/trace-mapping" "^0.3.17"
+ jsesc "^2.5.1"
+
+"@babel/helper-annotate-as-pure@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz"
+ integrity sha512-duORpUiYrEpzKIop6iNbjnwKLAKnJ47csTyRACyEmWj0QdUrm5aqNJGHSSEQSUAvNW0ojX0dOmK9dZduvkfeXA==
+ dependencies:
+ "@babel/types" "^7.18.6"
+
+"@babel/helper-builder-binary-assignment-operator-visitor@^7.18.6":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.18.9.tgz"
+ integrity sha512-yFQ0YCHoIqarl8BCRwBL8ulYUaZpz3bNsA7oFepAzee+8/+ImtADXNOmO5vJvsPff3qi+hvpkY/NYBTrBQgdNw==
+ dependencies:
+ "@babel/helper-explode-assignable-expression" "^7.18.6"
+ "@babel/types" "^7.18.9"
+
+"@babel/helper-compilation-targets@^7.17.7", "@babel/helper-compilation-targets@^7.18.9", "@babel/helper-compilation-targets@^7.20.0":
+ version "7.20.0"
+ resolved "https://registry.npmmirror.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.20.0.tgz"
+ integrity sha512-0jp//vDGp9e8hZzBc6N/KwA5ZK3Wsm/pfm4CrY7vzegkVxc65SgSn6wYOnwHe9Js9HRQ1YTCKLGPzDtaS3RoLQ==
+ dependencies:
+ "@babel/compat-data" "^7.20.0"
+ "@babel/helper-validator-option" "^7.18.6"
+ browserslist "^4.21.3"
+ semver "^6.3.0"
+
+"@babel/helper-create-class-features-plugin@^7.18.6", "@babel/helper-create-class-features-plugin@^7.20.2":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.20.2.tgz"
+ integrity sha512-k22GoYRAHPYr9I+Gvy2ZQlAe5mGy8BqWst2wRt8cwIufWTxrsVshhIBvYNqC80N0GSFWTsqRVexOtfzlgOEDvA==
+ dependencies:
+ "@babel/helper-annotate-as-pure" "^7.18.6"
+ "@babel/helper-environment-visitor" "^7.18.9"
+ "@babel/helper-function-name" "^7.19.0"
+ "@babel/helper-member-expression-to-functions" "^7.18.9"
+ "@babel/helper-optimise-call-expression" "^7.18.6"
+ "@babel/helper-replace-supers" "^7.19.1"
+ "@babel/helper-split-export-declaration" "^7.18.6"
+
+"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.npmmirror.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.19.0.tgz"
+ integrity sha512-htnV+mHX32DF81amCDrwIDr8nrp1PTm+3wfBN9/v8QJOLEioOCOG7qNyq0nHeFiWbT3Eb7gsPwEmV64UCQ1jzw==
+ dependencies:
+ "@babel/helper-annotate-as-pure" "^7.18.6"
+ regexpu-core "^5.1.0"
+
+"@babel/helper-define-polyfill-provider@^0.3.3":
+ version "0.3.3"
+ resolved "https://registry.npmmirror.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.3.tgz"
+ integrity sha512-z5aQKU4IzbqCC1XH0nAqfsFLMVSo22SBKUc0BxGrLkolTdPTructy0ToNnlO2zA4j9Q/7pjMZf0DSY+DSTYzww==
+ dependencies:
+ "@babel/helper-compilation-targets" "^7.17.7"
+ "@babel/helper-plugin-utils" "^7.16.7"
+ debug "^4.1.1"
+ lodash.debounce "^4.0.8"
+ resolve "^1.14.2"
+ semver "^6.1.2"
+
+"@babel/helper-environment-visitor@^7.18.9", "@babel/helper-environment-visitor@^7.22.20":
+ version "7.22.20"
+ resolved "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz"
+ integrity sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==
+
+"@babel/helper-explode-assignable-expression@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.18.6.tgz"
+ integrity sha512-eyAYAsQmB80jNfg4baAtLeWAQHfHFiR483rzFK+BhETlGZaQC9bsfrugfXDCbRHLQbIA7U5NxhhOxN7p/dWIcg==
+ dependencies:
+ "@babel/types" "^7.18.6"
+
+"@babel/helper-function-name@^7.18.9", "@babel/helper-function-name@^7.19.0", "@babel/helper-function-name@^7.23.0":
+ version "7.23.0"
+ resolved "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz"
+ integrity sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==
+ dependencies:
+ "@babel/template" "^7.22.15"
+ "@babel/types" "^7.23.0"
+
+"@babel/helper-hoist-variables@^7.18.6", "@babel/helper-hoist-variables@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz"
+ integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
+"@babel/helper-member-expression-to-functions@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.18.9.tgz"
+ integrity sha512-RxifAh2ZoVU67PyKIO4AMi1wTenGfMR/O/ae0CCRqwgBAt5v7xjdtRw7UoSbsreKrQn5t7r89eruK/9JjYHuDg==
+ dependencies:
+ "@babel/types" "^7.18.9"
+
+"@babel/helper-module-imports@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz"
+ integrity sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==
+ dependencies:
+ "@babel/types" "^7.18.6"
+
+"@babel/helper-module-transforms@^7.12.1", "@babel/helper-module-transforms@^7.18.6", "@babel/helper-module-transforms@^7.19.6", "@babel/helper-module-transforms@^7.20.2":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/helper-module-transforms/-/helper-module-transforms-7.20.2.tgz"
+ integrity sha512-zvBKyJXRbmK07XhMuujYoJ48B5yvvmM6+wcpv6Ivj4Yg6qO7NOZOSnvZN9CRl1zz1Z4cKf8YejmCMh8clOoOeA==
+ dependencies:
+ "@babel/helper-environment-visitor" "^7.18.9"
+ "@babel/helper-module-imports" "^7.18.6"
+ "@babel/helper-simple-access" "^7.20.2"
+ "@babel/helper-split-export-declaration" "^7.18.6"
+ "@babel/helper-validator-identifier" "^7.19.1"
+ "@babel/template" "^7.18.10"
+ "@babel/traverse" "^7.20.1"
+ "@babel/types" "^7.20.2"
+
+"@babel/helper-optimise-call-expression@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz"
+ integrity sha512-HP59oD9/fEHQkdcbgFCnbmgH5vIQTJbxh2yf+CdM89/glUNnuzr87Q8GIjGEnOktTROemO0Pe0iPAYbqZuOUiA==
+ dependencies:
+ "@babel/types" "^7.18.6"
+
+"@babel/helper-plugin-utils@7.10.4":
+ version "7.10.4"
+ resolved "https://registry.npmmirror.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz"
+ integrity sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==
+
+"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.16.7", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.18.9", "@babel/helper-plugin-utils@^7.19.0", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.20.2.tgz"
+ integrity sha512-8RvlJG2mj4huQ4pZ+rU9lqKi9ZKiRmuvGuM2HlWmkmgOhbs6zEAw6IEiJ5cQqGbDzGZOhwuOQNtZMi/ENLjZoQ==
+
+"@babel/helper-remap-async-to-generator@^7.18.6", "@babel/helper-remap-async-to-generator@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.18.9.tgz"
+ integrity sha512-dI7q50YKd8BAv3VEfgg7PS7yD3Rtbi2J1XMXaalXO0W0164hYLnh8zpjRS0mte9MfVp/tltvr/cfdXPvJr1opA==
+ dependencies:
+ "@babel/helper-annotate-as-pure" "^7.18.6"
+ "@babel/helper-environment-visitor" "^7.18.9"
+ "@babel/helper-wrap-function" "^7.18.9"
+ "@babel/types" "^7.18.9"
+
+"@babel/helper-replace-supers@^7.18.6", "@babel/helper-replace-supers@^7.19.1":
+ version "7.19.1"
+ resolved "https://registry.npmmirror.com/@babel/helper-replace-supers/-/helper-replace-supers-7.19.1.tgz"
+ integrity sha512-T7ahH7wV0Hfs46SFh5Jz3s0B6+o8g3c+7TMxu7xKfmHikg7EAZ3I2Qk9LFhjxXq8sL7UkP5JflezNwoZa8WvWw==
+ dependencies:
+ "@babel/helper-environment-visitor" "^7.18.9"
+ "@babel/helper-member-expression-to-functions" "^7.18.9"
+ "@babel/helper-optimise-call-expression" "^7.18.6"
+ "@babel/traverse" "^7.19.1"
+ "@babel/types" "^7.19.0"
+
+"@babel/helper-simple-access@^7.19.4", "@babel/helper-simple-access@^7.20.2":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/helper-simple-access/-/helper-simple-access-7.20.2.tgz"
+ integrity sha512-+0woI/WPq59IrqDYbVGfshjT5Dmk/nnbdpcF8SnMhhXObpTq2KNBdLFRFrkVdbDOyUmHBCxzm5FHV1rACIkIbA==
+ dependencies:
+ "@babel/types" "^7.20.2"
+
+"@babel/helper-skip-transparent-expression-wrappers@^7.18.9":
+ version "7.20.0"
+ resolved "https://registry.npmmirror.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.20.0.tgz"
+ integrity sha512-5y1JYeNKfvnT8sZcK9DVRtpTbGiomYIHviSP3OQWmDPU3DeH4a1ZlT/N2lyQ5P8egjcRaT/Y9aNqUxK0WsnIIg==
+ dependencies:
+ "@babel/types" "^7.20.0"
+
+"@babel/helper-split-export-declaration@^7.18.6", "@babel/helper-split-export-declaration@^7.22.6":
+ version "7.22.6"
+ resolved "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz"
+ integrity sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
+"@babel/helper-string-parser@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz"
+ integrity sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==
+
+"@babel/helper-validator-identifier@^7.19.1", "@babel/helper-validator-identifier@^7.22.20":
+ version "7.22.20"
+ resolved "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz"
+ integrity sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==
+
+"@babel/helper-validator-option@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz"
+ integrity sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==
+
+"@babel/helper-wrap-function@^7.18.9":
+ version "7.19.0"
+ resolved "https://registry.npmmirror.com/@babel/helper-wrap-function/-/helper-wrap-function-7.19.0.tgz"
+ integrity sha512-txX8aN8CZyYGTwcLhlk87KRqncAzhh5TpQamZUa0/u3an36NtDpUP6bQgBCBcLeBs09R/OwQu3OjK0k/HwfNDg==
+ dependencies:
+ "@babel/helper-function-name" "^7.19.0"
+ "@babel/template" "^7.18.10"
+ "@babel/traverse" "^7.19.0"
+ "@babel/types" "^7.19.0"
+
+"@babel/helpers@^7.12.5", "@babel/helpers@^7.20.1":
+ version "7.20.1"
+ resolved "https://registry.npmmirror.com/@babel/helpers/-/helpers-7.20.1.tgz"
+ integrity sha512-J77mUVaDTUJFZ5BpP6mMn6OIl3rEWymk2ZxDBQJUG3P+PbmyMcF3bYWvz0ma69Af1oobDqT/iAsvzhB58xhQUg==
+ dependencies:
+ "@babel/template" "^7.18.10"
+ "@babel/traverse" "^7.20.1"
+ "@babel/types" "^7.20.0"
+
+"@babel/highlight@^7.22.13":
+ version "7.22.20"
+ resolved "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz"
+ integrity sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==
+ dependencies:
+ "@babel/helper-validator-identifier" "^7.22.20"
+ chalk "^2.4.2"
+ js-tokens "^4.0.0"
+
+"@babel/parser@^7.12.16", "@babel/parser@^7.12.7", "@babel/parser@^7.20.2", "@babel/parser@^7.22.15", "@babel/parser@^7.23.0":
+ version "7.23.0"
+ resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz"
+ integrity sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==
+
+"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz"
+ integrity sha512-Dgxsyg54Fx1d4Nge8UnvTrED63vrwOdPmyvPzlNN/boaliRP54pm3pGzZD1SJUwrBA+Cs/xdG8kXX6Mn/RfISQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.18.9.tgz"
+ integrity sha512-AHrP9jadvH7qlOj6PINbgSuphjQUAK7AOT7DPjBo9EHoLhQTnnK5u45e1Hd4DbSQEO9nqPWtQ89r+XEOWFScKg==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+ "@babel/helper-skip-transparent-expression-wrappers" "^7.18.9"
+ "@babel/plugin-proposal-optional-chaining" "^7.18.9"
+
+"@babel/plugin-proposal-async-generator-functions@^7.20.1":
+ version "7.20.1"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.20.1.tgz"
+ integrity sha512-Gh5rchzSwE4kC+o/6T8waD0WHEQIsDmjltY8WnWRXHUdH8axZhuH86Ov9M72YhJfDrZseQwuuWaaIT/TmePp3g==
+ dependencies:
+ "@babel/helper-environment-visitor" "^7.18.9"
+ "@babel/helper-plugin-utils" "^7.19.0"
+ "@babel/helper-remap-async-to-generator" "^7.18.9"
+ "@babel/plugin-syntax-async-generators" "^7.8.4"
+
+"@babel/plugin-proposal-class-properties@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz"
+ integrity sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==
+ dependencies:
+ "@babel/helper-create-class-features-plugin" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-proposal-class-static-block@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.6.tgz"
+ integrity sha512-+I3oIiNxrCpup3Gi8n5IGMwj0gOCAjcJUSQEcotNnCCPMEnixawOQ+KeJPlgfjzx+FKQ1QSyZOWe7wmoJp7vhw==
+ dependencies:
+ "@babel/helper-create-class-features-plugin" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/plugin-syntax-class-static-block" "^7.14.5"
+
+"@babel/plugin-proposal-dynamic-import@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz"
+ integrity sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/plugin-syntax-dynamic-import" "^7.8.3"
+
+"@babel/plugin-proposal-export-namespace-from@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz"
+ integrity sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+ "@babel/plugin-syntax-export-namespace-from" "^7.8.3"
+
+"@babel/plugin-proposal-json-strings@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz"
+ integrity sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/plugin-syntax-json-strings" "^7.8.3"
+
+"@babel/plugin-proposal-logical-assignment-operators@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.18.9.tgz"
+ integrity sha512-128YbMpjCrP35IOExw2Fq+x55LMP42DzhOhX2aNNIdI9avSWl2PI0yuBWarr3RYpZBSPtabfadkH2yeRiMD61Q==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+ "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4"
+
+"@babel/plugin-proposal-nullish-coalescing-operator@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz"
+ integrity sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3"
+
+"@babel/plugin-proposal-numeric-separator@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz"
+ integrity sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/plugin-syntax-numeric-separator" "^7.10.4"
+
+"@babel/plugin-proposal-object-rest-spread@7.12.1":
+ version "7.12.1"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz"
+ integrity sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.10.4"
+ "@babel/plugin-syntax-object-rest-spread" "^7.8.0"
+ "@babel/plugin-transform-parameters" "^7.12.1"
+
+"@babel/plugin-proposal-object-rest-spread@^7.20.2":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.2.tgz"
+ integrity sha512-Ks6uej9WFK+fvIMesSqbAto5dD8Dz4VuuFvGJFKgIGSkJuRGcrwGECPA1fDgQK3/DbExBJpEkTeYeB8geIFCSQ==
+ dependencies:
+ "@babel/compat-data" "^7.20.1"
+ "@babel/helper-compilation-targets" "^7.20.0"
+ "@babel/helper-plugin-utils" "^7.20.2"
+ "@babel/plugin-syntax-object-rest-spread" "^7.8.3"
+ "@babel/plugin-transform-parameters" "^7.20.1"
+
+"@babel/plugin-proposal-optional-catch-binding@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz"
+ integrity sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/plugin-syntax-optional-catch-binding" "^7.8.3"
+
+"@babel/plugin-proposal-optional-chaining@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.18.9.tgz"
+ integrity sha512-v5nwt4IqBXihxGsW2QmCWMDS3B3bzGIk/EQVZz2ei7f3NJl8NzAJVvUmpDW5q1CRNY+Beb/k58UAH1Km1N411w==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+ "@babel/helper-skip-transparent-expression-wrappers" "^7.18.9"
+ "@babel/plugin-syntax-optional-chaining" "^7.8.3"
+
+"@babel/plugin-proposal-private-methods@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz"
+ integrity sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==
+ dependencies:
+ "@babel/helper-create-class-features-plugin" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-proposal-private-property-in-object@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.18.6.tgz"
+ integrity sha512-9Rysx7FOctvT5ouj5JODjAFAkgGoudQuLPamZb0v1TGLpapdNaftzifU8NTWQm0IRjqoYypdrSmyWgkocDQ8Dw==
+ dependencies:
+ "@babel/helper-annotate-as-pure" "^7.18.6"
+ "@babel/helper-create-class-features-plugin" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/plugin-syntax-private-property-in-object" "^7.14.5"
+
+"@babel/plugin-proposal-unicode-property-regex@^7.18.6", "@babel/plugin-proposal-unicode-property-regex@^7.4.4":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz"
+ integrity sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==
+ dependencies:
+ "@babel/helper-create-regexp-features-plugin" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-syntax-async-generators@^7.8.4":
+ version "7.8.4"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz"
+ integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.8.0"
+
+"@babel/plugin-syntax-class-properties@^7.12.13":
+ version "7.12.13"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz"
+ integrity sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.12.13"
+
+"@babel/plugin-syntax-class-static-block@^7.14.5":
+ version "7.14.5"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz"
+ integrity sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.14.5"
+
+"@babel/plugin-syntax-dynamic-import@^7.8.3":
+ version "7.8.3"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz"
+ integrity sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.8.0"
+
+"@babel/plugin-syntax-export-namespace-from@^7.8.3":
+ version "7.8.3"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz"
+ integrity sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.8.3"
+
+"@babel/plugin-syntax-import-assertions@^7.20.0":
+ version "7.20.0"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.20.0.tgz"
+ integrity sha512-IUh1vakzNoWalR8ch/areW7qFopR2AEw03JlG7BbrDqmQ4X3q9uuipQwSGrUn7oGiemKjtSLDhNtQHzMHr1JdQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.19.0"
+
+"@babel/plugin-syntax-json-strings@^7.8.3":
+ version "7.8.3"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz"
+ integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.8.0"
+
+"@babel/plugin-syntax-jsx@7.12.1":
+ version "7.12.1"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz"
+ integrity sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.10.4"
+
+"@babel/plugin-syntax-jsx@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.18.6.tgz"
+ integrity sha512-6mmljtAedFGTWu2p/8WIORGwy+61PLgOMPOdazc7YoJ9ZCWUyFy3A6CpPkRKLKD1ToAesxX8KGEViAiLo9N+7Q==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-syntax-logical-assignment-operators@^7.10.4":
+ version "7.10.4"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz"
+ integrity sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.10.4"
+
+"@babel/plugin-syntax-nullish-coalescing-operator@^7.8.3":
+ version "7.8.3"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz"
+ integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.8.0"
+
+"@babel/plugin-syntax-numeric-separator@^7.10.4":
+ version "7.10.4"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz"
+ integrity sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.10.4"
+
+"@babel/plugin-syntax-object-rest-spread@7.8.3", "@babel/plugin-syntax-object-rest-spread@^7.8.0", "@babel/plugin-syntax-object-rest-spread@^7.8.3":
+ version "7.8.3"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz"
+ integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.8.0"
+
+"@babel/plugin-syntax-optional-catch-binding@^7.8.3":
+ version "7.8.3"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz"
+ integrity sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.8.0"
+
+"@babel/plugin-syntax-optional-chaining@^7.8.3":
+ version "7.8.3"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz"
+ integrity sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.8.0"
+
+"@babel/plugin-syntax-private-property-in-object@^7.14.5":
+ version "7.14.5"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz"
+ integrity sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.14.5"
+
+"@babel/plugin-syntax-top-level-await@^7.14.5":
+ version "7.14.5"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz"
+ integrity sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.14.5"
+
+"@babel/plugin-syntax-typescript@^7.20.0":
+ version "7.20.0"
+ resolved "https://registry.npmmirror.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.20.0.tgz"
+ integrity sha512-rd9TkG+u1CExzS4SM1BlMEhMXwFLKVjOAFFCDx9PbX5ycJWDoWMcwdJH9RhkPu1dOgn5TrxLot/Gx6lWFuAUNQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.19.0"
+
+"@babel/plugin-transform-arrow-functions@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.18.6.tgz"
+ integrity sha512-9S9X9RUefzrsHZmKMbDXxweEH+YlE8JJEuat9FdvW9Qh1cw7W64jELCtWNkPBPX5En45uy28KGvA/AySqUh8CQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-async-to-generator@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.18.6.tgz"
+ integrity sha512-ARE5wZLKnTgPW7/1ftQmSi1CmkqqHo2DNmtztFhvgtOWSDfq0Cq9/9L+KnZNYSNrydBekhW3rwShduf59RoXag==
+ dependencies:
+ "@babel/helper-module-imports" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/helper-remap-async-to-generator" "^7.18.6"
+
+"@babel/plugin-transform-block-scoped-functions@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.18.6.tgz"
+ integrity sha512-ExUcOqpPWnliRcPqves5HJcJOvHvIIWfuS4sroBUenPuMdmW+SMHDakmtS7qOo13sVppmUijqeTv7qqGsvURpQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-block-scoping@^7.20.2":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.20.2.tgz"
+ integrity sha512-y5V15+04ry69OV2wULmwhEA6jwSWXO1TwAtIwiPXcvHcoOQUqpyMVd2bDsQJMW8AurjulIyUV8kDqtjSwHy1uQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.20.2"
+
+"@babel/plugin-transform-classes@^7.20.2":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.20.2.tgz"
+ integrity sha512-9rbPp0lCVVoagvtEyQKSo5L8oo0nQS/iif+lwlAz29MccX2642vWDlSZK+2T2buxbopotId2ld7zZAzRfz9j1g==
+ dependencies:
+ "@babel/helper-annotate-as-pure" "^7.18.6"
+ "@babel/helper-compilation-targets" "^7.20.0"
+ "@babel/helper-environment-visitor" "^7.18.9"
+ "@babel/helper-function-name" "^7.19.0"
+ "@babel/helper-optimise-call-expression" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.20.2"
+ "@babel/helper-replace-supers" "^7.19.1"
+ "@babel/helper-split-export-declaration" "^7.18.6"
+ globals "^11.1.0"
+
+"@babel/plugin-transform-computed-properties@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.18.9.tgz"
+ integrity sha512-+i0ZU1bCDymKakLxn5srGHrsAPRELC2WIbzwjLhHW9SIE1cPYkLCL0NlnXMZaM1vhfgA2+M7hySk42VBvrkBRw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+
+"@babel/plugin-transform-destructuring@^7.20.2":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.20.2.tgz"
+ integrity sha512-mENM+ZHrvEgxLTBXUiQ621rRXZes3KWUv6NdQlrnr1TkWVw+hUjQBZuP2X32qKlrlG2BzgR95gkuCRSkJl8vIw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.20.2"
+
+"@babel/plugin-transform-dotall-regex@^7.18.6", "@babel/plugin-transform-dotall-regex@^7.4.4":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.18.6.tgz"
+ integrity sha512-6S3jpun1eEbAxq7TdjLotAsl4WpQI9DxfkycRcKrjhQYzU87qpXdknpBg/e+TdcMehqGnLFi7tnFUBR02Vq6wg==
+ dependencies:
+ "@babel/helper-create-regexp-features-plugin" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-duplicate-keys@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.18.9.tgz"
+ integrity sha512-d2bmXCtZXYc59/0SanQKbiWINadaJXqtvIQIzd4+hNwkWBgyCd5F/2t1kXoUdvPMrxzPvhK6EMQRROxsue+mfw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+
+"@babel/plugin-transform-exponentiation-operator@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.18.6.tgz"
+ integrity sha512-wzEtc0+2c88FVR34aQmiz56dxEkxr2g8DQb/KfaFa1JYXOFVsbhvAonFN6PwVWj++fKmku8NP80plJ5Et4wqHw==
+ dependencies:
+ "@babel/helper-builder-binary-assignment-operator-visitor" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-for-of@^7.18.8":
+ version "7.18.8"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.8.tgz"
+ integrity sha512-yEfTRnjuskWYo0k1mHUqrVWaZwrdq8AYbfrpqULOJOaucGSp4mNMVps+YtA8byoevxS/urwU75vyhQIxcCgiBQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-function-name@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.18.9.tgz"
+ integrity sha512-WvIBoRPaJQ5yVHzcnJFor7oS5Ls0PYixlTYE63lCj2RtdQEl15M68FXQlxnG6wdraJIXRdR7KI+hQ7q/9QjrCQ==
+ dependencies:
+ "@babel/helper-compilation-targets" "^7.18.9"
+ "@babel/helper-function-name" "^7.18.9"
+ "@babel/helper-plugin-utils" "^7.18.9"
+
+"@babel/plugin-transform-literals@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.18.9.tgz"
+ integrity sha512-IFQDSRoTPnrAIrI5zoZv73IFeZu2dhu6irxQjY9rNjTT53VmKg9fenjvoiOWOkJ6mm4jKVPtdMzBY98Fp4Z4cg==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+
+"@babel/plugin-transform-member-expression-literals@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.18.6.tgz"
+ integrity sha512-qSF1ihLGO3q+/g48k85tUjD033C29TNTVB2paCwZPVmOsjn9pClvYYrM2VeJpBY2bcNkuny0YUyTNRyRxJ54KA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-modules-amd@^7.19.6":
+ version "7.19.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.19.6.tgz"
+ integrity sha512-uG3od2mXvAtIFQIh0xrpLH6r5fpSQN04gIVovl+ODLdUMANokxQLZnPBHcjmv3GxRjnqwLuHvppjjcelqUFZvg==
+ dependencies:
+ "@babel/helper-module-transforms" "^7.19.6"
+ "@babel/helper-plugin-utils" "^7.19.0"
+
+"@babel/plugin-transform-modules-commonjs@^7.19.6":
+ version "7.19.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.19.6.tgz"
+ integrity sha512-8PIa1ym4XRTKuSsOUXqDG0YaOlEuTVvHMe5JCfgBMOtHvJKw/4NGovEGN33viISshG/rZNVrACiBmPQLvWN8xQ==
+ dependencies:
+ "@babel/helper-module-transforms" "^7.19.6"
+ "@babel/helper-plugin-utils" "^7.19.0"
+ "@babel/helper-simple-access" "^7.19.4"
+
+"@babel/plugin-transform-modules-systemjs@^7.19.6":
+ version "7.19.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.19.6.tgz"
+ integrity sha512-fqGLBepcc3kErfR9R3DnVpURmckXP7gj7bAlrTQyBxrigFqszZCkFkcoxzCp2v32XmwXLvbw+8Yq9/b+QqksjQ==
+ dependencies:
+ "@babel/helper-hoist-variables" "^7.18.6"
+ "@babel/helper-module-transforms" "^7.19.6"
+ "@babel/helper-plugin-utils" "^7.19.0"
+ "@babel/helper-validator-identifier" "^7.19.1"
+
+"@babel/plugin-transform-modules-umd@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz"
+ integrity sha512-dcegErExVeXcRqNtkRU/z8WlBLnvD4MRnHgNs3MytRO1Mn1sHRyhbcpYbVMGclAqOjdW+9cfkdZno9dFdfKLfQ==
+ dependencies:
+ "@babel/helper-module-transforms" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-named-capturing-groups-regex@^7.19.1":
+ version "7.19.1"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.19.1.tgz"
+ integrity sha512-oWk9l9WItWBQYS4FgXD4Uyy5kq898lvkXpXQxoJEY1RnvPk4R/Dvu2ebXU9q8lP+rlMwUQTFf2Ok6d78ODa0kw==
+ dependencies:
+ "@babel/helper-create-regexp-features-plugin" "^7.19.0"
+ "@babel/helper-plugin-utils" "^7.19.0"
+
+"@babel/plugin-transform-new-target@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.18.6.tgz"
+ integrity sha512-DjwFA/9Iu3Z+vrAn+8pBUGcjhxKguSMlsFqeCKbhb9BAV756v0krzVK04CRDi/4aqmk8BsHb4a/gFcaA5joXRw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-object-super@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.18.6.tgz"
+ integrity sha512-uvGz6zk+pZoS1aTZrOvrbj6Pp/kK2mp45t2B+bTDre2UgsZZ8EZLSJtUg7m/no0zOJUWgFONpB7Zv9W2tSaFlA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/helper-replace-supers" "^7.18.6"
+
+"@babel/plugin-transform-parameters@^7.12.1", "@babel/plugin-transform-parameters@^7.20.1":
+ version "7.20.1"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.20.1.tgz"
+ integrity sha512-nDvKLrAvl+kf6BOy1UJ3MGwzzfTMgppxwiD2Jb4LO3xjYyZq30oQzDNJbCQpMdG9+j2IXHoiMrw5Cm/L6ZoxXQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.19.0"
+
+"@babel/plugin-transform-property-literals@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.18.6.tgz"
+ integrity sha512-cYcs6qlgafTud3PAzrrRNbQtfpQ8+y/+M5tKmksS9+M1ckbH6kzY8MrexEM9mcA6JDsukE19iIRvAyYl463sMg==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-react-constant-elements@^7.12.1", "@babel/plugin-transform-react-constant-elements@^7.18.12":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.20.2.tgz"
+ integrity sha512-KS/G8YI8uwMGKErLFOHS/ekhqdHhpEloxs43NecQHVgo2QuQSyJhGIY1fL8UGl9wy5ItVwwoUL4YxVqsplGq2g==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.20.2"
+
+"@babel/plugin-transform-react-display-name@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.18.6.tgz"
+ integrity sha512-TV4sQ+T013n61uMoygyMRm+xf04Bd5oqFpv2jAEQwSZ8NwQA7zeRPg1LMVg2PWi3zWBz+CLKD+v5bcpZ/BS0aA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-react-jsx-development@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.18.6.tgz"
+ integrity sha512-SA6HEjwYFKF7WDjWcMcMGUimmw/nhNRDWxr+KaLSCrkD/LMDBvWRmHAYgE1HDeF8KUuI8OAu+RT6EOtKxSW2qA==
+ dependencies:
+ "@babel/plugin-transform-react-jsx" "^7.18.6"
+
+"@babel/plugin-transform-react-jsx@^7.18.6":
+ version "7.19.0"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.19.0.tgz"
+ integrity sha512-UVEvX3tXie3Szm3emi1+G63jyw1w5IcMY0FSKM+CRnKRI5Mr1YbCNgsSTwoTwKphQEG9P+QqmuRFneJPZuHNhg==
+ dependencies:
+ "@babel/helper-annotate-as-pure" "^7.18.6"
+ "@babel/helper-module-imports" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.19.0"
+ "@babel/plugin-syntax-jsx" "^7.18.6"
+ "@babel/types" "^7.19.0"
+
+"@babel/plugin-transform-react-pure-annotations@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.18.6.tgz"
+ integrity sha512-I8VfEPg9r2TRDdvnHgPepTKvuRomzA8+u+nhY7qSI1fR2hRNebasZEETLyM5mAUr0Ku56OkXJ0I7NHJnO6cJiQ==
+ dependencies:
+ "@babel/helper-annotate-as-pure" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-regenerator@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.6.tgz"
+ integrity sha512-poqRI2+qiSdeldcz4wTSTXBRryoq3Gc70ye7m7UD5Ww0nE29IXqMl6r7Nd15WBgRd74vloEMlShtH6CKxVzfmQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+ regenerator-transform "^0.15.0"
+
+"@babel/plugin-transform-reserved-words@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.18.6.tgz"
+ integrity sha512-oX/4MyMoypzHjFrT1CdivfKZ+XvIPMFXwwxHp/r0Ddy2Vuomt4HDFGmft1TAY2yiTKiNSsh3kjBAzcM8kSdsjA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-runtime@^7.15.0":
+ version "7.19.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.19.6.tgz"
+ integrity sha512-PRH37lz4JU156lYFW1p8OxE5i7d6Sl/zV58ooyr+q1J1lnQPyg5tIiXlIwNVhJaY4W3TmOtdc8jqdXQcB1v5Yw==
+ dependencies:
+ "@babel/helper-module-imports" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.19.0"
+ babel-plugin-polyfill-corejs2 "^0.3.3"
+ babel-plugin-polyfill-corejs3 "^0.6.0"
+ babel-plugin-polyfill-regenerator "^0.4.1"
+ semver "^6.3.0"
+
+"@babel/plugin-transform-shorthand-properties@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.18.6.tgz"
+ integrity sha512-eCLXXJqv8okzg86ywZJbRn19YJHU4XUa55oz2wbHhaQVn/MM+XhukiT7SYqp/7o00dg52Rj51Ny+Ecw4oyoygw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-spread@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.19.0.tgz"
+ integrity sha512-RsuMk7j6n+r752EtzyScnWkQyuJdli6LdO5Klv8Yx0OfPVTcQkIUfS8clx5e9yHXzlnhOZF3CbQ8C2uP5j074w==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.19.0"
+ "@babel/helper-skip-transparent-expression-wrappers" "^7.18.9"
+
+"@babel/plugin-transform-sticky-regex@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.18.6.tgz"
+ integrity sha512-kfiDrDQ+PBsQDO85yj1icueWMfGfJFKN1KCkndygtu/C9+XUfydLC8Iv5UYJqRwy4zk8EcplRxEOeLyjq1gm6Q==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/plugin-transform-template-literals@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.9.tgz"
+ integrity sha512-S8cOWfT82gTezpYOiVaGHrCbhlHgKhQt8XH5ES46P2XWmX92yisoZywf5km75wv5sYcXDUCLMmMxOLCtthDgMA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+
+"@babel/plugin-transform-typeof-symbol@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.18.9.tgz"
+ integrity sha512-SRfwTtF11G2aemAZWivL7PD+C9z52v9EvMqH9BuYbabyPuKUvSWks3oCg6041pT925L4zVFqaVBeECwsmlguEw==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+
+"@babel/plugin-transform-typescript@^7.18.6":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.20.2.tgz"
+ integrity sha512-jvS+ngBfrnTUBfOQq8NfGnSbF9BrqlR6hjJ2yVxMkmO5nL/cdifNbI30EfjRlN4g5wYWNnMPyj5Sa6R1pbLeag==
+ dependencies:
+ "@babel/helper-create-class-features-plugin" "^7.20.2"
+ "@babel/helper-plugin-utils" "^7.20.2"
+ "@babel/plugin-syntax-typescript" "^7.20.0"
+
+"@babel/plugin-transform-unicode-escapes@^7.18.10":
+ version "7.18.10"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.18.10.tgz"
+ integrity sha512-kKAdAI+YzPgGY/ftStBFXTI1LZFju38rYThnfMykS+IXy8BVx+res7s2fxf1l8I35DV2T97ezo6+SGrXz6B3iQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.9"
+
+"@babel/plugin-transform-unicode-regex@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.18.6.tgz"
+ integrity sha512-gE7A6Lt7YLnNOL3Pb9BNeZvi+d8l7tcRrG4+pwJjK9hD2xX4mEvjlQW60G9EEmfXVYRPv9VRQcyegIVHCql/AA==
+ dependencies:
+ "@babel/helper-create-regexp-features-plugin" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.18.6"
+
+"@babel/preset-env@^7.12.1", "@babel/preset-env@^7.15.6", "@babel/preset-env@^7.19.4":
+ version "7.20.2"
+ resolved "https://registry.npmmirror.com/@babel/preset-env/-/preset-env-7.20.2.tgz"
+ integrity sha512-1G0efQEWR1EHkKvKHqbG+IN/QdgwfByUpM5V5QroDzGV2t3S/WXNQd693cHiHTlCFMpr9B6FkPFXDA2lQcKoDg==
+ dependencies:
+ "@babel/compat-data" "^7.20.1"
+ "@babel/helper-compilation-targets" "^7.20.0"
+ "@babel/helper-plugin-utils" "^7.20.2"
+ "@babel/helper-validator-option" "^7.18.6"
+ "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.18.6"
+ "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.18.9"
+ "@babel/plugin-proposal-async-generator-functions" "^7.20.1"
+ "@babel/plugin-proposal-class-properties" "^7.18.6"
+ "@babel/plugin-proposal-class-static-block" "^7.18.6"
+ "@babel/plugin-proposal-dynamic-import" "^7.18.6"
+ "@babel/plugin-proposal-export-namespace-from" "^7.18.9"
+ "@babel/plugin-proposal-json-strings" "^7.18.6"
+ "@babel/plugin-proposal-logical-assignment-operators" "^7.18.9"
+ "@babel/plugin-proposal-nullish-coalescing-operator" "^7.18.6"
+ "@babel/plugin-proposal-numeric-separator" "^7.18.6"
+ "@babel/plugin-proposal-object-rest-spread" "^7.20.2"
+ "@babel/plugin-proposal-optional-catch-binding" "^7.18.6"
+ "@babel/plugin-proposal-optional-chaining" "^7.18.9"
+ "@babel/plugin-proposal-private-methods" "^7.18.6"
+ "@babel/plugin-proposal-private-property-in-object" "^7.18.6"
+ "@babel/plugin-proposal-unicode-property-regex" "^7.18.6"
+ "@babel/plugin-syntax-async-generators" "^7.8.4"
+ "@babel/plugin-syntax-class-properties" "^7.12.13"
+ "@babel/plugin-syntax-class-static-block" "^7.14.5"
+ "@babel/plugin-syntax-dynamic-import" "^7.8.3"
+ "@babel/plugin-syntax-export-namespace-from" "^7.8.3"
+ "@babel/plugin-syntax-import-assertions" "^7.20.0"
+ "@babel/plugin-syntax-json-strings" "^7.8.3"
+ "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4"
+ "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3"
+ "@babel/plugin-syntax-numeric-separator" "^7.10.4"
+ "@babel/plugin-syntax-object-rest-spread" "^7.8.3"
+ "@babel/plugin-syntax-optional-catch-binding" "^7.8.3"
+ "@babel/plugin-syntax-optional-chaining" "^7.8.3"
+ "@babel/plugin-syntax-private-property-in-object" "^7.14.5"
+ "@babel/plugin-syntax-top-level-await" "^7.14.5"
+ "@babel/plugin-transform-arrow-functions" "^7.18.6"
+ "@babel/plugin-transform-async-to-generator" "^7.18.6"
+ "@babel/plugin-transform-block-scoped-functions" "^7.18.6"
+ "@babel/plugin-transform-block-scoping" "^7.20.2"
+ "@babel/plugin-transform-classes" "^7.20.2"
+ "@babel/plugin-transform-computed-properties" "^7.18.9"
+ "@babel/plugin-transform-destructuring" "^7.20.2"
+ "@babel/plugin-transform-dotall-regex" "^7.18.6"
+ "@babel/plugin-transform-duplicate-keys" "^7.18.9"
+ "@babel/plugin-transform-exponentiation-operator" "^7.18.6"
+ "@babel/plugin-transform-for-of" "^7.18.8"
+ "@babel/plugin-transform-function-name" "^7.18.9"
+ "@babel/plugin-transform-literals" "^7.18.9"
+ "@babel/plugin-transform-member-expression-literals" "^7.18.6"
+ "@babel/plugin-transform-modules-amd" "^7.19.6"
+ "@babel/plugin-transform-modules-commonjs" "^7.19.6"
+ "@babel/plugin-transform-modules-systemjs" "^7.19.6"
+ "@babel/plugin-transform-modules-umd" "^7.18.6"
+ "@babel/plugin-transform-named-capturing-groups-regex" "^7.19.1"
+ "@babel/plugin-transform-new-target" "^7.18.6"
+ "@babel/plugin-transform-object-super" "^7.18.6"
+ "@babel/plugin-transform-parameters" "^7.20.1"
+ "@babel/plugin-transform-property-literals" "^7.18.6"
+ "@babel/plugin-transform-regenerator" "^7.18.6"
+ "@babel/plugin-transform-reserved-words" "^7.18.6"
+ "@babel/plugin-transform-shorthand-properties" "^7.18.6"
+ "@babel/plugin-transform-spread" "^7.19.0"
+ "@babel/plugin-transform-sticky-regex" "^7.18.6"
+ "@babel/plugin-transform-template-literals" "^7.18.9"
+ "@babel/plugin-transform-typeof-symbol" "^7.18.9"
+ "@babel/plugin-transform-unicode-escapes" "^7.18.10"
+ "@babel/plugin-transform-unicode-regex" "^7.18.6"
+ "@babel/preset-modules" "^0.1.5"
+ "@babel/types" "^7.20.2"
+ babel-plugin-polyfill-corejs2 "^0.3.3"
+ babel-plugin-polyfill-corejs3 "^0.6.0"
+ babel-plugin-polyfill-regenerator "^0.4.1"
+ core-js-compat "^3.25.1"
+ semver "^6.3.0"
+
+"@babel/preset-modules@^0.1.5":
+ version "0.1.5"
+ resolved "https://registry.npmmirror.com/@babel/preset-modules/-/preset-modules-0.1.5.tgz"
+ integrity sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.0.0"
+ "@babel/plugin-proposal-unicode-property-regex" "^7.4.4"
+ "@babel/plugin-transform-dotall-regex" "^7.4.4"
+ "@babel/types" "^7.4.4"
+ esutils "^2.0.2"
+
+"@babel/preset-react@^7.12.13", "@babel/preset-react@^7.12.5", "@babel/preset-react@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/preset-react/-/preset-react-7.18.6.tgz"
+ integrity sha512-zXr6atUmyYdiWRVLOZahakYmOBHtWc2WGCkP8PYTgZi0iJXDY2CN180TdrIW4OGOAdLc7TifzDIvtx6izaRIzg==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/helper-validator-option" "^7.18.6"
+ "@babel/plugin-transform-react-display-name" "^7.18.6"
+ "@babel/plugin-transform-react-jsx" "^7.18.6"
+ "@babel/plugin-transform-react-jsx-development" "^7.18.6"
+ "@babel/plugin-transform-react-pure-annotations" "^7.18.6"
+
+"@babel/preset-typescript@^7.12.16", "@babel/preset-typescript@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.npmmirror.com/@babel/preset-typescript/-/preset-typescript-7.18.6.tgz"
+ integrity sha512-s9ik86kXBAnD760aybBucdpnLsAt0jK1xqJn2juOn9lkOvSHV60os5hxoVJsPzMQxvnUJFAlkont2DvvaYEBtQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/helper-validator-option" "^7.18.6"
+ "@babel/plugin-transform-typescript" "^7.18.6"
+
+"@babel/runtime-corejs3@^7.15.4":
+ version "7.20.1"
+ resolved "https://registry.npmmirror.com/@babel/runtime-corejs3/-/runtime-corejs3-7.20.1.tgz"
+ integrity sha512-CGulbEDcg/ND1Im7fUNRZdGXmX2MTWVVZacQi/6DiKE5HNwZ3aVTm5PV4lO8HHz0B2h8WQyvKKjbX5XgTtydsg==
+ dependencies:
+ core-js-pure "^3.25.1"
+ regenerator-runtime "^0.13.10"
+
+"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.2", "@babel/runtime@^7.10.3", "@babel/runtime@^7.12.13", "@babel/runtime@^7.15.4", "@babel/runtime@^7.8.4":
+ version "7.20.1"
+ resolved "https://registry.npmmirror.com/@babel/runtime/-/runtime-7.20.1.tgz"
+ integrity sha512-mrzLkl6U9YLF8qpqI7TB82PESyEGjm/0Ly91jG575eVxMMlb8fYfOXFZIJ8XfLrJZQbm7dlKry2bJmXBUEkdFg==
+ dependencies:
+ regenerator-runtime "^0.13.10"
+
+"@babel/template@^7.12.7", "@babel/template@^7.18.10", "@babel/template@^7.22.15":
+ version "7.22.15"
+ resolved "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz"
+ integrity sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==
+ dependencies:
+ "@babel/code-frame" "^7.22.13"
+ "@babel/parser" "^7.22.15"
+ "@babel/types" "^7.22.15"
+
+"@babel/traverse@^7.12.13", "@babel/traverse@^7.12.9", "@babel/traverse@^7.19.0", "@babel/traverse@^7.19.1", "@babel/traverse@^7.20.1":
+ version "7.23.2"
+ resolved "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz"
+ integrity sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==
+ dependencies:
+ "@babel/code-frame" "^7.22.13"
+ "@babel/generator" "^7.23.0"
+ "@babel/helper-environment-visitor" "^7.22.20"
+ "@babel/helper-function-name" "^7.23.0"
+ "@babel/helper-hoist-variables" "^7.22.5"
+ "@babel/helper-split-export-declaration" "^7.22.6"
+ "@babel/parser" "^7.23.0"
+ "@babel/types" "^7.23.0"
+ debug "^4.1.0"
+ globals "^11.1.0"
+
+"@babel/types@^7.12.6", "@babel/types@^7.12.7", "@babel/types@^7.18.6", "@babel/types@^7.18.9", "@babel/types@^7.19.0", "@babel/types@^7.20.0", "@babel/types@^7.20.2", "@babel/types@^7.22.15", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.4.4":
+ version "7.23.0"
+ resolved "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz"
+ integrity sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==
+ dependencies:
+ "@babel/helper-string-parser" "^7.22.5"
+ "@babel/helper-validator-identifier" "^7.22.20"
+ to-fast-properties "^2.0.0"
+
+"@docsearch/css@3.3.0":
+ version "3.3.0"
+ resolved "https://registry.npmmirror.com/@docsearch/css/-/css-3.3.0.tgz"
+ integrity sha512-rODCdDtGyudLj+Va8b6w6Y85KE85bXRsps/R4Yjwt5vueXKXZQKYw0aA9knxLBT6a/bI/GMrAcmCR75KYOM6hg==
+
+"@docsearch/react@^3.0.0-alpha.39":
+ version "3.3.0"
+ resolved "https://registry.npmmirror.com/@docsearch/react/-/react-3.3.0.tgz"
+ integrity sha512-fhS5adZkae2SSdMYEMVg6pxI5a/cE+tW16ki1V0/ur4Fdok3hBRkmN/H8VvlXnxzggkQIIRIVvYPn00JPjen3A==
+ dependencies:
+ "@algolia/autocomplete-core" "1.7.2"
+ "@algolia/autocomplete-preset-algolia" "1.7.2"
+ "@docsearch/css" "3.3.0"
+ algoliasearch "^4.0.0"
+
+"@docusaurus/core@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/core/-/core-0.0.0-4193.tgz"
+ integrity sha512-mC3YLaFgK8JqW3E7b2lCtIlQOVnzqOP0FwtI0+ilkx9v9F+DfgNQzJJ7Kk2RIXeDKu0e2AnjS7YBmRUwpgHRRg==
+ dependencies:
+ "@babel/core" "^7.12.16"
+ "@babel/generator" "^7.12.15"
+ "@babel/plugin-syntax-dynamic-import" "^7.8.3"
+ "@babel/plugin-transform-runtime" "^7.15.0"
+ "@babel/preset-env" "^7.15.6"
+ "@babel/preset-react" "^7.12.13"
+ "@babel/preset-typescript" "^7.12.16"
+ "@babel/runtime" "^7.15.4"
+ "@babel/runtime-corejs3" "^7.15.4"
+ "@babel/traverse" "^7.12.13"
+ "@docusaurus/cssnano-preset" "0.0.0-4193"
+ "@docusaurus/react-loadable" "5.5.2"
+ "@docusaurus/types" "0.0.0-4193"
+ "@docusaurus/utils" "0.0.0-4193"
+ "@docusaurus/utils-common" "0.0.0-4193"
+ "@docusaurus/utils-validation" "0.0.0-4193"
+ "@slorber/static-site-generator-webpack-plugin" "^4.0.0"
+ "@svgr/webpack" "^5.5.0"
+ autoprefixer "^10.3.5"
+ babel-loader "^8.2.2"
+ babel-plugin-dynamic-import-node "2.3.0"
+ boxen "^5.0.1"
+ chalk "^4.1.2"
+ chokidar "^3.5.2"
+ clean-css "^5.1.5"
+ commander "^5.1.0"
+ copy-webpack-plugin "^9.0.1"
+ core-js "^3.18.0"
+ css-loader "^5.1.1"
+ css-minimizer-webpack-plugin "^3.0.2"
+ cssnano "^5.0.8"
+ del "^6.0.0"
+ detect-port "^1.3.0"
+ escape-html "^1.0.3"
+ eta "^1.12.3"
+ file-loader "^6.2.0"
+ fs-extra "^10.0.0"
+ github-slugger "^1.4.0"
+ globby "^11.0.2"
+ html-minifier-terser "^6.0.2"
+ html-tags "^3.1.0"
+ html-webpack-plugin "^5.4.0"
+ import-fresh "^3.3.0"
+ is-root "^2.1.0"
+ leven "^3.1.0"
+ lodash "^4.17.20"
+ mini-css-extract-plugin "^1.6.0"
+ nprogress "^0.2.0"
+ postcss "^8.3.7"
+ postcss-loader "^6.1.1"
+ prompts "^2.4.1"
+ react-dev-utils "12.0.0-next.47"
+ react-error-overlay "^6.0.9"
+ react-helmet "^6.1.0"
+ react-loadable "npm:@docusaurus/react-loadable@5.5.2"
+ react-loadable-ssr-addon-v5-slorber "^1.0.1"
+ react-router "^5.2.0"
+ react-router-config "^5.1.1"
+ react-router-dom "^5.2.0"
+ remark-admonitions "^1.2.1"
+ resolve-pathname "^3.0.0"
+ rtl-detect "^1.0.4"
+ semver "^7.3.4"
+ serve-handler "^6.1.3"
+ shelljs "^0.8.4"
+ std-env "^2.2.1"
+ strip-ansi "^6.0.0"
+ terser-webpack-plugin "^5.2.4"
+ tslib "^2.3.1"
+ update-notifier "^5.1.0"
+ url-loader "^4.1.1"
+ wait-on "^6.0.0"
+ webpack "^5.61.0"
+ webpack-bundle-analyzer "^4.4.2"
+ webpack-dev-server "^4.4.0"
+ webpack-merge "^5.8.0"
+ webpackbar "^5.0.0-3"
+
+"@docusaurus/cssnano-preset@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/cssnano-preset/-/cssnano-preset-0.0.0-4193.tgz"
+ integrity sha512-aB8JNvHU/BW6YnBk7p9HljTOp0DIIVbeCa4WCS3Brp8U6MfM1XlMLqcBUWcCXl4dnvNGhIPKdCDToTdKm0M2MA==
+ dependencies:
+ cssnano-preset-advanced "^5.1.4"
+ postcss "^8.3.7"
+ postcss-sort-media-queries "^4.1.0"
+
+"@docusaurus/logger@2.2.0":
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/@docusaurus/logger/-/logger-2.2.0.tgz"
+ integrity sha512-DF3j1cA5y2nNsu/vk8AG7xwpZu6f5MKkPPMaaIbgXLnWGfm6+wkOeW7kNrxnM95YOhKUkJUophX69nGUnLsm0A==
+ dependencies:
+ chalk "^4.1.2"
+ tslib "^2.4.0"
+
+"@docusaurus/mdx-loader@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/mdx-loader/-/mdx-loader-0.0.0-4193.tgz"
+ integrity sha512-EN/Q/GDMirYsxdQ+ElhqWCfKQer2MQNrmKUYc+e+4lZeAUDT9E6M7HinGoImtYQbZpN5uSjDnzTYP793DGsMjQ==
+ dependencies:
+ "@babel/parser" "^7.12.16"
+ "@babel/traverse" "^7.12.13"
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/utils" "0.0.0-4193"
+ "@mdx-js/mdx" "^1.6.21"
+ "@mdx-js/react" "^1.6.21"
+ chalk "^4.1.2"
+ escape-html "^1.0.3"
+ file-loader "^6.2.0"
+ fs-extra "^10.0.0"
+ github-slugger "^1.4.0"
+ gray-matter "^4.0.3"
+ mdast-util-to-string "^2.0.0"
+ remark-emoji "^2.1.0"
+ stringify-object "^3.3.0"
+ unist-util-visit "^2.0.2"
+ url-loader "^4.1.1"
+ webpack "^5.61.0"
+
+"@docusaurus/plugin-content-blog@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/plugin-content-blog/-/plugin-content-blog-0.0.0-4193.tgz"
+ integrity sha512-5GLRK3ftr1QmcBEQokn7frauAh6g9Jsc3tQUeMUR2iWZk7C96rAfVMrY3W5Jv+6gfgGzbTG+AKAUD9e3gcGHfg==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/mdx-loader" "0.0.0-4193"
+ "@docusaurus/types" "0.0.0-4193"
+ "@docusaurus/utils" "0.0.0-4193"
+ "@docusaurus/utils-validation" "0.0.0-4193"
+ chalk "^4.1.2"
+ escape-string-regexp "^4.0.0"
+ feed "^4.2.2"
+ fs-extra "^10.0.0"
+ globby "^11.0.2"
+ js-yaml "^4.0.0"
+ loader-utils "^2.0.0"
+ lodash "^4.17.20"
+ reading-time "^1.5.0"
+ remark-admonitions "^1.2.1"
+ tslib "^2.3.1"
+ utility-types "^3.10.0"
+ webpack "^5.61.0"
+
+"@docusaurus/plugin-content-docs@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/plugin-content-docs/-/plugin-content-docs-0.0.0-4193.tgz"
+ integrity sha512-UDccCvOk/vIKtbLwkWz8oI8SWrJXmKrkI1IKO038EfcJT2OAA/Gy96jar659Z2c1bqS9QurJ5M2RHA3JBS6xrg==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/mdx-loader" "0.0.0-4193"
+ "@docusaurus/types" "0.0.0-4193"
+ "@docusaurus/utils" "0.0.0-4193"
+ "@docusaurus/utils-validation" "0.0.0-4193"
+ chalk "^4.1.2"
+ combine-promises "^1.1.0"
+ escape-string-regexp "^4.0.0"
+ fs-extra "^10.0.0"
+ globby "^11.0.2"
+ import-fresh "^3.2.2"
+ js-yaml "^4.0.0"
+ loader-utils "^2.0.0"
+ lodash "^4.17.20"
+ remark-admonitions "^1.2.1"
+ shelljs "^0.8.4"
+ tslib "^2.3.1"
+ utility-types "^3.10.0"
+ webpack "^5.61.0"
+
+"@docusaurus/plugin-content-pages@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/plugin-content-pages/-/plugin-content-pages-0.0.0-4193.tgz"
+ integrity sha512-E/vP3vGGEWtswFbUco+5elM5O/vch6vB1Lq5Zwq6ybtumalF1kXa67ZN8dlMspI4nhAPv45ZTZPT8UlRLrlqaQ==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/mdx-loader" "0.0.0-4193"
+ "@docusaurus/types" "0.0.0-4193"
+ "@docusaurus/utils" "0.0.0-4193"
+ "@docusaurus/utils-validation" "0.0.0-4193"
+ globby "^11.0.2"
+ lodash "^4.17.20"
+ remark-admonitions "^1.2.1"
+ tslib "^2.3.1"
+ webpack "^5.61.0"
+
+"@docusaurus/plugin-debug@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/plugin-debug/-/plugin-debug-0.0.0-4193.tgz"
+ integrity sha512-3+rkYliCSVKNaRR/AXatbk4xpvIax7VtEtjGBTz7T6lxo8CujqOcw+j8R2HxlQSC0PgmkKnTLMDP1umZuqnIHg==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/types" "0.0.0-4193"
+ "@docusaurus/utils" "0.0.0-4193"
+ fs-extra "^10.0.0"
+ react-json-view "^1.21.3"
+ tslib "^2.3.1"
+
+"@docusaurus/plugin-google-analytics@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-0.0.0-4193.tgz"
+ integrity sha512-wWZbr3nSFh7hEWTCmef3eih5S1s4z5+jJTCCZsq+gkHK4b9V9u81BvESB4N7T6dxt9vNEKIoYEb58iuYzUkLbw==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/utils-validation" "0.0.0-4193"
+
+"@docusaurus/plugin-google-gtag@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-0.0.0-4193.tgz"
+ integrity sha512-k7JYw7PvfarFJLDUcSOSzb3MTOL5/yi6ZIs3mTBpAaC7U+MTGVLVejVuoMKAprbtAg2RUZq5unGnBoji7xaDyQ==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/utils-validation" "0.0.0-4193"
+
+"@docusaurus/plugin-sitemap@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/plugin-sitemap/-/plugin-sitemap-0.0.0-4193.tgz"
+ integrity sha512-819ln3gj/ozT/BioQl2OrOcRvj5hvxta7h0m7TW/UbAZVx6X9QNAN9TZd49dyuTF/7y3JswU/7BxbeeOT6c/4Q==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/types" "0.0.0-4193"
+ "@docusaurus/utils" "0.0.0-4193"
+ "@docusaurus/utils-common" "0.0.0-4193"
+ "@docusaurus/utils-validation" "0.0.0-4193"
+ fs-extra "^10.0.0"
+ sitemap "^7.0.0"
+ tslib "^2.3.1"
+
+"@docusaurus/preset-classic@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/preset-classic/-/preset-classic-0.0.0-4193.tgz"
+ integrity sha512-R4hGzXQvCXVeH6+AbgnXBNoNagZ9Wc3WikS/MUAuu5F1vY44U2nFZo6rCDeJgupAGdqjIicA10acSUVTMslbXw==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/plugin-content-blog" "0.0.0-4193"
+ "@docusaurus/plugin-content-docs" "0.0.0-4193"
+ "@docusaurus/plugin-content-pages" "0.0.0-4193"
+ "@docusaurus/plugin-debug" "0.0.0-4193"
+ "@docusaurus/plugin-google-analytics" "0.0.0-4193"
+ "@docusaurus/plugin-google-gtag" "0.0.0-4193"
+ "@docusaurus/plugin-sitemap" "0.0.0-4193"
+ "@docusaurus/theme-classic" "0.0.0-4193"
+ "@docusaurus/theme-search-algolia" "0.0.0-4193"
+
+"@docusaurus/react-loadable@5.5.2":
+ version "5.5.2"
+ resolved "https://registry.npmmirror.com/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz"
+ integrity sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==
+ dependencies:
+ "@types/react" "*"
+ prop-types "^15.6.2"
+
+"@docusaurus/theme-classic@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/theme-classic/-/theme-classic-0.0.0-4193.tgz"
+ integrity sha512-jPu5EkknsnqAyFNV5AYtM9+GOfZ3l3rVJYiGRNy/v/VvmBTqoWUVXm8Xylf79yMFEOjlGgFObfR58IPThZvvXw==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/plugin-content-blog" "0.0.0-4193"
+ "@docusaurus/plugin-content-docs" "0.0.0-4193"
+ "@docusaurus/plugin-content-pages" "0.0.0-4193"
+ "@docusaurus/theme-common" "0.0.0-4193"
+ "@docusaurus/types" "0.0.0-4193"
+ "@docusaurus/utils" "0.0.0-4193"
+ "@docusaurus/utils-validation" "0.0.0-4193"
+ "@mdx-js/mdx" "^1.6.21"
+ "@mdx-js/react" "^1.6.21"
+ chalk "^4.1.2"
+ clsx "^1.1.1"
+ copy-text-to-clipboard "^3.0.1"
+ fs-extra "^10.0.0"
+ globby "^11.0.2"
+ infima "0.2.0-alpha.34"
+ lodash "^4.17.20"
+ postcss "^8.3.7"
+ prism-react-renderer "^1.2.1"
+ prismjs "^1.23.0"
+ prop-types "^15.7.2"
+ react-router-dom "^5.2.0"
+ rtlcss "^3.3.0"
+
+"@docusaurus/theme-common@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/theme-common/-/theme-common-0.0.0-4193.tgz"
+ integrity sha512-6yJEVlKrm9KIgMo1pmsAA5EL7FcrpeMPfkwhChAH0W5s6i4aQENrefQcEXBBBIv1KbNx4uNUG779plvnoSfNWA==
+ dependencies:
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/plugin-content-blog" "0.0.0-4193"
+ "@docusaurus/plugin-content-docs" "0.0.0-4193"
+ "@docusaurus/plugin-content-pages" "0.0.0-4193"
+ "@docusaurus/types" "0.0.0-4193"
+ clsx "^1.1.1"
+ fs-extra "^10.0.0"
+ parse-numeric-range "^1.3.0"
+ tslib "^2.3.1"
+ utility-types "^3.10.0"
+
+"@docusaurus/theme-search-algolia@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/theme-search-algolia/-/theme-search-algolia-0.0.0-4193.tgz"
+ integrity sha512-we2Z7nhZy5bgoYqF8ZXJO2jt8rBsqnDgXoRPkzoaacQww3CO3+Ez9Q+Dcpxy7lunNY3fShv+9SC13K9yClCUEQ==
+ dependencies:
+ "@docsearch/react" "^3.0.0-alpha.39"
+ "@docusaurus/core" "0.0.0-4193"
+ "@docusaurus/theme-common" "0.0.0-4193"
+ "@docusaurus/utils" "0.0.0-4193"
+ "@docusaurus/utils-validation" "0.0.0-4193"
+ algoliasearch "^4.10.5"
+ algoliasearch-helper "^3.5.5"
+ clsx "^1.1.1"
+ eta "^1.12.3"
+ lodash "^4.17.20"
+
+"@docusaurus/types@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/types/-/types-0.0.0-4193.tgz"
+ integrity sha512-2y+D3yYzEbBAmK74Me4g3pVe1sSRXZDDyKzf/Ojb729F7lYx9dvUTj0I/YlNDcPg5FUKCorfnV+3RfsyDb8lKA==
+ dependencies:
+ commander "^5.1.0"
+ joi "^17.4.2"
+ querystring "0.2.0"
+ utility-types "^3.10.0"
+ webpack "^5.61.0"
+ webpack-merge "^5.8.0"
+
+"@docusaurus/utils-common@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/utils-common/-/utils-common-0.0.0-4193.tgz"
+ integrity sha512-31kHFbhubA8cKZIjztNBsIUBbD+gHInHfVuvzBdcOnZgFfTdL9sUrROmjGnAAopAoJ/YyO5bsu5GGnrn/hexDA==
+ dependencies:
+ "@docusaurus/types" "0.0.0-4193"
+ tslib "^2.3.1"
+
+"@docusaurus/utils-validation@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/utils-validation/-/utils-validation-0.0.0-4193.tgz"
+ integrity sha512-ppVmx3KOHKyxta51O76VAVLttR46ItigEVpbaw5nfxs8muGKXmddWQ2lA78ga1zTDzwLT/7d18kngn46Qva+dw==
+ dependencies:
+ "@docusaurus/utils" "0.0.0-4193"
+ chalk "^4.1.2"
+ joi "^17.4.2"
+ tslib "^2.3.1"
+
+"@docusaurus/utils-validation@^2.0.0-beta.4":
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/@docusaurus/utils-validation/-/utils-validation-2.2.0.tgz"
+ integrity sha512-I1hcsG3yoCkasOL5qQAYAfnmVoLei7apugT6m4crQjmDGxq+UkiRrq55UqmDDyZlac/6ax/JC0p+usZ6W4nVyg==
+ dependencies:
+ "@docusaurus/logger" "2.2.0"
+ "@docusaurus/utils" "2.2.0"
+ joi "^17.6.0"
+ js-yaml "^4.1.0"
+ tslib "^2.4.0"
+
+"@docusaurus/utils@0.0.0-4193":
+ version "0.0.0-4193"
+ resolved "https://registry.npmmirror.com/@docusaurus/utils/-/utils-0.0.0-4193.tgz"
+ integrity sha512-c+1c735JzKqE2pRAkHnKyz81a6RE5HcVL3J1tpw9ACKjMyDq1qv3XeYa/ZKv/09qi/FI2QiZ32eiYHMXNZE1Sw==
+ dependencies:
+ "@docusaurus/types" "0.0.0-4193"
+ "@mdx-js/runtime" "^1.6.22"
+ "@types/github-slugger" "^1.3.0"
+ chalk "^4.1.2"
+ escape-string-regexp "^4.0.0"
+ fs-extra "^10.0.0"
+ globby "^11.0.4"
+ gray-matter "^4.0.3"
+ lodash "^4.17.20"
+ micromatch "^4.0.4"
+ remark-mdx-remove-exports "^1.6.22"
+ remark-mdx-remove-imports "^1.6.22"
+ resolve-pathname "^3.0.0"
+ tslib "^2.3.1"
+
+"@docusaurus/utils@2.2.0", "@docusaurus/utils@^2.0.0-beta.4":
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/@docusaurus/utils/-/utils-2.2.0.tgz"
+ integrity sha512-oNk3cjvx7Tt1Lgh/aeZAmFpGV2pDr5nHKrBVx6hTkzGhrnMuQqLt6UPlQjdYQ3QHXwyF/ZtZMO1D5Pfi0lu7SA==
+ dependencies:
+ "@docusaurus/logger" "2.2.0"
+ "@svgr/webpack" "^6.2.1"
+ file-loader "^6.2.0"
+ fs-extra "^10.1.0"
+ github-slugger "^1.4.0"
+ globby "^11.1.0"
+ gray-matter "^4.0.3"
+ js-yaml "^4.1.0"
+ lodash "^4.17.21"
+ micromatch "^4.0.5"
+ resolve-pathname "^3.0.0"
+ shelljs "^0.8.5"
+ tslib "^2.4.0"
+ url-loader "^4.1.1"
+ webpack "^5.73.0"
+
+"@easyops-cn/autocomplete.js@^0.38.1":
+ version "0.38.1"
+ resolved "https://registry.npmmirror.com/@easyops-cn/autocomplete.js/-/autocomplete.js-0.38.1.tgz"
+ integrity sha512-drg76jS6syilOUmVNkyo1c7ZEBPcPuK+aJA7AksM5ZIIbV57DMHCywiCr+uHyv8BE5jUTU98j/H7gVrkHrWW3Q==
+ dependencies:
+ cssesc "^3.0.0"
+ immediate "^3.2.3"
+
+"@easyops-cn/docusaurus-search-local@^0.21.1":
+ version "0.21.4"
+ resolved "https://registry.npmmirror.com/@easyops-cn/docusaurus-search-local/-/docusaurus-search-local-0.21.4.tgz"
+ integrity sha512-sUYxRKLfN/rInn1awf3Z6M5lefk9gSsrQp/6nKUTgaJI/NUmvZY8Hk3nRk0BPIHK8jjjzm+gWOhz8O0SBq8ihw==
+ dependencies:
+ "@docusaurus/utils" "^2.0.0-beta.4"
+ "@docusaurus/utils-validation" "^2.0.0-beta.4"
+ "@easyops-cn/autocomplete.js" "^0.38.1"
+ cheerio "^1.0.0-rc.3"
+ clsx "^1.1.1"
+ debug "^4.2.0"
+ fs-extra "^9.0.1"
+ klaw-sync "^6.0.0"
+ lunr "^2.3.9"
+ lunr-languages "^1.4.0"
+ mark.js "^8.11.1"
+ tslib "^2.2.0"
+
+"@hapi/hoek@^9.0.0":
+ version "9.3.0"
+ resolved "https://registry.npmmirror.com/@hapi/hoek/-/hoek-9.3.0.tgz"
+ integrity sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==
+
+"@hapi/topo@^5.0.0":
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/@hapi/topo/-/topo-5.1.0.tgz"
+ integrity sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==
+ dependencies:
+ "@hapi/hoek" "^9.0.0"
+
+"@jridgewell/gen-mapping@^0.1.0":
+ version "0.1.1"
+ resolved "https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz"
+ integrity sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w==
+ dependencies:
+ "@jridgewell/set-array" "^1.0.0"
+ "@jridgewell/sourcemap-codec" "^1.4.10"
+
+"@jridgewell/gen-mapping@^0.3.2":
+ version "0.3.2"
+ resolved "https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz"
+ integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==
+ dependencies:
+ "@jridgewell/set-array" "^1.0.1"
+ "@jridgewell/sourcemap-codec" "^1.4.10"
+ "@jridgewell/trace-mapping" "^0.3.9"
+
+"@jridgewell/gen-mapping@^0.3.5":
+ version "0.3.5"
+ resolved "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz"
+ integrity sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==
+ dependencies:
+ "@jridgewell/set-array" "^1.2.1"
+ "@jridgewell/sourcemap-codec" "^1.4.10"
+ "@jridgewell/trace-mapping" "^0.3.24"
+
+"@jridgewell/resolve-uri@^3.1.0":
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz"
+ integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==
+
+"@jridgewell/set-array@^1.0.0", "@jridgewell/set-array@^1.0.1", "@jridgewell/set-array@^1.2.1":
+ version "1.2.1"
+ resolved "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz"
+ integrity sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==
+
+"@jridgewell/source-map@^0.3.3":
+ version "0.3.6"
+ resolved "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.6.tgz"
+ integrity sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==
+ dependencies:
+ "@jridgewell/gen-mapping" "^0.3.5"
+ "@jridgewell/trace-mapping" "^0.3.25"
+
+"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14":
+ version "1.4.14"
+ resolved "https://registry.npmmirror.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz"
+ integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
+
+"@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.20", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25", "@jridgewell/trace-mapping@^0.3.9":
+ version "0.3.25"
+ resolved "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz"
+ integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==
+ dependencies:
+ "@jridgewell/resolve-uri" "^3.1.0"
+ "@jridgewell/sourcemap-codec" "^1.4.14"
+
+"@leichtgewicht/ip-codec@^2.0.1":
+ version "2.0.4"
+ resolved "https://registry.npmmirror.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz"
+ integrity sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==
+
+"@mdx-js/mdx@1.6.22", "@mdx-js/mdx@^1.6.21":
+ version "1.6.22"
+ resolved "https://registry.npmmirror.com/@mdx-js/mdx/-/mdx-1.6.22.tgz"
+ integrity sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA==
+ dependencies:
+ "@babel/core" "7.12.9"
+ "@babel/plugin-syntax-jsx" "7.12.1"
+ "@babel/plugin-syntax-object-rest-spread" "7.8.3"
+ "@mdx-js/util" "1.6.22"
+ babel-plugin-apply-mdx-type-prop "1.6.22"
+ babel-plugin-extract-import-names "1.6.22"
+ camelcase-css "2.0.1"
+ detab "2.0.4"
+ hast-util-raw "6.0.1"
+ lodash.uniq "4.5.0"
+ mdast-util-to-hast "10.0.1"
+ remark-footnotes "2.0.0"
+ remark-mdx "1.6.22"
+ remark-parse "8.0.3"
+ remark-squeeze-paragraphs "4.0.0"
+ style-to-object "0.3.0"
+ unified "9.2.0"
+ unist-builder "2.0.3"
+ unist-util-visit "2.0.3"
+
+"@mdx-js/react@1.6.22", "@mdx-js/react@^1.6.21":
+ version "1.6.22"
+ resolved "https://registry.npmmirror.com/@mdx-js/react/-/react-1.6.22.tgz"
+ integrity sha512-TDoPum4SHdfPiGSAaRBw7ECyI8VaHpK8GJugbJIJuqyh6kzw9ZLJZW3HGL3NNrJGxcAixUvqROm+YuQOo5eXtg==
+
+"@mdx-js/runtime@^1.6.22":
+ version "1.6.22"
+ resolved "https://registry.npmmirror.com/@mdx-js/runtime/-/runtime-1.6.22.tgz"
+ integrity sha512-p17spaO2+55VLCuxXA3LVHC4phRx60NR2XMdZ+qgVU1lKvEX4y88dmFNOzGDCPLJ03IZyKrJ/rPWWRiBrd9JrQ==
+ dependencies:
+ "@mdx-js/mdx" "1.6.22"
+ "@mdx-js/react" "1.6.22"
+ buble-jsx-only "^0.19.8"
+
+"@mdx-js/util@1.6.22":
+ version "1.6.22"
+ resolved "https://registry.npmmirror.com/@mdx-js/util/-/util-1.6.22.tgz"
+ integrity sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==
+
+"@nodelib/fs.scandir@2.1.5":
+ version "2.1.5"
+ resolved "https://registry.npmmirror.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz"
+ integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==
+ dependencies:
+ "@nodelib/fs.stat" "2.0.5"
+ run-parallel "^1.1.9"
+
+"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2":
+ version "2.0.5"
+ resolved "https://registry.npmmirror.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz"
+ integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==
+
+"@nodelib/fs.walk@^1.2.3":
+ version "1.2.8"
+ resolved "https://registry.npmmirror.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz"
+ integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==
+ dependencies:
+ "@nodelib/fs.scandir" "2.1.5"
+ fastq "^1.6.0"
+
+"@polka/url@^1.0.0-next.20":
+ version "1.0.0-next.21"
+ resolved "https://registry.npmmirror.com/@polka/url/-/url-1.0.0-next.21.tgz"
+ integrity sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g==
+
+"@sideway/address@^4.1.3":
+ version "4.1.4"
+ resolved "https://registry.npmmirror.com/@sideway/address/-/address-4.1.4.tgz"
+ integrity sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==
+ dependencies:
+ "@hapi/hoek" "^9.0.0"
+
+"@sideway/formula@3.0.1", "@sideway/formula@^3.0.0":
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/@sideway/formula/-/formula-3.0.1.tgz#80fcbcbaf7ce031e0ef2dd29b1bfc7c3f583611f"
+ integrity sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==
+
+"@sideway/pinpoint@^2.0.0":
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/@sideway/pinpoint/-/pinpoint-2.0.0.tgz"
+ integrity sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==
+
+"@sindresorhus/is@^4.0.0":
+ version "4.6.0"
+ resolved "https://registry.npmmirror.com/@sindresorhus/is/-/is-4.6.0.tgz"
+ integrity sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==
+
+"@slorber/static-site-generator-webpack-plugin@^4.0.0":
+ version "4.0.7"
+ resolved "https://registry.npmmirror.com/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz"
+ integrity sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==
+ dependencies:
+ eval "^0.1.8"
+ p-map "^4.0.0"
+ webpack-sources "^3.2.2"
+
+"@svgr/babel-plugin-add-jsx-attribute@^5.4.0":
+ version "5.4.0"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-5.4.0.tgz"
+ integrity sha512-ZFf2gs/8/6B8PnSofI0inYXr2SDNTDScPXhN7k5EqD4aZ3gi6u+rbmZHVB8IM3wDyx8ntKACZbtXSm7oZGRqVg==
+
+"@svgr/babel-plugin-add-jsx-attribute@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz"
+ integrity sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==
+
+"@svgr/babel-plugin-remove-jsx-attribute@*", "@svgr/babel-plugin-remove-jsx-attribute@^5.4.0":
+ version "5.4.0"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-5.4.0.tgz"
+ integrity sha512-yaS4o2PgUtwLFGTKbsiAy6D0o3ugcUhWK0Z45umJ66EPWunAz9fuFw2gJuje6wqQvQWOTJvIahUwndOXb7QCPg==
+
+"@svgr/babel-plugin-remove-jsx-empty-expression@*", "@svgr/babel-plugin-remove-jsx-empty-expression@^5.0.1":
+ version "5.0.1"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-5.0.1.tgz"
+ integrity sha512-LA72+88A11ND/yFIMzyuLRSMJ+tRKeYKeQ+mR3DcAZ5I4h5CPWN9AHyUzJbWSYp/u2u0xhmgOe0+E41+GjEueA==
+
+"@svgr/babel-plugin-replace-jsx-attribute-value@^5.0.1":
+ version "5.0.1"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-5.0.1.tgz"
+ integrity sha512-PoiE6ZD2Eiy5mK+fjHqwGOS+IXX0wq/YDtNyIgOrc6ejFnxN4b13pRpiIPbtPwHEc+NT2KCjteAcq33/F1Y9KQ==
+
+"@svgr/babel-plugin-replace-jsx-attribute-value@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz"
+ integrity sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==
+
+"@svgr/babel-plugin-svg-dynamic-title@^5.4.0":
+ version "5.4.0"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-5.4.0.tgz"
+ integrity sha512-zSOZH8PdZOpuG1ZVx/cLVePB2ibo3WPpqo7gFIjLV9a0QsuQAzJiwwqmuEdTaW2pegyBE17Uu15mOgOcgabQZg==
+
+"@svgr/babel-plugin-svg-dynamic-title@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz"
+ integrity sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==
+
+"@svgr/babel-plugin-svg-em-dimensions@^5.4.0":
+ version "5.4.0"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-5.4.0.tgz"
+ integrity sha512-cPzDbDA5oT/sPXDCUYoVXEmm3VIoAWAPT6mSPTJNbQaBNUuEKVKyGH93oDY4e42PYHRW67N5alJx/eEol20abw==
+
+"@svgr/babel-plugin-svg-em-dimensions@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz"
+ integrity sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==
+
+"@svgr/babel-plugin-transform-react-native-svg@^5.4.0":
+ version "5.4.0"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-5.4.0.tgz"
+ integrity sha512-3eYP/SaopZ41GHwXma7Rmxcv9uRslRDTY1estspeB1w1ueZWd/tPlMfEOoccYpEMZU3jD4OU7YitnXcF5hLW2Q==
+
+"@svgr/babel-plugin-transform-react-native-svg@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz"
+ integrity sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==
+
+"@svgr/babel-plugin-transform-svg-component@^5.5.0":
+ version "5.5.0"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-5.5.0.tgz"
+ integrity sha512-q4jSH1UUvbrsOtlo/tKcgSeiCHRSBdXoIoqX1pgcKK/aU3JD27wmMKwGtpB8qRYUYoyXvfGxUVKchLuR5pB3rQ==
+
+"@svgr/babel-plugin-transform-svg-component@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz"
+ integrity sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==
+
+"@svgr/babel-preset@^5.5.0":
+ version "5.5.0"
+ resolved "https://registry.npmmirror.com/@svgr/babel-preset/-/babel-preset-5.5.0.tgz"
+ integrity sha512-4FiXBjvQ+z2j7yASeGPEi8VD/5rrGQk4Xrq3EdJmoZgz/tpqChpo5hgXDvmEauwtvOc52q8ghhZK4Oy7qph4ig==
+ dependencies:
+ "@svgr/babel-plugin-add-jsx-attribute" "^5.4.0"
+ "@svgr/babel-plugin-remove-jsx-attribute" "^5.4.0"
+ "@svgr/babel-plugin-remove-jsx-empty-expression" "^5.0.1"
+ "@svgr/babel-plugin-replace-jsx-attribute-value" "^5.0.1"
+ "@svgr/babel-plugin-svg-dynamic-title" "^5.4.0"
+ "@svgr/babel-plugin-svg-em-dimensions" "^5.4.0"
+ "@svgr/babel-plugin-transform-react-native-svg" "^5.4.0"
+ "@svgr/babel-plugin-transform-svg-component" "^5.5.0"
+
+"@svgr/babel-preset@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/babel-preset/-/babel-preset-6.5.1.tgz"
+ integrity sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==
+ dependencies:
+ "@svgr/babel-plugin-add-jsx-attribute" "^6.5.1"
+ "@svgr/babel-plugin-remove-jsx-attribute" "*"
+ "@svgr/babel-plugin-remove-jsx-empty-expression" "*"
+ "@svgr/babel-plugin-replace-jsx-attribute-value" "^6.5.1"
+ "@svgr/babel-plugin-svg-dynamic-title" "^6.5.1"
+ "@svgr/babel-plugin-svg-em-dimensions" "^6.5.1"
+ "@svgr/babel-plugin-transform-react-native-svg" "^6.5.1"
+ "@svgr/babel-plugin-transform-svg-component" "^6.5.1"
+
+"@svgr/core@^5.5.0":
+ version "5.5.0"
+ resolved "https://registry.npmmirror.com/@svgr/core/-/core-5.5.0.tgz"
+ integrity sha512-q52VOcsJPvV3jO1wkPtzTuKlvX7Y3xIcWRpCMtBF3MrteZJtBfQw/+u0B1BHy5ColpQc1/YVTrPEtSYIMNZlrQ==
+ dependencies:
+ "@svgr/plugin-jsx" "^5.5.0"
+ camelcase "^6.2.0"
+ cosmiconfig "^7.0.0"
+
+"@svgr/core@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/core/-/core-6.5.1.tgz"
+ integrity sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==
+ dependencies:
+ "@babel/core" "^7.19.6"
+ "@svgr/babel-preset" "^6.5.1"
+ "@svgr/plugin-jsx" "^6.5.1"
+ camelcase "^6.2.0"
+ cosmiconfig "^7.0.1"
+
+"@svgr/hast-util-to-babel-ast@^5.5.0":
+ version "5.5.0"
+ resolved "https://registry.npmmirror.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-5.5.0.tgz"
+ integrity sha512-cAaR/CAiZRB8GP32N+1jocovUtvlj0+e65TB50/6Lcime+EA49m/8l+P2ko+XPJ4dw3xaPS3jOL4F2X4KWxoeQ==
+ dependencies:
+ "@babel/types" "^7.12.6"
+
+"@svgr/hast-util-to-babel-ast@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz"
+ integrity sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==
+ dependencies:
+ "@babel/types" "^7.20.0"
+ entities "^4.4.0"
+
+"@svgr/plugin-jsx@^5.5.0":
+ version "5.5.0"
+ resolved "https://registry.npmmirror.com/@svgr/plugin-jsx/-/plugin-jsx-5.5.0.tgz"
+ integrity sha512-V/wVh33j12hGh05IDg8GpIUXbjAPnTdPTKuP4VNLggnwaHMPNQNae2pRnyTAILWCQdz5GyMqtO488g7CKM8CBA==
+ dependencies:
+ "@babel/core" "^7.12.3"
+ "@svgr/babel-preset" "^5.5.0"
+ "@svgr/hast-util-to-babel-ast" "^5.5.0"
+ svg-parser "^2.0.2"
+
+"@svgr/plugin-jsx@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz"
+ integrity sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==
+ dependencies:
+ "@babel/core" "^7.19.6"
+ "@svgr/babel-preset" "^6.5.1"
+ "@svgr/hast-util-to-babel-ast" "^6.5.1"
+ svg-parser "^2.0.4"
+
+"@svgr/plugin-svgo@^5.5.0":
+ version "5.5.0"
+ resolved "https://registry.npmmirror.com/@svgr/plugin-svgo/-/plugin-svgo-5.5.0.tgz"
+ integrity sha512-r5swKk46GuQl4RrVejVwpeeJaydoxkdwkM1mBKOgJLBUJPGaLci6ylg/IjhrRsREKDkr4kbMWdgOtbXEh0fyLQ==
+ dependencies:
+ cosmiconfig "^7.0.0"
+ deepmerge "^4.2.2"
+ svgo "^1.2.2"
+
+"@svgr/plugin-svgo@^6.5.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz"
+ integrity sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==
+ dependencies:
+ cosmiconfig "^7.0.1"
+ deepmerge "^4.2.2"
+ svgo "^2.8.0"
+
+"@svgr/webpack@^5.5.0":
+ version "5.5.0"
+ resolved "https://registry.npmmirror.com/@svgr/webpack/-/webpack-5.5.0.tgz"
+ integrity sha512-DOBOK255wfQxguUta2INKkzPj6AIS6iafZYiYmHn6W3pHlycSRRlvWKCfLDG10fXfLWqE3DJHgRUOyJYmARa7g==
+ dependencies:
+ "@babel/core" "^7.12.3"
+ "@babel/plugin-transform-react-constant-elements" "^7.12.1"
+ "@babel/preset-env" "^7.12.1"
+ "@babel/preset-react" "^7.12.5"
+ "@svgr/core" "^5.5.0"
+ "@svgr/plugin-jsx" "^5.5.0"
+ "@svgr/plugin-svgo" "^5.5.0"
+ loader-utils "^2.0.0"
+
+"@svgr/webpack@^6.2.1":
+ version "6.5.1"
+ resolved "https://registry.npmmirror.com/@svgr/webpack/-/webpack-6.5.1.tgz"
+ integrity sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==
+ dependencies:
+ "@babel/core" "^7.19.6"
+ "@babel/plugin-transform-react-constant-elements" "^7.18.12"
+ "@babel/preset-env" "^7.19.4"
+ "@babel/preset-react" "^7.18.6"
+ "@babel/preset-typescript" "^7.18.6"
+ "@svgr/core" "^6.5.1"
+ "@svgr/plugin-jsx" "^6.5.1"
+ "@svgr/plugin-svgo" "^6.5.1"
+
+"@szmarczak/http-timer@^4.0.5":
+ version "4.0.6"
+ resolved "https://registry.npmmirror.com/@szmarczak/http-timer/-/http-timer-4.0.6.tgz"
+ integrity sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w==
+ dependencies:
+ defer-to-connect "^2.0.0"
+
+"@trysound/sax@0.2.0":
+ version "0.2.0"
+ resolved "https://registry.npmmirror.com/@trysound/sax/-/sax-0.2.0.tgz"
+ integrity sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==
+
+"@types/body-parser@*":
+ version "1.19.2"
+ resolved "https://registry.npmmirror.com/@types/body-parser/-/body-parser-1.19.2.tgz"
+ integrity sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==
+ dependencies:
+ "@types/connect" "*"
+ "@types/node" "*"
+
+"@types/bonjour@^3.5.9":
+ version "3.5.10"
+ resolved "https://registry.npmmirror.com/@types/bonjour/-/bonjour-3.5.10.tgz"
+ integrity sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==
+ dependencies:
+ "@types/node" "*"
+
+"@types/cacheable-request@^6.0.1":
+ version "6.0.2"
+ resolved "https://registry.npmmirror.com/@types/cacheable-request/-/cacheable-request-6.0.2.tgz"
+ integrity sha512-B3xVo+dlKM6nnKTcmm5ZtY/OL8bOAOd2Olee9M1zft65ox50OzjEHW91sDiU9j6cvW8Ejg1/Qkf4xd2kugApUA==
+ dependencies:
+ "@types/http-cache-semantics" "*"
+ "@types/keyv" "*"
+ "@types/node" "*"
+ "@types/responselike" "*"
+
+"@types/connect-history-api-fallback@^1.3.5":
+ version "1.3.5"
+ resolved "https://registry.npmmirror.com/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz"
+ integrity sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw==
+ dependencies:
+ "@types/express-serve-static-core" "*"
+ "@types/node" "*"
+
+"@types/connect@*":
+ version "3.4.35"
+ resolved "https://registry.npmmirror.com/@types/connect/-/connect-3.4.35.tgz"
+ integrity sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==
+ dependencies:
+ "@types/node" "*"
+
+"@types/eslint-scope@^3.7.3":
+ version "3.7.4"
+ resolved "https://registry.npmmirror.com/@types/eslint-scope/-/eslint-scope-3.7.4.tgz"
+ integrity sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==
+ dependencies:
+ "@types/eslint" "*"
+ "@types/estree" "*"
+
+"@types/eslint@*":
+ version "8.4.10"
+ resolved "https://registry.npmmirror.com/@types/eslint/-/eslint-8.4.10.tgz"
+ integrity sha512-Sl/HOqN8NKPmhWo2VBEPm0nvHnu2LL3v9vKo8MEq0EtbJ4eVzGPl41VNPvn5E1i5poMk4/XD8UriLHpJvEP/Nw==
+ dependencies:
+ "@types/estree" "*"
+ "@types/json-schema" "*"
+
+"@types/estree@*", "@types/estree@^1.0.5":
+ version "1.0.5"
+ resolved "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz"
+ integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==
+
+"@types/express-serve-static-core@*", "@types/express-serve-static-core@^4.17.18":
+ version "4.17.31"
+ resolved "https://registry.npmmirror.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.31.tgz"
+ integrity sha512-DxMhY+NAsTwMMFHBTtJFNp5qiHKJ7TeqOo23zVEM9alT1Ml27Q3xcTH0xwxn7Q0BbMcVEJOs/7aQtUWupUQN3Q==
+ dependencies:
+ "@types/node" "*"
+ "@types/qs" "*"
+ "@types/range-parser" "*"
+
+"@types/express@*", "@types/express@^4.17.13":
+ version "4.17.14"
+ resolved "https://registry.npmmirror.com/@types/express/-/express-4.17.14.tgz"
+ integrity sha512-TEbt+vaPFQ+xpxFLFssxUDXj5cWCxZJjIcB7Yg0k0GMHGtgtQgpvx/MUQUeAkNbA9AAGrwkAsoeItdTgS7FMyg==
+ dependencies:
+ "@types/body-parser" "*"
+ "@types/express-serve-static-core" "^4.17.18"
+ "@types/qs" "*"
+ "@types/serve-static" "*"
+
+"@types/github-slugger@^1.3.0":
+ version "1.3.0"
+ resolved "https://registry.npmmirror.com/@types/github-slugger/-/github-slugger-1.3.0.tgz"
+ integrity sha512-J/rMZa7RqiH/rT29TEVZO4nBoDP9XJOjnbbIofg7GQKs4JIduEO3WLpte+6WeUz/TcrXKlY+bM7FYrp8yFB+3g==
+
+"@types/hast@^2.0.0":
+ version "2.3.4"
+ resolved "https://registry.npmmirror.com/@types/hast/-/hast-2.3.4.tgz"
+ integrity sha512-wLEm0QvaoawEDoTRwzTXp4b4jpwiJDvR5KMnFnVodm3scufTlBOWRD6N1OBf9TZMhjlNsSfcO5V+7AF4+Vy+9g==
+ dependencies:
+ "@types/unist" "*"
+
+"@types/html-minifier-terser@^6.0.0":
+ version "6.1.0"
+ resolved "https://registry.npmmirror.com/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz"
+ integrity sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==
+
+"@types/http-cache-semantics@*":
+ version "4.0.1"
+ resolved "https://registry.npmmirror.com/@types/http-cache-semantics/-/http-cache-semantics-4.0.1.tgz"
+ integrity sha512-SZs7ekbP8CN0txVG2xVRH6EgKmEm31BOxA07vkFaETzZz1xh+cbt8BcI0slpymvwhx5dlFnQG2rTlPVQn+iRPQ==
+
+"@types/http-proxy@^1.17.8":
+ version "1.17.9"
+ resolved "https://registry.npmmirror.com/@types/http-proxy/-/http-proxy-1.17.9.tgz"
+ integrity sha512-QsbSjA/fSk7xB+UXlCT3wHBy5ai9wOcNDWwZAtud+jXhwOM3l+EYZh8Lng4+/6n8uar0J7xILzqftJdJ/Wdfkw==
+ dependencies:
+ "@types/node" "*"
+
+"@types/json-schema@*", "@types/json-schema@^7.0.4", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9":
+ version "7.0.11"
+ resolved "https://registry.npmmirror.com/@types/json-schema/-/json-schema-7.0.11.tgz"
+ integrity sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==
+
+"@types/katex@^0.11.0":
+ version "0.11.1"
+ resolved "https://registry.npmmirror.com/@types/katex/-/katex-0.11.1.tgz"
+ integrity sha512-DUlIj2nk0YnJdlWgsFuVKcX27MLW0KbKmGVoUHmFr+74FYYNUDAaj9ZqTADvsbE8rfxuVmSFc7KczYn5Y09ozg==
+
+"@types/keyv@*":
+ version "4.2.0"
+ resolved "https://registry.npmmirror.com/@types/keyv/-/keyv-4.2.0.tgz"
+ integrity sha512-xoBtGl5R9jeKUhc8ZqeYaRDx04qqJ10yhhXYGmJ4Jr8qKpvMsDQQrNUvF/wUJ4klOtmJeJM+p2Xo3zp9uaC3tw==
+ dependencies:
+ keyv "*"
+
+"@types/mdast@^3.0.0":
+ version "3.0.10"
+ resolved "https://registry.npmmirror.com/@types/mdast/-/mdast-3.0.10.tgz"
+ integrity sha512-W864tg/Osz1+9f4lrGTZpCSO5/z4608eUp19tbozkq2HJK6i3z1kT0H9tlADXuYIb1YYOBByU4Jsqkk75q48qA==
+ dependencies:
+ "@types/unist" "*"
+
+"@types/mime@*":
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/@types/mime/-/mime-3.0.1.tgz"
+ integrity sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA==
+
+"@types/node@*":
+ version "18.11.9"
+ resolved "https://registry.npmmirror.com/@types/node/-/node-18.11.9.tgz"
+ integrity sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==
+
+"@types/node@^17.0.5":
+ version "17.0.45"
+ resolved "https://registry.npmmirror.com/@types/node/-/node-17.0.45.tgz"
+ integrity sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==
+
+"@types/parse-json@^4.0.0":
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/@types/parse-json/-/parse-json-4.0.0.tgz"
+ integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==
+
+"@types/parse5@^5.0.0":
+ version "5.0.3"
+ resolved "https://registry.npmmirror.com/@types/parse5/-/parse5-5.0.3.tgz"
+ integrity sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw==
+
+"@types/prop-types@*":
+ version "15.7.5"
+ resolved "https://registry.npmmirror.com/@types/prop-types/-/prop-types-15.7.5.tgz"
+ integrity sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==
+
+"@types/q@^1.5.1":
+ version "1.5.5"
+ resolved "https://registry.npmmirror.com/@types/q/-/q-1.5.5.tgz"
+ integrity sha512-L28j2FcJfSZOnL1WBjDYp2vUHCeIFlyYI/53EwD/rKUBQ7MtUUfbQWiyKJGpcnv4/WgrhWsFKrcPstcAt/J0tQ==
+
+"@types/qs@*":
+ version "6.9.7"
+ resolved "https://registry.npmmirror.com/@types/qs/-/qs-6.9.7.tgz"
+ integrity sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==
+
+"@types/range-parser@*":
+ version "1.2.4"
+ resolved "https://registry.npmmirror.com/@types/range-parser/-/range-parser-1.2.4.tgz"
+ integrity sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==
+
+"@types/react@*":
+ version "18.0.25"
+ resolved "https://registry.npmmirror.com/@types/react/-/react-18.0.25.tgz"
+ integrity sha512-xD6c0KDT4m7n9uD4ZHi02lzskaiqcBxf4zi+tXZY98a04wvc0hi/TcCPC2FOESZi51Nd7tlUeOJY8RofL799/g==
+ dependencies:
+ "@types/prop-types" "*"
+ "@types/scheduler" "*"
+ csstype "^3.0.2"
+
+"@types/responselike@*", "@types/responselike@^1.0.0":
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/@types/responselike/-/responselike-1.0.0.tgz"
+ integrity sha512-85Y2BjiufFzaMIlvJDvTTB8Fxl2xfLo4HgmHzVBz08w4wDePCTjYw66PdrolO0kzli3yam/YCgRufyo1DdQVTA==
+ dependencies:
+ "@types/node" "*"
+
+"@types/retry@0.12.0":
+ version "0.12.0"
+ resolved "https://registry.npmmirror.com/@types/retry/-/retry-0.12.0.tgz"
+ integrity sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==
+
+"@types/sax@^1.2.1":
+ version "1.2.4"
+ resolved "https://registry.npmmirror.com/@types/sax/-/sax-1.2.4.tgz"
+ integrity sha512-pSAff4IAxJjfAXUG6tFkO7dsSbTmf8CtUpfhhZ5VhkRpC4628tJhh3+V6H1E+/Gs9piSzYKT5yzHO5M4GG9jkw==
+ dependencies:
+ "@types/node" "*"
+
+"@types/scheduler@*":
+ version "0.16.2"
+ resolved "https://registry.npmmirror.com/@types/scheduler/-/scheduler-0.16.2.tgz"
+ integrity sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==
+
+"@types/serve-index@^1.9.1":
+ version "1.9.1"
+ resolved "https://registry.npmmirror.com/@types/serve-index/-/serve-index-1.9.1.tgz"
+ integrity sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==
+ dependencies:
+ "@types/express" "*"
+
+"@types/serve-static@*", "@types/serve-static@^1.13.10":
+ version "1.15.0"
+ resolved "https://registry.npmmirror.com/@types/serve-static/-/serve-static-1.15.0.tgz"
+ integrity sha512-z5xyF6uh8CbjAu9760KDKsH2FcDxZ2tFCsA4HIMWE6IkiYMXfVoa+4f9KX+FN0ZLsaMw1WNG2ETLA6N+/YA+cg==
+ dependencies:
+ "@types/mime" "*"
+ "@types/node" "*"
+
+"@types/sockjs@^0.3.33":
+ version "0.3.33"
+ resolved "https://registry.npmmirror.com/@types/sockjs/-/sockjs-0.3.33.tgz"
+ integrity sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==
+ dependencies:
+ "@types/node" "*"
+
+"@types/unist@*", "@types/unist@^2.0.0", "@types/unist@^2.0.2", "@types/unist@^2.0.3":
+ version "2.0.6"
+ resolved "https://registry.npmmirror.com/@types/unist/-/unist-2.0.6.tgz"
+ integrity sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==
+
+"@types/ws@^8.5.1":
+ version "8.5.3"
+ resolved "https://registry.npmmirror.com/@types/ws/-/ws-8.5.3.tgz"
+ integrity sha512-6YOoWjruKj1uLf3INHH7D3qTXwFfEsg1kf3c0uDdSBJwfa/llkwIjrAGV7j7mVgGNbzTQ3HiHKKDXl6bJPD97w==
+ dependencies:
+ "@types/node" "*"
+
+"@webassemblyjs/ast@1.12.1", "@webassemblyjs/ast@^1.12.1":
+ version "1.12.1"
+ resolved "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.12.1.tgz"
+ integrity sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==
+ dependencies:
+ "@webassemblyjs/helper-numbers" "1.11.6"
+ "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
+
+"@webassemblyjs/floating-point-hex-parser@1.11.6":
+ version "1.11.6"
+ resolved "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz"
+ integrity sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==
+
+"@webassemblyjs/helper-api-error@1.11.6":
+ version "1.11.6"
+ resolved "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz"
+ integrity sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==
+
+"@webassemblyjs/helper-buffer@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz"
+ integrity sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==
+
+"@webassemblyjs/helper-numbers@1.11.6":
+ version "1.11.6"
+ resolved "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz"
+ integrity sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==
+ dependencies:
+ "@webassemblyjs/floating-point-hex-parser" "1.11.6"
+ "@webassemblyjs/helper-api-error" "1.11.6"
+ "@xtuc/long" "4.2.2"
+
+"@webassemblyjs/helper-wasm-bytecode@1.11.6":
+ version "1.11.6"
+ resolved "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz"
+ integrity sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==
+
+"@webassemblyjs/helper-wasm-section@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz"
+ integrity sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==
+ dependencies:
+ "@webassemblyjs/ast" "1.12.1"
+ "@webassemblyjs/helper-buffer" "1.12.1"
+ "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
+ "@webassemblyjs/wasm-gen" "1.12.1"
+
+"@webassemblyjs/ieee754@1.11.6":
+ version "1.11.6"
+ resolved "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz"
+ integrity sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==
+ dependencies:
+ "@xtuc/ieee754" "^1.2.0"
+
+"@webassemblyjs/leb128@1.11.6":
+ version "1.11.6"
+ resolved "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz"
+ integrity sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==
+ dependencies:
+ "@xtuc/long" "4.2.2"
+
+"@webassemblyjs/utf8@1.11.6":
+ version "1.11.6"
+ resolved "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz"
+ integrity sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==
+
+"@webassemblyjs/wasm-edit@^1.12.1":
+ version "1.12.1"
+ resolved "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz"
+ integrity sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==
+ dependencies:
+ "@webassemblyjs/ast" "1.12.1"
+ "@webassemblyjs/helper-buffer" "1.12.1"
+ "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
+ "@webassemblyjs/helper-wasm-section" "1.12.1"
+ "@webassemblyjs/wasm-gen" "1.12.1"
+ "@webassemblyjs/wasm-opt" "1.12.1"
+ "@webassemblyjs/wasm-parser" "1.12.1"
+ "@webassemblyjs/wast-printer" "1.12.1"
+
+"@webassemblyjs/wasm-gen@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz"
+ integrity sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==
+ dependencies:
+ "@webassemblyjs/ast" "1.12.1"
+ "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
+ "@webassemblyjs/ieee754" "1.11.6"
+ "@webassemblyjs/leb128" "1.11.6"
+ "@webassemblyjs/utf8" "1.11.6"
+
+"@webassemblyjs/wasm-opt@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz"
+ integrity sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==
+ dependencies:
+ "@webassemblyjs/ast" "1.12.1"
+ "@webassemblyjs/helper-buffer" "1.12.1"
+ "@webassemblyjs/wasm-gen" "1.12.1"
+ "@webassemblyjs/wasm-parser" "1.12.1"
+
+"@webassemblyjs/wasm-parser@1.12.1", "@webassemblyjs/wasm-parser@^1.12.1":
+ version "1.12.1"
+ resolved "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz"
+ integrity sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==
+ dependencies:
+ "@webassemblyjs/ast" "1.12.1"
+ "@webassemblyjs/helper-api-error" "1.11.6"
+ "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
+ "@webassemblyjs/ieee754" "1.11.6"
+ "@webassemblyjs/leb128" "1.11.6"
+ "@webassemblyjs/utf8" "1.11.6"
+
+"@webassemblyjs/wast-printer@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz"
+ integrity sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==
+ dependencies:
+ "@webassemblyjs/ast" "1.12.1"
+ "@xtuc/long" "4.2.2"
+
+"@xtuc/ieee754@^1.2.0":
+ version "1.2.0"
+ resolved "https://registry.npmmirror.com/@xtuc/ieee754/-/ieee754-1.2.0.tgz"
+ integrity sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==
+
+"@xtuc/long@4.2.2":
+ version "4.2.2"
+ resolved "https://registry.npmmirror.com/@xtuc/long/-/long-4.2.2.tgz"
+ integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==
+
+accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.8:
+ version "1.3.8"
+ resolved "https://registry.npmmirror.com/accepts/-/accepts-1.3.8.tgz"
+ integrity sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==
+ dependencies:
+ mime-types "~2.1.34"
+ negotiator "0.6.3"
+
+acorn-dynamic-import@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/acorn-dynamic-import/-/acorn-dynamic-import-4.0.0.tgz"
+ integrity sha512-d3OEjQV4ROpoflsnUA8HozoIR504TFxNivYEUi6uwz0IYhBkTDXGuWlNdMtybRt3nqVx/L6XqMt0FxkXuWKZhw==
+
+acorn-import-attributes@^1.9.5:
+ version "1.9.5"
+ resolved "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz"
+ integrity sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==
+
+acorn-jsx@^5.0.1:
+ version "5.3.2"
+ resolved "https://registry.npmmirror.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz"
+ integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==
+
+acorn-walk@^8.0.0:
+ version "8.2.0"
+ resolved "https://registry.npmmirror.com/acorn-walk/-/acorn-walk-8.2.0.tgz"
+ integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==
+
+acorn@^6.1.1:
+ version "6.4.2"
+ resolved "https://registry.npmmirror.com/acorn/-/acorn-6.4.2.tgz"
+ integrity sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==
+
+acorn@^8.0.4, acorn@^8.7.1, acorn@^8.8.2:
+ version "8.12.1"
+ resolved "https://registry.npmjs.org/acorn/-/acorn-8.12.1.tgz"
+ integrity sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==
+
+address@^1.0.1, address@^1.1.2:
+ version "1.2.1"
+ resolved "https://registry.npmmirror.com/address/-/address-1.2.1.tgz"
+ integrity sha512-B+6bi5D34+fDYENiH5qOlA0cV2rAGKuWZ9LeyUUehbXy8e0VS9e498yO0Jeeh+iM+6KbfudHTFjXw2MmJD4QRA==
+
+aggregate-error@^3.0.0:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/aggregate-error/-/aggregate-error-3.1.0.tgz"
+ integrity sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==
+ dependencies:
+ clean-stack "^2.0.0"
+ indent-string "^4.0.0"
+
+ajv-formats@^2.1.1:
+ version "2.1.1"
+ resolved "https://registry.npmmirror.com/ajv-formats/-/ajv-formats-2.1.1.tgz"
+ integrity sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==
+ dependencies:
+ ajv "^8.0.0"
+
+ajv-keywords@^3.4.1, ajv-keywords@^3.5.2:
+ version "3.5.2"
+ resolved "https://registry.npmmirror.com/ajv-keywords/-/ajv-keywords-3.5.2.tgz"
+ integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==
+
+ajv-keywords@^5.0.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/ajv-keywords/-/ajv-keywords-5.1.0.tgz"
+ integrity sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==
+ dependencies:
+ fast-deep-equal "^3.1.3"
+
+ajv@^6.12.2, ajv@^6.12.4, ajv@^6.12.5:
+ version "6.12.6"
+ resolved "https://registry.npmmirror.com/ajv/-/ajv-6.12.6.tgz"
+ integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
+ dependencies:
+ fast-deep-equal "^3.1.1"
+ fast-json-stable-stringify "^2.0.0"
+ json-schema-traverse "^0.4.1"
+ uri-js "^4.2.2"
+
+ajv@^8.0.0, ajv@^8.8.0:
+ version "8.11.0"
+ resolved "https://registry.npmmirror.com/ajv/-/ajv-8.11.0.tgz"
+ integrity sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==
+ dependencies:
+ fast-deep-equal "^3.1.1"
+ json-schema-traverse "^1.0.0"
+ require-from-string "^2.0.2"
+ uri-js "^4.2.2"
+
+algoliasearch-helper@^3.5.5:
+ version "3.11.1"
+ resolved "https://registry.npmmirror.com/algoliasearch-helper/-/algoliasearch-helper-3.11.1.tgz"
+ integrity sha512-mvsPN3eK4E0bZG0/WlWJjeqe/bUD2KOEVOl0GyL/TGXn6wcpZU8NOuztGHCUKXkyg5gq6YzUakVTmnmSSO5Yiw==
+ dependencies:
+ "@algolia/events" "^4.0.1"
+
+algoliasearch@^4.0.0, algoliasearch@^4.10.5:
+ version "4.14.2"
+ resolved "https://registry.npmmirror.com/algoliasearch/-/algoliasearch-4.14.2.tgz"
+ integrity sha512-ngbEQonGEmf8dyEh5f+uOIihv4176dgbuOZspiuhmTTBRBuzWu3KCGHre6uHj5YyuC7pNvQGzB6ZNJyZi0z+Sg==
+ dependencies:
+ "@algolia/cache-browser-local-storage" "4.14.2"
+ "@algolia/cache-common" "4.14.2"
+ "@algolia/cache-in-memory" "4.14.2"
+ "@algolia/client-account" "4.14.2"
+ "@algolia/client-analytics" "4.14.2"
+ "@algolia/client-common" "4.14.2"
+ "@algolia/client-personalization" "4.14.2"
+ "@algolia/client-search" "4.14.2"
+ "@algolia/logger-common" "4.14.2"
+ "@algolia/logger-console" "4.14.2"
+ "@algolia/requester-browser-xhr" "4.14.2"
+ "@algolia/requester-common" "4.14.2"
+ "@algolia/requester-node-http" "4.14.2"
+ "@algolia/transporter" "4.14.2"
+
+ansi-align@^3.0.0:
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/ansi-align/-/ansi-align-3.0.1.tgz"
+ integrity sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==
+ dependencies:
+ string-width "^4.1.0"
+
+ansi-html-community@^0.0.8:
+ version "0.0.8"
+ resolved "https://registry.npmmirror.com/ansi-html-community/-/ansi-html-community-0.0.8.tgz"
+ integrity sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==
+
+ansi-regex@^5.0.1:
+ version "5.0.1"
+ resolved "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-5.0.1.tgz"
+ integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==
+
+ansi-styles@^3.2.1:
+ version "3.2.1"
+ resolved "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-3.2.1.tgz"
+ integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==
+ dependencies:
+ color-convert "^1.9.0"
+
+ansi-styles@^4.0.0, ansi-styles@^4.1.0:
+ version "4.3.0"
+ resolved "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz"
+ integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==
+ dependencies:
+ color-convert "^2.0.1"
+
+anymatch@~3.1.2:
+ version "3.1.2"
+ resolved "https://registry.npmmirror.com/anymatch/-/anymatch-3.1.2.tgz"
+ integrity sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==
+ dependencies:
+ normalize-path "^3.0.0"
+ picomatch "^2.0.4"
+
+arg@^5.0.0:
+ version "5.0.2"
+ resolved "https://registry.npmmirror.com/arg/-/arg-5.0.2.tgz"
+ integrity sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==
+
+argparse@^1.0.7:
+ version "1.0.10"
+ resolved "https://registry.npmmirror.com/argparse/-/argparse-1.0.10.tgz"
+ integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==
+ dependencies:
+ sprintf-js "~1.0.2"
+
+argparse@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/argparse/-/argparse-2.0.1.tgz"
+ integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==
+
+array-flatten@1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmmirror.com/array-flatten/-/array-flatten-1.1.1.tgz"
+ integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==
+
+array-flatten@^2.1.2:
+ version "2.1.2"
+ resolved "https://registry.npmmirror.com/array-flatten/-/array-flatten-2.1.2.tgz"
+ integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==
+
+array-union@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/array-union/-/array-union-2.1.0.tgz"
+ integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==
+
+array.prototype.reduce@^1.0.4:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/array.prototype.reduce/-/array.prototype.reduce-1.0.5.tgz"
+ integrity sha512-kDdugMl7id9COE8R7MHF5jWk7Dqt/fs4Pv+JXoICnYwqpjjjbUurz6w5fT5IG6brLdJhv6/VoHB0H7oyIBXd+Q==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.20.4"
+ es-array-method-boxes-properly "^1.0.0"
+ is-string "^1.0.7"
+
+asap@~2.0.3:
+ version "2.0.6"
+ resolved "https://registry.npmmirror.com/asap/-/asap-2.0.6.tgz"
+ integrity sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==
+
+at-least-node@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/at-least-node/-/at-least-node-1.0.0.tgz"
+ integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==
+
+autoprefixer@^10.3.5, autoprefixer@^10.4.12:
+ version "10.4.13"
+ resolved "https://registry.npmmirror.com/autoprefixer/-/autoprefixer-10.4.13.tgz"
+ integrity sha512-49vKpMqcZYsJjwotvt4+h/BCjJVnhGwcLpDt5xkcaOG3eLrG/HUYLagrihYsQ+qrIBgIzX1Rw7a6L8I/ZA1Atg==
+ dependencies:
+ browserslist "^4.21.4"
+ caniuse-lite "^1.0.30001426"
+ fraction.js "^4.2.0"
+ normalize-range "^0.1.2"
+ picocolors "^1.0.0"
+ postcss-value-parser "^4.2.0"
+
+axios@^0.25.0:
+ version "0.25.0"
+ resolved "https://registry.npmmirror.com/axios/-/axios-0.25.0.tgz"
+ integrity sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g==
+ dependencies:
+ follow-redirects "^1.14.7"
+
+babel-loader@^8.2.2:
+ version "8.3.0"
+ resolved "https://registry.npmmirror.com/babel-loader/-/babel-loader-8.3.0.tgz"
+ integrity sha512-H8SvsMF+m9t15HNLMipppzkC+Y2Yq+v3SonZyU70RBL/h1gxPkH08Ot8pEE9Z4Kd+czyWJClmFS8qzIP9OZ04Q==
+ dependencies:
+ find-cache-dir "^3.3.1"
+ loader-utils "^2.0.0"
+ make-dir "^3.1.0"
+ schema-utils "^2.6.5"
+
+babel-plugin-apply-mdx-type-prop@1.6.22:
+ version "1.6.22"
+ resolved "https://registry.npmmirror.com/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz"
+ integrity sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "7.10.4"
+ "@mdx-js/util" "1.6.22"
+
+babel-plugin-dynamic-import-node@2.3.0:
+ version "2.3.0"
+ resolved "https://registry.npmmirror.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz"
+ integrity sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ==
+ dependencies:
+ object.assign "^4.1.0"
+
+babel-plugin-extract-import-names@1.6.22:
+ version "1.6.22"
+ resolved "https://registry.npmmirror.com/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz"
+ integrity sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ==
+ dependencies:
+ "@babel/helper-plugin-utils" "7.10.4"
+
+babel-plugin-polyfill-corejs2@^0.3.3:
+ version "0.3.3"
+ resolved "https://registry.npmmirror.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.3.tgz"
+ integrity sha512-8hOdmFYFSZhqg2C/JgLUQ+t52o5nirNwaWM2B9LWteozwIvM14VSwdsCAUET10qT+kmySAlseadmfeeSWFCy+Q==
+ dependencies:
+ "@babel/compat-data" "^7.17.7"
+ "@babel/helper-define-polyfill-provider" "^0.3.3"
+ semver "^6.1.1"
+
+babel-plugin-polyfill-corejs3@^0.6.0:
+ version "0.6.0"
+ resolved "https://registry.npmmirror.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.6.0.tgz"
+ integrity sha512-+eHqR6OPcBhJOGgsIar7xoAB1GcSwVUA3XjAd7HJNzOXT4wv6/H7KIdA/Nc60cvUlDbKApmqNvD1B1bzOt4nyA==
+ dependencies:
+ "@babel/helper-define-polyfill-provider" "^0.3.3"
+ core-js-compat "^3.25.1"
+
+babel-plugin-polyfill-regenerator@^0.4.1:
+ version "0.4.1"
+ resolved "https://registry.npmmirror.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.4.1.tgz"
+ integrity sha512-NtQGmyQDXjQqQ+IzRkBVwEOz9lQ4zxAQZgoAYEtU9dJjnl1Oc98qnN7jcp+bE7O7aYzVpavXE3/VKXNzUbh7aw==
+ dependencies:
+ "@babel/helper-define-polyfill-provider" "^0.3.3"
+
+bail@^1.0.0:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/bail/-/bail-1.0.5.tgz"
+ integrity sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==
+
+balanced-match@^1.0.0:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/balanced-match/-/balanced-match-1.0.2.tgz"
+ integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
+
+base16@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/base16/-/base16-1.0.0.tgz"
+ integrity sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==
+
+batch@0.6.1:
+ version "0.6.1"
+ resolved "https://registry.npmmirror.com/batch/-/batch-0.6.1.tgz"
+ integrity sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==
+
+big.js@^5.2.2:
+ version "5.2.2"
+ resolved "https://registry.npmmirror.com/big.js/-/big.js-5.2.2.tgz"
+ integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==
+
+binary-extensions@^2.0.0:
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/binary-extensions/-/binary-extensions-2.2.0.tgz"
+ integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==
+
+body-parser@1.20.1:
+ version "1.20.1"
+ resolved "https://registry.npmmirror.com/body-parser/-/body-parser-1.20.1.tgz"
+ integrity sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==
+ dependencies:
+ bytes "3.1.2"
+ content-type "~1.0.4"
+ debug "2.6.9"
+ depd "2.0.0"
+ destroy "1.2.0"
+ http-errors "2.0.0"
+ iconv-lite "0.4.24"
+ on-finished "2.4.1"
+ qs "6.11.0"
+ raw-body "2.5.1"
+ type-is "~1.6.18"
+ unpipe "1.0.0"
+
+bonjour-service@^1.0.11:
+ version "1.0.14"
+ resolved "https://registry.npmmirror.com/bonjour-service/-/bonjour-service-1.0.14.tgz"
+ integrity sha512-HIMbgLnk1Vqvs6B4Wq5ep7mxvj9sGz5d1JJyDNSGNIdA/w2MCz6GTjWTdjqOJV1bEPj+6IkxDvWNFKEBxNt4kQ==
+ dependencies:
+ array-flatten "^2.1.2"
+ dns-equal "^1.0.0"
+ fast-deep-equal "^3.1.3"
+ multicast-dns "^7.2.5"
+
+boolbase@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/boolbase/-/boolbase-1.0.0.tgz"
+ integrity sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==
+
+boxen@^5.0.0, boxen@^5.0.1:
+ version "5.1.2"
+ resolved "https://registry.npmmirror.com/boxen/-/boxen-5.1.2.tgz"
+ integrity sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==
+ dependencies:
+ ansi-align "^3.0.0"
+ camelcase "^6.2.0"
+ chalk "^4.1.0"
+ cli-boxes "^2.2.1"
+ string-width "^4.2.2"
+ type-fest "^0.20.2"
+ widest-line "^3.1.0"
+ wrap-ansi "^7.0.0"
+
+brace-expansion@^1.1.7:
+ version "1.1.11"
+ resolved "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-1.1.11.tgz"
+ integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==
+ dependencies:
+ balanced-match "^1.0.0"
+ concat-map "0.0.1"
+
+braces@^3.0.2, braces@~3.0.2:
+ version "3.0.2"
+ resolved "https://registry.npmmirror.com/braces/-/braces-3.0.2.tgz"
+ integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==
+ dependencies:
+ fill-range "^7.0.1"
+
+browserslist@^4.0.0, browserslist@^4.16.5, browserslist@^4.16.6, browserslist@^4.21.10, browserslist@^4.21.3, browserslist@^4.21.4:
+ version "4.23.3"
+ resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz"
+ integrity sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==
+ dependencies:
+ caniuse-lite "^1.0.30001646"
+ electron-to-chromium "^1.5.4"
+ node-releases "^2.0.18"
+ update-browserslist-db "^1.1.0"
+
+buble-jsx-only@^0.19.8:
+ version "0.19.8"
+ resolved "https://registry.npmmirror.com/buble-jsx-only/-/buble-jsx-only-0.19.8.tgz"
+ integrity sha512-7AW19pf7PrKFnGTEDzs6u9+JZqQwM1VnLS19OlqYDhXomtFFknnoQJAPHeg84RMFWAvOhYrG7harizJNwUKJsA==
+ dependencies:
+ acorn "^6.1.1"
+ acorn-dynamic-import "^4.0.0"
+ acorn-jsx "^5.0.1"
+ chalk "^2.4.2"
+ magic-string "^0.25.3"
+ minimist "^1.2.0"
+ regexpu-core "^4.5.4"
+
+buffer-from@^1.0.0:
+ version "1.1.2"
+ resolved "https://registry.npmmirror.com/buffer-from/-/buffer-from-1.1.2.tgz"
+ integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==
+
+bytes@3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/bytes/-/bytes-3.0.0.tgz"
+ integrity sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==
+
+bytes@3.1.2:
+ version "3.1.2"
+ resolved "https://registry.npmmirror.com/bytes/-/bytes-3.1.2.tgz"
+ integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==
+
+cacheable-lookup@^5.0.3:
+ version "5.0.4"
+ resolved "https://registry.npmmirror.com/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz"
+ integrity sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==
+
+cacheable-request@^7.0.2:
+ version "7.0.2"
+ resolved "https://registry.npmmirror.com/cacheable-request/-/cacheable-request-7.0.2.tgz"
+ integrity sha512-pouW8/FmiPQbuGpkXQ9BAPv/Mo5xDGANgSNXzTzJ8DrKGuXOssM4wIQRjfanNRh3Yu5cfYPvcorqbhg2KIJtew==
+ dependencies:
+ clone-response "^1.0.2"
+ get-stream "^5.1.0"
+ http-cache-semantics "^4.0.0"
+ keyv "^4.0.0"
+ lowercase-keys "^2.0.0"
+ normalize-url "^6.0.1"
+ responselike "^2.0.0"
+
+call-bind@^1.0.0, call-bind@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/call-bind/-/call-bind-1.0.2.tgz"
+ integrity sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==
+ dependencies:
+ function-bind "^1.1.1"
+ get-intrinsic "^1.0.2"
+
+callsites@^3.0.0:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/callsites/-/callsites-3.1.0.tgz"
+ integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==
+
+camel-case@^4.1.2:
+ version "4.1.2"
+ resolved "https://registry.npmmirror.com/camel-case/-/camel-case-4.1.2.tgz"
+ integrity sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==
+ dependencies:
+ pascal-case "^3.1.2"
+ tslib "^2.0.3"
+
+camelcase-css@2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/camelcase-css/-/camelcase-css-2.0.1.tgz"
+ integrity sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==
+
+camelcase@^6.2.0:
+ version "6.3.0"
+ resolved "https://registry.npmmirror.com/camelcase/-/camelcase-6.3.0.tgz"
+ integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==
+
+caniuse-api@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/caniuse-api/-/caniuse-api-3.0.0.tgz"
+ integrity sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==
+ dependencies:
+ browserslist "^4.0.0"
+ caniuse-lite "^1.0.0"
+ lodash.memoize "^4.1.2"
+ lodash.uniq "^4.5.0"
+
+caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001426, caniuse-lite@^1.0.30001646:
+ version "1.0.30001651"
+ resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001651.tgz"
+ integrity sha512-9Cf+Xv1jJNe1xPZLGuUXLNkE1BoDkqRqYyFJ9TDYSqhduqA4hu4oR9HluGoWYQC/aj8WHjsGVV+bwkh0+tegRg==
+
+ccount@^1.0.0, ccount@^1.0.3:
+ version "1.1.0"
+ resolved "https://registry.npmmirror.com/ccount/-/ccount-1.1.0.tgz"
+ integrity sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg==
+
+chalk@^2.4.1, chalk@^2.4.2:
+ version "2.4.2"
+ resolved "https://registry.npmmirror.com/chalk/-/chalk-2.4.2.tgz"
+ integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
+ dependencies:
+ ansi-styles "^3.2.1"
+ escape-string-regexp "^1.0.5"
+ supports-color "^5.3.0"
+
+chalk@^4.1.0, chalk@^4.1.2:
+ version "4.1.2"
+ resolved "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz"
+ integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==
+ dependencies:
+ ansi-styles "^4.1.0"
+ supports-color "^7.1.0"
+
+character-entities-legacy@^1.0.0:
+ version "1.1.4"
+ resolved "https://registry.npmmirror.com/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz"
+ integrity sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==
+
+character-entities@^1.0.0:
+ version "1.2.4"
+ resolved "https://registry.npmmirror.com/character-entities/-/character-entities-1.2.4.tgz"
+ integrity sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==
+
+character-reference-invalid@^1.0.0:
+ version "1.1.4"
+ resolved "https://registry.npmmirror.com/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz"
+ integrity sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==
+
+cheerio-select@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/cheerio-select/-/cheerio-select-2.1.0.tgz"
+ integrity sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==
+ dependencies:
+ boolbase "^1.0.0"
+ css-select "^5.1.0"
+ css-what "^6.1.0"
+ domelementtype "^2.3.0"
+ domhandler "^5.0.3"
+ domutils "^3.0.1"
+
+cheerio@^1.0.0-rc.3:
+ version "1.0.0-rc.12"
+ resolved "https://registry.npmmirror.com/cheerio/-/cheerio-1.0.0-rc.12.tgz"
+ integrity sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==
+ dependencies:
+ cheerio-select "^2.1.0"
+ dom-serializer "^2.0.0"
+ domhandler "^5.0.3"
+ domutils "^3.0.1"
+ htmlparser2 "^8.0.1"
+ parse5 "^7.0.0"
+ parse5-htmlparser2-tree-adapter "^7.0.0"
+
+chokidar@^3.4.2, chokidar@^3.5.2, chokidar@^3.5.3:
+ version "3.5.3"
+ resolved "https://registry.npmmirror.com/chokidar/-/chokidar-3.5.3.tgz"
+ integrity sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==
+ dependencies:
+ anymatch "~3.1.2"
+ braces "~3.0.2"
+ glob-parent "~5.1.2"
+ is-binary-path "~2.1.0"
+ is-glob "~4.0.1"
+ normalize-path "~3.0.0"
+ readdirp "~3.6.0"
+ optionalDependencies:
+ fsevents "~2.3.2"
+
+chrome-trace-event@^1.0.2:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz"
+ integrity sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==
+
+ci-info@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/ci-info/-/ci-info-2.0.0.tgz"
+ integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==
+
+ci-info@^3.1.1:
+ version "3.5.0"
+ resolved "https://registry.npmmirror.com/ci-info/-/ci-info-3.5.0.tgz"
+ integrity sha512-yH4RezKOGlOhxkmhbeNuC4eYZKAUsEaGtBuBzDDP1eFUKiccDWzBABxBfOx31IDwDIXMTxWuwAxUGModvkbuVw==
+
+clean-css@^5.1.5, clean-css@^5.2.2:
+ version "5.3.1"
+ resolved "https://registry.npmmirror.com/clean-css/-/clean-css-5.3.1.tgz"
+ integrity sha512-lCr8OHhiWCTw4v8POJovCoh4T7I9U11yVsPjMWWnnMmp9ZowCxyad1Pathle/9HjaDp+fdQKjO9fQydE6RHTZg==
+ dependencies:
+ source-map "~0.6.0"
+
+clean-stack@^2.0.0:
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/clean-stack/-/clean-stack-2.2.0.tgz"
+ integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==
+
+cli-boxes@^2.2.1:
+ version "2.2.1"
+ resolved "https://registry.npmmirror.com/cli-boxes/-/cli-boxes-2.2.1.tgz"
+ integrity sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==
+
+clone-deep@^4.0.1:
+ version "4.0.1"
+ resolved "https://registry.npmmirror.com/clone-deep/-/clone-deep-4.0.1.tgz"
+ integrity sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==
+ dependencies:
+ is-plain-object "^2.0.4"
+ kind-of "^6.0.2"
+ shallow-clone "^3.0.0"
+
+clone-response@^1.0.2:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/clone-response/-/clone-response-1.0.3.tgz"
+ integrity sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==
+ dependencies:
+ mimic-response "^1.0.0"
+
+clsx@^1.1.1:
+ version "1.2.1"
+ resolved "https://registry.npmmirror.com/clsx/-/clsx-1.2.1.tgz"
+ integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==
+
+coa@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.npmmirror.com/coa/-/coa-2.0.2.tgz"
+ integrity sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==
+ dependencies:
+ "@types/q" "^1.5.1"
+ chalk "^2.4.1"
+ q "^1.1.2"
+
+collapse-white-space@^1.0.2:
+ version "1.0.6"
+ resolved "https://registry.npmmirror.com/collapse-white-space/-/collapse-white-space-1.0.6.tgz"
+ integrity sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ==
+
+color-convert@^1.9.0:
+ version "1.9.3"
+ resolved "https://registry.npmmirror.com/color-convert/-/color-convert-1.9.3.tgz"
+ integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==
+ dependencies:
+ color-name "1.1.3"
+
+color-convert@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz"
+ integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==
+ dependencies:
+ color-name "~1.1.4"
+
+color-name@1.1.3:
+ version "1.1.3"
+ resolved "https://registry.npmmirror.com/color-name/-/color-name-1.1.3.tgz"
+ integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==
+
+color-name@~1.1.4:
+ version "1.1.4"
+ resolved "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz"
+ integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
+
+colord@^2.9.1:
+ version "2.9.3"
+ resolved "https://registry.npmmirror.com/colord/-/colord-2.9.3.tgz"
+ integrity sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==
+
+colorette@^2.0.10:
+ version "2.0.19"
+ resolved "https://registry.npmmirror.com/colorette/-/colorette-2.0.19.tgz"
+ integrity sha512-3tlv/dIP7FWvj3BsbHrGLJ6l/oKh1O3TcgBqMn+yyCagOxc23fyzDS6HypQbgxWbkpDnf52p1LuR4eWDQ/K9WQ==
+
+combine-promises@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.npmmirror.com/combine-promises/-/combine-promises-1.1.0.tgz"
+ integrity sha512-ZI9jvcLDxqwaXEixOhArm3r7ReIivsXkpbyEWyeOhzz1QS0iSgBPnWvEqvIQtYyamGCYA88gFhmUrs9hrrQ0pg==
+
+comma-separated-tokens@^1.0.0:
+ version "1.0.8"
+ resolved "https://registry.npmmirror.com/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz"
+ integrity sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==
+
+commander@^2.19.0, commander@^2.20.0:
+ version "2.20.3"
+ resolved "https://registry.npmmirror.com/commander/-/commander-2.20.3.tgz"
+ integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
+
+commander@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/commander/-/commander-5.1.0.tgz"
+ integrity sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==
+
+commander@^7.2.0:
+ version "7.2.0"
+ resolved "https://registry.npmmirror.com/commander/-/commander-7.2.0.tgz"
+ integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==
+
+commander@^8.3.0:
+ version "8.3.0"
+ resolved "https://registry.npmmirror.com/commander/-/commander-8.3.0.tgz"
+ integrity sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==
+
+commondir@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/commondir/-/commondir-1.0.1.tgz"
+ integrity sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==
+
+compressible@~2.0.16:
+ version "2.0.18"
+ resolved "https://registry.npmmirror.com/compressible/-/compressible-2.0.18.tgz"
+ integrity sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==
+ dependencies:
+ mime-db ">= 1.43.0 < 2"
+
+compression@^1.7.4:
+ version "1.7.4"
+ resolved "https://registry.npmmirror.com/compression/-/compression-1.7.4.tgz"
+ integrity sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==
+ dependencies:
+ accepts "~1.3.5"
+ bytes "3.0.0"
+ compressible "~2.0.16"
+ debug "2.6.9"
+ on-headers "~1.0.2"
+ safe-buffer "5.1.2"
+ vary "~1.1.2"
+
+concat-map@0.0.1:
+ version "0.0.1"
+ resolved "https://registry.npmmirror.com/concat-map/-/concat-map-0.0.1.tgz"
+ integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==
+
+configstore@^5.0.1:
+ version "5.0.1"
+ resolved "https://registry.npmmirror.com/configstore/-/configstore-5.0.1.tgz"
+ integrity sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==
+ dependencies:
+ dot-prop "^5.2.0"
+ graceful-fs "^4.1.2"
+ make-dir "^3.0.0"
+ unique-string "^2.0.0"
+ write-file-atomic "^3.0.0"
+ xdg-basedir "^4.0.0"
+
+connect-history-api-fallback@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz"
+ integrity sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==
+
+consola@^2.15.3:
+ version "2.15.3"
+ resolved "https://registry.npmmirror.com/consola/-/consola-2.15.3.tgz"
+ integrity sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==
+
+content-disposition@0.5.2:
+ version "0.5.2"
+ resolved "https://registry.npmmirror.com/content-disposition/-/content-disposition-0.5.2.tgz"
+ integrity sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==
+
+content-disposition@0.5.4:
+ version "0.5.4"
+ resolved "https://registry.npmmirror.com/content-disposition/-/content-disposition-0.5.4.tgz"
+ integrity sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==
+ dependencies:
+ safe-buffer "5.2.1"
+
+content-type@~1.0.4:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/content-type/-/content-type-1.0.4.tgz"
+ integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==
+
+convert-source-map@^1.7.0:
+ version "1.9.0"
+ resolved "https://registry.npmmirror.com/convert-source-map/-/convert-source-map-1.9.0.tgz"
+ integrity sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==
+
+cookie-signature@1.0.6:
+ version "1.0.6"
+ resolved "https://registry.npmmirror.com/cookie-signature/-/cookie-signature-1.0.6.tgz"
+ integrity sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==
+
+cookie@0.5.0:
+ version "0.5.0"
+ resolved "https://registry.npmmirror.com/cookie/-/cookie-0.5.0.tgz"
+ integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==
+
+copy-text-to-clipboard@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/copy-text-to-clipboard/-/copy-text-to-clipboard-3.0.1.tgz"
+ integrity sha512-rvVsHrpFcL4F2P8ihsoLdFHmd404+CMg71S756oRSeQgqk51U3kicGdnvfkrxva0xXH92SjGS62B0XIJsbh+9Q==
+
+copy-webpack-plugin@^9.0.1:
+ version "9.1.0"
+ resolved "https://registry.npmmirror.com/copy-webpack-plugin/-/copy-webpack-plugin-9.1.0.tgz"
+ integrity sha512-rxnR7PaGigJzhqETHGmAcxKnLZSR5u1Y3/bcIv/1FnqXedcL/E2ewK7ZCNrArJKCiSv8yVXhTqetJh8inDvfsA==
+ dependencies:
+ fast-glob "^3.2.7"
+ glob-parent "^6.0.1"
+ globby "^11.0.3"
+ normalize-path "^3.0.0"
+ schema-utils "^3.1.1"
+ serialize-javascript "^6.0.0"
+
+core-js-compat@^3.25.1:
+ version "3.26.0"
+ resolved "https://registry.npmmirror.com/core-js-compat/-/core-js-compat-3.26.0.tgz"
+ integrity sha512-piOX9Go+Z4f9ZiBFLnZ5VrOpBl0h7IGCkiFUN11QTe6LjAvOT3ifL/5TdoizMh99hcGy5SoLyWbapIY/PIb/3A==
+ dependencies:
+ browserslist "^4.21.4"
+
+core-js-pure@^3.25.1:
+ version "3.26.0"
+ resolved "https://registry.npmmirror.com/core-js-pure/-/core-js-pure-3.26.0.tgz"
+ integrity sha512-LiN6fylpVBVwT8twhhluD9TzXmZQQsr2I2eIKtWNbZI1XMfBT7CV18itaN6RA7EtQd/SDdRx/wzvAShX2HvhQA==
+
+core-js@^3.18.0:
+ version "3.26.0"
+ resolved "https://registry.npmmirror.com/core-js/-/core-js-3.26.0.tgz"
+ integrity sha512-+DkDrhoR4Y0PxDz6rurahuB+I45OsEUv8E1maPTB6OuHRohMMcznBq9TMpdpDMm/hUPob/mJJS3PqgbHpMTQgw==
+
+core-util-is@~1.0.0:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/core-util-is/-/core-util-is-1.0.3.tgz"
+ integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==
+
+cosmiconfig@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmmirror.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz"
+ integrity sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==
+ dependencies:
+ "@types/parse-json" "^4.0.0"
+ import-fresh "^3.1.0"
+ parse-json "^5.0.0"
+ path-type "^4.0.0"
+ yaml "^1.7.2"
+
+cosmiconfig@^7.0.0, cosmiconfig@^7.0.1:
+ version "7.0.1"
+ resolved "https://registry.npmmirror.com/cosmiconfig/-/cosmiconfig-7.0.1.tgz"
+ integrity sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ==
+ dependencies:
+ "@types/parse-json" "^4.0.0"
+ import-fresh "^3.2.1"
+ parse-json "^5.0.0"
+ path-type "^4.0.0"
+ yaml "^1.10.0"
+
+cross-fetch@^3.1.5:
+ version "3.1.5"
+ resolved "https://registry.npmmirror.com/cross-fetch/-/cross-fetch-3.1.5.tgz"
+ integrity sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==
+ dependencies:
+ node-fetch "2.6.7"
+
+cross-spawn@^7.0.3:
+ version "7.0.3"
+ resolved "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-7.0.3.tgz"
+ integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
+ dependencies:
+ path-key "^3.1.0"
+ shebang-command "^2.0.0"
+ which "^2.0.1"
+
+crypto-random-string@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/crypto-random-string/-/crypto-random-string-2.0.0.tgz"
+ integrity sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==
+
+css-declaration-sorter@^6.3.1:
+ version "6.3.1"
+ resolved "https://registry.npmmirror.com/css-declaration-sorter/-/css-declaration-sorter-6.3.1.tgz"
+ integrity sha512-fBffmak0bPAnyqc/HO8C3n2sHrp9wcqQz6ES9koRF2/mLOVAx9zIQ3Y7R29sYCteTPqMCwns4WYQoCX91Xl3+w==
+
+css-loader@^5.1.1:
+ version "5.2.7"
+ resolved "https://registry.npmmirror.com/css-loader/-/css-loader-5.2.7.tgz"
+ integrity sha512-Q7mOvpBNBG7YrVGMxRxcBJZFL75o+cH2abNASdibkj/fffYD8qWbInZrD0S9ccI6vZclF3DsHE7njGlLtaHbhg==
+ dependencies:
+ icss-utils "^5.1.0"
+ loader-utils "^2.0.0"
+ postcss "^8.2.15"
+ postcss-modules-extract-imports "^3.0.0"
+ postcss-modules-local-by-default "^4.0.0"
+ postcss-modules-scope "^3.0.0"
+ postcss-modules-values "^4.0.0"
+ postcss-value-parser "^4.1.0"
+ schema-utils "^3.0.0"
+ semver "^7.3.5"
+
+css-minimizer-webpack-plugin@^3.0.2:
+ version "3.4.1"
+ resolved "https://registry.npmmirror.com/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-3.4.1.tgz"
+ integrity sha512-1u6D71zeIfgngN2XNRJefc/hY7Ybsxd74Jm4qngIXyUEk7fss3VUzuHxLAq/R8NAba4QU9OUSaMZlbpRc7bM4Q==
+ dependencies:
+ cssnano "^5.0.6"
+ jest-worker "^27.0.2"
+ postcss "^8.3.5"
+ schema-utils "^4.0.0"
+ serialize-javascript "^6.0.0"
+ source-map "^0.6.1"
+
+css-select-base-adapter@^0.1.1:
+ version "0.1.1"
+ resolved "https://registry.npmmirror.com/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz"
+ integrity sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w==
+
+css-select@^2.0.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/css-select/-/css-select-2.1.0.tgz"
+ integrity sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==
+ dependencies:
+ boolbase "^1.0.0"
+ css-what "^3.2.1"
+ domutils "^1.7.0"
+ nth-check "^1.0.2"
+
+css-select@^4.1.3:
+ version "4.3.0"
+ resolved "https://registry.npmmirror.com/css-select/-/css-select-4.3.0.tgz"
+ integrity sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==
+ dependencies:
+ boolbase "^1.0.0"
+ css-what "^6.0.1"
+ domhandler "^4.3.1"
+ domutils "^2.8.0"
+ nth-check "^2.0.1"
+
+css-select@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/css-select/-/css-select-5.1.0.tgz"
+ integrity sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==
+ dependencies:
+ boolbase "^1.0.0"
+ css-what "^6.1.0"
+ domhandler "^5.0.2"
+ domutils "^3.0.1"
+ nth-check "^2.0.1"
+
+css-tree@1.0.0-alpha.37:
+ version "1.0.0-alpha.37"
+ resolved "https://registry.npmmirror.com/css-tree/-/css-tree-1.0.0-alpha.37.tgz"
+ integrity sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==
+ dependencies:
+ mdn-data "2.0.4"
+ source-map "^0.6.1"
+
+css-tree@^1.1.2, css-tree@^1.1.3:
+ version "1.1.3"
+ resolved "https://registry.npmmirror.com/css-tree/-/css-tree-1.1.3.tgz"
+ integrity sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==
+ dependencies:
+ mdn-data "2.0.14"
+ source-map "^0.6.1"
+
+css-what@^3.2.1:
+ version "3.4.2"
+ resolved "https://registry.npmmirror.com/css-what/-/css-what-3.4.2.tgz"
+ integrity sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==
+
+css-what@^6.0.1, css-what@^6.1.0:
+ version "6.1.0"
+ resolved "https://registry.npmmirror.com/css-what/-/css-what-6.1.0.tgz"
+ integrity sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==
+
+cssesc@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/cssesc/-/cssesc-3.0.0.tgz"
+ integrity sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==
+
+cssnano-preset-advanced@^5.1.4:
+ version "5.3.9"
+ resolved "https://registry.npmmirror.com/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.9.tgz"
+ integrity sha512-njnh4pp1xCsibJcEHnWZb4EEzni0ePMqPuPNyuWT4Z+YeXmsgqNuTPIljXFEXhxGsWs9183JkXgHxc1TcsahIg==
+ dependencies:
+ autoprefixer "^10.4.12"
+ cssnano-preset-default "^5.2.13"
+ postcss-discard-unused "^5.1.0"
+ postcss-merge-idents "^5.1.1"
+ postcss-reduce-idents "^5.2.0"
+ postcss-zindex "^5.1.0"
+
+cssnano-preset-default@^5.2.13:
+ version "5.2.13"
+ resolved "https://registry.npmmirror.com/cssnano-preset-default/-/cssnano-preset-default-5.2.13.tgz"
+ integrity sha512-PX7sQ4Pb+UtOWuz8A1d+Rbi+WimBIxJTRyBdgGp1J75VU0r/HFQeLnMYgHiCAp6AR4rqrc7Y4R+1Rjk3KJz6DQ==
+ dependencies:
+ css-declaration-sorter "^6.3.1"
+ cssnano-utils "^3.1.0"
+ postcss-calc "^8.2.3"
+ postcss-colormin "^5.3.0"
+ postcss-convert-values "^5.1.3"
+ postcss-discard-comments "^5.1.2"
+ postcss-discard-duplicates "^5.1.0"
+ postcss-discard-empty "^5.1.1"
+ postcss-discard-overridden "^5.1.0"
+ postcss-merge-longhand "^5.1.7"
+ postcss-merge-rules "^5.1.3"
+ postcss-minify-font-values "^5.1.0"
+ postcss-minify-gradients "^5.1.1"
+ postcss-minify-params "^5.1.4"
+ postcss-minify-selectors "^5.2.1"
+ postcss-normalize-charset "^5.1.0"
+ postcss-normalize-display-values "^5.1.0"
+ postcss-normalize-positions "^5.1.1"
+ postcss-normalize-repeat-style "^5.1.1"
+ postcss-normalize-string "^5.1.0"
+ postcss-normalize-timing-functions "^5.1.0"
+ postcss-normalize-unicode "^5.1.1"
+ postcss-normalize-url "^5.1.0"
+ postcss-normalize-whitespace "^5.1.1"
+ postcss-ordered-values "^5.1.3"
+ postcss-reduce-initial "^5.1.1"
+ postcss-reduce-transforms "^5.1.0"
+ postcss-svgo "^5.1.0"
+ postcss-unique-selectors "^5.1.1"
+
+cssnano-utils@^3.1.0:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/cssnano-utils/-/cssnano-utils-3.1.0.tgz"
+ integrity sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==
+
+cssnano@^5.0.6, cssnano@^5.0.8:
+ version "5.1.14"
+ resolved "https://registry.npmmirror.com/cssnano/-/cssnano-5.1.14.tgz"
+ integrity sha512-Oou7ihiTocbKqi0J1bB+TRJIQX5RMR3JghA8hcWSw9mjBLQ5Y3RWqEDoYG3sRNlAbCIXpqMoZGbq5KDR3vdzgw==
+ dependencies:
+ cssnano-preset-default "^5.2.13"
+ lilconfig "^2.0.3"
+ yaml "^1.10.2"
+
+csso@^4.0.2, csso@^4.2.0:
+ version "4.2.0"
+ resolved "https://registry.npmmirror.com/csso/-/csso-4.2.0.tgz"
+ integrity sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==
+ dependencies:
+ css-tree "^1.1.2"
+
+csstype@^3.0.2:
+ version "3.1.1"
+ resolved "https://registry.npmmirror.com/csstype/-/csstype-3.1.1.tgz"
+ integrity sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw==
+
+debug@2.6.9, debug@^2.6.0:
+ version "2.6.9"
+ resolved "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz"
+ integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
+ dependencies:
+ ms "2.0.0"
+
+debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.2.0:
+ version "4.3.4"
+ resolved "https://registry.npmmirror.com/debug/-/debug-4.3.4.tgz"
+ integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==
+ dependencies:
+ ms "2.1.2"
+
+decompress-response@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmmirror.com/decompress-response/-/decompress-response-6.0.0.tgz"
+ integrity sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==
+ dependencies:
+ mimic-response "^3.1.0"
+
+deep-extend@^0.6.0:
+ version "0.6.0"
+ resolved "https://registry.npmmirror.com/deep-extend/-/deep-extend-0.6.0.tgz"
+ integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==
+
+deepmerge@^4.2.2:
+ version "4.2.2"
+ resolved "https://registry.npmmirror.com/deepmerge/-/deepmerge-4.2.2.tgz"
+ integrity sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==
+
+default-gateway@^6.0.3:
+ version "6.0.3"
+ resolved "https://registry.npmmirror.com/default-gateway/-/default-gateway-6.0.3.tgz"
+ integrity sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==
+ dependencies:
+ execa "^5.0.0"
+
+defer-to-connect@^2.0.0:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/defer-to-connect/-/defer-to-connect-2.0.1.tgz"
+ integrity sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==
+
+define-lazy-prop@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz"
+ integrity sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==
+
+define-properties@^1.1.3, define-properties@^1.1.4:
+ version "1.1.4"
+ resolved "https://registry.npmmirror.com/define-properties/-/define-properties-1.1.4.tgz"
+ integrity sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==
+ dependencies:
+ has-property-descriptors "^1.0.0"
+ object-keys "^1.1.1"
+
+del@^6.0.0:
+ version "6.1.1"
+ resolved "https://registry.npmmirror.com/del/-/del-6.1.1.tgz"
+ integrity sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==
+ dependencies:
+ globby "^11.0.1"
+ graceful-fs "^4.2.4"
+ is-glob "^4.0.1"
+ is-path-cwd "^2.2.0"
+ is-path-inside "^3.0.2"
+ p-map "^4.0.0"
+ rimraf "^3.0.2"
+ slash "^3.0.0"
+
+depd@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/depd/-/depd-2.0.0.tgz"
+ integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==
+
+depd@~1.1.2:
+ version "1.1.2"
+ resolved "https://registry.npmmirror.com/depd/-/depd-1.1.2.tgz"
+ integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==
+
+destroy@1.2.0:
+ version "1.2.0"
+ resolved "https://registry.npmmirror.com/destroy/-/destroy-1.2.0.tgz"
+ integrity sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==
+
+detab@2.0.4:
+ version "2.0.4"
+ resolved "https://registry.npmmirror.com/detab/-/detab-2.0.4.tgz"
+ integrity sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g==
+ dependencies:
+ repeat-string "^1.5.4"
+
+detect-node@^2.0.4:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/detect-node/-/detect-node-2.1.0.tgz"
+ integrity sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==
+
+detect-port-alt@^1.1.6:
+ version "1.1.6"
+ resolved "https://registry.npmmirror.com/detect-port-alt/-/detect-port-alt-1.1.6.tgz"
+ integrity sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==
+ dependencies:
+ address "^1.0.1"
+ debug "^2.6.0"
+
+detect-port@^1.3.0:
+ version "1.5.1"
+ resolved "https://registry.npmmirror.com/detect-port/-/detect-port-1.5.1.tgz"
+ integrity sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==
+ dependencies:
+ address "^1.0.1"
+ debug "4"
+
+dir-glob@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/dir-glob/-/dir-glob-3.0.1.tgz"
+ integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==
+ dependencies:
+ path-type "^4.0.0"
+
+dns-equal@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/dns-equal/-/dns-equal-1.0.0.tgz"
+ integrity sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==
+
+dns-packet@^5.2.2:
+ version "5.4.0"
+ resolved "https://registry.npmmirror.com/dns-packet/-/dns-packet-5.4.0.tgz"
+ integrity sha512-EgqGeaBB8hLiHLZtp/IbaDQTL8pZ0+IvwzSHA6d7VyMDM+B9hgddEMa9xjK5oYnw0ci0JQ6g2XCD7/f6cafU6g==
+ dependencies:
+ "@leichtgewicht/ip-codec" "^2.0.1"
+
+dom-converter@^0.2.0:
+ version "0.2.0"
+ resolved "https://registry.npmmirror.com/dom-converter/-/dom-converter-0.2.0.tgz"
+ integrity sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==
+ dependencies:
+ utila "~0.4"
+
+dom-serializer@0:
+ version "0.2.2"
+ resolved "https://registry.npmmirror.com/dom-serializer/-/dom-serializer-0.2.2.tgz"
+ integrity sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==
+ dependencies:
+ domelementtype "^2.0.1"
+ entities "^2.0.0"
+
+dom-serializer@^1.0.1:
+ version "1.4.1"
+ resolved "https://registry.npmmirror.com/dom-serializer/-/dom-serializer-1.4.1.tgz"
+ integrity sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==
+ dependencies:
+ domelementtype "^2.0.1"
+ domhandler "^4.2.0"
+ entities "^2.0.0"
+
+dom-serializer@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/dom-serializer/-/dom-serializer-2.0.0.tgz"
+ integrity sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==
+ dependencies:
+ domelementtype "^2.3.0"
+ domhandler "^5.0.2"
+ entities "^4.2.0"
+
+domelementtype@1:
+ version "1.3.1"
+ resolved "https://registry.npmmirror.com/domelementtype/-/domelementtype-1.3.1.tgz"
+ integrity sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==
+
+domelementtype@^2.0.1, domelementtype@^2.2.0, domelementtype@^2.3.0:
+ version "2.3.0"
+ resolved "https://registry.npmmirror.com/domelementtype/-/domelementtype-2.3.0.tgz"
+ integrity sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==
+
+domhandler@^4.0.0, domhandler@^4.2.0, domhandler@^4.3.1:
+ version "4.3.1"
+ resolved "https://registry.npmmirror.com/domhandler/-/domhandler-4.3.1.tgz"
+ integrity sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==
+ dependencies:
+ domelementtype "^2.2.0"
+
+domhandler@^5.0.1, domhandler@^5.0.2, domhandler@^5.0.3:
+ version "5.0.3"
+ resolved "https://registry.npmmirror.com/domhandler/-/domhandler-5.0.3.tgz"
+ integrity sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==
+ dependencies:
+ domelementtype "^2.3.0"
+
+domutils@^1.7.0:
+ version "1.7.0"
+ resolved "https://registry.npmmirror.com/domutils/-/domutils-1.7.0.tgz"
+ integrity sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==
+ dependencies:
+ dom-serializer "0"
+ domelementtype "1"
+
+domutils@^2.5.2, domutils@^2.8.0:
+ version "2.8.0"
+ resolved "https://registry.npmmirror.com/domutils/-/domutils-2.8.0.tgz"
+ integrity sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==
+ dependencies:
+ dom-serializer "^1.0.1"
+ domelementtype "^2.2.0"
+ domhandler "^4.2.0"
+
+domutils@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/domutils/-/domutils-3.0.1.tgz"
+ integrity sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==
+ dependencies:
+ dom-serializer "^2.0.0"
+ domelementtype "^2.3.0"
+ domhandler "^5.0.1"
+
+dot-case@^3.0.4:
+ version "3.0.4"
+ resolved "https://registry.npmmirror.com/dot-case/-/dot-case-3.0.4.tgz"
+ integrity sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==
+ dependencies:
+ no-case "^3.0.4"
+ tslib "^2.0.3"
+
+dot-prop@^5.2.0:
+ version "5.3.0"
+ resolved "https://registry.npmmirror.com/dot-prop/-/dot-prop-5.3.0.tgz"
+ integrity sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==
+ dependencies:
+ is-obj "^2.0.0"
+
+duplexer@^0.1.1, duplexer@^0.1.2:
+ version "0.1.2"
+ resolved "https://registry.npmmirror.com/duplexer/-/duplexer-0.1.2.tgz"
+ integrity sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==
+
+ee-first@1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmmirror.com/ee-first/-/ee-first-1.1.1.tgz"
+ integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==
+
+electron-to-chromium@^1.5.4:
+ version "1.5.6"
+ resolved "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.6.tgz"
+ integrity sha512-jwXWsM5RPf6j9dPYzaorcBSUg6AiqocPEyMpkchkvntaH9HGfOOMZwxMJjDY/XEs3T5dM7uyH1VhRMkqUU9qVw==
+
+emoji-regex@^8.0.0:
+ version "8.0.0"
+ resolved "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz"
+ integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==
+
+emojis-list@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/emojis-list/-/emojis-list-3.0.0.tgz"
+ integrity sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==
+
+emoticon@^3.2.0:
+ version "3.2.0"
+ resolved "https://registry.npmmirror.com/emoticon/-/emoticon-3.2.0.tgz"
+ integrity sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg==
+
+encodeurl@~1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/encodeurl/-/encodeurl-1.0.2.tgz"
+ integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==
+
+end-of-stream@^1.1.0:
+ version "1.4.4"
+ resolved "https://registry.npmmirror.com/end-of-stream/-/end-of-stream-1.4.4.tgz"
+ integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==
+ dependencies:
+ once "^1.4.0"
+
+enhanced-resolve@^5.17.0:
+ version "5.17.1"
+ resolved "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz"
+ integrity sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==
+ dependencies:
+ graceful-fs "^4.2.4"
+ tapable "^2.2.0"
+
+entities@^2.0.0:
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/entities/-/entities-2.2.0.tgz"
+ integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==
+
+entities@^4.2.0, entities@^4.3.0, entities@^4.4.0:
+ version "4.4.0"
+ resolved "https://registry.npmmirror.com/entities/-/entities-4.4.0.tgz"
+ integrity sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==
+
+error-ex@^1.3.1:
+ version "1.3.2"
+ resolved "https://registry.npmmirror.com/error-ex/-/error-ex-1.3.2.tgz"
+ integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==
+ dependencies:
+ is-arrayish "^0.2.1"
+
+es-abstract@^1.17.2, es-abstract@^1.19.0, es-abstract@^1.19.1, es-abstract@^1.19.5, es-abstract@^1.20.1, es-abstract@^1.20.4:
+ version "1.20.4"
+ resolved "https://registry.npmmirror.com/es-abstract/-/es-abstract-1.20.4.tgz"
+ integrity sha512-0UtvRN79eMe2L+UNEF1BwRe364sj/DXhQ/k5FmivgoSdpM90b8Jc0mDzKMGo7QS0BVbOP/bTwBKNnDc9rNzaPA==
+ dependencies:
+ call-bind "^1.0.2"
+ es-to-primitive "^1.2.1"
+ function-bind "^1.1.1"
+ function.prototype.name "^1.1.5"
+ get-intrinsic "^1.1.3"
+ get-symbol-description "^1.0.0"
+ has "^1.0.3"
+ has-property-descriptors "^1.0.0"
+ has-symbols "^1.0.3"
+ internal-slot "^1.0.3"
+ is-callable "^1.2.7"
+ is-negative-zero "^2.0.2"
+ is-regex "^1.1.4"
+ is-shared-array-buffer "^1.0.2"
+ is-string "^1.0.7"
+ is-weakref "^1.0.2"
+ object-inspect "^1.12.2"
+ object-keys "^1.1.1"
+ object.assign "^4.1.4"
+ regexp.prototype.flags "^1.4.3"
+ safe-regex-test "^1.0.0"
+ string.prototype.trimend "^1.0.5"
+ string.prototype.trimstart "^1.0.5"
+ unbox-primitive "^1.0.2"
+
+es-array-method-boxes-properly@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz"
+ integrity sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA==
+
+es-module-lexer@^1.2.1:
+ version "1.5.4"
+ resolved "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.5.4.tgz"
+ integrity sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw==
+
+es-to-primitive@^1.2.1:
+ version "1.2.1"
+ resolved "https://registry.npmmirror.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz"
+ integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==
+ dependencies:
+ is-callable "^1.1.4"
+ is-date-object "^1.0.1"
+ is-symbol "^1.0.2"
+
+escalade@^3.1.2:
+ version "3.1.2"
+ resolved "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz"
+ integrity sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==
+
+escape-goat@^2.0.0:
+ version "2.1.1"
+ resolved "https://registry.npmmirror.com/escape-goat/-/escape-goat-2.1.1.tgz"
+ integrity sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==
+
+escape-html@^1.0.3, escape-html@~1.0.3:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/escape-html/-/escape-html-1.0.3.tgz"
+ integrity sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==
+
+escape-string-regexp@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz"
+ integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==
+
+escape-string-regexp@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz"
+ integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==
+
+escape-string-regexp@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz"
+ integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==
+
+eslint-scope@5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/eslint-scope/-/eslint-scope-5.1.1.tgz"
+ integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==
+ dependencies:
+ esrecurse "^4.3.0"
+ estraverse "^4.1.1"
+
+esprima@^4.0.0:
+ version "4.0.1"
+ resolved "https://registry.npmmirror.com/esprima/-/esprima-4.0.1.tgz"
+ integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
+
+esrecurse@^4.3.0:
+ version "4.3.0"
+ resolved "https://registry.npmmirror.com/esrecurse/-/esrecurse-4.3.0.tgz"
+ integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==
+ dependencies:
+ estraverse "^5.2.0"
+
+estraverse@^4.1.1:
+ version "4.3.0"
+ resolved "https://registry.npmmirror.com/estraverse/-/estraverse-4.3.0.tgz"
+ integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==
+
+estraverse@^5.2.0:
+ version "5.3.0"
+ resolved "https://registry.npmmirror.com/estraverse/-/estraverse-5.3.0.tgz"
+ integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==
+
+esutils@^2.0.2:
+ version "2.0.3"
+ resolved "https://registry.npmmirror.com/esutils/-/esutils-2.0.3.tgz"
+ integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==
+
+eta@2.0.0, eta@^1.12.3:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/eta/-/eta-2.0.0.tgz#376865fadebc899e5b6dfce82fae64cbbe47e594"
+ integrity sha512-NqE7S2VmVwgMS8yBxsH4VgNQjNjLq1gfGU0u9I6Cjh468nPRMoDfGdK9n1p/3Dvsw3ebklDkZsFAnKJ9sefjBA==
+
+etag@~1.8.1:
+ version "1.8.1"
+ resolved "https://registry.npmmirror.com/etag/-/etag-1.8.1.tgz"
+ integrity sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==
+
+eval@^0.1.8:
+ version "0.1.8"
+ resolved "https://registry.npmmirror.com/eval/-/eval-0.1.8.tgz"
+ integrity sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==
+ dependencies:
+ "@types/node" "*"
+ require-like ">= 0.1.1"
+
+eventemitter3@^4.0.0:
+ version "4.0.7"
+ resolved "https://registry.npmmirror.com/eventemitter3/-/eventemitter3-4.0.7.tgz"
+ integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==
+
+events@^3.2.0:
+ version "3.3.0"
+ resolved "https://registry.npmmirror.com/events/-/events-3.3.0.tgz"
+ integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==
+
+execa@^5.0.0:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/execa/-/execa-5.1.1.tgz"
+ integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==
+ dependencies:
+ cross-spawn "^7.0.3"
+ get-stream "^6.0.0"
+ human-signals "^2.1.0"
+ is-stream "^2.0.0"
+ merge-stream "^2.0.0"
+ npm-run-path "^4.0.1"
+ onetime "^5.1.2"
+ signal-exit "^3.0.3"
+ strip-final-newline "^2.0.0"
+
+express@^4.17.3:
+ version "4.18.2"
+ resolved "https://registry.npmmirror.com/express/-/express-4.18.2.tgz"
+ integrity sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==
+ dependencies:
+ accepts "~1.3.8"
+ array-flatten "1.1.1"
+ body-parser "1.20.1"
+ content-disposition "0.5.4"
+ content-type "~1.0.4"
+ cookie "0.5.0"
+ cookie-signature "1.0.6"
+ debug "2.6.9"
+ depd "2.0.0"
+ encodeurl "~1.0.2"
+ escape-html "~1.0.3"
+ etag "~1.8.1"
+ finalhandler "1.2.0"
+ fresh "0.5.2"
+ http-errors "2.0.0"
+ merge-descriptors "1.0.1"
+ methods "~1.1.2"
+ on-finished "2.4.1"
+ parseurl "~1.3.3"
+ path-to-regexp "0.1.7"
+ proxy-addr "~2.0.7"
+ qs "6.11.0"
+ range-parser "~1.2.1"
+ safe-buffer "5.2.1"
+ send "0.18.0"
+ serve-static "1.15.0"
+ setprototypeof "1.2.0"
+ statuses "2.0.1"
+ type-is "~1.6.18"
+ utils-merge "1.0.1"
+ vary "~1.1.2"
+
+extend-shallow@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-2.0.1.tgz"
+ integrity sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==
+ dependencies:
+ is-extendable "^0.1.0"
+
+extend@^3.0.0:
+ version "3.0.2"
+ resolved "https://registry.npmmirror.com/extend/-/extend-3.0.2.tgz"
+ integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==
+
+fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3:
+ version "3.1.3"
+ resolved "https://registry.npmmirror.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz"
+ integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==
+
+fast-glob@^3.2.7, fast-glob@^3.2.9:
+ version "3.2.12"
+ resolved "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.2.12.tgz"
+ integrity sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==
+ dependencies:
+ "@nodelib/fs.stat" "^2.0.2"
+ "@nodelib/fs.walk" "^1.2.3"
+ glob-parent "^5.1.2"
+ merge2 "^1.3.0"
+ micromatch "^4.0.4"
+
+fast-json-stable-stringify@^2.0.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz"
+ integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==
+
+fast-url-parser@1.1.3:
+ version "1.1.3"
+ resolved "https://registry.npmmirror.com/fast-url-parser/-/fast-url-parser-1.1.3.tgz"
+ integrity sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==
+ dependencies:
+ punycode "^1.3.2"
+
+fastq@^1.6.0:
+ version "1.13.0"
+ resolved "https://registry.npmmirror.com/fastq/-/fastq-1.13.0.tgz"
+ integrity sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==
+ dependencies:
+ reusify "^1.0.4"
+
+faye-websocket@^0.11.3:
+ version "0.11.4"
+ resolved "https://registry.npmmirror.com/faye-websocket/-/faye-websocket-0.11.4.tgz"
+ integrity sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==
+ dependencies:
+ websocket-driver ">=0.5.1"
+
+fbemitter@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/fbemitter/-/fbemitter-3.0.0.tgz"
+ integrity sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw==
+ dependencies:
+ fbjs "^3.0.0"
+
+fbjs-css-vars@^1.0.0:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz"
+ integrity sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ==
+
+fbjs@^3.0.0, fbjs@^3.0.1:
+ version "3.0.4"
+ resolved "https://registry.npmmirror.com/fbjs/-/fbjs-3.0.4.tgz"
+ integrity sha512-ucV0tDODnGV3JCnnkmoszb5lf4bNpzjv80K41wd4k798Etq+UYD0y0TIfalLjZoKgjive6/adkRnszwapiDgBQ==
+ dependencies:
+ cross-fetch "^3.1.5"
+ fbjs-css-vars "^1.0.0"
+ loose-envify "^1.0.0"
+ object-assign "^4.1.0"
+ promise "^7.1.1"
+ setimmediate "^1.0.5"
+ ua-parser-js "^0.7.30"
+
+feed@^4.2.2:
+ version "4.2.2"
+ resolved "https://registry.npmmirror.com/feed/-/feed-4.2.2.tgz"
+ integrity sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==
+ dependencies:
+ xml-js "^1.6.11"
+
+file-loader@^6.2.0:
+ version "6.2.0"
+ resolved "https://registry.npmmirror.com/file-loader/-/file-loader-6.2.0.tgz"
+ integrity sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==
+ dependencies:
+ loader-utils "^2.0.0"
+ schema-utils "^3.0.0"
+
+filesize@^6.1.0:
+ version "6.4.0"
+ resolved "https://registry.npmmirror.com/filesize/-/filesize-6.4.0.tgz"
+ integrity sha512-mjFIpOHC4jbfcTfoh4rkWpI31mF7viw9ikj/JyLoKzqlwG/YsefKfvYlYhdYdg/9mtK2z1AzgN/0LvVQ3zdlSQ==
+
+fill-range@^7.0.1:
+ version "7.0.1"
+ resolved "https://registry.npmmirror.com/fill-range/-/fill-range-7.0.1.tgz"
+ integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==
+ dependencies:
+ to-regex-range "^5.0.1"
+
+finalhandler@1.2.0:
+ version "1.2.0"
+ resolved "https://registry.npmmirror.com/finalhandler/-/finalhandler-1.2.0.tgz"
+ integrity sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==
+ dependencies:
+ debug "2.6.9"
+ encodeurl "~1.0.2"
+ escape-html "~1.0.3"
+ on-finished "2.4.1"
+ parseurl "~1.3.3"
+ statuses "2.0.1"
+ unpipe "~1.0.0"
+
+find-cache-dir@^3.3.1:
+ version "3.3.2"
+ resolved "https://registry.npmmirror.com/find-cache-dir/-/find-cache-dir-3.3.2.tgz"
+ integrity sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==
+ dependencies:
+ commondir "^1.0.1"
+ make-dir "^3.0.2"
+ pkg-dir "^4.1.0"
+
+find-up@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/find-up/-/find-up-3.0.0.tgz"
+ integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==
+ dependencies:
+ locate-path "^3.0.0"
+
+find-up@^4.0.0, find-up@^4.1.0:
+ version "4.1.0"
+ resolved "https://registry.npmmirror.com/find-up/-/find-up-4.1.0.tgz"
+ integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==
+ dependencies:
+ locate-path "^5.0.0"
+ path-exists "^4.0.0"
+
+find-up@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.npmmirror.com/find-up/-/find-up-5.0.0.tgz"
+ integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==
+ dependencies:
+ locate-path "^6.0.0"
+ path-exists "^4.0.0"
+
+flux@^4.0.1:
+ version "4.0.3"
+ resolved "https://registry.npmmirror.com/flux/-/flux-4.0.3.tgz"
+ integrity sha512-yKAbrp7JhZhj6uiT1FTuVMlIAT1J4jqEyBpFApi1kxpGZCvacMVc/t1pMQyotqHhAgvoE3bNvAykhCo2CLjnYw==
+ dependencies:
+ fbemitter "^3.0.0"
+ fbjs "^3.0.1"
+
+follow-redirects@^1.0.0, follow-redirects@^1.14.7:
+ version "1.15.2"
+ resolved "https://registry.npmmirror.com/follow-redirects/-/follow-redirects-1.15.2.tgz"
+ integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==
+
+fork-ts-checker-webpack-plugin@^6.0.5:
+ version "6.5.2"
+ resolved "https://registry.npmmirror.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.2.tgz"
+ integrity sha512-m5cUmF30xkZ7h4tWUgTAcEaKmUW7tfyUyTqNNOz7OxWJ0v1VWKTcOvH8FWHUwSjlW/356Ijc9vi3XfcPstpQKA==
+ dependencies:
+ "@babel/code-frame" "^7.8.3"
+ "@types/json-schema" "^7.0.5"
+ chalk "^4.1.0"
+ chokidar "^3.4.2"
+ cosmiconfig "^6.0.0"
+ deepmerge "^4.2.2"
+ fs-extra "^9.0.0"
+ glob "^7.1.6"
+ memfs "^3.1.2"
+ minimatch "^3.0.4"
+ schema-utils "2.7.0"
+ semver "^7.3.2"
+ tapable "^1.0.0"
+
+forwarded@0.2.0:
+ version "0.2.0"
+ resolved "https://registry.npmmirror.com/forwarded/-/forwarded-0.2.0.tgz"
+ integrity sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==
+
+fraction.js@^4.2.0:
+ version "4.2.0"
+ resolved "https://registry.npmmirror.com/fraction.js/-/fraction.js-4.2.0.tgz"
+ integrity sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==
+
+fresh@0.5.2:
+ version "0.5.2"
+ resolved "https://registry.npmmirror.com/fresh/-/fresh-0.5.2.tgz"
+ integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==
+
+fs-extra@^10.0.0, fs-extra@^10.1.0:
+ version "10.1.0"
+ resolved "https://registry.npmmirror.com/fs-extra/-/fs-extra-10.1.0.tgz"
+ integrity sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==
+ dependencies:
+ graceful-fs "^4.2.0"
+ jsonfile "^6.0.1"
+ universalify "^2.0.0"
+
+fs-extra@^9.0.0, fs-extra@^9.0.1:
+ version "9.1.0"
+ resolved "https://registry.npmmirror.com/fs-extra/-/fs-extra-9.1.0.tgz"
+ integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==
+ dependencies:
+ at-least-node "^1.0.0"
+ graceful-fs "^4.2.0"
+ jsonfile "^6.0.1"
+ universalify "^2.0.0"
+
+fs-monkey@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/fs-monkey/-/fs-monkey-1.0.3.tgz"
+ integrity sha512-cybjIfiiE+pTWicSCLFHSrXZ6EilF30oh91FDP9S2B051prEa7QWfrVTQm10/dDpswBDXZugPa1Ogu8Yh+HV0Q==
+
+fs.realpath@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/fs.realpath/-/fs.realpath-1.0.0.tgz"
+ integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==
+
+fsevents@~2.3.2:
+ version "2.3.2"
+ resolved "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.2.tgz"
+ integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
+
+function-bind@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.1.tgz"
+ integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==
+
+function.prototype.name@^1.1.5:
+ version "1.1.5"
+ resolved "https://registry.npmmirror.com/function.prototype.name/-/function.prototype.name-1.1.5.tgz"
+ integrity sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ es-abstract "^1.19.0"
+ functions-have-names "^1.2.2"
+
+functions-have-names@^1.2.2:
+ version "1.2.3"
+ resolved "https://registry.npmmirror.com/functions-have-names/-/functions-have-names-1.2.3.tgz"
+ integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==
+
+gensync@^1.0.0-beta.1, gensync@^1.0.0-beta.2:
+ version "1.0.0-beta.2"
+ resolved "https://registry.npmmirror.com/gensync/-/gensync-1.0.0-beta.2.tgz"
+ integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==
+
+get-intrinsic@^1.0.2, get-intrinsic@^1.1.0, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3:
+ version "1.1.3"
+ resolved "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.1.3.tgz"
+ integrity sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==
+ dependencies:
+ function-bind "^1.1.1"
+ has "^1.0.3"
+ has-symbols "^1.0.3"
+
+get-own-enumerable-property-symbols@^3.0.0:
+ version "3.0.2"
+ resolved "https://registry.npmmirror.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz"
+ integrity sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==
+
+get-stream@^5.1.0:
+ version "5.2.0"
+ resolved "https://registry.npmmirror.com/get-stream/-/get-stream-5.2.0.tgz"
+ integrity sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==
+ dependencies:
+ pump "^3.0.0"
+
+get-stream@^6.0.0:
+ version "6.0.1"
+ resolved "https://registry.npmmirror.com/get-stream/-/get-stream-6.0.1.tgz"
+ integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==
+
+get-symbol-description@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz"
+ integrity sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==
+ dependencies:
+ call-bind "^1.0.2"
+ get-intrinsic "^1.1.1"
+
+github-slugger@^1.4.0:
+ version "1.5.0"
+ resolved "https://registry.npmmirror.com/github-slugger/-/github-slugger-1.5.0.tgz"
+ integrity sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==
+
+glob-parent@^5.1.2, glob-parent@~5.1.2:
+ version "5.1.2"
+ resolved "https://registry.npmmirror.com/glob-parent/-/glob-parent-5.1.2.tgz"
+ integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==
+ dependencies:
+ is-glob "^4.0.1"
+
+glob-parent@^6.0.1:
+ version "6.0.2"
+ resolved "https://registry.npmmirror.com/glob-parent/-/glob-parent-6.0.2.tgz"
+ integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==
+ dependencies:
+ is-glob "^4.0.3"
+
+glob-to-regexp@^0.4.1:
+ version "0.4.1"
+ resolved "https://registry.npmmirror.com/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz"
+ integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==
+
+glob@^7.0.0, glob@^7.1.3, glob@^7.1.6:
+ version "7.2.3"
+ resolved "https://registry.npmmirror.com/glob/-/glob-7.2.3.tgz"
+ integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==
+ dependencies:
+ fs.realpath "^1.0.0"
+ inflight "^1.0.4"
+ inherits "2"
+ minimatch "^3.1.1"
+ once "^1.3.0"
+ path-is-absolute "^1.0.0"
+
+global-dirs@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/global-dirs/-/global-dirs-3.0.0.tgz"
+ integrity sha512-v8ho2DS5RiCjftj1nD9NmnfaOzTdud7RRnVd9kFNOjqZbISlx5DQ+OrTkywgd0dIt7oFCvKetZSHoHcP3sDdiA==
+ dependencies:
+ ini "2.0.0"
+
+global-modules@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/global-modules/-/global-modules-2.0.0.tgz"
+ integrity sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==
+ dependencies:
+ global-prefix "^3.0.0"
+
+global-prefix@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/global-prefix/-/global-prefix-3.0.0.tgz"
+ integrity sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==
+ dependencies:
+ ini "^1.3.5"
+ kind-of "^6.0.2"
+ which "^1.3.1"
+
+globals@^11.1.0:
+ version "11.12.0"
+ resolved "https://registry.npmmirror.com/globals/-/globals-11.12.0.tgz"
+ integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==
+
+globby@^11.0.1, globby@^11.0.2, globby@^11.0.3, globby@^11.0.4, globby@^11.1.0:
+ version "11.1.0"
+ resolved "https://registry.npmmirror.com/globby/-/globby-11.1.0.tgz"
+ integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==
+ dependencies:
+ array-union "^2.1.0"
+ dir-glob "^3.0.1"
+ fast-glob "^3.2.9"
+ ignore "^5.2.0"
+ merge2 "^1.4.1"
+ slash "^3.0.0"
+
+got@11.8.5, got@^9.6.0:
+ version "11.8.5"
+ resolved "https://registry.yarnpkg.com/got/-/got-11.8.5.tgz#ce77d045136de56e8f024bebb82ea349bc730046"
+ integrity sha512-o0Je4NvQObAuZPHLFoRSkdG2lTgtcynqymzg2Vupdx6PorhaT5MCbIyXG6d4D94kk8ZG57QeosgdiqfJWhEhlQ==
+ dependencies:
+ "@sindresorhus/is" "^4.0.0"
+ "@szmarczak/http-timer" "^4.0.5"
+ "@types/cacheable-request" "^6.0.1"
+ "@types/responselike" "^1.0.0"
+ cacheable-lookup "^5.0.3"
+ cacheable-request "^7.0.2"
+ decompress-response "^6.0.0"
+ http2-wrapper "^1.0.0-beta.5.2"
+ lowercase-keys "^2.0.0"
+ p-cancelable "^2.0.0"
+ responselike "^2.0.0"
+
+graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.11, graceful-fs@^4.2.4, graceful-fs@^4.2.6:
+ version "4.2.11"
+ resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz"
+ integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
+
+gray-matter@^4.0.3:
+ version "4.0.3"
+ resolved "https://registry.npmmirror.com/gray-matter/-/gray-matter-4.0.3.tgz"
+ integrity sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==
+ dependencies:
+ js-yaml "^3.13.1"
+ kind-of "^6.0.2"
+ section-matter "^1.0.0"
+ strip-bom-string "^1.0.0"
+
+gzip-size@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/gzip-size/-/gzip-size-5.1.1.tgz"
+ integrity sha512-FNHi6mmoHvs1mxZAds4PpdCS6QG8B4C1krxJsMutgxl5t3+GlRTzzI3NEkifXx2pVsOvJdOGSmIgDhQ55FwdPA==
+ dependencies:
+ duplexer "^0.1.1"
+ pify "^4.0.1"
+
+gzip-size@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmmirror.com/gzip-size/-/gzip-size-6.0.0.tgz"
+ integrity sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==
+ dependencies:
+ duplexer "^0.1.2"
+
+handle-thing@^2.0.0:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/handle-thing/-/handle-thing-2.0.1.tgz"
+ integrity sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==
+
+has-bigints@^1.0.1, has-bigints@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/has-bigints/-/has-bigints-1.0.2.tgz"
+ integrity sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==
+
+has-flag@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/has-flag/-/has-flag-3.0.0.tgz"
+ integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==
+
+has-flag@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz"
+ integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==
+
+has-property-descriptors@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz"
+ integrity sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==
+ dependencies:
+ get-intrinsic "^1.1.1"
+
+has-symbols@^1.0.1, has-symbols@^1.0.2, has-symbols@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.0.3.tgz"
+ integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==
+
+has-tostringtag@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz"
+ integrity sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==
+ dependencies:
+ has-symbols "^1.0.2"
+
+has-yarn@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/has-yarn/-/has-yarn-2.1.0.tgz"
+ integrity sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==
+
+has@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/has/-/has-1.0.3.tgz"
+ integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==
+ dependencies:
+ function-bind "^1.1.1"
+
+hast-to-hyperscript@^9.0.0:
+ version "9.0.1"
+ resolved "https://registry.npmmirror.com/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz"
+ integrity sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA==
+ dependencies:
+ "@types/unist" "^2.0.3"
+ comma-separated-tokens "^1.0.0"
+ property-information "^5.3.0"
+ space-separated-tokens "^1.0.0"
+ style-to-object "^0.3.0"
+ unist-util-is "^4.0.0"
+ web-namespaces "^1.0.0"
+
+hast-util-from-parse5@^5.0.0:
+ version "5.0.3"
+ resolved "https://registry.npmmirror.com/hast-util-from-parse5/-/hast-util-from-parse5-5.0.3.tgz"
+ integrity sha512-gOc8UB99F6eWVWFtM9jUikjN7QkWxB3nY0df5Z0Zq1/Nkwl5V4hAAsl0tmwlgWl/1shlTF8DnNYLO8X6wRV9pA==
+ dependencies:
+ ccount "^1.0.3"
+ hastscript "^5.0.0"
+ property-information "^5.0.0"
+ web-namespaces "^1.1.2"
+ xtend "^4.0.1"
+
+hast-util-from-parse5@^6.0.0:
+ version "6.0.1"
+ resolved "https://registry.npmmirror.com/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz"
+ integrity sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA==
+ dependencies:
+ "@types/parse5" "^5.0.0"
+ hastscript "^6.0.0"
+ property-information "^5.0.0"
+ vfile "^4.0.0"
+ vfile-location "^3.2.0"
+ web-namespaces "^1.0.0"
+
+hast-util-is-element@1.1.0, hast-util-is-element@^1.0.0:
+ version "1.1.0"
+ resolved "https://registry.npmmirror.com/hast-util-is-element/-/hast-util-is-element-1.1.0.tgz"
+ integrity sha512-oUmNua0bFbdrD/ELDSSEadRVtWZOf3iF6Lbv81naqsIV99RnSCieTbWuWCY8BAeEfKJTKl0gRdokv+dELutHGQ==
+
+hast-util-parse-selector@^2.0.0:
+ version "2.2.5"
+ resolved "https://registry.npmmirror.com/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz"
+ integrity sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==
+
+hast-util-raw@6.0.1:
+ version "6.0.1"
+ resolved "https://registry.npmmirror.com/hast-util-raw/-/hast-util-raw-6.0.1.tgz"
+ integrity sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig==
+ dependencies:
+ "@types/hast" "^2.0.0"
+ hast-util-from-parse5 "^6.0.0"
+ hast-util-to-parse5 "^6.0.0"
+ html-void-elements "^1.0.0"
+ parse5 "^6.0.0"
+ unist-util-position "^3.0.0"
+ vfile "^4.0.0"
+ web-namespaces "^1.0.0"
+ xtend "^4.0.0"
+ zwitch "^1.0.0"
+
+hast-util-to-parse5@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmmirror.com/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz"
+ integrity sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ==
+ dependencies:
+ hast-to-hyperscript "^9.0.0"
+ property-information "^5.0.0"
+ web-namespaces "^1.0.0"
+ xtend "^4.0.0"
+ zwitch "^1.0.0"
+
+hast-util-to-text@^2.0.0:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/hast-util-to-text/-/hast-util-to-text-2.0.1.tgz"
+ integrity sha512-8nsgCARfs6VkwH2jJU9b8LNTuR4700na+0h3PqCaEk4MAnMDeu5P0tP8mjk9LLNGxIeQRLbiDbZVw6rku+pYsQ==
+ dependencies:
+ hast-util-is-element "^1.0.0"
+ repeat-string "^1.0.0"
+ unist-util-find-after "^3.0.0"
+
+hastscript@^5.0.0:
+ version "5.1.2"
+ resolved "https://registry.npmmirror.com/hastscript/-/hastscript-5.1.2.tgz"
+ integrity sha512-WlztFuK+Lrvi3EggsqOkQ52rKbxkXL3RwB6t5lwoa8QLMemoWfBuL43eDrwOamJyR7uKQKdmKYaBH1NZBiIRrQ==
+ dependencies:
+ comma-separated-tokens "^1.0.0"
+ hast-util-parse-selector "^2.0.0"
+ property-information "^5.0.0"
+ space-separated-tokens "^1.0.0"
+
+hastscript@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmmirror.com/hastscript/-/hastscript-6.0.0.tgz"
+ integrity sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==
+ dependencies:
+ "@types/hast" "^2.0.0"
+ comma-separated-tokens "^1.0.0"
+ hast-util-parse-selector "^2.0.0"
+ property-information "^5.0.0"
+ space-separated-tokens "^1.0.0"
+
+he@^1.2.0:
+ version "1.2.0"
+ resolved "https://registry.npmmirror.com/he/-/he-1.2.0.tgz"
+ integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==
+
+history@^4.9.0:
+ version "4.10.1"
+ resolved "https://registry.npmmirror.com/history/-/history-4.10.1.tgz"
+ integrity sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==
+ dependencies:
+ "@babel/runtime" "^7.1.2"
+ loose-envify "^1.2.0"
+ resolve-pathname "^3.0.0"
+ tiny-invariant "^1.0.2"
+ tiny-warning "^1.0.0"
+ value-equal "^1.0.1"
+
+hoist-non-react-statics@^3.1.0:
+ version "3.3.2"
+ resolved "https://registry.npmmirror.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz"
+ integrity sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==
+ dependencies:
+ react-is "^16.7.0"
+
+hpack.js@^2.1.6:
+ version "2.1.6"
+ resolved "https://registry.npmmirror.com/hpack.js/-/hpack.js-2.1.6.tgz"
+ integrity sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==
+ dependencies:
+ inherits "^2.0.1"
+ obuf "^1.0.0"
+ readable-stream "^2.0.1"
+ wbuf "^1.1.0"
+
+html-entities@^2.3.2:
+ version "2.3.3"
+ resolved "https://registry.npmmirror.com/html-entities/-/html-entities-2.3.3.tgz"
+ integrity sha512-DV5Ln36z34NNTDgnz0EWGBLZENelNAtkiFA4kyNOG2tDI6Mz1uSWiq1wAKdyjnJwyDiDO7Fa2SO1CTxPXL8VxA==
+
+html-minifier-terser@^6.0.2:
+ version "6.1.0"
+ resolved "https://registry.npmmirror.com/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz"
+ integrity sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==
+ dependencies:
+ camel-case "^4.1.2"
+ clean-css "^5.2.2"
+ commander "^8.3.0"
+ he "^1.2.0"
+ param-case "^3.0.4"
+ relateurl "^0.2.7"
+ terser "^5.10.0"
+
+html-tags@^3.1.0:
+ version "3.2.0"
+ resolved "https://registry.npmmirror.com/html-tags/-/html-tags-3.2.0.tgz"
+ integrity sha512-vy7ClnArOZwCnqZgvv+ddgHgJiAFXe3Ge9ML5/mBctVJoUoYPCdxVucOywjDARn6CVoh3dRSFdPHy2sX80L0Wg==
+
+html-void-elements@^1.0.0:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/html-void-elements/-/html-void-elements-1.0.5.tgz"
+ integrity sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w==
+
+html-webpack-plugin@^5.4.0:
+ version "5.5.0"
+ resolved "https://registry.npmmirror.com/html-webpack-plugin/-/html-webpack-plugin-5.5.0.tgz"
+ integrity sha512-sy88PC2cRTVxvETRgUHFrL4No3UxvcH8G1NepGhqaTT+GXN2kTamqasot0inS5hXeg1cMbFDt27zzo9p35lZVw==
+ dependencies:
+ "@types/html-minifier-terser" "^6.0.0"
+ html-minifier-terser "^6.0.2"
+ lodash "^4.17.21"
+ pretty-error "^4.0.0"
+ tapable "^2.0.0"
+
+htmlparser2@^6.1.0:
+ version "6.1.0"
+ resolved "https://registry.npmmirror.com/htmlparser2/-/htmlparser2-6.1.0.tgz"
+ integrity sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==
+ dependencies:
+ domelementtype "^2.0.1"
+ domhandler "^4.0.0"
+ domutils "^2.5.2"
+ entities "^2.0.0"
+
+htmlparser2@^8.0.1:
+ version "8.0.1"
+ resolved "https://registry.npmmirror.com/htmlparser2/-/htmlparser2-8.0.1.tgz"
+ integrity sha512-4lVbmc1diZC7GUJQtRQ5yBAeUCL1exyMwmForWkRLnwyzWBFxN633SALPMGYaWZvKe9j1pRZJpauvmxENSp/EA==
+ dependencies:
+ domelementtype "^2.3.0"
+ domhandler "^5.0.2"
+ domutils "^3.0.1"
+ entities "^4.3.0"
+
+http-cache-semantics@4.1.1, http-cache-semantics@^4.0.0:
+ version "4.1.1"
+ resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz#abe02fcb2985460bf0323be664436ec3476a6d5a"
+ integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==
+
+http-deceiver@^1.2.7:
+ version "1.2.7"
+ resolved "https://registry.npmmirror.com/http-deceiver/-/http-deceiver-1.2.7.tgz"
+ integrity sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==
+
+http-errors@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/http-errors/-/http-errors-2.0.0.tgz"
+ integrity sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==
+ dependencies:
+ depd "2.0.0"
+ inherits "2.0.4"
+ setprototypeof "1.2.0"
+ statuses "2.0.1"
+ toidentifier "1.0.1"
+
+http-errors@~1.6.2:
+ version "1.6.3"
+ resolved "https://registry.npmmirror.com/http-errors/-/http-errors-1.6.3.tgz"
+ integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==
+ dependencies:
+ depd "~1.1.2"
+ inherits "2.0.3"
+ setprototypeof "1.1.0"
+ statuses ">= 1.4.0 < 2"
+
+http-parser-js@>=0.5.1:
+ version "0.5.8"
+ resolved "https://registry.npmmirror.com/http-parser-js/-/http-parser-js-0.5.8.tgz"
+ integrity sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==
+
+http-proxy-middleware@^2.0.3:
+ version "2.0.6"
+ resolved "https://registry.npmmirror.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz"
+ integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==
+ dependencies:
+ "@types/http-proxy" "^1.17.8"
+ http-proxy "^1.18.1"
+ is-glob "^4.0.1"
+ is-plain-obj "^3.0.0"
+ micromatch "^4.0.2"
+
+http-proxy@^1.18.1:
+ version "1.18.1"
+ resolved "https://registry.npmmirror.com/http-proxy/-/http-proxy-1.18.1.tgz"
+ integrity sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==
+ dependencies:
+ eventemitter3 "^4.0.0"
+ follow-redirects "^1.0.0"
+ requires-port "^1.0.0"
+
+http2-wrapper@^1.0.0-beta.5.2:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/http2-wrapper/-/http2-wrapper-1.0.3.tgz"
+ integrity sha512-V+23sDMr12Wnz7iTcDeJr3O6AIxlnvT/bmaAAAP/Xda35C90p9599p0F1eHR/N1KILWSoWVAiOMFjBBXaXSMxg==
+ dependencies:
+ quick-lru "^5.1.1"
+ resolve-alpn "^1.0.0"
+
+human-signals@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/human-signals/-/human-signals-2.1.0.tgz"
+ integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==
+
+iconv-lite@0.4.24:
+ version "0.4.24"
+ resolved "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz"
+ integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==
+ dependencies:
+ safer-buffer ">= 2.1.2 < 3"
+
+icss-utils@^5.0.0, icss-utils@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/icss-utils/-/icss-utils-5.1.0.tgz"
+ integrity sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==
+
+ignore@^5.2.0:
+ version "5.2.0"
+ resolved "https://registry.npmmirror.com/ignore/-/ignore-5.2.0.tgz"
+ integrity sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==
+
+immediate@^3.2.3:
+ version "3.3.0"
+ resolved "https://registry.npmmirror.com/immediate/-/immediate-3.3.0.tgz"
+ integrity sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==
+
+immer@^9.0.6:
+ version "9.0.16"
+ resolved "https://registry.npmmirror.com/immer/-/immer-9.0.16.tgz"
+ integrity sha512-qenGE7CstVm1NrHQbMh8YaSzTZTFNP3zPqr3YU0S0UY441j4bJTg4A2Hh5KAhwgaiU6ZZ1Ar6y/2f4TblnMReQ==
+
+import-fresh@^3.1.0, import-fresh@^3.2.1, import-fresh@^3.2.2, import-fresh@^3.3.0:
+ version "3.3.0"
+ resolved "https://registry.npmmirror.com/import-fresh/-/import-fresh-3.3.0.tgz"
+ integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==
+ dependencies:
+ parent-module "^1.0.0"
+ resolve-from "^4.0.0"
+
+import-lazy@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/import-lazy/-/import-lazy-2.1.0.tgz"
+ integrity sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A==
+
+imurmurhash@^0.1.4:
+ version "0.1.4"
+ resolved "https://registry.npmmirror.com/imurmurhash/-/imurmurhash-0.1.4.tgz"
+ integrity sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==
+
+indent-string@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/indent-string/-/indent-string-4.0.0.tgz"
+ integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==
+
+infima@0.2.0-alpha.34:
+ version "0.2.0-alpha.34"
+ resolved "https://registry.npmmirror.com/infima/-/infima-0.2.0-alpha.34.tgz"
+ integrity sha512-Na6A2Tl56i1p9dzu7VOAT1Kmu3f5buz63Wvd+D9ZZWL6siQ47L7wkEZUICVKFgc5gERFZVZ/PoPB57Kl++h37Q==
+
+inflight@^1.0.4:
+ version "1.0.6"
+ resolved "https://registry.npmmirror.com/inflight/-/inflight-1.0.6.tgz"
+ integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==
+ dependencies:
+ once "^1.3.0"
+ wrappy "1"
+
+inherits@2, inherits@2.0.4, inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3:
+ version "2.0.4"
+ resolved "https://registry.npmmirror.com/inherits/-/inherits-2.0.4.tgz"
+ integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
+
+inherits@2.0.3:
+ version "2.0.3"
+ resolved "https://registry.npmmirror.com/inherits/-/inherits-2.0.3.tgz"
+ integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==
+
+ini@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/ini/-/ini-2.0.0.tgz"
+ integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==
+
+ini@^1.3.5, ini@~1.3.0:
+ version "1.3.8"
+ resolved "https://registry.npmmirror.com/ini/-/ini-1.3.8.tgz"
+ integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==
+
+inline-style-parser@0.1.1:
+ version "0.1.1"
+ resolved "https://registry.npmmirror.com/inline-style-parser/-/inline-style-parser-0.1.1.tgz"
+ integrity sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==
+
+internal-slot@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/internal-slot/-/internal-slot-1.0.3.tgz"
+ integrity sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==
+ dependencies:
+ get-intrinsic "^1.1.0"
+ has "^1.0.3"
+ side-channel "^1.0.4"
+
+interpret@^1.0.0:
+ version "1.4.0"
+ resolved "https://registry.npmmirror.com/interpret/-/interpret-1.4.0.tgz"
+ integrity sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==
+
+ipaddr.js@1.9.1:
+ version "1.9.1"
+ resolved "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz"
+ integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
+
+ipaddr.js@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-2.0.1.tgz"
+ integrity sha512-1qTgH9NG+IIJ4yfKs2e6Pp1bZg8wbDbKHT21HrLIeYBTRLgMYKnMTPAuI3Lcs61nfx5h1xlXnbJtH1kX5/d/ng==
+
+is-alphabetical@1.0.4, is-alphabetical@^1.0.0:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/is-alphabetical/-/is-alphabetical-1.0.4.tgz"
+ integrity sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==
+
+is-alphanumerical@^1.0.0:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz"
+ integrity sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==
+ dependencies:
+ is-alphabetical "^1.0.0"
+ is-decimal "^1.0.0"
+
+is-arrayish@^0.2.1:
+ version "0.2.1"
+ resolved "https://registry.npmmirror.com/is-arrayish/-/is-arrayish-0.2.1.tgz"
+ integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==
+
+is-bigint@^1.0.1:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/is-bigint/-/is-bigint-1.0.4.tgz"
+ integrity sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==
+ dependencies:
+ has-bigints "^1.0.1"
+
+is-binary-path@~2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/is-binary-path/-/is-binary-path-2.1.0.tgz"
+ integrity sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==
+ dependencies:
+ binary-extensions "^2.0.0"
+
+is-boolean-object@^1.1.0:
+ version "1.1.2"
+ resolved "https://registry.npmmirror.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz"
+ integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==
+ dependencies:
+ call-bind "^1.0.2"
+ has-tostringtag "^1.0.0"
+
+is-buffer@^2.0.0:
+ version "2.0.5"
+ resolved "https://registry.npmmirror.com/is-buffer/-/is-buffer-2.0.5.tgz"
+ integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==
+
+is-callable@^1.1.4, is-callable@^1.2.7:
+ version "1.2.7"
+ resolved "https://registry.npmmirror.com/is-callable/-/is-callable-1.2.7.tgz"
+ integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==
+
+is-ci@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/is-ci/-/is-ci-2.0.0.tgz"
+ integrity sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==
+ dependencies:
+ ci-info "^2.0.0"
+
+is-core-module@^2.9.0:
+ version "2.11.0"
+ resolved "https://registry.npmmirror.com/is-core-module/-/is-core-module-2.11.0.tgz"
+ integrity sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==
+ dependencies:
+ has "^1.0.3"
+
+is-date-object@^1.0.1:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/is-date-object/-/is-date-object-1.0.5.tgz"
+ integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==
+ dependencies:
+ has-tostringtag "^1.0.0"
+
+is-decimal@^1.0.0:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/is-decimal/-/is-decimal-1.0.4.tgz"
+ integrity sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==
+
+is-docker@^2.0.0, is-docker@^2.1.1:
+ version "2.2.1"
+ resolved "https://registry.npmmirror.com/is-docker/-/is-docker-2.2.1.tgz"
+ integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==
+
+is-extendable@^0.1.0:
+ version "0.1.1"
+ resolved "https://registry.npmmirror.com/is-extendable/-/is-extendable-0.1.1.tgz"
+ integrity sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==
+
+is-extglob@^2.1.1:
+ version "2.1.1"
+ resolved "https://registry.npmmirror.com/is-extglob/-/is-extglob-2.1.1.tgz"
+ integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==
+
+is-fullwidth-code-point@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz"
+ integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==
+
+is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1:
+ version "4.0.3"
+ resolved "https://registry.npmmirror.com/is-glob/-/is-glob-4.0.3.tgz"
+ integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==
+ dependencies:
+ is-extglob "^2.1.1"
+
+is-hexadecimal@^1.0.0:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz"
+ integrity sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==
+
+is-installed-globally@^0.4.0:
+ version "0.4.0"
+ resolved "https://registry.npmmirror.com/is-installed-globally/-/is-installed-globally-0.4.0.tgz"
+ integrity sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==
+ dependencies:
+ global-dirs "^3.0.0"
+ is-path-inside "^3.0.2"
+
+is-negative-zero@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.npmmirror.com/is-negative-zero/-/is-negative-zero-2.0.2.tgz"
+ integrity sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==
+
+is-npm@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.npmmirror.com/is-npm/-/is-npm-5.0.0.tgz"
+ integrity sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA==
+
+is-number-object@^1.0.4:
+ version "1.0.7"
+ resolved "https://registry.npmmirror.com/is-number-object/-/is-number-object-1.0.7.tgz"
+ integrity sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==
+ dependencies:
+ has-tostringtag "^1.0.0"
+
+is-number@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.npmmirror.com/is-number/-/is-number-7.0.0.tgz"
+ integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==
+
+is-obj@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/is-obj/-/is-obj-1.0.1.tgz"
+ integrity sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==
+
+is-obj@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/is-obj/-/is-obj-2.0.0.tgz"
+ integrity sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==
+
+is-path-cwd@^2.2.0:
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/is-path-cwd/-/is-path-cwd-2.2.0.tgz"
+ integrity sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==
+
+is-path-inside@^3.0.2:
+ version "3.0.3"
+ resolved "https://registry.npmmirror.com/is-path-inside/-/is-path-inside-3.0.3.tgz"
+ integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==
+
+is-plain-obj@^2.0.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz"
+ integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==
+
+is-plain-obj@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/is-plain-obj/-/is-plain-obj-3.0.0.tgz"
+ integrity sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==
+
+is-plain-object@^2.0.4:
+ version "2.0.4"
+ resolved "https://registry.npmmirror.com/is-plain-object/-/is-plain-object-2.0.4.tgz"
+ integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==
+ dependencies:
+ isobject "^3.0.1"
+
+is-regex@^1.1.4:
+ version "1.1.4"
+ resolved "https://registry.npmmirror.com/is-regex/-/is-regex-1.1.4.tgz"
+ integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==
+ dependencies:
+ call-bind "^1.0.2"
+ has-tostringtag "^1.0.0"
+
+is-regexp@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/is-regexp/-/is-regexp-1.0.0.tgz"
+ integrity sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==
+
+is-root@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/is-root/-/is-root-2.1.0.tgz"
+ integrity sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==
+
+is-shared-array-buffer@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz"
+ integrity sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==
+ dependencies:
+ call-bind "^1.0.2"
+
+is-stream@^2.0.0:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz"
+ integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==
+
+is-string@^1.0.5, is-string@^1.0.7:
+ version "1.0.7"
+ resolved "https://registry.npmmirror.com/is-string/-/is-string-1.0.7.tgz"
+ integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==
+ dependencies:
+ has-tostringtag "^1.0.0"
+
+is-symbol@^1.0.2, is-symbol@^1.0.3:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/is-symbol/-/is-symbol-1.0.4.tgz"
+ integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==
+ dependencies:
+ has-symbols "^1.0.2"
+
+is-typedarray@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/is-typedarray/-/is-typedarray-1.0.0.tgz"
+ integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==
+
+is-weakref@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/is-weakref/-/is-weakref-1.0.2.tgz"
+ integrity sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==
+ dependencies:
+ call-bind "^1.0.2"
+
+is-whitespace-character@^1.0.0:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz"
+ integrity sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w==
+
+is-word-character@^1.0.0:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/is-word-character/-/is-word-character-1.0.4.tgz"
+ integrity sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA==
+
+is-wsl@^2.1.1, is-wsl@^2.2.0:
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/is-wsl/-/is-wsl-2.2.0.tgz"
+ integrity sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==
+ dependencies:
+ is-docker "^2.0.0"
+
+is-yarn-global@^0.3.0:
+ version "0.3.0"
+ resolved "https://registry.npmmirror.com/is-yarn-global/-/is-yarn-global-0.3.0.tgz"
+ integrity sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==
+
+isarray@0.0.1:
+ version "0.0.1"
+ resolved "https://registry.npmmirror.com/isarray/-/isarray-0.0.1.tgz"
+ integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==
+
+isarray@~1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/isarray/-/isarray-1.0.0.tgz"
+ integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==
+
+isexe@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/isexe/-/isexe-2.0.0.tgz"
+ integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==
+
+isobject@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/isobject/-/isobject-3.0.1.tgz"
+ integrity sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==
+
+jest-worker@^27.0.2, jest-worker@^27.4.5:
+ version "27.5.1"
+ resolved "https://registry.npmmirror.com/jest-worker/-/jest-worker-27.5.1.tgz"
+ integrity sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==
+ dependencies:
+ "@types/node" "*"
+ merge-stream "^2.0.0"
+ supports-color "^8.0.0"
+
+joi@^17.4.2, joi@^17.6.0:
+ version "17.7.0"
+ resolved "https://registry.npmmirror.com/joi/-/joi-17.7.0.tgz"
+ integrity sha512-1/ugc8djfn93rTE3WRKdCzGGt/EtiYKxITMO4Wiv6q5JL1gl9ePt4kBsl1S499nbosspfctIQTpYIhSmHA3WAg==
+ dependencies:
+ "@hapi/hoek" "^9.0.0"
+ "@hapi/topo" "^5.0.0"
+ "@sideway/address" "^4.1.3"
+ "@sideway/formula" "^3.0.0"
+ "@sideway/pinpoint" "^2.0.0"
+
+"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz"
+ integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==
+
+js-yaml@^3.13.1:
+ version "3.14.1"
+ resolved "https://registry.npmmirror.com/js-yaml/-/js-yaml-3.14.1.tgz"
+ integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==
+ dependencies:
+ argparse "^1.0.7"
+ esprima "^4.0.0"
+
+js-yaml@^4.0.0, js-yaml@^4.1.0:
+ version "4.1.0"
+ resolved "https://registry.npmmirror.com/js-yaml/-/js-yaml-4.1.0.tgz"
+ integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==
+ dependencies:
+ argparse "^2.0.1"
+
+jsesc@^2.5.1:
+ version "2.5.2"
+ resolved "https://registry.npmmirror.com/jsesc/-/jsesc-2.5.2.tgz"
+ integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==
+
+jsesc@~0.5.0:
+ version "0.5.0"
+ resolved "https://registry.npmmirror.com/jsesc/-/jsesc-0.5.0.tgz"
+ integrity sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==
+
+json-buffer@3.0.1:
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/json-buffer/-/json-buffer-3.0.1.tgz"
+ integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==
+
+json-parse-even-better-errors@^2.3.0, json-parse-even-better-errors@^2.3.1:
+ version "2.3.1"
+ resolved "https://registry.npmmirror.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz"
+ integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==
+
+json-schema-traverse@^0.4.1:
+ version "0.4.1"
+ resolved "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz"
+ integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==
+
+json-schema-traverse@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz"
+ integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==
+
+json5@^2.1.2, json5@^2.2.1:
+ version "2.2.3"
+ resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz"
+ integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==
+
+jsonfile@^6.0.1:
+ version "6.1.0"
+ resolved "https://registry.npmmirror.com/jsonfile/-/jsonfile-6.1.0.tgz"
+ integrity sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==
+ dependencies:
+ universalify "^2.0.0"
+ optionalDependencies:
+ graceful-fs "^4.1.6"
+
+katex@^0.12.0:
+ version "0.12.0"
+ resolved "https://registry.npmmirror.com/katex/-/katex-0.12.0.tgz"
+ integrity sha512-y+8btoc/CK70XqcHqjxiGWBOeIL8upbS0peTPXTvgrh21n1RiWWcIpSWM+4uXq+IAgNh9YYQWdc7LVDPDAEEAg==
+ dependencies:
+ commander "^2.19.0"
+
+keyv@*, keyv@^4.0.0:
+ version "4.5.0"
+ resolved "https://registry.npmmirror.com/keyv/-/keyv-4.5.0.tgz"
+ integrity sha512-2YvuMsA+jnFGtBareKqgANOEKe1mk3HKiXu2fRmAfyxG0MJAywNhi5ttWA3PMjl4NmpyjZNbFifR2vNjW1znfA==
+ dependencies:
+ json-buffer "3.0.1"
+
+kind-of@^6.0.0, kind-of@^6.0.2:
+ version "6.0.3"
+ resolved "https://registry.npmmirror.com/kind-of/-/kind-of-6.0.3.tgz"
+ integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==
+
+klaw-sync@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmmirror.com/klaw-sync/-/klaw-sync-6.0.0.tgz"
+ integrity sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==
+ dependencies:
+ graceful-fs "^4.1.11"
+
+kleur@^3.0.3:
+ version "3.0.3"
+ resolved "https://registry.npmmirror.com/kleur/-/kleur-3.0.3.tgz"
+ integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==
+
+klona@^2.0.5:
+ version "2.0.5"
+ resolved "https://registry.npmmirror.com/klona/-/klona-2.0.5.tgz"
+ integrity sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ==
+
+latest-version@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/latest-version/-/latest-version-5.1.0.tgz"
+ integrity sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==
+ dependencies:
+ package-json "^6.3.0"
+
+leven@^3.1.0:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/leven/-/leven-3.1.0.tgz"
+ integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==
+
+lilconfig@^2.0.3:
+ version "2.0.6"
+ resolved "https://registry.npmmirror.com/lilconfig/-/lilconfig-2.0.6.tgz"
+ integrity sha512-9JROoBW7pobfsx+Sq2JsASvCo6Pfo6WWoUW79HuB1BCoBXD4PLWJPqDF6fNj67pqBYTbAHkE57M1kS/+L1neOg==
+
+lines-and-columns@^1.1.6:
+ version "1.2.4"
+ resolved "https://registry.npmmirror.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz"
+ integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==
+
+loader-runner@^4.2.0:
+ version "4.3.0"
+ resolved "https://registry.npmmirror.com/loader-runner/-/loader-runner-4.3.0.tgz"
+ integrity sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==
+
+loader-utils@2.0.4, loader-utils@^2.0.0:
+ version "2.0.4"
+ resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-2.0.4.tgz#8b5cb38b5c34a9a018ee1fc0e6a066d1dfcc528c"
+ integrity sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==
+ dependencies:
+ big.js "^5.2.2"
+ emojis-list "^3.0.0"
+ json5 "^2.1.2"
+
+locate-path@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/locate-path/-/locate-path-3.0.0.tgz"
+ integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==
+ dependencies:
+ p-locate "^3.0.0"
+ path-exists "^3.0.0"
+
+locate-path@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.npmmirror.com/locate-path/-/locate-path-5.0.0.tgz"
+ integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==
+ dependencies:
+ p-locate "^4.1.0"
+
+locate-path@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmmirror.com/locate-path/-/locate-path-6.0.0.tgz"
+ integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==
+ dependencies:
+ p-locate "^5.0.0"
+
+lodash.curry@^4.0.1:
+ version "4.1.1"
+ resolved "https://registry.npmmirror.com/lodash.curry/-/lodash.curry-4.1.1.tgz"
+ integrity sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA==
+
+lodash.debounce@^4.0.8:
+ version "4.0.8"
+ resolved "https://registry.npmmirror.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz"
+ integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==
+
+lodash.flow@^3.3.0:
+ version "3.5.0"
+ resolved "https://registry.npmmirror.com/lodash.flow/-/lodash.flow-3.5.0.tgz"
+ integrity sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw==
+
+lodash.memoize@^4.1.2:
+ version "4.1.2"
+ resolved "https://registry.npmmirror.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz"
+ integrity sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==
+
+lodash.uniq@4.5.0, lodash.uniq@^4.5.0:
+ version "4.5.0"
+ resolved "https://registry.npmmirror.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz"
+ integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==
+
+lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21:
+ version "4.17.21"
+ resolved "https://registry.npmmirror.com/lodash/-/lodash-4.17.21.tgz"
+ integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==
+
+loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1, loose-envify@^1.4.0:
+ version "1.4.0"
+ resolved "https://registry.npmmirror.com/loose-envify/-/loose-envify-1.4.0.tgz"
+ integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==
+ dependencies:
+ js-tokens "^3.0.0 || ^4.0.0"
+
+lower-case@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.npmmirror.com/lower-case/-/lower-case-2.0.2.tgz"
+ integrity sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==
+ dependencies:
+ tslib "^2.0.3"
+
+lowercase-keys@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz"
+ integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==
+
+lru-cache@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmmirror.com/lru-cache/-/lru-cache-6.0.0.tgz"
+ integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==
+ dependencies:
+ yallist "^4.0.0"
+
+lunr-languages@^1.4.0:
+ version "1.10.0"
+ resolved "https://registry.npmmirror.com/lunr-languages/-/lunr-languages-1.10.0.tgz"
+ integrity sha512-BBjKKcwrieJlzwwc9M5H/MRXGJ2qyOSDx/NXYiwkuKjiLOOoouh0WsDzeqcLoUWcX31y7i8sb8IgsZKObdUCkw==
+
+lunr@^2.3.9:
+ version "2.3.9"
+ resolved "https://registry.npmmirror.com/lunr/-/lunr-2.3.9.tgz"
+ integrity sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==
+
+magic-string@^0.25.3:
+ version "0.25.9"
+ resolved "https://registry.npmmirror.com/magic-string/-/magic-string-0.25.9.tgz"
+ integrity sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==
+ dependencies:
+ sourcemap-codec "^1.4.8"
+
+make-dir@^3.0.0, make-dir@^3.0.2, make-dir@^3.1.0:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/make-dir/-/make-dir-3.1.0.tgz"
+ integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==
+ dependencies:
+ semver "^6.0.0"
+
+mark.js@^8.11.1:
+ version "8.11.1"
+ resolved "https://registry.npmmirror.com/mark.js/-/mark.js-8.11.1.tgz"
+ integrity sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==
+
+markdown-escapes@^1.0.0:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/markdown-escapes/-/markdown-escapes-1.0.4.tgz"
+ integrity sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg==
+
+mdast-squeeze-paragraphs@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz"
+ integrity sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ==
+ dependencies:
+ unist-util-remove "^2.0.0"
+
+mdast-util-definitions@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz"
+ integrity sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ==
+ dependencies:
+ unist-util-visit "^2.0.0"
+
+mdast-util-to-hast@10.0.1:
+ version "10.0.1"
+ resolved "https://registry.npmmirror.com/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz"
+ integrity sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA==
+ dependencies:
+ "@types/mdast" "^3.0.0"
+ "@types/unist" "^2.0.0"
+ mdast-util-definitions "^4.0.0"
+ mdurl "^1.0.0"
+ unist-builder "^2.0.0"
+ unist-util-generated "^1.0.0"
+ unist-util-position "^3.0.0"
+ unist-util-visit "^2.0.0"
+
+mdast-util-to-string@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz"
+ integrity sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==
+
+mdn-data@2.0.14:
+ version "2.0.14"
+ resolved "https://registry.npmmirror.com/mdn-data/-/mdn-data-2.0.14.tgz"
+ integrity sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==
+
+mdn-data@2.0.4:
+ version "2.0.4"
+ resolved "https://registry.npmmirror.com/mdn-data/-/mdn-data-2.0.4.tgz"
+ integrity sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==
+
+mdurl@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/mdurl/-/mdurl-1.0.1.tgz"
+ integrity sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==
+
+media-typer@0.3.0:
+ version "0.3.0"
+ resolved "https://registry.npmmirror.com/media-typer/-/media-typer-0.3.0.tgz"
+ integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==
+
+memfs@^3.1.2, memfs@^3.4.3:
+ version "3.4.10"
+ resolved "https://registry.npmmirror.com/memfs/-/memfs-3.4.10.tgz"
+ integrity sha512-0bCUP+L79P4am30yP1msPzApwuMQG23TjwlwdHeEV5MxioDR1a0AgB0T9FfggU52eJuDCq8WVwb5ekznFyWiTQ==
+ dependencies:
+ fs-monkey "^1.0.3"
+
+merge-descriptors@1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz"
+ integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==
+
+merge-stream@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/merge-stream/-/merge-stream-2.0.0.tgz"
+ integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==
+
+merge2@^1.3.0, merge2@^1.4.1:
+ version "1.4.1"
+ resolved "https://registry.npmmirror.com/merge2/-/merge2-1.4.1.tgz"
+ integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==
+
+methods@~1.1.2:
+ version "1.1.2"
+ resolved "https://registry.npmmirror.com/methods/-/methods-1.1.2.tgz"
+ integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==
+
+micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5:
+ version "4.0.5"
+ resolved "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.5.tgz"
+ integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==
+ dependencies:
+ braces "^3.0.2"
+ picomatch "^2.3.1"
+
+mime-db@1.52.0, "mime-db@>= 1.43.0 < 2":
+ version "1.52.0"
+ resolved "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz"
+ integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
+
+mime-db@~1.33.0:
+ version "1.33.0"
+ resolved "https://registry.npmmirror.com/mime-db/-/mime-db-1.33.0.tgz"
+ integrity sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==
+
+mime-types@2.1.18, mime-types@~2.1.17:
+ version "2.1.18"
+ resolved "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.18.tgz"
+ integrity sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==
+ dependencies:
+ mime-db "~1.33.0"
+
+mime-types@^2.1.27, mime-types@^2.1.31, mime-types@~2.1.24, mime-types@~2.1.34:
+ version "2.1.35"
+ resolved "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz"
+ integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
+ dependencies:
+ mime-db "1.52.0"
+
+mime@1.6.0:
+ version "1.6.0"
+ resolved "https://registry.npmmirror.com/mime/-/mime-1.6.0.tgz"
+ integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==
+
+mimic-fn@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/mimic-fn/-/mimic-fn-2.1.0.tgz"
+ integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==
+
+mimic-response@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/mimic-response/-/mimic-response-1.0.1.tgz"
+ integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==
+
+mimic-response@^3.1.0:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/mimic-response/-/mimic-response-3.1.0.tgz"
+ integrity sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==
+
+mini-css-extract-plugin@^1.6.0:
+ version "1.6.2"
+ resolved "https://registry.npmmirror.com/mini-css-extract-plugin/-/mini-css-extract-plugin-1.6.2.tgz"
+ integrity sha512-WhDvO3SjGm40oV5y26GjMJYjd2UMqrLAGKy5YS2/3QKJy2F7jgynuHTir/tgUUOiNQu5saXHdc8reo7YuhhT4Q==
+ dependencies:
+ loader-utils "^2.0.0"
+ schema-utils "^3.0.0"
+ webpack-sources "^1.1.0"
+
+minimalistic-assert@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz"
+ integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==
+
+minimatch@3.0.5, minimatch@3.1.2, minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1:
+ version "3.0.5"
+ resolved "https://registry.npmmirror.com/minimatch/-/minimatch-3.0.5.tgz"
+ integrity sha512-tUpxzX0VAzJHjLu0xUfFv1gwVp9ba3IOuRAVH2EGuRW8a5emA2FlACLqiT/lDVtS1W+TGNwqz3sWaNyLgDJWuw==
+ dependencies:
+ brace-expansion "^1.1.7"
+
+minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6:
+ version "1.2.7"
+ resolved "https://registry.npmmirror.com/minimist/-/minimist-1.2.7.tgz"
+ integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==
+
+mkdirp@~0.5.1:
+ version "0.5.6"
+ resolved "https://registry.npmmirror.com/mkdirp/-/mkdirp-0.5.6.tgz"
+ integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==
+ dependencies:
+ minimist "^1.2.6"
+
+mrmime@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/mrmime/-/mrmime-1.0.1.tgz"
+ integrity sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==
+
+ms@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz"
+ integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==
+
+ms@2.1.2:
+ version "2.1.2"
+ resolved "https://registry.npmmirror.com/ms/-/ms-2.1.2.tgz"
+ integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
+
+ms@2.1.3:
+ version "2.1.3"
+ resolved "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz"
+ integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==
+
+multicast-dns@^7.2.5:
+ version "7.2.5"
+ resolved "https://registry.npmmirror.com/multicast-dns/-/multicast-dns-7.2.5.tgz"
+ integrity sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==
+ dependencies:
+ dns-packet "^5.2.2"
+ thunky "^1.0.2"
+
+nanoid@^3.3.6:
+ version "3.3.6"
+ resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz"
+ integrity sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==
+
+negotiator@0.6.3:
+ version "0.6.3"
+ resolved "https://registry.npmmirror.com/negotiator/-/negotiator-0.6.3.tgz"
+ integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==
+
+neo-async@^2.6.2:
+ version "2.6.2"
+ resolved "https://registry.npmmirror.com/neo-async/-/neo-async-2.6.2.tgz"
+ integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==
+
+no-case@^3.0.4:
+ version "3.0.4"
+ resolved "https://registry.npmmirror.com/no-case/-/no-case-3.0.4.tgz"
+ integrity sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==
+ dependencies:
+ lower-case "^2.0.2"
+ tslib "^2.0.3"
+
+node-emoji@^1.10.0:
+ version "1.11.0"
+ resolved "https://registry.npmmirror.com/node-emoji/-/node-emoji-1.11.0.tgz"
+ integrity sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==
+ dependencies:
+ lodash "^4.17.21"
+
+node-fetch@2.6.7:
+ version "2.6.7"
+ resolved "https://registry.npmmirror.com/node-fetch/-/node-fetch-2.6.7.tgz"
+ integrity sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==
+ dependencies:
+ whatwg-url "^5.0.0"
+
+node-forge@1.3.0, node-forge@^1:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.3.0.tgz#37a874ea723855f37db091e6c186e5b67a01d4b2"
+ integrity sha512-08ARB91bUi6zNKzVmaj3QO7cr397uiDT2nJ63cHjyNtCTWIgvS47j3eT0WfzUwS9+6Z5YshRaoasFkXCKrIYbA==
+
+node-releases@^2.0.18:
+ version "2.0.18"
+ resolved "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz"
+ integrity sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==
+
+normalize-path@^3.0.0, normalize-path@~3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/normalize-path/-/normalize-path-3.0.0.tgz"
+ integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==
+
+normalize-range@^0.1.2:
+ version "0.1.2"
+ resolved "https://registry.npmmirror.com/normalize-range/-/normalize-range-0.1.2.tgz"
+ integrity sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==
+
+normalize-url@^6.0.1:
+ version "6.1.0"
+ resolved "https://registry.npmmirror.com/normalize-url/-/normalize-url-6.1.0.tgz"
+ integrity sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==
+
+npm-run-path@^4.0.1:
+ version "4.0.1"
+ resolved "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-4.0.1.tgz"
+ integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==
+ dependencies:
+ path-key "^3.0.0"
+
+nprogress@^0.2.0:
+ version "0.2.0"
+ resolved "https://registry.npmmirror.com/nprogress/-/nprogress-0.2.0.tgz"
+ integrity sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==
+
+nth-check@2.0.1, nth-check@^1.0.2, nth-check@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-2.0.1.tgz#2efe162f5c3da06a28959fbd3db75dbeea9f0fc2"
+ integrity sha512-it1vE95zF6dTT9lBsYbxvqh0Soy4SPowchj0UBGj/V6cTPnXXtQOPUbhZ6CmGzAD/rW22LQK6E96pcdJXk4A4w==
+ dependencies:
+ boolbase "^1.0.0"
+
+object-assign@^4.1.0, object-assign@^4.1.1:
+ version "4.1.1"
+ resolved "https://registry.npmmirror.com/object-assign/-/object-assign-4.1.1.tgz"
+ integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==
+
+object-inspect@^1.12.2, object-inspect@^1.9.0:
+ version "1.12.2"
+ resolved "https://registry.npmmirror.com/object-inspect/-/object-inspect-1.12.2.tgz"
+ integrity sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==
+
+object-keys@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmmirror.com/object-keys/-/object-keys-1.1.1.tgz"
+ integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==
+
+object.assign@^4.1.0, object.assign@^4.1.4:
+ version "4.1.4"
+ resolved "https://registry.npmmirror.com/object.assign/-/object.assign-4.1.4.tgz"
+ integrity sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ has-symbols "^1.0.3"
+ object-keys "^1.1.1"
+
+object.getownpropertydescriptors@^2.1.0:
+ version "2.1.4"
+ resolved "https://registry.npmmirror.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.4.tgz"
+ integrity sha512-sccv3L/pMModT6dJAYF3fzGMVcb38ysQ0tEE6ixv2yXJDtEIPph268OlAdJj5/qZMZDq2g/jqvwppt36uS/uQQ==
+ dependencies:
+ array.prototype.reduce "^1.0.4"
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.20.1"
+
+object.values@^1.1.0:
+ version "1.1.5"
+ resolved "https://registry.npmmirror.com/object.values/-/object.values-1.1.5.tgz"
+ integrity sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ es-abstract "^1.19.1"
+
+obuf@^1.0.0, obuf@^1.1.2:
+ version "1.1.2"
+ resolved "https://registry.npmmirror.com/obuf/-/obuf-1.1.2.tgz"
+ integrity sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==
+
+on-finished@2.4.1:
+ version "2.4.1"
+ resolved "https://registry.npmmirror.com/on-finished/-/on-finished-2.4.1.tgz"
+ integrity sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==
+ dependencies:
+ ee-first "1.1.1"
+
+on-headers@~1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/on-headers/-/on-headers-1.0.2.tgz"
+ integrity sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==
+
+once@^1.3.0, once@^1.3.1, once@^1.4.0:
+ version "1.4.0"
+ resolved "https://registry.npmmirror.com/once/-/once-1.4.0.tgz"
+ integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==
+ dependencies:
+ wrappy "1"
+
+onetime@^5.1.2:
+ version "5.1.2"
+ resolved "https://registry.npmmirror.com/onetime/-/onetime-5.1.2.tgz"
+ integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==
+ dependencies:
+ mimic-fn "^2.1.0"
+
+open@^7.0.2:
+ version "7.4.2"
+ resolved "https://registry.npmmirror.com/open/-/open-7.4.2.tgz"
+ integrity sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==
+ dependencies:
+ is-docker "^2.0.0"
+ is-wsl "^2.1.1"
+
+open@^8.0.9:
+ version "8.4.0"
+ resolved "https://registry.npmmirror.com/open/-/open-8.4.0.tgz"
+ integrity sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q==
+ dependencies:
+ define-lazy-prop "^2.0.0"
+ is-docker "^2.1.1"
+ is-wsl "^2.2.0"
+
+opener@^1.5.2:
+ version "1.5.2"
+ resolved "https://registry.npmmirror.com/opener/-/opener-1.5.2.tgz"
+ integrity sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==
+
+p-cancelable@^2.0.0:
+ version "2.1.1"
+ resolved "https://registry.npmmirror.com/p-cancelable/-/p-cancelable-2.1.1.tgz"
+ integrity sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==
+
+p-limit@^2.0.0, p-limit@^2.2.0:
+ version "2.3.0"
+ resolved "https://registry.npmmirror.com/p-limit/-/p-limit-2.3.0.tgz"
+ integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==
+ dependencies:
+ p-try "^2.0.0"
+
+p-limit@^3.0.2:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/p-limit/-/p-limit-3.1.0.tgz"
+ integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==
+ dependencies:
+ yocto-queue "^0.1.0"
+
+p-locate@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/p-locate/-/p-locate-3.0.0.tgz"
+ integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==
+ dependencies:
+ p-limit "^2.0.0"
+
+p-locate@^4.1.0:
+ version "4.1.0"
+ resolved "https://registry.npmmirror.com/p-locate/-/p-locate-4.1.0.tgz"
+ integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==
+ dependencies:
+ p-limit "^2.2.0"
+
+p-locate@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.npmmirror.com/p-locate/-/p-locate-5.0.0.tgz"
+ integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==
+ dependencies:
+ p-limit "^3.0.2"
+
+p-map@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/p-map/-/p-map-4.0.0.tgz"
+ integrity sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==
+ dependencies:
+ aggregate-error "^3.0.0"
+
+p-retry@^4.5.0:
+ version "4.6.2"
+ resolved "https://registry.npmmirror.com/p-retry/-/p-retry-4.6.2.tgz"
+ integrity sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==
+ dependencies:
+ "@types/retry" "0.12.0"
+ retry "^0.13.1"
+
+p-try@^2.0.0:
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/p-try/-/p-try-2.2.0.tgz"
+ integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
+
+package-json@^6.3.0:
+ version "6.5.0"
+ resolved "https://registry.npmmirror.com/package-json/-/package-json-6.5.0.tgz"
+ integrity sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==
+ dependencies:
+ got "^9.6.0"
+ registry-auth-token "^4.0.0"
+ registry-url "^5.0.0"
+ semver "^6.2.0"
+
+param-case@^3.0.4:
+ version "3.0.4"
+ resolved "https://registry.npmmirror.com/param-case/-/param-case-3.0.4.tgz"
+ integrity sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==
+ dependencies:
+ dot-case "^3.0.4"
+ tslib "^2.0.3"
+
+parent-module@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/parent-module/-/parent-module-1.0.1.tgz"
+ integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==
+ dependencies:
+ callsites "^3.0.0"
+
+parse-entities@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/parse-entities/-/parse-entities-2.0.0.tgz"
+ integrity sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==
+ dependencies:
+ character-entities "^1.0.0"
+ character-entities-legacy "^1.0.0"
+ character-reference-invalid "^1.0.0"
+ is-alphanumerical "^1.0.0"
+ is-decimal "^1.0.0"
+ is-hexadecimal "^1.0.0"
+
+parse-json@^5.0.0:
+ version "5.2.0"
+ resolved "https://registry.npmmirror.com/parse-json/-/parse-json-5.2.0.tgz"
+ integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==
+ dependencies:
+ "@babel/code-frame" "^7.0.0"
+ error-ex "^1.3.1"
+ json-parse-even-better-errors "^2.3.0"
+ lines-and-columns "^1.1.6"
+
+parse-numeric-range@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.npmmirror.com/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz"
+ integrity sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==
+
+parse5-htmlparser2-tree-adapter@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.npmmirror.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz"
+ integrity sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==
+ dependencies:
+ domhandler "^5.0.2"
+ parse5 "^7.0.0"
+
+parse5@^5.0.0:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/parse5/-/parse5-5.1.1.tgz"
+ integrity sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==
+
+parse5@^6.0.0:
+ version "6.0.1"
+ resolved "https://registry.npmmirror.com/parse5/-/parse5-6.0.1.tgz"
+ integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==
+
+parse5@^7.0.0:
+ version "7.1.1"
+ resolved "https://registry.npmmirror.com/parse5/-/parse5-7.1.1.tgz"
+ integrity sha512-kwpuwzB+px5WUg9pyK0IcK/shltJN5/OVhQagxhCQNtT9Y9QRZqNY2e1cmbu/paRh5LMnz/oVTVLBpjFmMZhSg==
+ dependencies:
+ entities "^4.4.0"
+
+parseurl@~1.3.2, parseurl@~1.3.3:
+ version "1.3.3"
+ resolved "https://registry.npmmirror.com/parseurl/-/parseurl-1.3.3.tgz"
+ integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==
+
+pascal-case@^3.1.2:
+ version "3.1.2"
+ resolved "https://registry.npmmirror.com/pascal-case/-/pascal-case-3.1.2.tgz"
+ integrity sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==
+ dependencies:
+ no-case "^3.0.4"
+ tslib "^2.0.3"
+
+path-exists@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/path-exists/-/path-exists-3.0.0.tgz"
+ integrity sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==
+
+path-exists@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/path-exists/-/path-exists-4.0.0.tgz"
+ integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==
+
+path-is-absolute@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz"
+ integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==
+
+path-is-inside@1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/path-is-inside/-/path-is-inside-1.0.2.tgz"
+ integrity sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==
+
+path-key@^3.0.0, path-key@^3.1.0:
+ version "3.1.1"
+ resolved "https://registry.npmmirror.com/path-key/-/path-key-3.1.1.tgz"
+ integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==
+
+path-parse@^1.0.7:
+ version "1.0.7"
+ resolved "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz"
+ integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
+
+path-to-regexp@0.1.7:
+ version "0.1.7"
+ resolved "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz"
+ integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==
+
+path-to-regexp@2.2.1:
+ version "2.2.1"
+ resolved "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-2.2.1.tgz"
+ integrity sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==
+
+path-to-regexp@^1.7.0:
+ version "1.8.0"
+ resolved "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-1.8.0.tgz"
+ integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==
+ dependencies:
+ isarray "0.0.1"
+
+path-type@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/path-type/-/path-type-4.0.0.tgz"
+ integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==
+
+picocolors@^1.0.0, picocolors@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz"
+ integrity sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==
+
+picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1:
+ version "2.3.1"
+ resolved "https://registry.npmmirror.com/picomatch/-/picomatch-2.3.1.tgz"
+ integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==
+
+pify@^4.0.1:
+ version "4.0.1"
+ resolved "https://registry.npmmirror.com/pify/-/pify-4.0.1.tgz"
+ integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==
+
+pkg-dir@^4.1.0:
+ version "4.2.0"
+ resolved "https://registry.npmmirror.com/pkg-dir/-/pkg-dir-4.2.0.tgz"
+ integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==
+ dependencies:
+ find-up "^4.0.0"
+
+pkg-up@^3.1.0:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/pkg-up/-/pkg-up-3.1.0.tgz"
+ integrity sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==
+ dependencies:
+ find-up "^3.0.0"
+
+postcss-calc@^8.2.3:
+ version "8.2.4"
+ resolved "https://registry.npmmirror.com/postcss-calc/-/postcss-calc-8.2.4.tgz"
+ integrity sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==
+ dependencies:
+ postcss-selector-parser "^6.0.9"
+ postcss-value-parser "^4.2.0"
+
+postcss-colormin@^5.3.0:
+ version "5.3.0"
+ resolved "https://registry.npmmirror.com/postcss-colormin/-/postcss-colormin-5.3.0.tgz"
+ integrity sha512-WdDO4gOFG2Z8n4P8TWBpshnL3JpmNmJwdnfP2gbk2qBA8PWwOYcmjmI/t3CmMeL72a7Hkd+x/Mg9O2/0rD54Pg==
+ dependencies:
+ browserslist "^4.16.6"
+ caniuse-api "^3.0.0"
+ colord "^2.9.1"
+ postcss-value-parser "^4.2.0"
+
+postcss-convert-values@^5.1.3:
+ version "5.1.3"
+ resolved "https://registry.npmmirror.com/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz"
+ integrity sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==
+ dependencies:
+ browserslist "^4.21.4"
+ postcss-value-parser "^4.2.0"
+
+postcss-discard-comments@^5.1.2:
+ version "5.1.2"
+ resolved "https://registry.npmmirror.com/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz"
+ integrity sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==
+
+postcss-discard-duplicates@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz"
+ integrity sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==
+
+postcss-discard-empty@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz"
+ integrity sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==
+
+postcss-discard-overridden@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz"
+ integrity sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==
+
+postcss-discard-unused@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz"
+ integrity sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==
+ dependencies:
+ postcss-selector-parser "^6.0.5"
+
+postcss-loader@^6.1.1:
+ version "6.2.1"
+ resolved "https://registry.npmmirror.com/postcss-loader/-/postcss-loader-6.2.1.tgz"
+ integrity sha512-WbbYpmAaKcux/P66bZ40bpWsBucjx/TTgVVzRZ9yUO8yQfVBlameJ0ZGVaPfH64hNSBh63a+ICP5nqOpBA0w+Q==
+ dependencies:
+ cosmiconfig "^7.0.0"
+ klona "^2.0.5"
+ semver "^7.3.5"
+
+postcss-merge-idents@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz"
+ integrity sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==
+ dependencies:
+ cssnano-utils "^3.1.0"
+ postcss-value-parser "^4.2.0"
+
+postcss-merge-longhand@^5.1.7:
+ version "5.1.7"
+ resolved "https://registry.npmmirror.com/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz"
+ integrity sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+ stylehacks "^5.1.1"
+
+postcss-merge-rules@^5.1.3:
+ version "5.1.3"
+ resolved "https://registry.npmmirror.com/postcss-merge-rules/-/postcss-merge-rules-5.1.3.tgz"
+ integrity sha512-LbLd7uFC00vpOuMvyZop8+vvhnfRGpp2S+IMQKeuOZZapPRY4SMq5ErjQeHbHsjCUgJkRNrlU+LmxsKIqPKQlA==
+ dependencies:
+ browserslist "^4.21.4"
+ caniuse-api "^3.0.0"
+ cssnano-utils "^3.1.0"
+ postcss-selector-parser "^6.0.5"
+
+postcss-minify-font-values@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz"
+ integrity sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+
+postcss-minify-gradients@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz"
+ integrity sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==
+ dependencies:
+ colord "^2.9.1"
+ cssnano-utils "^3.1.0"
+ postcss-value-parser "^4.2.0"
+
+postcss-minify-params@^5.1.4:
+ version "5.1.4"
+ resolved "https://registry.npmmirror.com/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz"
+ integrity sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==
+ dependencies:
+ browserslist "^4.21.4"
+ cssnano-utils "^3.1.0"
+ postcss-value-parser "^4.2.0"
+
+postcss-minify-selectors@^5.2.1:
+ version "5.2.1"
+ resolved "https://registry.npmmirror.com/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz"
+ integrity sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==
+ dependencies:
+ postcss-selector-parser "^6.0.5"
+
+postcss-modules-extract-imports@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz"
+ integrity sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==
+
+postcss-modules-local-by-default@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz"
+ integrity sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ==
+ dependencies:
+ icss-utils "^5.0.0"
+ postcss-selector-parser "^6.0.2"
+ postcss-value-parser "^4.1.0"
+
+postcss-modules-scope@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz"
+ integrity sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==
+ dependencies:
+ postcss-selector-parser "^6.0.4"
+
+postcss-modules-values@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz"
+ integrity sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==
+ dependencies:
+ icss-utils "^5.0.0"
+
+postcss-normalize-charset@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz"
+ integrity sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==
+
+postcss-normalize-display-values@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz"
+ integrity sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+
+postcss-normalize-positions@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz"
+ integrity sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+
+postcss-normalize-repeat-style@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz"
+ integrity sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+
+postcss-normalize-string@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz"
+ integrity sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+
+postcss-normalize-timing-functions@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz"
+ integrity sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+
+postcss-normalize-unicode@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz"
+ integrity sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==
+ dependencies:
+ browserslist "^4.21.4"
+ postcss-value-parser "^4.2.0"
+
+postcss-normalize-url@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz"
+ integrity sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==
+ dependencies:
+ normalize-url "^6.0.1"
+ postcss-value-parser "^4.2.0"
+
+postcss-normalize-whitespace@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz"
+ integrity sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+
+postcss-ordered-values@^5.1.3:
+ version "5.1.3"
+ resolved "https://registry.npmmirror.com/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz"
+ integrity sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==
+ dependencies:
+ cssnano-utils "^3.1.0"
+ postcss-value-parser "^4.2.0"
+
+postcss-reduce-idents@^5.2.0:
+ version "5.2.0"
+ resolved "https://registry.npmmirror.com/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz"
+ integrity sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+
+postcss-reduce-initial@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/postcss-reduce-initial/-/postcss-reduce-initial-5.1.1.tgz"
+ integrity sha512-//jeDqWcHPuXGZLoolFrUXBDyuEGbr9S2rMo19bkTIjBQ4PqkaO+oI8wua5BOUxpfi97i3PCoInsiFIEBfkm9w==
+ dependencies:
+ browserslist "^4.21.4"
+ caniuse-api "^3.0.0"
+
+postcss-reduce-transforms@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz"
+ integrity sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+
+postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.4, postcss-selector-parser@^6.0.5, postcss-selector-parser@^6.0.9:
+ version "6.0.10"
+ resolved "https://registry.npmmirror.com/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz"
+ integrity sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==
+ dependencies:
+ cssesc "^3.0.0"
+ util-deprecate "^1.0.2"
+
+postcss-sort-media-queries@^4.1.0:
+ version "4.3.0"
+ resolved "https://registry.npmmirror.com/postcss-sort-media-queries/-/postcss-sort-media-queries-4.3.0.tgz"
+ integrity sha512-jAl8gJM2DvuIJiI9sL1CuiHtKM4s5aEIomkU8G3LFvbP+p8i7Sz8VV63uieTgoewGqKbi+hxBTiOKJlB35upCg==
+ dependencies:
+ sort-css-media-queries "2.1.0"
+
+postcss-svgo@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-svgo/-/postcss-svgo-5.1.0.tgz"
+ integrity sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==
+ dependencies:
+ postcss-value-parser "^4.2.0"
+ svgo "^2.7.0"
+
+postcss-unique-selectors@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz"
+ integrity sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==
+ dependencies:
+ postcss-selector-parser "^6.0.5"
+
+postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0:
+ version "4.2.0"
+ resolved "https://registry.npmmirror.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz"
+ integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==
+
+postcss-zindex@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/postcss-zindex/-/postcss-zindex-5.1.0.tgz"
+ integrity sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==
+
+postcss@^8.2.15, postcss@^8.3.11, postcss@^8.3.5, postcss@^8.3.7:
+ version "8.4.31"
+ resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz"
+ integrity sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==
+ dependencies:
+ nanoid "^3.3.6"
+ picocolors "^1.0.0"
+ source-map-js "^1.0.2"
+
+pretty-error@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/pretty-error/-/pretty-error-4.0.0.tgz"
+ integrity sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==
+ dependencies:
+ lodash "^4.17.20"
+ renderkid "^3.0.0"
+
+pretty-time@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.npmmirror.com/pretty-time/-/pretty-time-1.1.0.tgz"
+ integrity sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==
+
+prism-react-renderer@^1.2.1:
+ version "1.3.5"
+ resolved "https://registry.npmmirror.com/prism-react-renderer/-/prism-react-renderer-1.3.5.tgz"
+ integrity sha512-IJ+MSwBWKG+SM3b2SUfdrhC+gu01QkV2KmRQgREThBfSQRoufqRfxfHUxpG1WcaFjP+kojcFyO9Qqtpgt3qLCg==
+
+prismjs@^1.23.0:
+ version "1.29.0"
+ resolved "https://registry.npmmirror.com/prismjs/-/prismjs-1.29.0.tgz"
+ integrity sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==
+
+process-nextick-args@~2.0.0:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz"
+ integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==
+
+promise@^7.1.1:
+ version "7.3.1"
+ resolved "https://registry.npmmirror.com/promise/-/promise-7.3.1.tgz"
+ integrity sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==
+ dependencies:
+ asap "~2.0.3"
+
+prompts@^2.4.0, prompts@^2.4.1:
+ version "2.4.2"
+ resolved "https://registry.npmmirror.com/prompts/-/prompts-2.4.2.tgz"
+ integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==
+ dependencies:
+ kleur "^3.0.3"
+ sisteransi "^1.0.5"
+
+prop-types@^15.6.2, prop-types@^15.7.2:
+ version "15.8.1"
+ resolved "https://registry.npmmirror.com/prop-types/-/prop-types-15.8.1.tgz"
+ integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==
+ dependencies:
+ loose-envify "^1.4.0"
+ object-assign "^4.1.1"
+ react-is "^16.13.1"
+
+property-information@^5.0.0, property-information@^5.3.0:
+ version "5.6.0"
+ resolved "https://registry.npmmirror.com/property-information/-/property-information-5.6.0.tgz"
+ integrity sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==
+ dependencies:
+ xtend "^4.0.0"
+
+proxy-addr@~2.0.7:
+ version "2.0.7"
+ resolved "https://registry.npmmirror.com/proxy-addr/-/proxy-addr-2.0.7.tgz"
+ integrity sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==
+ dependencies:
+ forwarded "0.2.0"
+ ipaddr.js "1.9.1"
+
+pump@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/pump/-/pump-3.0.0.tgz"
+ integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==
+ dependencies:
+ end-of-stream "^1.1.0"
+ once "^1.3.1"
+
+punycode@^1.3.2:
+ version "1.4.1"
+ resolved "https://registry.npmmirror.com/punycode/-/punycode-1.4.1.tgz"
+ integrity sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==
+
+punycode@^2.1.0:
+ version "2.1.1"
+ resolved "https://registry.npmmirror.com/punycode/-/punycode-2.1.1.tgz"
+ integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==
+
+pupa@^2.1.1:
+ version "2.1.1"
+ resolved "https://registry.npmmirror.com/pupa/-/pupa-2.1.1.tgz"
+ integrity sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==
+ dependencies:
+ escape-goat "^2.0.0"
+
+pure-color@^1.2.0:
+ version "1.3.0"
+ resolved "https://registry.npmmirror.com/pure-color/-/pure-color-1.3.0.tgz"
+ integrity sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA==
+
+q@^1.1.2:
+ version "1.5.1"
+ resolved "https://registry.npmmirror.com/q/-/q-1.5.1.tgz"
+ integrity sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw==
+
+qs@6.11.0:
+ version "6.11.0"
+ resolved "https://registry.npmmirror.com/qs/-/qs-6.11.0.tgz"
+ integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==
+ dependencies:
+ side-channel "^1.0.4"
+
+querystring@0.2.0:
+ version "0.2.0"
+ resolved "https://registry.npmmirror.com/querystring/-/querystring-0.2.0.tgz"
+ integrity sha512-X/xY82scca2tau62i9mDyU9K+I+djTMUsvwf7xnUX5GLvVzgJybOJf4Y6o9Zx3oJK/LSXg5tTZBjwzqVPaPO2g==
+
+queue-microtask@^1.2.2:
+ version "1.2.3"
+ resolved "https://registry.npmmirror.com/queue-microtask/-/queue-microtask-1.2.3.tgz"
+ integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==
+
+quick-lru@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/quick-lru/-/quick-lru-5.1.1.tgz"
+ integrity sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==
+
+randombytes@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/randombytes/-/randombytes-2.1.0.tgz"
+ integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==
+ dependencies:
+ safe-buffer "^5.1.0"
+
+range-parser@1.2.0:
+ version "1.2.0"
+ resolved "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.0.tgz"
+ integrity sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==
+
+range-parser@^1.2.1, range-parser@~1.2.1:
+ version "1.2.1"
+ resolved "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.1.tgz"
+ integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
+
+raw-body@2.5.1:
+ version "2.5.1"
+ resolved "https://registry.npmmirror.com/raw-body/-/raw-body-2.5.1.tgz"
+ integrity sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==
+ dependencies:
+ bytes "3.1.2"
+ http-errors "2.0.0"
+ iconv-lite "0.4.24"
+ unpipe "1.0.0"
+
+rc@1.2.8, rc@^1.2.8:
+ version "1.2.8"
+ resolved "https://registry.npmmirror.com/rc/-/rc-1.2.8.tgz"
+ integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==
+ dependencies:
+ deep-extend "^0.6.0"
+ ini "~1.3.0"
+ minimist "^1.2.0"
+ strip-json-comments "~2.0.1"
+
+react-base16-styling@^0.6.0:
+ version "0.6.0"
+ resolved "https://registry.npmmirror.com/react-base16-styling/-/react-base16-styling-0.6.0.tgz"
+ integrity sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ==
+ dependencies:
+ base16 "^1.0.0"
+ lodash.curry "^4.0.1"
+ lodash.flow "^3.3.0"
+ pure-color "^1.2.0"
+
+react-dev-utils@12.0.0-next.47:
+ version "12.0.0-next.47"
+ resolved "https://registry.npmmirror.com/react-dev-utils/-/react-dev-utils-12.0.0-next.47.tgz"
+ integrity sha512-PsE71vP15TZMmp/RZKOJC4fYD5Pvt0+wCoyG3QHclto0d4FyIJI78xGRICOOThZFROqgXYlZP6ddmeybm+jO4w==
+ dependencies:
+ "@babel/code-frame" "^7.10.4"
+ address "^1.1.2"
+ browserslist "^4.16.5"
+ chalk "^2.4.2"
+ cross-spawn "^7.0.3"
+ detect-port-alt "^1.1.6"
+ escape-string-regexp "^2.0.0"
+ filesize "^6.1.0"
+ find-up "^4.1.0"
+ fork-ts-checker-webpack-plugin "^6.0.5"
+ global-modules "^2.0.0"
+ globby "^11.0.1"
+ gzip-size "^5.1.1"
+ immer "^9.0.6"
+ is-root "^2.1.0"
+ loader-utils "^2.0.0"
+ open "^7.0.2"
+ pkg-up "^3.1.0"
+ prompts "^2.4.0"
+ react-error-overlay "7.0.0-next.54+1465357b"
+ recursive-readdir "^2.2.2"
+ shell-quote "^1.7.2"
+ strip-ansi "^6.0.0"
+ text-table "^0.2.0"
+
+react-dom@^17.0.1:
+ version "17.0.2"
+ resolved "https://registry.npmmirror.com/react-dom/-/react-dom-17.0.2.tgz"
+ integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==
+ dependencies:
+ loose-envify "^1.1.0"
+ object-assign "^4.1.1"
+ scheduler "^0.20.2"
+
+react-error-overlay@7.0.0-next.54+1465357b:
+ version "7.0.0-next.54"
+ resolved "https://registry.npmmirror.com/react-error-overlay/-/react-error-overlay-7.0.0-next.54.tgz"
+ integrity sha512-b96CiTnZahXPDNH9MKplvt5+jD+BkxDw7q5R3jnkUXze/ux1pLv32BBZmlj0OfCUeMqyz4sAmF+0ccJGVMlpXw==
+
+react-error-overlay@^6.0.9:
+ version "6.0.11"
+ resolved "https://registry.npmmirror.com/react-error-overlay/-/react-error-overlay-6.0.11.tgz"
+ integrity sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==
+
+react-fast-compare@^3.1.1:
+ version "3.2.0"
+ resolved "https://registry.npmmirror.com/react-fast-compare/-/react-fast-compare-3.2.0.tgz"
+ integrity sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA==
+
+react-helmet@^6.1.0:
+ version "6.1.0"
+ resolved "https://registry.npmmirror.com/react-helmet/-/react-helmet-6.1.0.tgz"
+ integrity sha512-4uMzEY9nlDlgxr61NL3XbKRy1hEkXmKNXhjbAIOVw5vcFrsdYbH2FEwcNyWvWinl103nXgzYNlns9ca+8kFiWw==
+ dependencies:
+ object-assign "^4.1.1"
+ prop-types "^15.7.2"
+ react-fast-compare "^3.1.1"
+ react-side-effect "^2.1.0"
+
+react-is@^16.13.1, react-is@^16.6.0, react-is@^16.7.0:
+ version "16.13.1"
+ resolved "https://registry.npmmirror.com/react-is/-/react-is-16.13.1.tgz"
+ integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==
+
+react-json-view@^1.21.3:
+ version "1.21.3"
+ resolved "https://registry.npmmirror.com/react-json-view/-/react-json-view-1.21.3.tgz"
+ integrity sha512-13p8IREj9/x/Ye4WI/JpjhoIwuzEgUAtgJZNBJckfzJt1qyh24BdTm6UQNGnyTq9dapQdrqvquZTo3dz1X6Cjw==
+ dependencies:
+ flux "^4.0.1"
+ react-base16-styling "^0.6.0"
+ react-lifecycles-compat "^3.0.4"
+ react-textarea-autosize "^8.3.2"
+
+react-lifecycles-compat@^3.0.4:
+ version "3.0.4"
+ resolved "https://registry.npmmirror.com/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz"
+ integrity sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==
+
+react-loadable-ssr-addon-v5-slorber@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz"
+ integrity sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==
+ dependencies:
+ "@babel/runtime" "^7.10.3"
+
+"react-loadable@npm:@docusaurus/react-loadable@5.5.2":
+ version "5.5.2"
+ resolved "https://registry.npmmirror.com/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz"
+ integrity sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==
+ dependencies:
+ "@types/react" "*"
+ prop-types "^15.6.2"
+
+react-router-config@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/react-router-config/-/react-router-config-5.1.1.tgz"
+ integrity sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==
+ dependencies:
+ "@babel/runtime" "^7.1.2"
+
+react-router-dom@^5.2.0:
+ version "5.3.4"
+ resolved "https://registry.npmmirror.com/react-router-dom/-/react-router-dom-5.3.4.tgz"
+ integrity sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==
+ dependencies:
+ "@babel/runtime" "^7.12.13"
+ history "^4.9.0"
+ loose-envify "^1.3.1"
+ prop-types "^15.6.2"
+ react-router "5.3.4"
+ tiny-invariant "^1.0.2"
+ tiny-warning "^1.0.0"
+
+react-router@5.3.4, react-router@^5.2.0:
+ version "5.3.4"
+ resolved "https://registry.npmmirror.com/react-router/-/react-router-5.3.4.tgz"
+ integrity sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==
+ dependencies:
+ "@babel/runtime" "^7.12.13"
+ history "^4.9.0"
+ hoist-non-react-statics "^3.1.0"
+ loose-envify "^1.3.1"
+ path-to-regexp "^1.7.0"
+ prop-types "^15.6.2"
+ react-is "^16.6.0"
+ tiny-invariant "^1.0.2"
+ tiny-warning "^1.0.0"
+
+react-side-effect@^2.1.0:
+ version "2.1.2"
+ resolved "https://registry.npmmirror.com/react-side-effect/-/react-side-effect-2.1.2.tgz"
+ integrity sha512-PVjOcvVOyIILrYoyGEpDN3vmYNLdy1CajSFNt4TDsVQC5KpTijDvWVoR+/7Rz2xT978D8/ZtFceXxzsPwZEDvw==
+
+react-textarea-autosize@^8.3.2:
+ version "8.3.4"
+ resolved "https://registry.npmmirror.com/react-textarea-autosize/-/react-textarea-autosize-8.3.4.tgz"
+ integrity sha512-CdtmP8Dc19xL8/R6sWvtknD/eCXkQr30dtvC4VmGInhRsfF8X/ihXCq6+9l9qbxmKRiq407/7z5fxE7cVWQNgQ==
+ dependencies:
+ "@babel/runtime" "^7.10.2"
+ use-composed-ref "^1.3.0"
+ use-latest "^1.2.1"
+
+react@^17.0.1:
+ version "17.0.2"
+ resolved "https://registry.npmmirror.com/react/-/react-17.0.2.tgz"
+ integrity sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==
+ dependencies:
+ loose-envify "^1.1.0"
+ object-assign "^4.1.1"
+
+readable-stream@^2.0.1:
+ version "2.3.7"
+ resolved "https://registry.npmmirror.com/readable-stream/-/readable-stream-2.3.7.tgz"
+ integrity sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==
+ dependencies:
+ core-util-is "~1.0.0"
+ inherits "~2.0.3"
+ isarray "~1.0.0"
+ process-nextick-args "~2.0.0"
+ safe-buffer "~5.1.1"
+ string_decoder "~1.1.1"
+ util-deprecate "~1.0.1"
+
+readable-stream@^3.0.6:
+ version "3.6.0"
+ resolved "https://registry.npmmirror.com/readable-stream/-/readable-stream-3.6.0.tgz"
+ integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==
+ dependencies:
+ inherits "^2.0.3"
+ string_decoder "^1.1.1"
+ util-deprecate "^1.0.1"
+
+readdirp@~3.6.0:
+ version "3.6.0"
+ resolved "https://registry.npmmirror.com/readdirp/-/readdirp-3.6.0.tgz"
+ integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==
+ dependencies:
+ picomatch "^2.2.1"
+
+reading-time@^1.5.0:
+ version "1.5.0"
+ resolved "https://registry.npmmirror.com/reading-time/-/reading-time-1.5.0.tgz"
+ integrity sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==
+
+rechoir@^0.6.2:
+ version "0.6.2"
+ resolved "https://registry.npmmirror.com/rechoir/-/rechoir-0.6.2.tgz"
+ integrity sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==
+ dependencies:
+ resolve "^1.1.6"
+
+recursive-readdir@^2.2.2:
+ version "2.2.3"
+ resolved "https://registry.npmmirror.com/recursive-readdir/-/recursive-readdir-2.2.3.tgz"
+ integrity sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==
+ dependencies:
+ minimatch "^3.0.5"
+
+regenerate-unicode-properties@^10.1.0:
+ version "10.1.0"
+ resolved "https://registry.npmmirror.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz"
+ integrity sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==
+ dependencies:
+ regenerate "^1.4.2"
+
+regenerate-unicode-properties@^9.0.0:
+ version "9.0.0"
+ resolved "https://registry.npmmirror.com/regenerate-unicode-properties/-/regenerate-unicode-properties-9.0.0.tgz"
+ integrity sha512-3E12UeNSPfjrgwjkR81m5J7Aw/T55Tu7nUyZVQYCKEOs+2dkxEY+DpPtZzO4YruuiPb7NkYLVcyJC4+zCbk5pA==
+ dependencies:
+ regenerate "^1.4.2"
+
+regenerate@^1.4.2:
+ version "1.4.2"
+ resolved "https://registry.npmmirror.com/regenerate/-/regenerate-1.4.2.tgz"
+ integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==
+
+regenerator-runtime@^0.13.10:
+ version "0.13.10"
+ resolved "https://registry.npmmirror.com/regenerator-runtime/-/regenerator-runtime-0.13.10.tgz"
+ integrity sha512-KepLsg4dU12hryUO7bp/axHAKvwGOCV0sGloQtpagJ12ai+ojVDqkeGSiRX1zlq+kjIMZ1t7gpze+26QqtdGqw==
+
+regenerator-transform@^0.15.0:
+ version "0.15.0"
+ resolved "https://registry.npmmirror.com/regenerator-transform/-/regenerator-transform-0.15.0.tgz"
+ integrity sha512-LsrGtPmbYg19bcPHwdtmXwbW+TqNvtY4riE3P83foeHRroMbH6/2ddFBfab3t7kbzc7v7p4wbkIecHImqt0QNg==
+ dependencies:
+ "@babel/runtime" "^7.8.4"
+
+regexp.prototype.flags@^1.4.3:
+ version "1.4.3"
+ resolved "https://registry.npmmirror.com/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz"
+ integrity sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ functions-have-names "^1.2.2"
+
+regexpu-core@^4.5.4:
+ version "4.8.0"
+ resolved "https://registry.npmmirror.com/regexpu-core/-/regexpu-core-4.8.0.tgz"
+ integrity sha512-1F6bYsoYiz6is+oz70NWur2Vlh9KWtswuRuzJOfeYUrfPX2o8n74AnUVaOGDbUqVGO9fNHu48/pjJO4sNVwsOg==
+ dependencies:
+ regenerate "^1.4.2"
+ regenerate-unicode-properties "^9.0.0"
+ regjsgen "^0.5.2"
+ regjsparser "^0.7.0"
+ unicode-match-property-ecmascript "^2.0.0"
+ unicode-match-property-value-ecmascript "^2.0.0"
+
+regexpu-core@^5.1.0:
+ version "5.2.1"
+ resolved "https://registry.npmmirror.com/regexpu-core/-/regexpu-core-5.2.1.tgz"
+ integrity sha512-HrnlNtpvqP1Xkb28tMhBUO2EbyUHdQlsnlAhzWcwHy8WJR53UWr7/MAvqrsQKMbV4qdpv03oTMG8iIhfsPFktQ==
+ dependencies:
+ regenerate "^1.4.2"
+ regenerate-unicode-properties "^10.1.0"
+ regjsgen "^0.7.1"
+ regjsparser "^0.9.1"
+ unicode-match-property-ecmascript "^2.0.0"
+ unicode-match-property-value-ecmascript "^2.0.0"
+
+registry-auth-token@^4.0.0:
+ version "4.2.2"
+ resolved "https://registry.npmmirror.com/registry-auth-token/-/registry-auth-token-4.2.2.tgz"
+ integrity sha512-PC5ZysNb42zpFME6D/XlIgtNGdTl8bBOCw90xQLVMpzuuubJKYDWFAEuUNc+Cn8Z8724tg2SDhDRrkVEsqfDMg==
+ dependencies:
+ rc "1.2.8"
+
+registry-url@^5.0.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/registry-url/-/registry-url-5.1.0.tgz"
+ integrity sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==
+ dependencies:
+ rc "^1.2.8"
+
+regjsgen@^0.5.2:
+ version "0.5.2"
+ resolved "https://registry.npmmirror.com/regjsgen/-/regjsgen-0.5.2.tgz"
+ integrity sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A==
+
+regjsgen@^0.7.1:
+ version "0.7.1"
+ resolved "https://registry.npmmirror.com/regjsgen/-/regjsgen-0.7.1.tgz"
+ integrity sha512-RAt+8H2ZEzHeYWxZ3H2z6tF18zyyOnlcdaafLrm21Bguj7uZy6ULibiAFdXEtKQY4Sy7wDTwDiOazasMLc4KPA==
+
+regjsparser@^0.7.0:
+ version "0.7.0"
+ resolved "https://registry.npmmirror.com/regjsparser/-/regjsparser-0.7.0.tgz"
+ integrity sha512-A4pcaORqmNMDVwUjWoTzuhwMGpP+NykpfqAsEgI1FSH/EzC7lrN5TMd+kN8YCovX+jMpu8eaqXgXPCa0g8FQNQ==
+ dependencies:
+ jsesc "~0.5.0"
+
+regjsparser@^0.9.1:
+ version "0.9.1"
+ resolved "https://registry.npmmirror.com/regjsparser/-/regjsparser-0.9.1.tgz"
+ integrity sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==
+ dependencies:
+ jsesc "~0.5.0"
+
+rehype-katex@4:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/rehype-katex/-/rehype-katex-4.0.0.tgz"
+ integrity sha512-0mgBqYugQyIW0eUl6RDOZ28Cat2YzrnWGaYgKCMQnJw6ClmKgLqXBnkDAPGh2mwxvkkKwQOUMUpSLpA5rt7rzA==
+ dependencies:
+ "@types/katex" "^0.11.0"
+ hast-util-to-text "^2.0.0"
+ katex "^0.12.0"
+ rehype-parse "^7.0.0"
+ unified "^9.0.0"
+ unist-util-visit "^2.0.0"
+
+rehype-parse@^6.0.2:
+ version "6.0.2"
+ resolved "https://registry.npmmirror.com/rehype-parse/-/rehype-parse-6.0.2.tgz"
+ integrity sha512-0S3CpvpTAgGmnz8kiCyFLGuW5yA4OQhyNTm/nwPopZ7+PI11WnGl1TTWTGv/2hPEe/g2jRLlhVVSsoDH8waRug==
+ dependencies:
+ hast-util-from-parse5 "^5.0.0"
+ parse5 "^5.0.0"
+ xtend "^4.0.0"
+
+rehype-parse@^7.0.0:
+ version "7.0.1"
+ resolved "https://registry.npmmirror.com/rehype-parse/-/rehype-parse-7.0.1.tgz"
+ integrity sha512-fOiR9a9xH+Le19i4fGzIEowAbwG7idy2Jzs4mOrFWBSJ0sNUgy0ev871dwWnbOo371SjgjG4pwzrbgSVrKxecw==
+ dependencies:
+ hast-util-from-parse5 "^6.0.0"
+ parse5 "^6.0.0"
+
+relateurl@^0.2.7:
+ version "0.2.7"
+ resolved "https://registry.npmmirror.com/relateurl/-/relateurl-0.2.7.tgz"
+ integrity sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==
+
+remark-admonitions@^1.2.1:
+ version "1.2.1"
+ resolved "https://registry.npmmirror.com/remark-admonitions/-/remark-admonitions-1.2.1.tgz"
+ integrity sha512-Ji6p68VDvD+H1oS95Fdx9Ar5WA2wcDA4kwrrhVU7fGctC6+d3uiMICu7w7/2Xld+lnU7/gi+432+rRbup5S8ow==
+ dependencies:
+ rehype-parse "^6.0.2"
+ unified "^8.4.2"
+ unist-util-visit "^2.0.1"
+
+remark-emoji@^2.1.0:
+ version "2.2.0"
+ resolved "https://registry.npmmirror.com/remark-emoji/-/remark-emoji-2.2.0.tgz"
+ integrity sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w==
+ dependencies:
+ emoticon "^3.2.0"
+ node-emoji "^1.10.0"
+ unist-util-visit "^2.0.3"
+
+remark-footnotes@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/remark-footnotes/-/remark-footnotes-2.0.0.tgz"
+ integrity sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ==
+
+remark-math@3:
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/remark-math/-/remark-math-3.0.1.tgz"
+ integrity sha512-epT77R/HK0x7NqrWHdSV75uNLwn8g9qTyMqCRCDujL0vj/6T6+yhdrR7mjELWtkse+Fw02kijAaBuVcHBor1+Q==
+
+remark-mdx-remove-exports@^1.6.22:
+ version "1.6.22"
+ resolved "https://registry.npmmirror.com/remark-mdx-remove-exports/-/remark-mdx-remove-exports-1.6.22.tgz"
+ integrity sha512-7g2uiTmTGfz5QyVb+toeX25frbk1Y6yd03RXGPtqx0+DVh86Gb7MkNYbk7H2X27zdZ3CQv1W/JqlFO0Oo8IxVA==
+ dependencies:
+ unist-util-remove "2.0.0"
+
+remark-mdx-remove-imports@^1.6.22:
+ version "1.6.22"
+ resolved "https://registry.npmmirror.com/remark-mdx-remove-imports/-/remark-mdx-remove-imports-1.6.22.tgz"
+ integrity sha512-lmjAXD8Ltw0TsvBzb45S+Dxx7LTJAtDaMneMAv8LAUIPEyYoKkmGbmVsiF0/pY6mhM1Q16swCmu1TN+ie/vn/A==
+ dependencies:
+ unist-util-remove "2.0.0"
+
+remark-mdx@1.6.22:
+ version "1.6.22"
+ resolved "https://registry.npmmirror.com/remark-mdx/-/remark-mdx-1.6.22.tgz"
+ integrity sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ==
+ dependencies:
+ "@babel/core" "7.12.9"
+ "@babel/helper-plugin-utils" "7.10.4"
+ "@babel/plugin-proposal-object-rest-spread" "7.12.1"
+ "@babel/plugin-syntax-jsx" "7.12.1"
+ "@mdx-js/util" "1.6.22"
+ is-alphabetical "1.0.4"
+ remark-parse "8.0.3"
+ unified "9.2.0"
+
+remark-parse@8.0.3:
+ version "8.0.3"
+ resolved "https://registry.npmmirror.com/remark-parse/-/remark-parse-8.0.3.tgz"
+ integrity sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q==
+ dependencies:
+ ccount "^1.0.0"
+ collapse-white-space "^1.0.2"
+ is-alphabetical "^1.0.0"
+ is-decimal "^1.0.0"
+ is-whitespace-character "^1.0.0"
+ is-word-character "^1.0.0"
+ markdown-escapes "^1.0.0"
+ parse-entities "^2.0.0"
+ repeat-string "^1.5.4"
+ state-toggle "^1.0.0"
+ trim "0.0.1"
+ trim-trailing-lines "^1.0.0"
+ unherit "^1.0.4"
+ unist-util-remove-position "^2.0.0"
+ vfile-location "^3.0.0"
+ xtend "^4.0.1"
+
+remark-squeeze-paragraphs@4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz"
+ integrity sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw==
+ dependencies:
+ mdast-squeeze-paragraphs "^4.0.0"
+
+renderkid@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/renderkid/-/renderkid-3.0.0.tgz"
+ integrity sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==
+ dependencies:
+ css-select "^4.1.3"
+ dom-converter "^0.2.0"
+ htmlparser2 "^6.1.0"
+ lodash "^4.17.21"
+ strip-ansi "^6.0.1"
+
+repeat-string@^1.0.0, repeat-string@^1.5.4:
+ version "1.6.1"
+ resolved "https://registry.npmmirror.com/repeat-string/-/repeat-string-1.6.1.tgz"
+ integrity sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==
+
+require-from-string@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.npmmirror.com/require-from-string/-/require-from-string-2.0.2.tgz"
+ integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==
+
+"require-like@>= 0.1.1":
+ version "0.1.2"
+ resolved "https://registry.npmmirror.com/require-like/-/require-like-0.1.2.tgz"
+ integrity sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==
+
+requires-port@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/requires-port/-/requires-port-1.0.0.tgz"
+ integrity sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==
+
+resolve-alpn@^1.0.0:
+ version "1.2.1"
+ resolved "https://registry.npmmirror.com/resolve-alpn/-/resolve-alpn-1.2.1.tgz"
+ integrity sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==
+
+resolve-from@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/resolve-from/-/resolve-from-4.0.0.tgz"
+ integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==
+
+resolve-pathname@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/resolve-pathname/-/resolve-pathname-3.0.0.tgz"
+ integrity sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==
+
+resolve@^1.1.6, resolve@^1.14.2, resolve@^1.3.2:
+ version "1.22.1"
+ resolved "https://registry.npmmirror.com/resolve/-/resolve-1.22.1.tgz"
+ integrity sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==
+ dependencies:
+ is-core-module "^2.9.0"
+ path-parse "^1.0.7"
+ supports-preserve-symlinks-flag "^1.0.0"
+
+responselike@^2.0.0:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/responselike/-/responselike-2.0.1.tgz"
+ integrity sha512-4gl03wn3hj1HP3yzgdI7d3lCkF95F21Pz4BPGvKHinyQzALR5CapwC8yIi0Rh58DEMQ/SguC03wFj2k0M/mHhw==
+ dependencies:
+ lowercase-keys "^2.0.0"
+
+retry@^0.13.1:
+ version "0.13.1"
+ resolved "https://registry.npmmirror.com/retry/-/retry-0.13.1.tgz"
+ integrity sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==
+
+reusify@^1.0.4:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/reusify/-/reusify-1.0.4.tgz"
+ integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==
+
+rimraf@^3.0.2:
+ version "3.0.2"
+ resolved "https://registry.npmmirror.com/rimraf/-/rimraf-3.0.2.tgz"
+ integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==
+ dependencies:
+ glob "^7.1.3"
+
+rtl-detect@^1.0.4:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/rtl-detect/-/rtl-detect-1.0.4.tgz"
+ integrity sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ==
+
+rtlcss@^3.3.0:
+ version "3.5.0"
+ resolved "https://registry.npmmirror.com/rtlcss/-/rtlcss-3.5.0.tgz"
+ integrity sha512-wzgMaMFHQTnyi9YOwsx9LjOxYXJPzS8sYnFaKm6R5ysvTkwzHiB0vxnbHwchHQT65PTdBjDG21/kQBWI7q9O7A==
+ dependencies:
+ find-up "^5.0.0"
+ picocolors "^1.0.0"
+ postcss "^8.3.11"
+ strip-json-comments "^3.1.1"
+
+run-parallel@^1.1.9:
+ version "1.2.0"
+ resolved "https://registry.npmmirror.com/run-parallel/-/run-parallel-1.2.0.tgz"
+ integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==
+ dependencies:
+ queue-microtask "^1.2.2"
+
+rxjs@^7.5.4:
+ version "7.5.7"
+ resolved "https://registry.npmmirror.com/rxjs/-/rxjs-7.5.7.tgz"
+ integrity sha512-z9MzKh/UcOqB3i20H6rtrlaE/CgjLOvheWK/9ILrbhROGTweAi1BaFsTT9FbwZi5Trr1qNRs+MXkhmR06awzQA==
+ dependencies:
+ tslib "^2.1.0"
+
+safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1:
+ version "5.1.2"
+ resolved "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.1.2.tgz"
+ integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
+
+safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.1.0, safe-buffer@~5.2.0:
+ version "5.2.1"
+ resolved "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.2.1.tgz"
+ integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
+
+safe-regex-test@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/safe-regex-test/-/safe-regex-test-1.0.0.tgz"
+ integrity sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==
+ dependencies:
+ call-bind "^1.0.2"
+ get-intrinsic "^1.1.3"
+ is-regex "^1.1.4"
+
+"safer-buffer@>= 2.1.2 < 3":
+ version "2.1.2"
+ resolved "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz"
+ integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==
+
+sax@^1.2.4, sax@~1.2.4:
+ version "1.2.4"
+ resolved "https://registry.npmmirror.com/sax/-/sax-1.2.4.tgz"
+ integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==
+
+scheduler@^0.20.2:
+ version "0.20.2"
+ resolved "https://registry.npmmirror.com/scheduler/-/scheduler-0.20.2.tgz"
+ integrity sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==
+ dependencies:
+ loose-envify "^1.1.0"
+ object-assign "^4.1.1"
+
+schema-utils@2.7.0:
+ version "2.7.0"
+ resolved "https://registry.npmmirror.com/schema-utils/-/schema-utils-2.7.0.tgz"
+ integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==
+ dependencies:
+ "@types/json-schema" "^7.0.4"
+ ajv "^6.12.2"
+ ajv-keywords "^3.4.1"
+
+schema-utils@^2.6.5:
+ version "2.7.1"
+ resolved "https://registry.npmmirror.com/schema-utils/-/schema-utils-2.7.1.tgz"
+ integrity sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==
+ dependencies:
+ "@types/json-schema" "^7.0.5"
+ ajv "^6.12.4"
+ ajv-keywords "^3.5.2"
+
+schema-utils@^3.0.0, schema-utils@^3.1.1:
+ version "3.1.1"
+ resolved "https://registry.npmmirror.com/schema-utils/-/schema-utils-3.1.1.tgz"
+ integrity sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==
+ dependencies:
+ "@types/json-schema" "^7.0.8"
+ ajv "^6.12.5"
+ ajv-keywords "^3.5.2"
+
+schema-utils@^3.2.0:
+ version "3.3.0"
+ resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz"
+ integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==
+ dependencies:
+ "@types/json-schema" "^7.0.8"
+ ajv "^6.12.5"
+ ajv-keywords "^3.5.2"
+
+schema-utils@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/schema-utils/-/schema-utils-4.0.0.tgz"
+ integrity sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==
+ dependencies:
+ "@types/json-schema" "^7.0.9"
+ ajv "^8.8.0"
+ ajv-formats "^2.1.1"
+ ajv-keywords "^5.0.0"
+
+section-matter@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/section-matter/-/section-matter-1.0.0.tgz"
+ integrity sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==
+ dependencies:
+ extend-shallow "^2.0.1"
+ kind-of "^6.0.0"
+
+select-hose@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/select-hose/-/select-hose-2.0.0.tgz"
+ integrity sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==
+
+selfsigned@^2.1.1:
+ version "2.1.1"
+ resolved "https://registry.npmmirror.com/selfsigned/-/selfsigned-2.1.1.tgz"
+ integrity sha512-GSL3aowiF7wa/WtSFwnUrludWFoNhftq8bUkH9pkzjpN2XSPOAYEgg6e0sS9s0rZwgJzJiQRPU18A6clnoW5wQ==
+ dependencies:
+ node-forge "^1"
+
+semver-diff@^3.1.1:
+ version "3.1.1"
+ resolved "https://registry.npmmirror.com/semver-diff/-/semver-diff-3.1.1.tgz"
+ integrity sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==
+ dependencies:
+ semver "^6.3.0"
+
+semver@^5.4.1:
+ version "5.7.2"
+ resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz"
+ integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==
+
+semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.2.0, semver@^6.3.0:
+ version "6.3.1"
+ resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz"
+ integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
+
+semver@^7.3.2, semver@^7.3.4, semver@^7.3.5:
+ version "7.5.4"
+ resolved "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz"
+ integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==
+ dependencies:
+ lru-cache "^6.0.0"
+
+send@0.18.0:
+ version "0.18.0"
+ resolved "https://registry.npmmirror.com/send/-/send-0.18.0.tgz"
+ integrity sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==
+ dependencies:
+ debug "2.6.9"
+ depd "2.0.0"
+ destroy "1.2.0"
+ encodeurl "~1.0.2"
+ escape-html "~1.0.3"
+ etag "~1.8.1"
+ fresh "0.5.2"
+ http-errors "2.0.0"
+ mime "1.6.0"
+ ms "2.1.3"
+ on-finished "2.4.1"
+ range-parser "~1.2.1"
+ statuses "2.0.1"
+
+serialize-javascript@^6.0.0, serialize-javascript@^6.0.1:
+ version "6.0.2"
+ resolved "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz"
+ integrity sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==
+ dependencies:
+ randombytes "^2.1.0"
+
+serve-handler@^6.1.3:
+ version "6.1.5"
+ resolved "https://registry.npmmirror.com/serve-handler/-/serve-handler-6.1.5.tgz"
+ integrity sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==
+ dependencies:
+ bytes "3.0.0"
+ content-disposition "0.5.2"
+ fast-url-parser "1.1.3"
+ mime-types "2.1.18"
+ minimatch "3.1.2"
+ path-is-inside "1.0.2"
+ path-to-regexp "2.2.1"
+ range-parser "1.2.0"
+
+serve-index@^1.9.1:
+ version "1.9.1"
+ resolved "https://registry.npmmirror.com/serve-index/-/serve-index-1.9.1.tgz"
+ integrity sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==
+ dependencies:
+ accepts "~1.3.4"
+ batch "0.6.1"
+ debug "2.6.9"
+ escape-html "~1.0.3"
+ http-errors "~1.6.2"
+ mime-types "~2.1.17"
+ parseurl "~1.3.2"
+
+serve-static@1.15.0:
+ version "1.15.0"
+ resolved "https://registry.npmmirror.com/serve-static/-/serve-static-1.15.0.tgz"
+ integrity sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==
+ dependencies:
+ encodeurl "~1.0.2"
+ escape-html "~1.0.3"
+ parseurl "~1.3.3"
+ send "0.18.0"
+
+setimmediate@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/setimmediate/-/setimmediate-1.0.5.tgz"
+ integrity sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==
+
+setprototypeof@1.1.0:
+ version "1.1.0"
+ resolved "https://registry.npmmirror.com/setprototypeof/-/setprototypeof-1.1.0.tgz"
+ integrity sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==
+
+setprototypeof@1.2.0:
+ version "1.2.0"
+ resolved "https://registry.npmmirror.com/setprototypeof/-/setprototypeof-1.2.0.tgz"
+ integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==
+
+shallow-clone@^3.0.0:
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/shallow-clone/-/shallow-clone-3.0.1.tgz"
+ integrity sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==
+ dependencies:
+ kind-of "^6.0.2"
+
+shebang-command@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/shebang-command/-/shebang-command-2.0.0.tgz"
+ integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==
+ dependencies:
+ shebang-regex "^3.0.0"
+
+shebang-regex@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/shebang-regex/-/shebang-regex-3.0.0.tgz"
+ integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==
+
+shell-quote@^1.7.2:
+ version "1.7.4"
+ resolved "https://registry.npmmirror.com/shell-quote/-/shell-quote-1.7.4.tgz"
+ integrity sha512-8o/QEhSSRb1a5i7TFR0iM4G16Z0vYB2OQVs4G3aAFXjn3T6yEx8AZxy1PgDF7I00LZHYA3WxaSYIf5e5sAX8Rw==
+
+shelljs@^0.8.4, shelljs@^0.8.5:
+ version "0.8.5"
+ resolved "https://registry.npmmirror.com/shelljs/-/shelljs-0.8.5.tgz"
+ integrity sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==
+ dependencies:
+ glob "^7.0.0"
+ interpret "^1.0.0"
+ rechoir "^0.6.2"
+
+side-channel@^1.0.4:
+ version "1.0.4"
+ resolved "https://registry.npmmirror.com/side-channel/-/side-channel-1.0.4.tgz"
+ integrity sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==
+ dependencies:
+ call-bind "^1.0.0"
+ get-intrinsic "^1.0.2"
+ object-inspect "^1.9.0"
+
+signal-exit@^3.0.2, signal-exit@^3.0.3:
+ version "3.0.7"
+ resolved "https://registry.npmmirror.com/signal-exit/-/signal-exit-3.0.7.tgz"
+ integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==
+
+sirv@^1.0.7:
+ version "1.0.19"
+ resolved "https://registry.npmmirror.com/sirv/-/sirv-1.0.19.tgz"
+ integrity sha512-JuLThK3TnZG1TAKDwNIqNq6QA2afLOCcm+iE8D1Kj3GA40pSPsxQjjJl0J8X3tsR7T+CP1GavpzLwYkgVLWrZQ==
+ dependencies:
+ "@polka/url" "^1.0.0-next.20"
+ mrmime "^1.0.0"
+ totalist "^1.0.0"
+
+sisteransi@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/sisteransi/-/sisteransi-1.0.5.tgz"
+ integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==
+
+sitemap@^7.0.0:
+ version "7.1.1"
+ resolved "https://registry.npmmirror.com/sitemap/-/sitemap-7.1.1.tgz"
+ integrity sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==
+ dependencies:
+ "@types/node" "^17.0.5"
+ "@types/sax" "^1.2.1"
+ arg "^5.0.0"
+ sax "^1.2.4"
+
+slash@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/slash/-/slash-3.0.0.tgz"
+ integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==
+
+sockjs@^0.3.24:
+ version "0.3.24"
+ resolved "https://registry.npmmirror.com/sockjs/-/sockjs-0.3.24.tgz"
+ integrity sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==
+ dependencies:
+ faye-websocket "^0.11.3"
+ uuid "^8.3.2"
+ websocket-driver "^0.7.4"
+
+sort-css-media-queries@2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz"
+ integrity sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==
+
+source-list-map@^2.0.0:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/source-list-map/-/source-list-map-2.0.1.tgz"
+ integrity sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==
+
+source-map-js@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.0.2.tgz"
+ integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==
+
+source-map-support@~0.5.20:
+ version "0.5.21"
+ resolved "https://registry.npmmirror.com/source-map-support/-/source-map-support-0.5.21.tgz"
+ integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==
+ dependencies:
+ buffer-from "^1.0.0"
+ source-map "^0.6.0"
+
+source-map@^0.5.0:
+ version "0.5.7"
+ resolved "https://registry.npmmirror.com/source-map/-/source-map-0.5.7.tgz"
+ integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==
+
+source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1:
+ version "0.6.1"
+ resolved "https://registry.npmmirror.com/source-map/-/source-map-0.6.1.tgz"
+ integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
+
+sourcemap-codec@^1.4.8:
+ version "1.4.8"
+ resolved "https://registry.npmmirror.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz"
+ integrity sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==
+
+space-separated-tokens@^1.0.0:
+ version "1.1.5"
+ resolved "https://registry.npmmirror.com/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz"
+ integrity sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==
+
+spdy-transport@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/spdy-transport/-/spdy-transport-3.0.0.tgz"
+ integrity sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==
+ dependencies:
+ debug "^4.1.0"
+ detect-node "^2.0.4"
+ hpack.js "^2.1.6"
+ obuf "^1.1.2"
+ readable-stream "^3.0.6"
+ wbuf "^1.7.3"
+
+spdy@^4.0.2:
+ version "4.0.2"
+ resolved "https://registry.npmmirror.com/spdy/-/spdy-4.0.2.tgz"
+ integrity sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==
+ dependencies:
+ debug "^4.1.0"
+ handle-thing "^2.0.0"
+ http-deceiver "^1.2.7"
+ select-hose "^2.0.0"
+ spdy-transport "^3.0.0"
+
+sprintf-js@~1.0.2:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/sprintf-js/-/sprintf-js-1.0.3.tgz"
+ integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==
+
+stable@^0.1.8:
+ version "0.1.8"
+ resolved "https://registry.npmmirror.com/stable/-/stable-0.1.8.tgz"
+ integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==
+
+state-toggle@^1.0.0:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/state-toggle/-/state-toggle-1.0.3.tgz"
+ integrity sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ==
+
+statuses@2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/statuses/-/statuses-2.0.1.tgz"
+ integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==
+
+"statuses@>= 1.4.0 < 2":
+ version "1.5.0"
+ resolved "https://registry.npmmirror.com/statuses/-/statuses-1.5.0.tgz"
+ integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==
+
+std-env@^2.2.1:
+ version "2.3.1"
+ resolved "https://registry.npmmirror.com/std-env/-/std-env-2.3.1.tgz"
+ integrity sha512-eOsoKTWnr6C8aWrqJJ2KAReXoa7Vn5Ywyw6uCXgA/xDhxPoaIsBa5aNJmISY04dLwXPBnDHW4diGM7Sn5K4R/g==
+ dependencies:
+ ci-info "^3.1.1"
+
+std-env@^3.0.1:
+ version "3.3.0"
+ resolved "https://registry.npmmirror.com/std-env/-/std-env-3.3.0.tgz"
+ integrity sha512-cNNS+VYsXIs5gI6gJipO4qZ8YYT274JHvNnQ1/R/x8Q8mdP0qj0zoMchRXmBNPqp/0eOEhX+3g7g6Fgb7meLIQ==
+
+string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.2:
+ version "4.2.3"
+ resolved "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz"
+ integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
+ dependencies:
+ emoji-regex "^8.0.0"
+ is-fullwidth-code-point "^3.0.0"
+ strip-ansi "^6.0.1"
+
+string.prototype.trimend@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/string.prototype.trimend/-/string.prototype.trimend-1.0.5.tgz"
+ integrity sha512-I7RGvmjV4pJ7O3kdf+LXFpVfdNOxtCW/2C8f6jNiW4+PQchwxkCDzlk1/7p+Wl4bqFIZeF47qAHXLuHHWKAxog==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.19.5"
+
+string.prototype.trimstart@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.5.tgz"
+ integrity sha512-THx16TJCGlsN0o6dl2o6ncWUsdgnLRSA23rRE5pyGBw/mLr3Ej/R2LaqCtgP8VNMGZsvMWnf9ooZPyY2bHvUFg==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.19.5"
+
+string_decoder@^1.1.1:
+ version "1.3.0"
+ resolved "https://registry.npmmirror.com/string_decoder/-/string_decoder-1.3.0.tgz"
+ integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
+ dependencies:
+ safe-buffer "~5.2.0"
+
+string_decoder@~1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmmirror.com/string_decoder/-/string_decoder-1.1.1.tgz"
+ integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
+ dependencies:
+ safe-buffer "~5.1.0"
+
+stringify-object@^3.3.0:
+ version "3.3.0"
+ resolved "https://registry.npmmirror.com/stringify-object/-/stringify-object-3.3.0.tgz"
+ integrity sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==
+ dependencies:
+ get-own-enumerable-property-symbols "^3.0.0"
+ is-obj "^1.0.1"
+ is-regexp "^1.0.0"
+
+strip-ansi@^6.0.0, strip-ansi@^6.0.1:
+ version "6.0.1"
+ resolved "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz"
+ integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
+ dependencies:
+ ansi-regex "^5.0.1"
+
+strip-bom-string@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz"
+ integrity sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==
+
+strip-final-newline@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz"
+ integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==
+
+strip-json-comments@^3.1.1:
+ version "3.1.1"
+ resolved "https://registry.npmmirror.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz"
+ integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==
+
+strip-json-comments@~2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz"
+ integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==
+
+style-to-object@0.3.0, style-to-object@^0.3.0:
+ version "0.3.0"
+ resolved "https://registry.npmmirror.com/style-to-object/-/style-to-object-0.3.0.tgz"
+ integrity sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==
+ dependencies:
+ inline-style-parser "0.1.1"
+
+stylehacks@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.npmmirror.com/stylehacks/-/stylehacks-5.1.1.tgz"
+ integrity sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==
+ dependencies:
+ browserslist "^4.21.4"
+ postcss-selector-parser "^6.0.4"
+
+supports-color@^5.3.0:
+ version "5.5.0"
+ resolved "https://registry.npmmirror.com/supports-color/-/supports-color-5.5.0.tgz"
+ integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==
+ dependencies:
+ has-flag "^3.0.0"
+
+supports-color@^7.1.0:
+ version "7.2.0"
+ resolved "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz"
+ integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==
+ dependencies:
+ has-flag "^4.0.0"
+
+supports-color@^8.0.0:
+ version "8.1.1"
+ resolved "https://registry.npmmirror.com/supports-color/-/supports-color-8.1.1.tgz"
+ integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==
+ dependencies:
+ has-flag "^4.0.0"
+
+supports-preserve-symlinks-flag@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz"
+ integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==
+
+svg-parser@^2.0.2, svg-parser@^2.0.4:
+ version "2.0.4"
+ resolved "https://registry.npmmirror.com/svg-parser/-/svg-parser-2.0.4.tgz"
+ integrity sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==
+
+svgo@^1.2.2:
+ version "1.3.2"
+ resolved "https://registry.npmmirror.com/svgo/-/svgo-1.3.2.tgz"
+ integrity sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==
+ dependencies:
+ chalk "^2.4.1"
+ coa "^2.0.2"
+ css-select "^2.0.0"
+ css-select-base-adapter "^0.1.1"
+ css-tree "1.0.0-alpha.37"
+ csso "^4.0.2"
+ js-yaml "^3.13.1"
+ mkdirp "~0.5.1"
+ object.values "^1.1.0"
+ sax "~1.2.4"
+ stable "^0.1.8"
+ unquote "~1.1.1"
+ util.promisify "~1.0.0"
+
+svgo@^2.7.0, svgo@^2.8.0:
+ version "2.8.0"
+ resolved "https://registry.npmmirror.com/svgo/-/svgo-2.8.0.tgz"
+ integrity sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==
+ dependencies:
+ "@trysound/sax" "0.2.0"
+ commander "^7.2.0"
+ css-select "^4.1.3"
+ css-tree "^1.1.3"
+ csso "^4.2.0"
+ picocolors "^1.0.0"
+ stable "^0.1.8"
+
+tapable@^1.0.0:
+ version "1.1.3"
+ resolved "https://registry.npmmirror.com/tapable/-/tapable-1.1.3.tgz"
+ integrity sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==
+
+tapable@^2.0.0, tapable@^2.1.1, tapable@^2.2.0:
+ version "2.2.1"
+ resolved "https://registry.npmmirror.com/tapable/-/tapable-2.2.1.tgz"
+ integrity sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==
+
+terser-webpack-plugin@^5.2.4, terser-webpack-plugin@^5.3.10:
+ version "5.3.10"
+ resolved "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz"
+ integrity sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==
+ dependencies:
+ "@jridgewell/trace-mapping" "^0.3.20"
+ jest-worker "^27.4.5"
+ schema-utils "^3.1.1"
+ serialize-javascript "^6.0.1"
+ terser "^5.26.0"
+
+terser@^5.10.0, terser@^5.26.0:
+ version "5.31.5"
+ resolved "https://registry.npmjs.org/terser/-/terser-5.31.5.tgz"
+ integrity sha512-YPmas0L0rE1UyLL/llTWA0SiDOqIcAQYLeUj7cJYzXHlRTAnMSg9pPe4VJ5PlKvTrPQsdVFuiRiwyeNlYgwh2Q==
+ dependencies:
+ "@jridgewell/source-map" "^0.3.3"
+ acorn "^8.8.2"
+ commander "^2.20.0"
+ source-map-support "~0.5.20"
+
+text-table@^0.2.0:
+ version "0.2.0"
+ resolved "https://registry.npmmirror.com/text-table/-/text-table-0.2.0.tgz"
+ integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==
+
+thunky@^1.0.2:
+ version "1.1.0"
+ resolved "https://registry.npmmirror.com/thunky/-/thunky-1.1.0.tgz"
+ integrity sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==
+
+tiny-invariant@^1.0.2:
+ version "1.3.1"
+ resolved "https://registry.npmmirror.com/tiny-invariant/-/tiny-invariant-1.3.1.tgz"
+ integrity sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==
+
+tiny-warning@^1.0.0:
+ version "1.0.3"
+ resolved "https://registry.npmmirror.com/tiny-warning/-/tiny-warning-1.0.3.tgz"
+ integrity sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==
+
+to-fast-properties@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz"
+ integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==
+
+to-regex-range@^5.0.1:
+ version "5.0.1"
+ resolved "https://registry.npmmirror.com/to-regex-range/-/to-regex-range-5.0.1.tgz"
+ integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==
+ dependencies:
+ is-number "^7.0.0"
+
+toidentifier@1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/toidentifier/-/toidentifier-1.0.1.tgz"
+ integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==
+
+totalist@^1.0.0:
+ version "1.1.0"
+ resolved "https://registry.npmmirror.com/totalist/-/totalist-1.1.0.tgz"
+ integrity sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g==
+
+tr46@~0.0.3:
+ version "0.0.3"
+ resolved "https://registry.npmmirror.com/tr46/-/tr46-0.0.3.tgz"
+ integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==
+
+trim-trailing-lines@^1.0.0:
+ version "1.1.4"
+ resolved "https://registry.npmmirror.com/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz"
+ integrity sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ==
+
+trim@0.0.1, trim@0.0.3, trim@^0.0.3:
+ version "0.0.3"
+ resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.3.tgz#05243a47a3a4113e6b49367880a9cca59697a20b"
+ integrity sha512-h82ywcYhHK7veeelXrCScdH7HkWfbIT1D/CgYO+nmDarz3SGNssVBMws6jU16Ga60AJCRAvPV6w6RLuNerQqjg==
+
+trough@^1.0.0:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/trough/-/trough-1.0.5.tgz"
+ integrity sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==
+
+tslib@^2.0.3, tslib@^2.1.0, tslib@^2.2.0, tslib@^2.3.1, tslib@^2.4.0:
+ version "2.4.1"
+ resolved "https://registry.npmmirror.com/tslib/-/tslib-2.4.1.tgz"
+ integrity sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA==
+
+type-fest@^0.20.2:
+ version "0.20.2"
+ resolved "https://registry.npmmirror.com/type-fest/-/type-fest-0.20.2.tgz"
+ integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==
+
+type-is@~1.6.18:
+ version "1.6.18"
+ resolved "https://registry.npmmirror.com/type-is/-/type-is-1.6.18.tgz"
+ integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==
+ dependencies:
+ media-typer "0.3.0"
+ mime-types "~2.1.24"
+
+typedarray-to-buffer@^3.1.5:
+ version "3.1.5"
+ resolved "https://registry.npmmirror.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz"
+ integrity sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==
+ dependencies:
+ is-typedarray "^1.0.0"
+
+ua-parser-js@^0.7.30:
+ version "0.7.33"
+ resolved "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.33.tgz"
+ integrity sha512-s8ax/CeZdK9R/56Sui0WM6y9OFREJarMRHqLB2EwkovemBxNQ+Bqu8GAsUnVcXKgphb++ghr/B2BZx4mahujPw==
+
+unbox-primitive@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz"
+ integrity sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==
+ dependencies:
+ call-bind "^1.0.2"
+ has-bigints "^1.0.2"
+ has-symbols "^1.0.3"
+ which-boxed-primitive "^1.0.2"
+
+unherit@^1.0.4:
+ version "1.1.3"
+ resolved "https://registry.npmmirror.com/unherit/-/unherit-1.1.3.tgz"
+ integrity sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==
+ dependencies:
+ inherits "^2.0.0"
+ xtend "^4.0.0"
+
+unicode-canonical-property-names-ecmascript@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz"
+ integrity sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==
+
+unicode-match-property-ecmascript@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz"
+ integrity sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==
+ dependencies:
+ unicode-canonical-property-names-ecmascript "^2.0.0"
+ unicode-property-aliases-ecmascript "^2.0.0"
+
+unicode-match-property-value-ecmascript@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.0.0.tgz"
+ integrity sha512-7Yhkc0Ye+t4PNYzOGKedDhXbYIBe1XEQYQxOPyhcXNMJ0WCABqqj6ckydd6pWRZTHV4GuCPKdBAUiMc60tsKVw==
+
+unicode-property-aliases-ecmascript@^2.0.0:
+ version "2.1.0"
+ resolved "https://registry.npmmirror.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz"
+ integrity sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==
+
+unified@9.2.0, unified@^9.0.0:
+ version "9.2.0"
+ resolved "https://registry.npmmirror.com/unified/-/unified-9.2.0.tgz"
+ integrity sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==
+ dependencies:
+ bail "^1.0.0"
+ extend "^3.0.0"
+ is-buffer "^2.0.0"
+ is-plain-obj "^2.0.0"
+ trough "^1.0.0"
+ vfile "^4.0.0"
+
+unified@^8.4.2:
+ version "8.4.2"
+ resolved "https://registry.npmmirror.com/unified/-/unified-8.4.2.tgz"
+ integrity sha512-JCrmN13jI4+h9UAyKEoGcDZV+i1E7BLFuG7OsaDvTXI5P0qhHX+vZO/kOhz9jn8HGENDKbwSeB0nVOg4gVStGA==
+ dependencies:
+ bail "^1.0.0"
+ extend "^3.0.0"
+ is-plain-obj "^2.0.0"
+ trough "^1.0.0"
+ vfile "^4.0.0"
+
+unique-string@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/unique-string/-/unique-string-2.0.0.tgz"
+ integrity sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==
+ dependencies:
+ crypto-random-string "^2.0.0"
+
+unist-builder@2.0.3, unist-builder@^2.0.0:
+ version "2.0.3"
+ resolved "https://registry.npmmirror.com/unist-builder/-/unist-builder-2.0.3.tgz"
+ integrity sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw==
+
+unist-util-find-after@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmmirror.com/unist-util-find-after/-/unist-util-find-after-3.0.0.tgz"
+ integrity sha512-ojlBqfsBftYXExNu3+hHLfJQ/X1jYY/9vdm4yZWjIbf0VuWF6CRufci1ZyoD/wV2TYMKxXUoNuoqwy+CkgzAiQ==
+ dependencies:
+ unist-util-is "^4.0.0"
+
+unist-util-generated@^1.0.0:
+ version "1.1.6"
+ resolved "https://registry.npmmirror.com/unist-util-generated/-/unist-util-generated-1.1.6.tgz"
+ integrity sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg==
+
+unist-util-is@^4.0.0:
+ version "4.1.0"
+ resolved "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-4.1.0.tgz"
+ integrity sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==
+
+unist-util-position@^3.0.0:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/unist-util-position/-/unist-util-position-3.1.0.tgz"
+ integrity sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA==
+
+unist-util-remove-position@^2.0.0:
+ version "2.0.1"
+ resolved "https://registry.npmmirror.com/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz"
+ integrity sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA==
+ dependencies:
+ unist-util-visit "^2.0.0"
+
+unist-util-remove@2.0.0, unist-util-remove@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/unist-util-remove/-/unist-util-remove-2.0.0.tgz"
+ integrity sha512-HwwWyNHKkeg/eXRnE11IpzY8JT55JNM1YCwwU9YNCnfzk6s8GhPXrVBBZWiwLeATJbI7euvoGSzcy9M29UeW3g==
+ dependencies:
+ unist-util-is "^4.0.0"
+
+unist-util-stringify-position@^2.0.0:
+ version "2.0.3"
+ resolved "https://registry.npmmirror.com/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz"
+ integrity sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==
+ dependencies:
+ "@types/unist" "^2.0.2"
+
+unist-util-visit-parents@^3.0.0:
+ version "3.1.1"
+ resolved "https://registry.npmmirror.com/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz"
+ integrity sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==
+ dependencies:
+ "@types/unist" "^2.0.0"
+ unist-util-is "^4.0.0"
+
+unist-util-visit@2.0.3, unist-util-visit@^2.0.0, unist-util-visit@^2.0.1, unist-util-visit@^2.0.2, unist-util-visit@^2.0.3:
+ version "2.0.3"
+ resolved "https://registry.npmmirror.com/unist-util-visit/-/unist-util-visit-2.0.3.tgz"
+ integrity sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==
+ dependencies:
+ "@types/unist" "^2.0.0"
+ unist-util-is "^4.0.0"
+ unist-util-visit-parents "^3.0.0"
+
+universalify@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/universalify/-/universalify-2.0.0.tgz"
+ integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==
+
+unpipe@1.0.0, unpipe@~1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmmirror.com/unpipe/-/unpipe-1.0.0.tgz"
+ integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==
+
+unquote@~1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmmirror.com/unquote/-/unquote-1.1.1.tgz"
+ integrity sha512-vRCqFv6UhXpWxZPyGDh/F3ZpNv8/qo7w6iufLpQg9aKnQ71qM4B5KiI7Mia9COcjEhrO9LueHpMYjYzsWH3OIg==
+
+update-browserslist-db@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz"
+ integrity sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==
+ dependencies:
+ escalade "^3.1.2"
+ picocolors "^1.0.1"
+
+update-notifier@^5.1.0:
+ version "5.1.0"
+ resolved "https://registry.npmmirror.com/update-notifier/-/update-notifier-5.1.0.tgz"
+ integrity sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==
+ dependencies:
+ boxen "^5.0.0"
+ chalk "^4.1.0"
+ configstore "^5.0.1"
+ has-yarn "^2.1.0"
+ import-lazy "^2.1.0"
+ is-ci "^2.0.0"
+ is-installed-globally "^0.4.0"
+ is-npm "^5.0.0"
+ is-yarn-global "^0.3.0"
+ latest-version "^5.1.0"
+ pupa "^2.1.1"
+ semver "^7.3.4"
+ semver-diff "^3.1.1"
+ xdg-basedir "^4.0.0"
+
+uri-js@^4.2.2:
+ version "4.4.1"
+ resolved "https://registry.npmmirror.com/uri-js/-/uri-js-4.4.1.tgz"
+ integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==
+ dependencies:
+ punycode "^2.1.0"
+
+url-loader@^4.1.1:
+ version "4.1.1"
+ resolved "https://registry.npmmirror.com/url-loader/-/url-loader-4.1.1.tgz"
+ integrity sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==
+ dependencies:
+ loader-utils "^2.0.0"
+ mime-types "^2.1.27"
+ schema-utils "^3.0.0"
+
+use-composed-ref@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.npmmirror.com/use-composed-ref/-/use-composed-ref-1.3.0.tgz"
+ integrity sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==
+
+use-isomorphic-layout-effect@^1.1.1:
+ version "1.1.2"
+ resolved "https://registry.npmmirror.com/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz"
+ integrity sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==
+
+use-latest@^1.2.1:
+ version "1.2.1"
+ resolved "https://registry.npmmirror.com/use-latest/-/use-latest-1.2.1.tgz"
+ integrity sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==
+ dependencies:
+ use-isomorphic-layout-effect "^1.1.1"
+
+util-deprecate@^1.0.1, util-deprecate@^1.0.2, util-deprecate@~1.0.1:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/util-deprecate/-/util-deprecate-1.0.2.tgz"
+ integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==
+
+util.promisify@~1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/util.promisify/-/util.promisify-1.0.1.tgz"
+ integrity sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA==
+ dependencies:
+ define-properties "^1.1.3"
+ es-abstract "^1.17.2"
+ has-symbols "^1.0.1"
+ object.getownpropertydescriptors "^2.1.0"
+
+utila@~0.4:
+ version "0.4.0"
+ resolved "https://registry.npmmirror.com/utila/-/utila-0.4.0.tgz"
+ integrity sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==
+
+utility-types@^3.10.0:
+ version "3.10.0"
+ resolved "https://registry.npmmirror.com/utility-types/-/utility-types-3.10.0.tgz"
+ integrity sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==
+
+utils-merge@1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/utils-merge/-/utils-merge-1.0.1.tgz"
+ integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==
+
+uuid@^8.3.2:
+ version "8.3.2"
+ resolved "https://registry.npmmirror.com/uuid/-/uuid-8.3.2.tgz"
+ integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==
+
+value-equal@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmmirror.com/value-equal/-/value-equal-1.0.1.tgz"
+ integrity sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==
+
+vary@~1.1.2:
+ version "1.1.2"
+ resolved "https://registry.npmmirror.com/vary/-/vary-1.1.2.tgz"
+ integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==
+
+vfile-location@^3.0.0, vfile-location@^3.2.0:
+ version "3.2.0"
+ resolved "https://registry.npmmirror.com/vfile-location/-/vfile-location-3.2.0.tgz"
+ integrity sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==
+
+vfile-message@^2.0.0:
+ version "2.0.4"
+ resolved "https://registry.npmmirror.com/vfile-message/-/vfile-message-2.0.4.tgz"
+ integrity sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==
+ dependencies:
+ "@types/unist" "^2.0.0"
+ unist-util-stringify-position "^2.0.0"
+
+vfile@^4.0.0:
+ version "4.2.1"
+ resolved "https://registry.npmmirror.com/vfile/-/vfile-4.2.1.tgz"
+ integrity sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==
+ dependencies:
+ "@types/unist" "^2.0.0"
+ is-buffer "^2.0.0"
+ unist-util-stringify-position "^2.0.0"
+ vfile-message "^2.0.0"
+
+wait-on@^6.0.0:
+ version "6.0.1"
+ resolved "https://registry.npmmirror.com/wait-on/-/wait-on-6.0.1.tgz"
+ integrity sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw==
+ dependencies:
+ axios "^0.25.0"
+ joi "^17.6.0"
+ lodash "^4.17.21"
+ minimist "^1.2.5"
+ rxjs "^7.5.4"
+
+watchpack@^2.4.1:
+ version "2.4.1"
+ resolved "https://registry.npmjs.org/watchpack/-/watchpack-2.4.1.tgz"
+ integrity sha512-8wrBCMtVhqcXP2Sup1ctSkga6uc2Bx0IIvKyT7yTFier5AXHooSI+QyQQAtTb7+E0IUCCKyTFmXqdqgum2XWGg==
+ dependencies:
+ glob-to-regexp "^0.4.1"
+ graceful-fs "^4.1.2"
+
+wbuf@^1.1.0, wbuf@^1.7.3:
+ version "1.7.3"
+ resolved "https://registry.npmmirror.com/wbuf/-/wbuf-1.7.3.tgz"
+ integrity sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==
+ dependencies:
+ minimalistic-assert "^1.0.0"
+
+web-namespaces@^1.0.0, web-namespaces@^1.1.2:
+ version "1.1.4"
+ resolved "https://registry.npmmirror.com/web-namespaces/-/web-namespaces-1.1.4.tgz"
+ integrity sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw==
+
+webidl-conversions@^3.0.0:
+ version "3.0.1"
+ resolved "https://registry.npmmirror.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz"
+ integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==
+
+webpack-bundle-analyzer@^4.4.2:
+ version "4.7.0"
+ resolved "https://registry.npmmirror.com/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.7.0.tgz"
+ integrity sha512-j9b8ynpJS4K+zfO5GGwsAcQX4ZHpWV+yRiHDiL+bE0XHJ8NiPYLTNVQdlFYWxtpg9lfAQNlwJg16J9AJtFSXRg==
+ dependencies:
+ acorn "^8.0.4"
+ acorn-walk "^8.0.0"
+ chalk "^4.1.0"
+ commander "^7.2.0"
+ gzip-size "^6.0.0"
+ lodash "^4.17.20"
+ opener "^1.5.2"
+ sirv "^1.0.7"
+ ws "^7.3.1"
+
+webpack-dev-middleware@^5.3.1:
+ version "5.3.3"
+ resolved "https://registry.npmmirror.com/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz"
+ integrity sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==
+ dependencies:
+ colorette "^2.0.10"
+ memfs "^3.4.3"
+ mime-types "^2.1.31"
+ range-parser "^1.2.1"
+ schema-utils "^4.0.0"
+
+webpack-dev-server@^4.4.0:
+ version "4.11.1"
+ resolved "https://registry.npmmirror.com/webpack-dev-server/-/webpack-dev-server-4.11.1.tgz"
+ integrity sha512-lILVz9tAUy1zGFwieuaQtYiadImb5M3d+H+L1zDYalYoDl0cksAB1UNyuE5MMWJrG6zR1tXkCP2fitl7yoUJiw==
+ dependencies:
+ "@types/bonjour" "^3.5.9"
+ "@types/connect-history-api-fallback" "^1.3.5"
+ "@types/express" "^4.17.13"
+ "@types/serve-index" "^1.9.1"
+ "@types/serve-static" "^1.13.10"
+ "@types/sockjs" "^0.3.33"
+ "@types/ws" "^8.5.1"
+ ansi-html-community "^0.0.8"
+ bonjour-service "^1.0.11"
+ chokidar "^3.5.3"
+ colorette "^2.0.10"
+ compression "^1.7.4"
+ connect-history-api-fallback "^2.0.0"
+ default-gateway "^6.0.3"
+ express "^4.17.3"
+ graceful-fs "^4.2.6"
+ html-entities "^2.3.2"
+ http-proxy-middleware "^2.0.3"
+ ipaddr.js "^2.0.1"
+ open "^8.0.9"
+ p-retry "^4.5.0"
+ rimraf "^3.0.2"
+ schema-utils "^4.0.0"
+ selfsigned "^2.1.1"
+ serve-index "^1.9.1"
+ sockjs "^0.3.24"
+ spdy "^4.0.2"
+ webpack-dev-middleware "^5.3.1"
+ ws "^8.4.2"
+
+webpack-merge@^5.8.0:
+ version "5.8.0"
+ resolved "https://registry.npmmirror.com/webpack-merge/-/webpack-merge-5.8.0.tgz"
+ integrity sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==
+ dependencies:
+ clone-deep "^4.0.1"
+ wildcard "^2.0.0"
+
+webpack-sources@^1.1.0:
+ version "1.4.3"
+ resolved "https://registry.npmmirror.com/webpack-sources/-/webpack-sources-1.4.3.tgz"
+ integrity sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==
+ dependencies:
+ source-list-map "^2.0.0"
+ source-map "~0.6.1"
+
+webpack-sources@^3.2.2, webpack-sources@^3.2.3:
+ version "3.2.3"
+ resolved "https://registry.npmmirror.com/webpack-sources/-/webpack-sources-3.2.3.tgz"
+ integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==
+
+webpack@^5.61.0, webpack@^5.73.0:
+ version "5.93.0"
+ resolved "https://registry.npmjs.org/webpack/-/webpack-5.93.0.tgz"
+ integrity sha512-Y0m5oEY1LRuwly578VqluorkXbvXKh7U3rLoQCEO04M97ScRr44afGVkI0FQFsXzysk5OgFAxjZAb9rsGQVihA==
+ dependencies:
+ "@types/eslint-scope" "^3.7.3"
+ "@types/estree" "^1.0.5"
+ "@webassemblyjs/ast" "^1.12.1"
+ "@webassemblyjs/wasm-edit" "^1.12.1"
+ "@webassemblyjs/wasm-parser" "^1.12.1"
+ acorn "^8.7.1"
+ acorn-import-attributes "^1.9.5"
+ browserslist "^4.21.10"
+ chrome-trace-event "^1.0.2"
+ enhanced-resolve "^5.17.0"
+ es-module-lexer "^1.2.1"
+ eslint-scope "5.1.1"
+ events "^3.2.0"
+ glob-to-regexp "^0.4.1"
+ graceful-fs "^4.2.11"
+ json-parse-even-better-errors "^2.3.1"
+ loader-runner "^4.2.0"
+ mime-types "^2.1.27"
+ neo-async "^2.6.2"
+ schema-utils "^3.2.0"
+ tapable "^2.1.1"
+ terser-webpack-plugin "^5.3.10"
+ watchpack "^2.4.1"
+ webpack-sources "^3.2.3"
+
+webpackbar@^5.0.0-3:
+ version "5.0.2"
+ resolved "https://registry.npmmirror.com/webpackbar/-/webpackbar-5.0.2.tgz"
+ integrity sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==
+ dependencies:
+ chalk "^4.1.0"
+ consola "^2.15.3"
+ pretty-time "^1.1.0"
+ std-env "^3.0.1"
+
+websocket-driver@>=0.5.1, websocket-driver@^0.7.4:
+ version "0.7.4"
+ resolved "https://registry.npmmirror.com/websocket-driver/-/websocket-driver-0.7.4.tgz"
+ integrity sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==
+ dependencies:
+ http-parser-js ">=0.5.1"
+ safe-buffer ">=5.1.0"
+ websocket-extensions ">=0.1.1"
+
+websocket-extensions@>=0.1.1:
+ version "0.1.4"
+ resolved "https://registry.npmmirror.com/websocket-extensions/-/websocket-extensions-0.1.4.tgz"
+ integrity sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==
+
+whatwg-url@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.npmmirror.com/whatwg-url/-/whatwg-url-5.0.0.tgz"
+ integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==
+ dependencies:
+ tr46 "~0.0.3"
+ webidl-conversions "^3.0.0"
+
+which-boxed-primitive@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz"
+ integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==
+ dependencies:
+ is-bigint "^1.0.1"
+ is-boolean-object "^1.1.0"
+ is-number-object "^1.0.4"
+ is-string "^1.0.5"
+ is-symbol "^1.0.3"
+
+which@^1.3.1:
+ version "1.3.1"
+ resolved "https://registry.npmmirror.com/which/-/which-1.3.1.tgz"
+ integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==
+ dependencies:
+ isexe "^2.0.0"
+
+which@^2.0.1:
+ version "2.0.2"
+ resolved "https://registry.npmmirror.com/which/-/which-2.0.2.tgz"
+ integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==
+ dependencies:
+ isexe "^2.0.0"
+
+widest-line@^3.1.0:
+ version "3.1.0"
+ resolved "https://registry.npmmirror.com/widest-line/-/widest-line-3.1.0.tgz"
+ integrity sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==
+ dependencies:
+ string-width "^4.0.0"
+
+wildcard@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmmirror.com/wildcard/-/wildcard-2.0.0.tgz"
+ integrity sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==
+
+wrap-ansi@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz"
+ integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==
+ dependencies:
+ ansi-styles "^4.0.0"
+ string-width "^4.1.0"
+ strip-ansi "^6.0.0"
+
+wrappy@1:
+ version "1.0.2"
+ resolved "https://registry.npmmirror.com/wrappy/-/wrappy-1.0.2.tgz"
+ integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==
+
+write-file-atomic@^3.0.0:
+ version "3.0.3"
+ resolved "https://registry.npmmirror.com/write-file-atomic/-/write-file-atomic-3.0.3.tgz"
+ integrity sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==
+ dependencies:
+ imurmurhash "^0.1.4"
+ is-typedarray "^1.0.0"
+ signal-exit "^3.0.2"
+ typedarray-to-buffer "^3.1.5"
+
+ws@^7.3.1:
+ version "7.5.9"
+ resolved "https://registry.npmmirror.com/ws/-/ws-7.5.9.tgz"
+ integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==
+
+ws@^8.4.2:
+ version "8.10.0"
+ resolved "https://registry.npmmirror.com/ws/-/ws-8.10.0.tgz"
+ integrity sha512-+s49uSmZpvtAsd2h37vIPy1RBusaLawVe8of+GyEPsaJTCMpj/2v8NpeK1SHXjBlQ95lQTmQofOJnFiLoaN3yw==
+
+xdg-basedir@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz"
+ integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==
+
+xml-js@^1.6.11:
+ version "1.6.11"
+ resolved "https://registry.npmmirror.com/xml-js/-/xml-js-1.6.11.tgz"
+ integrity sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==
+ dependencies:
+ sax "^1.2.4"
+
+xtend@^4.0.0, xtend@^4.0.1:
+ version "4.0.2"
+ resolved "https://registry.npmmirror.com/xtend/-/xtend-4.0.2.tgz"
+ integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==
+
+yallist@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmmirror.com/yallist/-/yallist-4.0.0.tgz"
+ integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==
+
+yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2:
+ version "1.10.2"
+ resolved "https://registry.npmmirror.com/yaml/-/yaml-1.10.2.tgz"
+ integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==
+
+yocto-queue@^0.1.0:
+ version "0.1.0"
+ resolved "https://registry.npmmirror.com/yocto-queue/-/yocto-queue-0.1.0.tgz"
+ integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==
+
+zwitch@^1.0.0:
+ version "1.0.5"
+ resolved "https://registry.npmmirror.com/zwitch/-/zwitch-1.0.5.tgz"
+ integrity sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==
diff --git a/frontend/README.md b/frontend/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d1139e4e4695bae424487fd5455def7c443d2cec
--- /dev/null
+++ b/frontend/README.md
@@ -0,0 +1,85 @@
+# Frontend Application
+
+This frontend project aims to enhance the user experience of GPT-Researcher, providing an intuitive and efficient interface for automated research. It offers two deployment options to suit different needs and environments.
+
+## Option 1: Static Frontend (FastAPI)
+
+A lightweight solution using FastAPI to serve static files.
+
+#### Prerequisites
+- Python 3.11+
+- pip
+
+#### Setup and Running
+
+1. Install required packages:
+ ```
+ pip install -r requirements.txt
+ ```
+
+2. Start the server:
+ ```
+ python -m uvicorn main:app
+ ```
+
+3. Access at `http://localhost:8000`
+
+#### Demo
+https://github.com/assafelovic/gpt-researcher/assets/13554167/dd6cf08f-b31e-40c6-9907-1915f52a7110
+
+## Option 2: NextJS Frontend
+
+A more robust solution with enhanced features and performance.
+
+#### Prerequisites
+- Node.js (v18.17.0 recommended)
+- npm
+
+#### Setup and Running
+
+1. Navigate to NextJS directory:
+ ```
+ cd nextjs
+ ```
+
+2. Set up Node.js:
+ ```
+ nvm install 18.17.0
+ nvm use v18.17.0
+ ```
+
+3. Install dependencies:
+ ```
+ npm install --legacy-peer-deps
+ ```
+
+4. Start development server:
+ ```
+ npm run dev
+ ```
+
+5. Access at `http://localhost:3000`
+
+Note: Requires backend server on `localhost:8000` as detailed in option 1.
+
+#### Demo
+https://github.com/user-attachments/assets/092e9e71-7e27-475d-8c4f-9dddd28934a3
+
+## Choosing an Option
+
+- Static Frontend: Quick setup, lightweight deployment.
+- NextJS Frontend: Feature-rich, scalable, better performance and SEO.
+
+For production, NextJS is recommended.
+
+## Frontend Features
+
+Our frontend enhances GPT-Researcher by providing:
+
+1. Intuitive Research Interface: Streamlined input for research queries.
+2. Real-time Progress Tracking: Visual feedback on ongoing research tasks.
+3. Interactive Results Display: Easy-to-navigate presentation of findings.
+4. Customizable Settings: Adjust research parameters to suit specific needs.
+5. Responsive Design: Optimal experience across various devices.
+
+These features aim to make the research process more efficient and user-friendly, complementing GPT-Researcher's powerful agent capabilities.
\ No newline at end of file
diff --git a/frontend/index.html b/frontend/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..e022b90b90dec40c75d3a8c2e3d81346652ef724
--- /dev/null
+++ b/frontend/index.html
@@ -0,0 +1,204 @@
+
+
+
+
+ GPT Researcher
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Say Hello to GPT Researcher , your AI mate for rapid insights and comprehensive research.
+ GPT Researcher takes care of everything from accurate source gathering and organization of research results to generation of customized reports with citations.
+
+
Get Started
+
+
+
+
+
+
+
+
+
Agent Output
+
An agent tailored specifically to your task
+ will be generated to provide the most precise and relevant research results.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/.dockerignore b/frontend/nextjs/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..ee63e9e2001f615f95324cbd4c29b5174a3ddf45
--- /dev/null
+++ b/frontend/nextjs/.dockerignore
@@ -0,0 +1,57 @@
+.git
+
+# Ignore env containing secrets
+.env
+.venv
+.envrc
+
+# Ignore Virtual Env
+env/
+venv/
+.venv/
+
+# Other Environments
+ENV/
+env.bak/
+venv.bak/
+
+# Ignore generated outputs
+outputs/
+
+# Ignore my local docs
+my-docs/
+
+# Ignore pycache
+**/__pycache__/
+
+# Ignore mypy cache
+.mypy_cache/
+
+# Node modules
+node_modules
+
+# Ignore IDE config
+.idea
+
+# macOS specific files
+.DS_Store
+
+# Docusaurus build artifacts
+.docusaurus
+
+# Build directories
+build
+docs/build
+
+# Language graph data
+.langgraph-data/
+
+# Next.js build artifacts
+.next/
+
+# Package lock file
+package-lock.json
+
+# Docker-specific exclusions (if any)
+Dockerfile
+docker-compose.yml
diff --git a/frontend/nextjs/.eslintrc.json b/frontend/nextjs/.eslintrc.json
new file mode 100644
index 0000000000000000000000000000000000000000..ea782d70ab03aa5bb6423f4b092faf80d407eede
--- /dev/null
+++ b/frontend/nextjs/.eslintrc.json
@@ -0,0 +1,3 @@
+{
+ "extends": "next/core-web-vitals"
+}
diff --git a/frontend/nextjs/.example.env b/frontend/nextjs/.example.env
new file mode 100644
index 0000000000000000000000000000000000000000..478a5e51bb289c2a0119f8edb95cae5049ba9e0d
--- /dev/null
+++ b/frontend/nextjs/.example.env
@@ -0,0 +1,3 @@
+TOGETHER_API_KEY=
+BING_API_KEY=
+HELICONE_API_KEY=
diff --git a/frontend/nextjs/.gitignore b/frontend/nextjs/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..4c4ec827361a49db9065e667d608c66a06b65600
--- /dev/null
+++ b/frontend/nextjs/.gitignore
@@ -0,0 +1,38 @@
+# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+.env
+package-lock.json
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.js
+.yarn/install-state.gz
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# local env files
+.env*.local
+
+# vercel
+.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
diff --git a/frontend/nextjs/.prettierrc b/frontend/nextjs/.prettierrc
new file mode 100644
index 0000000000000000000000000000000000000000..a64e3591efa13f49178391bee4daa9d90b8da69d
--- /dev/null
+++ b/frontend/nextjs/.prettierrc
@@ -0,0 +1 @@
+{ "plugins": ["prettier-plugin-tailwindcss"] }
diff --git a/frontend/nextjs/Dockerfile b/frontend/nextjs/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..83be12dc498105ae4715b4fadf77bc2eaa6b5fea
--- /dev/null
+++ b/frontend/nextjs/Dockerfile
@@ -0,0 +1,11 @@
+FROM node:18.17.0-alpine as builder
+WORKDIR /app
+COPY ./package.json ./
+RUN npm install --legacy-peer-deps
+COPY . .
+RUN npm run build
+
+FROM nginx
+EXPOSE 3000
+COPY ./nginx/default.conf /etc/nginx/conf.d/default.conf
+COPY --from=builder /app/build /usr/share/nginx/html
diff --git a/frontend/nextjs/Dockerfile.dev b/frontend/nextjs/Dockerfile.dev
new file mode 100644
index 0000000000000000000000000000000000000000..77e4b7823bd9a1ce2688253737138b248943f146
--- /dev/null
+++ b/frontend/nextjs/Dockerfile.dev
@@ -0,0 +1,6 @@
+FROM node:18.17.0-alpine
+WORKDIR /app
+COPY ./package.json ./
+RUN npm install --legacy-peer-deps
+COPY . .
+CMD ["npm", "run", "dev"]
\ No newline at end of file
diff --git a/frontend/nextjs/README.md b/frontend/nextjs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bac8f764b486dad0ab9f1c03e006cab51487cf1f
--- /dev/null
+++ b/frontend/nextjs/README.md
@@ -0,0 +1,4 @@
+## Cloning & running
+
+1. Create a `.env` (use the `.example.env` for reference) and replace the API keys
+2. Run `npm install --legacy-peer-deps` and `npm run dev` to install dependencies and run locally
diff --git a/frontend/nextjs/actions/apiActions.ts b/frontend/nextjs/actions/apiActions.ts
new file mode 100644
index 0000000000000000000000000000000000000000..45d7d8b8a3855a27af4431366763ea9d8719e211
--- /dev/null
+++ b/frontend/nextjs/actions/apiActions.ts
@@ -0,0 +1,108 @@
+import { createParser, ParsedEvent, ReconnectInterval } from "eventsource-parser";
+
+export async function handleSourcesAndAnswer(question: string) {
+ let sourcesResponse = await fetch("/api/getSources", {
+ method: "POST",
+ body: JSON.stringify({ question }),
+ });
+ let sources = await sourcesResponse.json();
+
+ const response = await fetch("/api/getAnswer", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({ question, sources }),
+ });
+
+ if (!response.ok) {
+ throw new Error(response.statusText);
+ }
+
+ if (response.status === 202) {
+ const fullAnswer = await response.text();
+ return fullAnswer;
+ }
+
+ // This data is a ReadableStream
+ const data = response.body;
+ if (!data) {
+ return;
+ }
+
+ const onParse = (event: ParsedEvent | ReconnectInterval) => {
+ if (event.type === "event") {
+ const data = event.data;
+ try {
+ const text = JSON.parse(data).text ?? "";
+ return text;
+ } catch (e) {
+ console.error(e);
+ }
+ }
+ };
+
+ // https://web.dev/streams/#the-getreader-and-read-methods
+ const reader = data.getReader();
+ const decoder = new TextDecoder();
+ const parser = createParser(onParse);
+ let done = false;
+ while (!done) {
+ const { value, done: doneReading } = await reader.read();
+ done = doneReading;
+ const chunkValue = decoder.decode(value);
+ parser.feed(chunkValue);
+ }
+}
+
+export async function handleSimilarQuestions(question: string) {
+ let res = await fetch("/api/getSimilarQuestions", {
+ method: "POST",
+ body: JSON.stringify({ question }),
+ });
+ let questions = await res.json();
+ return questions;
+}
+
+export async function handleLanggraphAnswer(question: string) {
+ const response = await fetch("/api/generateLanggraph", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({ question }),
+ });
+
+ if (!response.ok) {
+ throw new Error(response.statusText);
+ }
+
+ // This data is a ReadableStream
+ const data = response.body;
+ if (!data) {
+ return;
+ }
+
+ const onParse = (event: ParsedEvent | ReconnectInterval) => {
+ if (event.type === "event") {
+ const data = event.data;
+ try {
+ const text = JSON.parse(data).text ?? "";
+ return text;
+ } catch (e) {
+ console.error(e);
+ }
+ }
+ };
+
+ const reader = data.getReader();
+ const decoder = new TextDecoder();
+ const parser = createParser(onParse);
+ let done = false;
+ while (!done) {
+ const { value, done: doneReading } = await reader.read();
+ done = doneReading;
+ const chunkValue = decoder.decode(value);
+ parser.feed(chunkValue);
+ }
+}
\ No newline at end of file
diff --git a/frontend/nextjs/app/globals.css b/frontend/nextjs/app/globals.css
new file mode 100644
index 0000000000000000000000000000000000000000..e1e860487d9531c6277e8d486ef4c5089cd636cc
--- /dev/null
+++ b/frontend/nextjs/app/globals.css
@@ -0,0 +1,122 @@
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
+
+@keyframes gradientBG {
+ 0% {background-position: 0% 50%;}
+ 50% {background-position: 100% 50%;}
+ 100% {background-position: 0% 50%;}
+}
+
+html {
+ scroll-behavior: smooth;
+}
+
+textarea {
+ max-height: 300px; /* Set an appropriate max height */
+ overflow-y: auto; /* Enable internal scrolling */
+ /* transition: height 0.2s ease-in-out; */
+}
+
+.log-message {
+ word-wrap: break-word; /* For handling long URLs or text */
+ overflow-wrap: break-word; /* For handling overflow in modern browsers */
+ overflow-x: hidden; /* Hide horizontal overflow */
+ word-break: break-word; /* Break long words if needed */
+}
+
+body {
+ font-family: 'Montserrat', sans-serif;
+ line-height: 1.6;
+ background-size: 200% 200%;
+ background-image: linear-gradient(170deg, #151A2D, #036f73, #151A2D);
+ /*animation: gradientBG 10s ease infinite;*/
+}
+
+.landing {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ height: 30vh;
+ text-align: center;
+ color: white;
+}
+
+.landing h1 {
+ font-size: 3.5rem;
+ font-weight: 700;
+ margin-bottom: 2rem;
+}
+
+@layer utilities {
+ .text-balance {
+ text-wrap: balance;
+ }
+ /* Hide scrollbar for Chrome, Safari and Opera */
+ .no-scrollbar::-webkit-scrollbar {
+ display: none;
+ }
+ /* Hide scrollbar for IE, Edge and Firefox */
+ .no-scrollbar {
+ -ms-overflow-style: none; /* IE and Edge */
+ scrollbar-width: none; /* Firefox */
+ }
+ .loader {
+ text-align: left;
+ display: flex;
+ gap: 3px;
+ }
+
+ .loader span {
+ display: inline-block;
+ vertical-align: middle;
+ width: 7px;
+ height: 7px;
+ /* background: #4b4b4b; */
+ background: white;
+ border-radius: 50%;
+ animation: loader 0.6s infinite alternate;
+ }
+
+ .loader span:nth-of-type(2) {
+ animation-delay: 0.2s;
+ }
+
+ .loader span:nth-of-type(3) {
+ animation-delay: 0.6s;
+ }
+
+ @keyframes loader {
+ 0% {
+ opacity: 1;
+ transform: scale(0.6);
+ }
+
+ 100% {
+ opacity: 0.3;
+ transform: scale(1);
+ }
+ }
+}
+
+body {
+ margin: 0px !important;
+}
+
+/* Add these styles for the scrollbar */
+.scrollbar-thin {
+ scrollbar-width: thin;
+}
+
+.scrollbar-thumb-gray-600::-webkit-scrollbar-thumb {
+ background-color: #4B5563;
+ border-radius: 6px;
+}
+
+.scrollbar-track-gray-300::-webkit-scrollbar-track {
+ background-color: #D1D5DB;
+}
+
+.scrollbar-thin::-webkit-scrollbar {
+ width: 6px;
+}
diff --git a/frontend/nextjs/app/layout.tsx b/frontend/nextjs/app/layout.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..bf78a9aec460fb473e69b2de6dd91fb3a435730d
--- /dev/null
+++ b/frontend/nextjs/app/layout.tsx
@@ -0,0 +1,56 @@
+import type { Metadata } from "next";
+import { Lexend } from "next/font/google";
+import PlausibleProvider from "next-plausible";
+import "./globals.css";
+
+const inter = Lexend({ subsets: ["latin"] });
+
+let title = "GPT Researcher";
+let description =
+ "LLM based autonomous agent that conducts local and web research on any topic and generates a comprehensive report with citations.";
+let url = "https://github.com/assafelovic/gpt-researcher";
+let ogimage = "/favicon.ico";
+let sitename = "GPT Researcher";
+
+export const metadata: Metadata = {
+ metadataBase: new URL(url),
+ title,
+ description,
+ icons: {
+ icon: "/favicon.ico",
+ },
+ openGraph: {
+ images: [ogimage],
+ title,
+ description,
+ url: url,
+ siteName: sitename,
+ locale: "en_US",
+ type: "website",
+ },
+ twitter: {
+ card: "summary_large_image",
+ images: [ogimage],
+ title,
+ description,
+ },
+};
+
+export default function RootLayout({
+ children,
+}: Readonly<{
+ children: React.ReactNode;
+}>) {
+ return (
+
+
+
+
+
+ {children}
+
+
+ );
+}
diff --git a/frontend/nextjs/app/page.tsx b/frontend/nextjs/app/page.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..23dee8f6638db82a4f60cb134e28b745e62b3b34
--- /dev/null
+++ b/frontend/nextjs/app/page.tsx
@@ -0,0 +1,317 @@
+"use client";
+
+import { useRef, useState, useEffect, useCallback } from "react";
+import { useWebSocket } from '@/hooks/useWebSocket';
+import { startLanggraphResearch } from '../components/Langgraph/Langgraph';
+import findDifferences from '../helpers/findDifferences';
+import { Data, ChatBoxSettings, QuestionData } from '../types/data';
+import { preprocessOrderedData } from '../utils/dataProcessing';
+import { ResearchResults } from '../components/ResearchResults';
+
+import Header from "@/components/Header";
+import Hero from "@/components/Hero";
+import Footer from "@/components/Footer";
+import InputArea from "@/components/ResearchBlocks/elements/InputArea";
+import HumanFeedback from "@/components/HumanFeedback";
+import LoadingDots from "@/components/LoadingDots";
+
+export default function Home() {
+ const [promptValue, setPromptValue] = useState("");
+ const [showResult, setShowResult] = useState(false);
+ const [answer, setAnswer] = useState("");
+ const [loading, setLoading] = useState(false);
+ const [chatBoxSettings, setChatBoxSettings] = useState({
+ report_source: 'web',
+ report_type: 'research_report',
+ tone: 'Objective'
+ });
+ const [question, setQuestion] = useState("");
+ const [orderedData, setOrderedData] = useState([]);
+ const [showHumanFeedback, setShowHumanFeedback] = useState(false);
+ const [questionForHuman, setQuestionForHuman] = useState(false);
+ const [allLogs, setAllLogs] = useState([]);
+ const chatContainerRef = useRef(null);
+ const [isStopped, setIsStopped] = useState(false);
+ const [showScrollButton, setShowScrollButton] = useState(false);
+ const mainContentRef = useRef(null);
+
+ const { socket, initializeWebSocket } = useWebSocket(
+ setOrderedData,
+ setAnswer,
+ setLoading,
+ setShowHumanFeedback,
+ setQuestionForHuman
+ );
+
+ const handleFeedbackSubmit = (feedback: string | null) => {
+ if (socket) {
+ socket.send(JSON.stringify({ type: 'human_feedback', content: feedback }));
+ }
+ setShowHumanFeedback(false);
+ };
+
+ const handleChat = async (message: string) => {
+ if (socket) {
+ setShowResult(true);
+ setQuestion(message);
+ setLoading(true);
+ setPromptValue("");
+ setAnswer("");
+
+ const questionData: QuestionData = { type: 'question', content: message };
+ setOrderedData(prevOrder => [...prevOrder, questionData]);
+
+ socket.send(`chat${JSON.stringify({ message })}`);
+ }
+ };
+
+ const handleDisplayResult = async (newQuestion: string) => {
+ setShowResult(true);
+ setLoading(true);
+ setQuestion(newQuestion);
+ setPromptValue("");
+ setAnswer("");
+ setOrderedData((prevOrder) => [...prevOrder, { type: 'question', content: newQuestion }]);
+
+ const storedConfig = localStorage.getItem('apiVariables');
+ const apiVariables = storedConfig ? JSON.parse(storedConfig) : {};
+ const langgraphHostUrl = apiVariables.LANGGRAPH_HOST_URL;
+
+ if (chatBoxSettings.report_type === 'multi_agents' && langgraphHostUrl) {
+ let { streamResponse, host, thread_id } = await startLanggraphResearch(newQuestion, chatBoxSettings.report_source, langgraphHostUrl);
+ const langsmithGuiLink = `https://smith.langchain.com/studio/thread/${thread_id}?baseUrl=${host}`;
+ setOrderedData((prevOrder) => [...prevOrder, { type: 'langgraphButton', link: langsmithGuiLink }]);
+
+ let previousChunk = null;
+ for await (const chunk of streamResponse) {
+ if (chunk.data.report != null && chunk.data.report != "Full report content here") {
+ setOrderedData((prevOrder) => [...prevOrder, { ...chunk.data, output: chunk.data.report, type: 'report' }]);
+ setLoading(false);
+ } else if (previousChunk) {
+ const differences = findDifferences(previousChunk, chunk);
+ setOrderedData((prevOrder) => [...prevOrder, { type: 'differences', content: 'differences', output: JSON.stringify(differences) }]);
+ }
+ previousChunk = chunk;
+ }
+ } else {
+ initializeWebSocket(newQuestion, chatBoxSettings);
+ }
+ };
+
+ const reset = () => {
+ setShowResult(false);
+ setPromptValue("");
+ setQuestion("");
+ setAnswer("");
+ };
+
+ const handleClickSuggestion = (value: string) => {
+ setPromptValue(value);
+ const element = document.getElementById('input-area');
+ if (element) {
+ element.scrollIntoView({ behavior: 'smooth' });
+ }
+ };
+
+ /**
+ * Handles stopping the current research
+ * - Closes WebSocket connection
+ * - Stops loading state
+ * - Marks research as stopped
+ * - Preserves current results
+ */
+ const handleStopResearch = () => {
+ if (socket) {
+ socket.close();
+ }
+ setLoading(false);
+ setIsStopped(true);
+ };
+
+ /**
+ * Handles starting a new research
+ * - Clears all previous research data and states
+ * - Resets UI to initial state
+ * - Closes any existing WebSocket connections
+ */
+ const handleStartNewResearch = () => {
+ // Reset UI states
+ setShowResult(false);
+ setPromptValue("");
+ setIsStopped(false);
+
+ // Clear previous research data
+ setQuestion("");
+ setAnswer("");
+ setOrderedData([]);
+ setAllLogs([]);
+
+ // Reset feedback states
+ setShowHumanFeedback(false);
+ setQuestionForHuman(false);
+
+ // Clean up connections
+ if (socket) {
+ socket.close();
+ }
+ setLoading(false);
+ };
+
+ /**
+ * Processes ordered data into logs for display
+ * Updates whenever orderedData changes
+ */
+ useEffect(() => {
+ const groupedData = preprocessOrderedData(orderedData);
+ const statusReports = ["agent_generated", "starting_research", "planning_research"];
+
+ const newLogs = groupedData.reduce((acc: any[], data) => {
+ // Process accordion blocks (grouped data)
+ if (data.type === 'accordionBlock') {
+ const logs = data.items.map((item: any, subIndex: any) => ({
+ header: item.content,
+ text: item.output,
+ metadata: item.metadata,
+ key: `${item.type}-${item.content}-${subIndex}`,
+ }));
+ return [...acc, ...logs];
+ }
+ // Process status reports
+ else if (statusReports.includes(data.content)) {
+ return [...acc, {
+ header: data.content,
+ text: data.output,
+ metadata: data.metadata,
+ key: `${data.type}-${data.content}`,
+ }];
+ }
+ return acc;
+ }, []);
+
+ setAllLogs(newLogs);
+ }, [orderedData]);
+
+ const handleScroll = useCallback(() => {
+ // Calculate if we're near bottom (within 100px)
+ const scrollPosition = window.scrollY + window.innerHeight;
+ const nearBottom = scrollPosition >= document.documentElement.scrollHeight - 100;
+
+ // Show button if we're not near bottom and page is scrollable
+ const isPageScrollable = document.documentElement.scrollHeight > window.innerHeight;
+ setShowScrollButton(isPageScrollable && !nearBottom);
+ }, []);
+
+ // Add ResizeObserver to watch for content changes
+ useEffect(() => {
+ const resizeObserver = new ResizeObserver(() => {
+ handleScroll();
+ });
+
+ if (mainContentRef.current) {
+ resizeObserver.observe(mainContentRef.current);
+ }
+
+ window.addEventListener('scroll', handleScroll);
+ window.addEventListener('resize', handleScroll);
+
+ return () => {
+ if (mainContentRef.current) {
+ resizeObserver.unobserve(mainContentRef.current);
+ }
+ resizeObserver.disconnect();
+ window.removeEventListener('scroll', handleScroll);
+ window.removeEventListener('resize', handleScroll);
+ };
+ }, [handleScroll]);
+
+ const scrollToBottom = () => {
+ window.scrollTo({
+ top: document.documentElement.scrollHeight,
+ behavior: 'smooth'
+ });
+ };
+
+ return (
+ <>
+
+
+ {!showResult && (
+
+ )}
+
+ {showResult && (
+
+
+
+
+
+
+ {showHumanFeedback && (
+
+ )}
+
+
+
+
+ {loading ? (
+
+ ) : (
+
+ )}
+
+
+ )}
+
+ {showScrollButton && showResult && (
+
+
+
+
+
+ )}
+
+ >
+ );
+}
\ No newline at end of file
diff --git a/frontend/nextjs/components/Footer.tsx b/frontend/nextjs/components/Footer.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..aac2ed07f8a37e87ba740e5cbe1c0d9d70e7400d
--- /dev/null
+++ b/frontend/nextjs/components/Footer.tsx
@@ -0,0 +1,56 @@
+import Image from "next/image";
+import Link from "next/link";
+import Modal from './Settings/Modal';
+
+interface ChatBoxSettings {
+ report_source: string;
+ report_type: string;
+ tone: string;
+}
+
+interface ChatBoxProps {
+ chatBoxSettings: ChatBoxSettings;
+ setChatBoxSettings: React.Dispatch>;
+}
+
+const Footer = ({ setChatBoxSettings, chatBoxSettings}: ChatBoxProps) => {
+
+ return (
+ <>
+
+
+
+ © {new Date().getFullYear()} GPT Researcher. All rights reserved.
+
+
+
+ {" "}
+
+
+ {" "}
+
+
+ {" "}
+
+
+
+ >
+ );
+};
+
+export default Footer;
\ No newline at end of file
diff --git a/frontend/nextjs/components/Header.tsx b/frontend/nextjs/components/Header.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..fb1152b7a616bfeef97db6aede7414248bfc0075
--- /dev/null
+++ b/frontend/nextjs/components/Header.tsx
@@ -0,0 +1,58 @@
+import Image from "next/image";
+
+interface HeaderProps {
+ loading?: boolean; // Indicates if research is currently in progress
+ isStopped?: boolean; // Indicates if research was manually stopped
+ showResult?: boolean; // Controls if research results are being displayed
+ onStop?: () => void; // Handler for stopping ongoing research
+ onNewResearch?: () => void; // Handler for starting fresh research
+}
+
+const Header = ({ loading, isStopped, showResult, onStop, onNewResearch }: HeaderProps) => {
+ return (
+
+ {/* Original gradient background with blur effect */}
+
+
+ {/* Header container */}
+
+
+ {/* Logo/Home link */}
+
+
+
+
+ {/* Action buttons container */}
+
+ {/* Stop button - shown only during active research */}
+ {loading && !isStopped && (
+
+ Stop
+
+ )}
+ {/* New Research button - shown after stopping or completing research */}
+ {(isStopped || !loading) && showResult && (
+
+ New Research
+
+ )}
+
+
+
+
+ );
+};
+
+export default Header;
diff --git a/frontend/nextjs/components/Hero.tsx b/frontend/nextjs/components/Hero.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..f5925a413c840d9677be9f320c17ee8eb513058b
--- /dev/null
+++ b/frontend/nextjs/components/Hero.tsx
@@ -0,0 +1,100 @@
+import Image from "next/image";
+import { FC } from "react";
+import InputArea from "./ResearchBlocks/elements/InputArea";
+
+type THeroProps = {
+ promptValue: string;
+ setPromptValue: React.Dispatch>;
+ handleDisplayResult: (query : string) => void;
+};
+
+const Hero: FC = ({
+ promptValue,
+ setPromptValue,
+ handleDisplayResult,
+}) => {
+ const handleClickSuggestion = (value: string) => {
+ setPromptValue(value);
+ };
+
+ return (
+
+
+
+
+ Say Goodbye to
+
+ Hours of Research
+
+
+
+ Say Hello to GPT Researcher, your AI mate for rapid insights and comprehensive research
+
+
+
+ {/* Input section */}
+
+
+
+
+ {/* Suggestions section */}
+
+ {suggestions.map((item) => (
+
handleClickSuggestion(item?.name)}
+ key={item.id}
+ >
+
+
+ {item.name}
+
+
+ ))}
+
+
+
+ );
+};
+
+type suggestionType = {
+ id: number;
+ name: string;
+ icon: string;
+};
+
+const suggestions: suggestionType[] = [
+ {
+ id: 1,
+ name: "Stock analysis on ",
+ icon: "/img/stock2.svg",
+ },
+ {
+ id: 2,
+ name: "Help me plan an adventure to ",
+ icon: "/img/hiker.svg",
+ },
+ {
+ id: 3,
+ name: "What are the latest news on ",
+ icon: "/img/news.svg",
+ },
+];
+
+export default Hero;
diff --git a/frontend/nextjs/components/HumanFeedback.tsx b/frontend/nextjs/components/HumanFeedback.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..b86dba50e87dce2cd0f36857a316b861c6a77239
--- /dev/null
+++ b/frontend/nextjs/components/HumanFeedback.tsx
@@ -0,0 +1,44 @@
+// /multi_agents/frontend/components/HumanFeedback.tsx
+
+import React, { useState, useEffect } from 'react';
+
+interface HumanFeedbackProps {
+ websocket: WebSocket | null;
+ onFeedbackSubmit: (feedback: string | null) => void;
+ questionForHuman: boolean;
+}
+
+const HumanFeedback: React.FC = ({ questionForHuman, websocket, onFeedbackSubmit }) => {
+ const [feedbackRequest, setFeedbackRequest] = useState(null);
+ const [userFeedback, setUserFeedback] = useState('');
+
+ const handleSubmit = (e: React.FormEvent) => {
+ e.preventDefault();
+ onFeedbackSubmit(userFeedback === '' ? null : userFeedback);
+ setFeedbackRequest(null);
+ setUserFeedback('');
+ };
+
+ return (
+
+
Human Feedback Required
+
{questionForHuman}
+
+
+ );
+};
+
+export default HumanFeedback;
\ No newline at end of file
diff --git a/frontend/nextjs/components/Images/ImageModal.jsx b/frontend/nextjs/components/Images/ImageModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..100158aa149d13a4d760e4b56ba47a1500b668fa
--- /dev/null
+++ b/frontend/nextjs/components/Images/ImageModal.jsx
@@ -0,0 +1,83 @@
+import React, { useEffect } from 'react';
+
+export default function ImageModal({ imageSrc, isOpen, onClose, onNext, onPrev }) {
+ if (!isOpen) return null;
+
+ // Set up keyboard event listeners
+ useEffect(() => {
+ const handleKeyDown = (e) => {
+ if (e.key === 'ArrowLeft') {
+ onPrev();
+ } else if (e.key === 'ArrowRight') {
+ onNext();
+ } else if (e.key === 'Escape') {
+ onClose();
+ }
+ };
+
+ document.addEventListener('keydown', handleKeyDown);
+ return () => document.removeEventListener('keydown', handleKeyDown);
+ }, [onClose, onNext, onPrev]);
+
+ // Swipe detection for mobile
+ let touchStartX = 0;
+ let touchEndX = 0;
+
+ const handleTouchStart = (e) => {
+ touchStartX = e.changedTouches[0].screenX;
+ };
+
+ const handleTouchEnd = (e) => {
+ touchEndX = e.changedTouches[0].screenX;
+ handleSwipeGesture();
+ };
+
+ const handleSwipeGesture = () => {
+ if (touchEndX < touchStartX - 50) {
+ onNext();
+ } else if (touchEndX > touchStartX + 50) {
+ onPrev();
+ }
+ };
+
+ const handleClose = (e) => {
+ if (e.target === e.currentTarget) {
+ onClose();
+ }
+ };
+
+ return (
+
+
+
+ ←
+
+
+
+ →
+
+
+ ×
+
+
+
+ );
+}
diff --git a/frontend/nextjs/components/Images/ImagesAlbum.jsx b/frontend/nextjs/components/Images/ImagesAlbum.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..2519ca799a97e7aca4a037f88a8591feb7902028
--- /dev/null
+++ b/frontend/nextjs/components/Images/ImagesAlbum.jsx
@@ -0,0 +1,75 @@
+import React, { useState, useEffect } from 'react';
+import ImageModal from './ImageModal';
+
+export default function ImagesAlbum({ images }) {
+ const [isModalOpen, setIsModalOpen] = useState(false);
+ const [selectedImage, setSelectedImage] = useState(null);
+ const [selectedIndex, setSelectedIndex] = useState(0);
+ const [validImages, setValidImages] = useState(images);
+
+ const openModal = (image, index) => {
+ setSelectedImage(image);
+ setSelectedIndex(index);
+ setIsModalOpen(true);
+ };
+
+ const closeModal = () => {
+ setIsModalOpen(false);
+ setSelectedImage(null);
+ };
+
+ // Handle navigation in modal
+ const nextImage = () => {
+ setSelectedIndex((prevIndex) => (prevIndex + 1) % validImages.length);
+ setSelectedImage(validImages[(selectedIndex + 1) % validImages.length]);
+ };
+
+ const prevImage = () => {
+ setSelectedIndex((prevIndex) => (prevIndex - 1 + validImages.length) % validImages.length);
+ setSelectedImage(validImages[(selectedIndex - 1 + validImages.length) % validImages.length]);
+ };
+
+ // Handle broken images by filtering them out
+ const handleImageError = (brokenImage) => {
+ setValidImages((prevImages) => prevImages.filter((img) => img !== brokenImage));
+ };
+
+ useEffect(() => {
+ const imagesToHide = []
+ const filteredImages = images.filter((img) => !imagesToHide.includes(img));
+ setValidImages(filteredImages);
+ }, [images]);
+
+ if (validImages.length === 0) return null;
+
+ return (
+
+
+ {validImages.map((image, index) => (
+
+
openModal(image, index)}
+ onError={() => handleImageError(image)}
+ />
+
+ ))}
+
+
+ {selectedImage && (
+
+ )}
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/nextjs/components/Langgraph/Langgraph.js b/frontend/nextjs/components/Langgraph/Langgraph.js
new file mode 100644
index 0000000000000000000000000000000000000000..e8d47f7e802401e609f0d48b008bd8ecc5e73346
--- /dev/null
+++ b/frontend/nextjs/components/Langgraph/Langgraph.js
@@ -0,0 +1,48 @@
+import { Client } from "@langchain/langgraph-sdk";
+import { task } from '../../config/task';
+
+export async function startLanggraphResearch(newQuestion, report_source, langgraphHostUrl) {
+ // Update the task query with the new question
+ task.task.query = newQuestion;
+ task.task.source = report_source;
+ const host = langgraphHostUrl;
+
+ // Add your Langgraph Cloud Authentication token here
+ const authToken = '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';
+
+ const client = new Client({
+ apiUrl: host,
+ defaultHeaders: {
+ 'Content-Type': 'application/json',
+ 'X-Api-Key': authToken
+ }
+ });
+
+ // List all assistants
+ const assistants = await client.assistants.search({
+ metadata: null,
+ offset: 0,
+ limit: 10,
+ });
+
+ console.log('assistants: ', assistants);
+
+ // We auto-create an assistant for each graph you register in config.
+ const agent = assistants[0];
+
+ // Start a new thread
+ const thread = await client.threads.create();
+
+ // Start a streaming run
+ const input = task;
+
+ const streamResponse = client.runs.stream(
+ thread["thread_id"],
+ agent["assistant_id"],
+ {
+ input,
+ },
+ );
+
+ return {streamResponse, host, thread_id: thread["thread_id"]};
+}
\ No newline at end of file
diff --git a/frontend/nextjs/components/LoadingDots.tsx b/frontend/nextjs/components/LoadingDots.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..c79ad823520a0980af47227adbeee8204e3f8ccf
--- /dev/null
+++ b/frontend/nextjs/components/LoadingDots.tsx
@@ -0,0 +1,13 @@
+const LoadingDots = () => {
+ return (
+
+ );
+};
+
+export default LoadingDots;
\ No newline at end of file
diff --git a/frontend/nextjs/components/ResearchBlocks/AccessReport.tsx b/frontend/nextjs/components/ResearchBlocks/AccessReport.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..ba70e33efb4d6db7048c5a3b9e326c1e61a69f72
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/AccessReport.tsx
@@ -0,0 +1,75 @@
+import React from 'react';
+import {getHost} from '../../helpers/getHost'
+
+interface AccessReportProps {
+ accessData: {
+ pdf?: string;
+ docx?: string;
+ json?: string;
+ };
+ chatBoxSettings: {
+ report_type?: string;
+ };
+ report: string;
+}
+
+const AccessReport: React.FC = ({ accessData, chatBoxSettings, report }) => {
+ const host = getHost();
+
+ const getReportLink = (dataType: 'pdf' | 'docx' | 'json'): string => {
+ // Early return if path is not available
+ if (!accessData?.[dataType]) {
+ console.warn(`No ${dataType} path provided`);
+ return '#';
+ }
+
+ const path = accessData[dataType] as string;
+
+ // Clean the path - remove leading/trailing slashes and handle outputs/ prefix
+ const cleanPath = path
+ .trim()
+ .replace(/^\/+|\/+$/g, ''); // Remove leading/trailing slashes
+
+ // Only prepend outputs/ if it's not already there
+ const finalPath = cleanPath.startsWith('outputs/')
+ ? cleanPath
+ : `outputs/${cleanPath}`;
+
+ return `${host}/${finalPath}`;
+ };
+
+ // Safety check for accessData
+ if (!accessData || typeof accessData !== 'object') {
+ return null;
+ }
+
+ return (
+
+ );
+};
+
+export default AccessReport;
\ No newline at end of file
diff --git a/frontend/nextjs/components/ResearchBlocks/Answer.tsx b/frontend/nextjs/components/ResearchBlocks/Answer.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..ef85effb2816313c7729b3c3735dc0a1b43ff0d1
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/Answer.tsx
@@ -0,0 +1,73 @@
+import Image from "next/image";
+import { Toaster, toast } from "react-hot-toast";
+import { useEffect, useState } from 'react';
+import { remark } from 'remark';
+import html from 'remark-html';
+import { Compatible } from "vfile";
+import '@/styles/markdown.css';
+
+export default function Answer({ answer }: { answer: string }) {
+ async function markdownToHtml(markdown: Compatible | undefined) {
+ try {
+ const result = await remark().use(html).process(markdown);
+ return result.toString();
+ } catch (error) {
+ console.error('Error converting Markdown to HTML:', error);
+ return ''; // Handle error gracefully, return empty string or default content
+ }
+ }
+
+ const [htmlContent, setHtmlContent] = useState('');
+
+ useEffect(() => {
+ markdownToHtml(answer).then((html) => setHtmlContent(html));
+ }, [answer]);
+
+ return (
+
+
+
+ {answer && (
+
+ {
+ navigator.clipboard.writeText(answer.trim());
+ toast("Answer copied to clipboard", {
+ icon: "✂️",
+ });
+ }}
+ >
+
+
+
+ )}
+
+
+
+ {answer ? (
+
+ ) : (
+
+ )}
+
+
+
+
+
+ );
+}
diff --git a/frontend/nextjs/components/ResearchBlocks/ImageSection.tsx b/frontend/nextjs/components/ResearchBlocks/ImageSection.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..c10108deadc4ad9a0d576440e2d6d835a7e594f7
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/ImageSection.tsx
@@ -0,0 +1,24 @@
+import Image from "next/image";
+import ImagesAlbum from '../Images/ImagesAlbum';
+
+interface ImageSectionProps {
+ metadata: any;
+}
+
+const ImageSection = ({ metadata }: ImageSectionProps) => {
+ return (
+
+
+
+
+ Related Images
+
+
+
+
+
+
+ );
+};
+
+export default ImageSection;
\ No newline at end of file
diff --git a/frontend/nextjs/components/ResearchBlocks/LogsSection.tsx b/frontend/nextjs/components/ResearchBlocks/LogsSection.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..468f6709048cc3a2241de8a4cec0ecd570511e30
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/LogsSection.tsx
@@ -0,0 +1,44 @@
+import Image from "next/image";
+import LogMessage from './elements/LogMessage';
+import { useEffect, useRef } from 'react';
+
+interface Log {
+ header: string;
+ text: string;
+ metadata: any;
+ key: string;
+}
+
+interface OrderedLogsProps {
+ logs: Log[];
+}
+
+const LogsSection = ({ logs }: OrderedLogsProps) => {
+ const logsContainerRef = useRef(null);
+
+ useEffect(() => {
+ // Scroll to bottom whenever logs change
+ if (logsContainerRef.current) {
+ logsContainerRef.current.scrollTop = logsContainerRef.current.scrollHeight;
+ }
+ }, [logs]); // Dependency on logs array ensures this runs when new logs are added
+
+ return (
+
+
+
+
+ Agent Work
+
+
+
+
+
+
+ );
+};
+
+export default LogsSection;
\ No newline at end of file
diff --git a/frontend/nextjs/components/ResearchBlocks/Question.tsx b/frontend/nextjs/components/ResearchBlocks/Question.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..f194bd38c67ace42201be0f38bd87126dadd4c1c
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/Question.tsx
@@ -0,0 +1,27 @@
+import Image from "next/image";
+
+interface QuestionProps {
+ question: string;
+}
+
+const Question: React.FC = ({ question }) => {
+ return (
+
+ );
+};
+
+export default Question;
diff --git a/frontend/nextjs/components/ResearchBlocks/Sources.tsx b/frontend/nextjs/components/ResearchBlocks/Sources.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..3c08a9795bf6660112c54360da259d2f369b1061
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/Sources.tsx
@@ -0,0 +1,37 @@
+import Image from "next/image";
+import SourceCard from "./elements/SourceCard";
+
+export default function Sources({
+ sources,
+}: {
+ sources: { name: string; url: string }[];
+}) {
+ return (
+
+
+
+
+ sources{" "}
+
+
+
+
+ {sources.length > 0 ? (
+ sources.map((source) => (
+
+ ))
+ ) : (
+ <>
+
+
+
+
+
+
+ >
+ )}
+
+
+
+ );
+}
diff --git a/frontend/nextjs/components/ResearchBlocks/elements/InputArea.tsx b/frontend/nextjs/components/ResearchBlocks/elements/InputArea.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..db143001b4892d5958a6d7e07c5cf972d5e2e3bd
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/elements/InputArea.tsx
@@ -0,0 +1,131 @@
+import Image from "next/image";
+import { FC, useRef } from "react";
+import TypeAnimation from "../../TypeAnimation";
+
+type TInputAreaProps = {
+ promptValue: string;
+ setPromptValue: React.Dispatch>;
+ handleSubmit: (query: string) => void;
+ handleSecondary?: (query: string) => void;
+ disabled?: boolean;
+ reset?: () => void;
+ isStopped?: boolean;
+};
+
+// Debounce function to limit the rate at which a function can fire
+function debounce(func: Function, wait: number) {
+ let timeout: NodeJS.Timeout | undefined;
+ return function executedFunction(...args: any[]) {
+ const later = () => {
+ clearTimeout(timeout);
+ func(...args);
+ };
+ clearTimeout(timeout);
+ timeout = setTimeout(later, wait);
+ };
+}
+
+const InputArea: FC = ({
+ promptValue,
+ setPromptValue,
+ handleSubmit,
+ handleSecondary,
+ disabled,
+ reset,
+ isStopped,
+}) => {
+ // Only show input if not stopped
+ if (isStopped) {
+ return null;
+ }
+
+ const placeholder = handleSecondary
+ ? "Any questions about this report?"
+ : "What would you like to research next?";
+
+ const textareaRef = useRef(null);
+
+ const resetHeight = () => {
+ if (textareaRef.current) {
+ textareaRef.current.style.height = '3em'; // Reset to base height
+ }
+ };
+
+ const handleKeyDown = (e: React.KeyboardEvent) => {
+ if (e.key === 'Enter') {
+ if (e.shiftKey) {
+ return; // Allow new line on Shift+Enter
+ } else {
+ e.preventDefault();
+ if (!disabled) {
+ if (reset) reset();
+ handleSubmit(promptValue);
+ setPromptValue(''); // Clear prompt value
+ resetHeight(); // Reset height after submit
+ }
+ }
+ }
+ };
+
+ // Debounced version of the height adjustment function
+ const adjustHeight = debounce((target: HTMLTextAreaElement) => {
+ target.style.height = 'auto'; // Reset height to auto to allow shrinking
+ target.style.height = `${target.scrollHeight}px`; // Adjust height
+ }, 100); // Adjust the delay as needed
+
+ const handleTextareaChange = (e: React.ChangeEvent) => {
+ const target = e.target;
+ adjustHeight(target); // Use debounced function
+ setPromptValue(target.value);
+ };
+
+ return (
+ {
+ e.preventDefault();
+ if (reset) reset();
+ handleSubmit(promptValue);
+ setPromptValue(''); // Clear prompt value
+ resetHeight();
+ }}
+ >
+
+
+
+ {disabled && (
+
+
+
+ )}
+
+
+
+
+ );
+};
+
+export default InputArea;
diff --git a/frontend/nextjs/components/ResearchBlocks/elements/LogMessage.tsx b/frontend/nextjs/components/ResearchBlocks/elements/LogMessage.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..17c995df2cf20990422e928ddaecb6f13a6ab777
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/elements/LogMessage.tsx
@@ -0,0 +1,98 @@
+// LogMessage.tsx
+import Accordion from '../../Task/Accordion';
+import { useEffect, useState } from 'react';
+import { remark } from 'remark';
+import html from 'remark-html';
+import ImagesAlbum from '../../Images/ImagesAlbum';
+import Image from "next/image";
+
+type ProcessedData = {
+ field: string;
+ htmlContent: string;
+ isMarkdown: boolean;
+};
+
+type Log = {
+ header: string;
+ text: string;
+ processedData?: ProcessedData[];
+ metadata?: any;
+};
+
+interface LogMessageProps {
+ logs: Log[];
+}
+
+const LogMessage: React.FC = ({ logs }) => {
+ const [processedLogs, setProcessedLogs] = useState([]);
+
+ useEffect(() => {
+ const processLogs = async () => {
+ if (!logs) return;
+
+ const newLogs = await Promise.all(
+ logs.map(async (log) => {
+ try {
+ if (log.header === 'differences' && log.text) {
+ const data = JSON.parse(log.text).data;
+ const processedData = await Promise.all(
+ Object.keys(data).map(async (field) => {
+ const fieldValue = data[field].after || data[field].before;
+ if (!plainTextFields.includes(field)) {
+ const htmlContent = await markdownToHtml(fieldValue);
+ return { field, htmlContent, isMarkdown: true };
+ }
+ return { field, htmlContent: fieldValue, isMarkdown: false };
+ })
+ );
+ return { ...log, processedData };
+ }
+ return log;
+ } catch (error) {
+ console.error('Error processing log:', error);
+ return log;
+ }
+ })
+ );
+ setProcessedLogs(newLogs);
+ };
+
+ processLogs();
+ }, [logs]);
+
+ return (
+ <>
+ {processedLogs.map((log, index) => {
+ if (log.header === 'subquery_context_window' || log.header === 'differences') {
+ return ;
+ } else if (log.header !== 'selected_images' && log.header !== 'scraping_images') {
+ return (
+
+ );
+ }
+ return null;
+ })}
+ >
+ );
+};
+
+const markdownToHtml = async (markdown: string): Promise => {
+ try {
+ const result = await remark().use(html).process(markdown);
+ return result.toString();
+ } catch (error) {
+ console.error('Error converting Markdown to HTML:', error);
+ return ''; // Handle error gracefully, return empty string or default content
+ }
+};
+
+const plainTextFields = ['task', 'sections', 'headers', 'sources', 'research_data'];
+
+export default LogMessage;
\ No newline at end of file
diff --git a/frontend/nextjs/components/ResearchBlocks/elements/SourceCard.tsx b/frontend/nextjs/components/ResearchBlocks/elements/SourceCard.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..1b1826f592368c40bf15f48b770aaba7d020646b
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/elements/SourceCard.tsx
@@ -0,0 +1,40 @@
+import Image from "next/image";
+import { useState } from "react";
+
+const SourceCard = ({ source }: { source: { name: string; url: string } }) => {
+ const [imageSrc, setImageSrc] = useState(`https://www.google.com/s2/favicons?domain=${source.url}&sz=128`);
+
+ const handleImageError = () => {
+ setImageSrc("/img/globe.svg");
+ };
+
+ return (
+
+ );
+};
+
+export default SourceCard;
diff --git a/frontend/nextjs/components/ResearchBlocks/elements/SubQuestions.tsx b/frontend/nextjs/components/ResearchBlocks/elements/SubQuestions.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..114e416b5aa523fd9e6b387c31ab2dbd6a3a71da
--- /dev/null
+++ b/frontend/nextjs/components/ResearchBlocks/elements/SubQuestions.tsx
@@ -0,0 +1,42 @@
+import Image from "next/image";
+
+interface SubQuestionsProps {
+ metadata: string[];
+ handleClickSuggestion: (value: string) => void;
+}
+
+const SubQuestions: React.FC = ({ metadata, handleClickSuggestion }) => {
+ return (
+
+
+
+
+
+
+ Pondering your question from several angles
+
+
+ {metadata.map((item, subIndex) => (
+
handleClickSuggestion(item)}
+ key={`${item}-${subIndex}`}
+ >
+
+ {item}
+
+
+ ))}
+
+
+
+ );
+};
+
+export default SubQuestions;
\ No newline at end of file
diff --git a/frontend/nextjs/components/ResearchResults.tsx b/frontend/nextjs/components/ResearchResults.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..248868f9191110a197b59ff78ccafc3091518540
--- /dev/null
+++ b/frontend/nextjs/components/ResearchResults.tsx
@@ -0,0 +1,81 @@
+import React from 'react';
+import Question from './ResearchBlocks/Question';
+import Answer from './ResearchBlocks/Answer';
+import Sources from './ResearchBlocks/Sources';
+import ImageSection from './ResearchBlocks/ImageSection';
+import SubQuestions from './ResearchBlocks/elements/SubQuestions';
+import LogsSection from './ResearchBlocks/LogsSection';
+import AccessReport from './ResearchBlocks/AccessReport';
+import { preprocessOrderedData } from '../utils/dataProcessing';
+import { Data } from '../types/data';
+
+interface ResearchResultsProps {
+ orderedData: Data[];
+ answer: string;
+ allLogs: any[];
+ chatBoxSettings: any;
+ handleClickSuggestion: (value: string) => void;
+}
+
+export const ResearchResults: React.FC = ({
+ orderedData,
+ answer,
+ allLogs,
+ chatBoxSettings,
+ handleClickSuggestion
+}) => {
+ const groupedData = preprocessOrderedData(orderedData);
+ const pathData = groupedData.find(data => data.type === 'path');
+ const initialQuestion = groupedData.find(data => data.type === 'question');
+
+ const chatComponents = groupedData
+ .filter(data => {
+ if (data.type === 'question' && data === initialQuestion) {
+ return false;
+ }
+ return (data.type === 'question' || data.type === 'chat');
+ })
+ .map((data, index) => {
+ if (data.type === 'question') {
+ return ;
+ } else {
+ return ;
+ }
+ });
+
+ const sourceComponents = groupedData
+ .filter(data => data.type === 'sourceBlock')
+ .map((data, index) => (
+
+ ));
+
+ const imageComponents = groupedData
+ .filter(data => data.type === 'imagesBlock')
+ .map((data, index) => (
+
+ ));
+
+ const initialReport = groupedData.find(data => data.type === 'reportBlock');
+ const finalReport = groupedData
+ .filter(data => data.type === 'reportBlock')
+ .pop();
+ const subqueriesComponent = groupedData.find(data => data.content === 'subqueries');
+
+ return (
+ <>
+ {orderedData.length > 0 && }
+ {initialQuestion && }
+ {subqueriesComponent && (
+
+ )}
+ {sourceComponents}
+ {imageComponents}
+ {finalReport && }
+ {pathData && }
+ {chatComponents}
+ >
+ );
+};
\ No newline at end of file
diff --git a/frontend/nextjs/components/Settings/App.css b/frontend/nextjs/components/Settings/App.css
new file mode 100644
index 0000000000000000000000000000000000000000..905420aaa7314a4e591bf5688c3af45d171422a6
--- /dev/null
+++ b/frontend/nextjs/components/Settings/App.css
@@ -0,0 +1,215 @@
+@keyframes gradientBG {
+ 0% {
+ background-position: 0% 50%;
+ }
+ 50% {
+ background-position: 100% 50%;
+ }
+ 100% {
+ background-position: 0% 50%;
+ }
+}
+
+.tabs {
+ display: flex;
+ justify-content: space-around;
+ margin-bottom: 1rem;
+}
+
+.tab-button {
+ padding: 0.5rem 1rem;
+ border: none;
+ background: none;
+ cursor: pointer;
+ font-size: 1rem;
+ transition: all 0.3s ease-in-out;
+}
+
+.tab-button:hover {
+ opacity: 0.8;
+}
+
+.tab-button.active {
+ background-image: linear-gradient(to right, #9867F0, #ED4E50);
+ color: white;
+ border-radius: 5px;
+}
+
+.settings html {
+ scroll-behavior: smooth;
+}
+
+.settings body {
+ font-family: 'Montserrat', sans-serif;
+ color: #fff;
+ line-height: 1.6;
+ background-size: 200% 200%;
+ background-image: linear-gradient(45deg, #151A2D, #2D284D, #151A2D);
+ animation: gradientBG 10s ease infinite;
+}
+
+.settings .landing {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ height: 50vh; /* Adjusted height */
+ text-align: center;
+}
+
+.settings .landing h1 {
+ font-size: 3.5rem;
+ font-weight: 700;
+ margin-bottom: 2rem;
+}
+
+.settings .landing p {
+ font-size: 1.5rem;
+ font-weight: 400;
+ max-width: 500px;
+ margin: auto;
+ margin-bottom: 2rem;
+}
+
+.settings .container {
+ max-width: 900px;
+ margin: auto;
+ padding: 20px;
+ background-color: rgba(255, 255, 255, 0.1);
+ border-radius: 12px;
+ box-shadow: 0px 10px 25px rgba(0, 0, 0, 0.1);
+ transition: all .3s ease-in-out;
+ max-height: 80vh; /* Fixed maximum height */
+ overflow-y: auto; /* Enable scrolling if content overflows */
+}
+
+.settings .container:hover {
+ transform: scale(1.01);
+ box-shadow: 0px 15px 30px rgba(0, 0, 0, 0.2);
+}
+
+.settings input,
+.settings select,
+.settings #output,
+.settings #reportContainer {
+ background-color: rgba(0, 0, 0, 0.5); /* Darker background color */
+ border: none;
+ color: #fff; /* White text color */
+ transition: all .3s ease-in-out;
+}
+
+.settings input:hover,
+.settings input:focus,
+.settings select:hover,
+.settings select:focus {
+ background-color: #333; /* Darker hover/focus background color */
+ border: 1px solid rgba(255, 255, 255, 0.5);
+ box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1);
+ transition: all 0.3s ease-in-out;
+}
+
+.settings .btn-primary {
+ background: linear-gradient(to right, #0062cc, #007bff);
+ border: none;
+ transition: all .3s ease-in-out;
+}
+
+.settings .btn-secondary {
+ background: linear-gradient(to right, #6c757d, #6c757d);
+ border: none;
+ transition: all .3s ease-in-out;
+}
+
+.settings .btn:hover {
+ opacity: 0.8;
+ transform: scale(1.1);
+ box-shadow: 0px 10px 20px rgba(0, 0, 0, 0.3);
+}
+
+.settings .agent_question {
+ font-size: 1.2rem;
+ font-weight: 500;
+ margin-bottom: 0.2rem;
+}
+
+.settings footer {
+ position: fixed;
+ left: 0;
+ bottom: 0;
+ width: 100%;
+ background: linear-gradient(to right, #151A2D, #111827);
+ color: white;
+ text-align: center;
+ padding: 10px 0;
+}
+
+.settings .margin-div {
+ margin-top: 20px;
+ margin-bottom: 20px;
+ padding: 10px;
+}
+
+.settings .agent_response {
+ background-color: #747d8c;
+ margin: 10px;
+ padding: 10px;
+ border-radius: 12px;
+}
+
+.settings #output {
+ height: 150px; /* Adjusted height */
+ font-family: 'Times New Roman', Times, "Courier New", serif;
+ overflow: auto;
+ padding: 10px;
+ margin-bottom: 10px;
+ margin-top: 10px;
+}
+
+.settings #reportContainer {
+ background-color: rgba(255, 255, 255, 0.1);
+ border: none;
+ color: #fff;
+ transition: all .3s ease-in-out;
+ padding: 10px;
+ border-radius: 12px;
+}
+
+/* refactoring inline css */
+.settings .sayGoodbye {
+ background-image: linear-gradient(to right, #9867F0, #ED4E50);
+ -webkit-background-clip: text;
+ -webkit-text-fill-color: transparent;
+}
+
+.settings .form-group {
+ display: flex;
+ align-items: center;
+ width: 100%;
+ margin-bottom: 1rem;
+}
+
+.settings .form-group label {
+ flex: 1;
+ margin-right: 1rem;
+}
+
+.settings .form-group input,
+.settings .form-group select {
+ flex: 2;
+ width: 100%;
+ padding: 0.5rem;
+ border-radius: 0.375rem; /* Rounded corners */
+ border: 1px solid rgba(255, 255, 255, 0.5);
+ background-color: rgba(0, 0, 0, 0.5); /* Darker background color */
+ color: #fff; /* White text color */
+ transition: all 0.3s ease-in-out;
+}
+
+.settings .form-group input:hover,
+.settings .form-group input:focus,
+.settings .form-group select:hover,
+.settings .form-group select:focus {
+ background-color: #333; /* Darker hover/focus background color */
+ border: 1px solid rgba(255, 255, 255, 0.5);
+ box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1);
+ transition: all 0.3s ease-in-out;
+}
diff --git a/frontend/nextjs/components/Settings/ChatBox.tsx b/frontend/nextjs/components/Settings/ChatBox.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..9c4531e0d2a9aa22960d52767048ca79d0a83bfe
--- /dev/null
+++ b/frontend/nextjs/components/Settings/ChatBox.tsx
@@ -0,0 +1,90 @@
+import React, { useState, useEffect } from 'react';
+import ResearchForm from '../Task/ResearchForm';
+import Report from '../Task/Report';
+import AgentLogs from '../Task/AgentLogs';
+import AccessReport from '../ResearchBlocks/AccessReport';
+
+interface ChatBoxSettings {
+ report_source: string;
+ report_type: string;
+ tone: string;
+}
+
+interface ChatBoxProps {
+ chatBoxSettings: ChatBoxSettings;
+ setChatBoxSettings: React.Dispatch>;
+}
+
+interface OutputData {
+ pdf?: string;
+ docx?: string;
+ json?: string;
+}
+
+interface WebSocketMessage {
+ type: 'logs' | 'report' | 'path';
+ output: string | OutputData;
+}
+
+export default function ChatBox({ chatBoxSettings, setChatBoxSettings }: ChatBoxProps) {
+
+ const [agentLogs, setAgentLogs] = useState([]);
+ const [report, setReport] = useState("");
+ const [accessData, setAccessData] = useState({});
+ const [socket, setSocket] = useState(null);
+
+ useEffect(() => {
+ if (typeof window !== 'undefined') {
+ const { protocol, pathname } = window.location;
+ let { host } = window.location;
+ host = host.includes('localhost') ? 'localhost:8000' : host;
+ const ws_uri = `${protocol === 'https:' ? 'wss:' : 'ws:'}//${host}${pathname}ws`;
+ const newSocket = new WebSocket(ws_uri);
+ setSocket(newSocket);
+
+ newSocket.onmessage = (event) => {
+ const data = JSON.parse(event.data) as WebSocketMessage;
+
+ if (data.type === 'logs') {
+ setAgentLogs((prevLogs: any[]) => [...prevLogs, data]);
+ } else if (data.type === 'report') {
+ setReport((prevReport: string) => prevReport + (data.output as string));
+ } else if (data.type === 'path') {
+ const output = data.output as OutputData;
+ setAccessData({
+ ...(output.pdf && { pdf: `outputs/${output.pdf}` }),
+ ...(output.docx && { docx: `outputs/${output.docx}` }),
+ ...(output.json && { json: `outputs/${output.json}` })
+ });
+ }
+ };
+
+ return () => {
+ newSocket.close();
+ };
+ }
+ }, []);
+
+ return (
+
+
+
+
+ {agentLogs?.length > 0 ? : ''}
+
+ {report ?
: ''}
+ {Object.keys(accessData).length > 0 &&
+
+ }
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/nextjs/components/Settings/FileUpload.tsx b/frontend/nextjs/components/Settings/FileUpload.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..4aa1d5df42ca32e0d9139236ba0192d2d1c582e5
--- /dev/null
+++ b/frontend/nextjs/components/Settings/FileUpload.tsx
@@ -0,0 +1,81 @@
+import React, { useCallback, useEffect, useState } from "react";
+import axios from 'axios';
+import { useDropzone } from 'react-dropzone';
+import {getHost} from "@/helpers/getHost"
+
+const FileUpload = () => {
+ const [files, setFiles] = useState([]);
+ const host = getHost();
+
+ const fetchFiles = useCallback(async () => {
+ try {
+ const response = await axios.get(`${host}/files/`);
+ setFiles(response.data.files);
+ } catch (error) {
+ console.error('Error fetching files:', error);
+ }
+ }, [host]);
+
+ useEffect(() => {
+ fetchFiles();
+ }, [fetchFiles]);
+
+ const onDrop = async (acceptedFiles: any[]) => {
+ const formData = new FormData();
+ acceptedFiles.forEach(file => {
+ formData.append('file', file);
+ });
+
+ try {
+ await axios.post(`${host}/upload/`, formData, {
+ headers: {
+ 'Content-Type': 'multipart/form-data'
+ }
+ });
+ fetchFiles();
+ } catch (error) {
+ console.error('Error uploading files:', error);
+ }
+ };
+
+ const deleteFile = async (filename: never) => {
+ try {
+ await axios.delete(`${host}/files/${filename}`);
+ fetchFiles();
+ } catch (error) {
+ console.error('Error deleting file:', error);
+ }
+ };
+
+ const { getRootProps, getInputProps } = useDropzone({ onDrop });
+
+ return (
+
+
+
+
Drag 'n' drop some files here, or click to select files
+
+ {files.length > 0 && (
+ <>
+
Uploaded Files
+
+ {files.map(file => (
+
+ {file}
+ { e.preventDefault(); deleteFile(file) }}>
+
+
+
+
+
+ ))}
+
+ >
+ )}
+
+ );
+};
+
+export default FileUpload;
\ No newline at end of file
diff --git a/frontend/nextjs/components/Settings/Modal.tsx b/frontend/nextjs/components/Settings/Modal.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..e6ba54164cf71d10d16f4d03562b4d76444ba5b2
--- /dev/null
+++ b/frontend/nextjs/components/Settings/Modal.tsx
@@ -0,0 +1,228 @@
+import React, { useState, useEffect } from "react";
+import './App.css';
+import ChatBox from './ChatBox';
+import axios from 'axios';
+import { getHost } from '../../helpers/getHost';
+
+interface ChatBoxSettings {
+ report_source: string;
+ report_type: string;
+ tone: string;
+}
+
+interface ChatBoxProps {
+ chatBoxSettings: ChatBoxSettings;
+ setChatBoxSettings: React.Dispatch>;
+}
+export default function Modal({ setChatBoxSettings, chatBoxSettings }: ChatBoxProps) {
+ const [showModal, setShowModal] = useState(false);
+ const [activeTab, setActiveTab] = useState('search');
+ const [apiVariables, setApiVariables] = useState({
+ ANTHROPIC_API_KEY: '',
+ TAVILY_API_KEY: '',
+ LANGCHAIN_TRACING_V2: 'true',
+ LANGCHAIN_API_KEY: '',
+ OPENAI_API_KEY: '',
+ DOC_PATH: './my-docs',
+ RETRIEVER: 'tavily', // Set default retriever to Tavily
+ GOOGLE_API_KEY: '',
+ GOOGLE_CX_KEY: '',
+ BING_API_KEY: '',
+ SEARCHAPI_API_KEY: '',
+ SERPAPI_API_KEY: '',
+ SERPER_API_KEY: '',
+ SEARX_URL: '',
+ LANGGRAPH_HOST_URL: ''
+ });
+
+ useEffect(() => {
+ const storedConfig = localStorage.getItem('apiVariables');
+ if (storedConfig) {
+ setApiVariables(JSON.parse(storedConfig));
+ } else {
+ axios.get(`${getHost()}/getConfig`)
+ .then(response => {
+ setApiVariables(response.data);
+ localStorage.setItem('apiVariables', JSON.stringify(response.data));
+ })
+ .catch(error => {
+ console.error('Error fetching config:', error);
+ });
+ }
+ }, [showModal]);
+
+ const handleSaveChanges = () => {
+ setChatBoxSettings(chatBoxSettings);
+ localStorage.setItem('apiVariables', JSON.stringify(apiVariables));
+ setShowModal(false);
+ };
+
+ const handleInputChange = (e: { target: { name: any; value: any; }; }) => {
+ const { name, value } = e.target;
+ setApiVariables(prevState => ({
+ ...prevState,
+ [name]: value
+ }));
+ localStorage.setItem('apiVariables', JSON.stringify({
+ ...apiVariables,
+ [name]: value
+ }));
+ };
+
+ const renderConditionalInputs = () => {
+ switch (apiVariables.RETRIEVER) {
+ case 'google':
+ return (
+ <>
+
+ GOOGLE_API_KEY
+
+
+
+ GOOGLE_CX_KEY
+
+
+ >
+ );
+ case 'bing':
+ return (
+
+ BING_API_KEY
+
+
+ );
+ case 'searchapi':
+ return (
+
+ SEARCHAPI_API_KEY
+
+
+ );
+ case 'serpapi':
+ return (
+
+ SERPAPI_API_KEY
+
+
+ );
+ case 'googleSerp':
+ return (
+
+ SERPER_API_KEY
+
+
+ );
+ case 'searx':
+ return (
+
+ SEARX_URL
+
+
+ );
+ // Add cases for other retrievers if needed
+ default:
+ return null;
+ }
+ };
+
+ return (
+
+
setShowModal(true)}
+ >
+ Preferences
+
+ {showModal ? (
+ <>
+
+
+
+
+
+
+ Save & Close
+
+
+
+
+
+
+ >
+ ) : null}
+
+ );
+}
diff --git a/frontend/nextjs/components/Settings/ToneSelector.tsx b/frontend/nextjs/components/Settings/ToneSelector.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..0e86b080292437c339bbda1e9381716703a8c76b
--- /dev/null
+++ b/frontend/nextjs/components/Settings/ToneSelector.tsx
@@ -0,0 +1,30 @@
+import React, { ChangeEvent } from 'react';
+
+interface ToneSelectorProps {
+ tone: string;
+ onToneChange: (event: ChangeEvent) => void;
+}
+export default function ToneSelector({ tone, onToneChange }: ToneSelectorProps) {
+ return (
+
+ Tone
+
+ Objective - Impartial and unbiased presentation of facts and findings
+ Formal - Adheres to academic standards with sophisticated language and structure
+ Analytical - Critical evaluation and detailed examination of data and theories
+ Persuasive - Convincing the audience of a particular viewpoint or argument
+ Informative - Providing clear and comprehensive information on a topic
+ Explanatory - Clarifying complex concepts and processes
+ Descriptive - Detailed depiction of phenomena, experiments, or case studies
+ Critical - Judging the validity and relevance of the research and its conclusions
+ Comparative - Juxtaposing different theories, data, or methods to highlight differences and similarities
+ Speculative - Exploring hypotheses and potential implications or future research directions
+ Reflective - Considering the research process and personal insights or experiences
+ Narrative - Telling a story to illustrate research findings or methodologies
+ Humorous - Light-hearted and engaging, usually to make the content more relatable
+ Optimistic - Highlighting positive findings and potential benefits
+ Pessimistic - Focusing on limitations, challenges, or negative outcomes
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/nextjs/components/SimilarTopics.tsx b/frontend/nextjs/components/SimilarTopics.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..4dd27e8a5714798553b338e2f001cca16a5ba406
--- /dev/null
+++ b/frontend/nextjs/components/SimilarTopics.tsx
@@ -0,0 +1,73 @@
+import Image from "next/image";
+
+const SimilarTopics = ({
+ similarQuestions,
+ handleDisplayResult,
+ reset,
+}: {
+ similarQuestions: string[];
+ handleDisplayResult: (item: string) => void;
+ reset: () => void;
+}) => {
+ return (
+
+
+
+
+
+
+
+
+ Similar topics:{" "}
+
+
+
+
+ {similarQuestions.length > 0 ? (
+ similarQuestions.map((item) => (
+
{
+ reset();
+ handleDisplayResult(item);
+ }}
+ >
+
+
+
+
+ {item}
+
+
+ ))
+ ) : (
+ <>
+
+
+
+ >
+ )}
+
+
+
+ );
+};
+
+export default SimilarTopics;
diff --git a/frontend/nextjs/components/Task/Accordion.tsx b/frontend/nextjs/components/Task/Accordion.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..997fc85ccd1db42491d517da39be63d56583f655
--- /dev/null
+++ b/frontend/nextjs/components/Task/Accordion.tsx
@@ -0,0 +1,161 @@
+// Accordion.tsx
+import { useState } from 'react';
+
+type ProcessedData = {
+ field: string;
+ isMarkdown: boolean;
+ htmlContent: string | object;
+};
+
+type Log = {
+ header: string;
+ text: string;
+ processedData?: ProcessedData[];
+};
+
+interface AccordionProps {
+ logs: Log[];
+}
+
+const plainTextFields = ['task', 'sections', 'headers', 'sources', 'research_data'];
+
+const Accordion: React.FC = ({ logs }) => {
+
+ const getLogHeaderText = (log: Log): string => {
+ const regex = /📃 Source: (https?:\/\/[^\s]+)/;
+ const match = log.text.match(regex);
+ let sourceUrl = '';
+
+ if (match) {
+ sourceUrl = match[1];
+ }
+
+ return log.header === 'differences'
+ ? 'The following fields on the Langgraph were updated: ' + Object.keys(JSON.parse(log.text).data).join(', ')
+ : `📄 Retrieved relevant content from the source: ${sourceUrl}`;
+ };
+
+ const renderLogContent = (log: Log) => {
+ if (log.header === 'differences' && log.processedData) {
+ return log.processedData.map((data, index) => (
+
+
{data.field}:
+ {data.isMarkdown ? (
+
+ ) : (
+
+ {typeof data.htmlContent === 'object' ? JSON.stringify(data.htmlContent) : data.htmlContent}
+
+ )}
+
+
+ ));
+ } else {
+ return {log.text}
;
+ }
+ };
+
+ const [openIndex, setOpenIndex] = useState(null);
+
+ const handleToggle = (index: number) => {
+ setOpenIndex(openIndex === index ? null : index);
+ };
+
+ return (
+
+ {logs.map((log, index) => (
+
+
+ handleToggle(index)}
+ aria-expanded={openIndex === index}
+ aria-controls={`accordion-collapse-body-${index}`}
+ >
+ {getLogHeaderText(log)}
+
+
+
+
+
+
+
+ {renderLogContent(log)}
+
+
+
+ ))}
+
+ );
+};
+
+export default Accordion;
\ No newline at end of file
diff --git a/frontend/nextjs/components/Task/AgentLogs.tsx b/frontend/nextjs/components/Task/AgentLogs.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..bac5e8d886cd1833f0696a488811ddff7d2c917a
--- /dev/null
+++ b/frontend/nextjs/components/Task/AgentLogs.tsx
@@ -0,0 +1,16 @@
+export default function AgentLogs({agentLogs}:any){
+ const renderAgentLogs = (agentLogs:any)=>{
+ return agentLogs && agentLogs.map((agentLog:any, index:number)=>{
+ return ({agentLog.output}
)
+ })
+ }
+
+ return (
+
+
Agent Output
+
+ {renderAgentLogs(agentLogs)}
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/nextjs/components/Task/Report.tsx b/frontend/nextjs/components/Task/Report.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..79f039a8b6366d6fa206b44eba9f8100786fdc0a
--- /dev/null
+++ b/frontend/nextjs/components/Task/Report.tsx
@@ -0,0 +1,16 @@
+import React from 'react';
+
+export default function Report({report}:any) {
+
+ return (
+
+
Research Report
+
+ {/* */}
+
+
+ );
+};
\ No newline at end of file
diff --git a/frontend/nextjs/components/Task/ResearchForm.tsx b/frontend/nextjs/components/Task/ResearchForm.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..0699a5bd3a090479e30f76456571cd9bdad4c69e
--- /dev/null
+++ b/frontend/nextjs/components/Task/ResearchForm.tsx
@@ -0,0 +1,121 @@
+import React, { useState, useEffect } from "react";
+import FileUpload from "../Settings/FileUpload";
+import ToneSelector from "../Settings/ToneSelector";
+
+interface ChatBoxSettings {
+ report_type: string;
+ report_source: string;
+ tone: string;
+}
+
+interface ResearchFormProps {
+ chatBoxSettings: ChatBoxSettings;
+ setChatBoxSettings: React.Dispatch>;
+ onFormSubmit?: (
+ task: string,
+ reportType: string,
+ reportSource: string,
+ ) => void;
+ defaultReportType: string;
+}
+
+export default function ResearchForm({
+ chatBoxSettings,
+ setChatBoxSettings,
+ onFormSubmit,
+ defaultReportType,
+}: ResearchFormProps) {
+ const [task, setTask] = useState(""); // You can use this to capture any specific task data if needed
+
+ // Destructure necessary fields from chatBoxSettings
+ let { report_type, report_source, tone } = chatBoxSettings;
+
+ const onFormChange = (e: { target: { name: any; value: any } }) => {
+ const { name, value } = e.target;
+ setChatBoxSettings((prevSettings: any) => ({
+ ...prevSettings,
+ [name]: value,
+ }));
+ };
+
+ const onToneChange = (e: { target: { value: any } }) => {
+ const { value } = e.target;
+ setChatBoxSettings((prevSettings: any) => ({
+ ...prevSettings,
+ tone: value,
+ }));
+ };
+
+ const handleSubmit = (e: React.FormEvent) => {
+ e.preventDefault();
+ if (onFormSubmit) {
+ onFormSubmit(task, report_type, report_source); // Trigger the onFormSubmit prop when form is submitted
+ } else {
+ console.warn("onFormSubmit is not defined");
+ }
+ };
+
+ useEffect(() => {
+ // Set default report type only if report_type is empty (initial mount)
+ if (!chatBoxSettings.report_type) {
+ setChatBoxSettings((prevSettings) => ({
+ ...prevSettings,
+ report_type: defaultReportType,
+ }));
+ }
+ }, [defaultReportType, setChatBoxSettings, chatBoxSettings.report_type]);
+
+ return (
+
+
+
+ Report Type{" "}
+
+
+
+ Multi Agents Report
+
+ Summary - Short and fast (~2 min)
+
+
+ Detailed - In depth and longer (~5 min)
+
+
+
+
+
+
+ Report Source{" "}
+
+
+ The Internet
+ My Documents
+ Hybrid
+
+
+ {/* Conditional file upload if the report source is 'local' or 'hybrid' */}
+ {report_source === "local" || report_source === "hybrid" ? (
+
+ ) : null}
+ {/* ToneSelector for changing the tone */}
+
+
+
+ );
+}
diff --git a/frontend/nextjs/components/TypeAnimation.tsx b/frontend/nextjs/components/TypeAnimation.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..a1cb9242012971ee2af49d9b780ab2127f8c16ef
--- /dev/null
+++ b/frontend/nextjs/components/TypeAnimation.tsx
@@ -0,0 +1,11 @@
+const TypeAnimation = () => {
+ return (
+
+
+
+
+
+ );
+};
+
+export default TypeAnimation;
diff --git a/frontend/nextjs/config/task.ts b/frontend/nextjs/config/task.ts
new file mode 100644
index 0000000000000000000000000000000000000000..e0d49c6ceac7c4eea0669da83afad33ebe0c98ea
--- /dev/null
+++ b/frontend/nextjs/config/task.ts
@@ -0,0 +1,37 @@
+export const task = {
+ "task": {
+ "query": "Is AI in a hype cycle?",
+ "include_human_feedback": false,
+ "model": "gpt-4o",
+ "max_sections": 3,
+ "publish_formats": {
+ "markdown": true,
+ "pdf": true,
+ "docx": true
+ },
+ "source": "web",
+ "follow_guidelines": true,
+ "guidelines": [
+ "The report MUST fully answer the original question",
+ "The report MUST be written in apa format",
+ "The report MUST be written in english"
+ ],
+ "verbose": true
+ },
+ "initial_research": "Initial research data here",
+ "sections": ["Section 1", "Section 2"],
+ "research_data": "Research data here",
+ "title": "Research Title",
+ "headers": {
+ "introduction": "Introduction header",
+ "table_of_contents": "Table of Contents header",
+ "conclusion": "Conclusion header",
+ "sources": "Sources header"
+ },
+ "date": "2023-10-01",
+ "table_of_contents": "- Introduction\n- Section 1\n- Section 2\n- Conclusion",
+ "introduction": "Introduction content here",
+ "conclusion": "Conclusion content here",
+ "sources": ["Source 1", "Source 2"],
+ "report": "Full report content here"
+}
\ No newline at end of file
diff --git a/frontend/nextjs/helpers/findDifferences.ts b/frontend/nextjs/helpers/findDifferences.ts
new file mode 100644
index 0000000000000000000000000000000000000000..3049c5bf5ab21ca9a3fa601a3d97888b8d61c8af
--- /dev/null
+++ b/frontend/nextjs/helpers/findDifferences.ts
@@ -0,0 +1,48 @@
+type Value = string | number | boolean | null | undefined | object | Value[]; // Possible value types
+type Changes = { [key: string]: { before: Value; after: Value } | Changes }; // Recursive changes type
+
+function findDifferences>(obj1: T, obj2: T): Changes {
+ // Helper function to check if a value is an object (excluding arrays)
+ function isObject(obj: any): obj is Record {
+ return obj && typeof obj === 'object' && !Array.isArray(obj);
+ }
+
+ // Recursive function to compare two objects and return the differences
+ function compareObjects(o1: Record, o2: Record): Changes {
+ const changes: Changes = {};
+
+ // Iterate over keys in the first object (o1)
+ for (const key in o1) {
+ if (isObject(o1[key]) && isObject(o2[key])) {
+ // Recursively compare nested objects
+ const nestedChanges = compareObjects(o1[key], o2[key]);
+ if (Object.keys(nestedChanges).length > 0) {
+ changes[key] = nestedChanges; // Add nested changes if any
+ }
+ } else if (Array.isArray(o1[key]) && Array.isArray(o2[key])) {
+ // Compare arrays
+ if (o1[key].length !== o2[key].length || o1[key].some((val, index) => val !== o2[key][index])) {
+ changes[key] = { before: o1[key], after: o2[key] };
+ }
+ } else {
+ // Compare primitive values (or any non-object, non-array values)
+ if (o1[key] !== o2[key]) {
+ changes[key] = { before: o1[key], after: o2[key] };
+ }
+ }
+ }
+
+ // Iterate over keys in the second object (o2) to detect new keys
+ for (const key in o2) {
+ if (!(key in o1)) {
+ changes[key] = { before: undefined, after: o2[key] };
+ }
+ }
+
+ return changes; // Return the collected changes
+ }
+
+ return compareObjects(obj1, obj2); // Compare the two input objects
+}
+
+export default findDifferences;
\ No newline at end of file
diff --git a/frontend/nextjs/helpers/getHost.ts b/frontend/nextjs/helpers/getHost.ts
new file mode 100644
index 0000000000000000000000000000000000000000..c882de4a810071f4aaacec31f0628bb2aa4f7a62
--- /dev/null
+++ b/frontend/nextjs/helpers/getHost.ts
@@ -0,0 +1,15 @@
+interface GetHostParams {
+ purpose?: string;
+}
+
+export const getHost = ({ purpose }: GetHostParams = {}): string => {
+ if (typeof window !== 'undefined') {
+ let { host } = window.location;
+ if (purpose === 'langgraph-gui') {
+ return host.includes('localhost') ? 'http%3A%2F%2F127.0.0.1%3A8123' : `https://${host}`;
+ } else {
+ return host.includes('localhost') ? 'http://localhost:8000' : `https://${host}`;
+ }
+ }
+ return '';
+};
\ No newline at end of file
diff --git a/frontend/nextjs/hooks/useWebSocket.ts b/frontend/nextjs/hooks/useWebSocket.ts
new file mode 100644
index 0000000000000000000000000000000000000000..3f6f68fe410583effe7311d763aa428de2e60869
--- /dev/null
+++ b/frontend/nextjs/hooks/useWebSocket.ts
@@ -0,0 +1,75 @@
+import { useRef, useState } from 'react';
+import { Data, ChatBoxSettings, QuestionData } from '../types/data';
+
+export const useWebSocket = (setOrderedData: React.Dispatch>, setAnswer: React.Dispatch>, setLoading: React.Dispatch>, setShowHumanFeedback: React.Dispatch>, setQuestionForHuman: React.Dispatch>) => {
+ const [socket, setSocket] = useState(null);
+ const heartbeatInterval = useRef();
+
+ const initializeWebSocket = (promptValue: string, chatBoxSettings: ChatBoxSettings) => {
+ const storedConfig = localStorage.getItem('apiVariables');
+ const apiVariables = storedConfig ? JSON.parse(storedConfig) : {};
+ const headers = {
+ 'retriever': apiVariables.RETRIEVER,
+ 'langchain_api_key': apiVariables.LANGCHAIN_API_KEY,
+ 'openai_api_key': apiVariables.OPENAI_API_KEY,
+ 'tavily_api_key': apiVariables.TAVILY_API_KEY,
+ 'google_api_key': apiVariables.GOOGLE_API_KEY,
+ 'google_cx_key': apiVariables.GOOGLE_CX_KEY,
+ 'bing_api_key': apiVariables.BING_API_KEY,
+ 'searchapi_api_key': apiVariables.SEARCHAPI_API_KEY,
+ 'serpapi_api_key': apiVariables.SERPAPI_API_KEY,
+ 'serper_api_key': apiVariables.SERPER_API_KEY,
+ 'searx_url': apiVariables.SEARX_URL
+ };
+
+ if (!socket && typeof window !== 'undefined') {
+ const { protocol, pathname } = window.location;
+ let { host } = window.location;
+ host = host.includes('localhost') ? 'localhost:8000' : host;
+ const ws_uri = `${protocol === 'https:' ? 'wss:' : 'ws:'}//${host}${pathname}ws`;
+
+ const newSocket = new WebSocket(ws_uri);
+ setSocket(newSocket);
+
+ newSocket.onmessage = (event) => {
+ const data = JSON.parse(event.data);
+ if (data.type === 'human_feedback' && data.content === 'request') {
+ setQuestionForHuman(data.output);
+ setShowHumanFeedback(true);
+ } else {
+ const contentAndType = `${data.content}-${data.type}`;
+ setOrderedData((prevOrder) => [...prevOrder, { ...data, contentAndType }]);
+
+ if (data.type === 'report') {
+ setAnswer((prev: any) => prev + data.output);
+ } else if (data.type === 'path' || data.type === 'chat') {
+ setLoading(false);
+ }
+ }
+ };
+
+ newSocket.onopen = () => {
+ const { report_type, report_source, tone } = chatBoxSettings;
+ let data = "start " + JSON.stringify({ task: promptValue, report_type, report_source, tone, headers });
+ newSocket.send(data);
+
+ heartbeatInterval.current = window.setInterval(() => {
+ socket?.send('ping');
+ }, 3000); // Send ping every 3 seconds
+ };
+
+ newSocket.onclose = () => {
+ if (heartbeatInterval.current) {
+ clearInterval(heartbeatInterval.current);
+ }
+ setSocket(null);
+ };
+ } else if (socket) {
+ const { report_type, report_source, tone } = chatBoxSettings;
+ let data = "start " + JSON.stringify({ task: promptValue, report_type, report_source, tone, headers });
+ socket.send(data);
+ }
+ };
+
+ return { socket, setSocket, initializeWebSocket };
+};
\ No newline at end of file
diff --git a/frontend/nextjs/next.config.mjs b/frontend/nextjs/next.config.mjs
new file mode 100644
index 0000000000000000000000000000000000000000..9e77aa24ec503471a5163a6bb43f45efbc17945d
--- /dev/null
+++ b/frontend/nextjs/next.config.mjs
@@ -0,0 +1,12 @@
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+ images: {
+ remotePatterns: [
+ {
+ hostname: 'www.google.com',
+ },
+ ],
+ },
+};
+
+export default nextConfig;
diff --git a/frontend/nextjs/nginx/default.conf b/frontend/nextjs/nginx/default.conf
new file mode 100644
index 0000000000000000000000000000000000000000..f71a23ff3c0c08f263af074ce87deea5ce2acfb7
--- /dev/null
+++ b/frontend/nextjs/nginx/default.conf
@@ -0,0 +1,9 @@
+server{
+ listen 3000;
+
+ location / {
+ root /usr/share/nginx/html;
+ index index.html index.htm;
+ try_files $uri $uri/ /index.html;
+ }
+}
\ No newline at end of file
diff --git a/frontend/nextjs/package.json b/frontend/nextjs/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..7fd1aa465eaeb7f56f1a9796c5548b308888dc89
--- /dev/null
+++ b/frontend/nextjs/package.json
@@ -0,0 +1,52 @@
+{
+ "name": "gpt-researcher",
+ "version": "0.1.0",
+ "private": true,
+ "scripts": {
+ "dev": "next dev",
+ "build": "next build",
+ "start": "next start",
+ "lint": "next lint"
+ },
+ "dependencies": {
+ "@chakra-ui/react": "^2.4.9",
+ "@emotion/react": "^11.10.5",
+ "@emotion/styled": "^11.10.5",
+ "@langchain/langgraph-sdk": "^0.0.1-rc.12",
+ "@mozilla/readability": "^0.5.0",
+ "@testing-library/jest-dom": "^5.16.5",
+ "@testing-library/react": "^13.4.0",
+ "@testing-library/user-event": "^13.5.0",
+ "axios": "^1.3.2",
+ "eventsource-parser": "^1.1.2",
+ "framer-motion": "^9.0.2",
+ "jsdom": "^24.1.0",
+ "next": "14.2.3",
+ "next-plausible": "^3.12.0",
+ "react": "^18",
+ "react-dom": "^18",
+ "react-dropzone": "^14.2.3",
+ "react-hot-toast": "^2.4.1",
+ "react-scripts": "5.0.1",
+ "remark": "^15.0.1",
+ "remark-html": "^16.0.1",
+ "remark-parse": "^11.0.0",
+ "together-ai": "^0.6.0-alpha.3",
+ "web-vitals": "^2.1.4",
+ "zod": "^3.0.0",
+ "zod-to-json-schema": "^3.23.0"
+ },
+ "devDependencies": {
+ "@types/jsdom": "^21.1.6",
+ "@types/node": "^20",
+ "@types/react": "^18",
+ "@types/react-dom": "^18",
+ "eslint": "^8",
+ "eslint-config-next": "14.2.3",
+ "postcss": "^8",
+ "prettier": "^3.2.5",
+ "prettier-plugin-tailwindcss": "^0.6.0",
+ "tailwindcss": "^3.4.1",
+ "typescript": "^5"
+ }
+}
diff --git a/frontend/nextjs/postcss.config.mjs b/frontend/nextjs/postcss.config.mjs
new file mode 100644
index 0000000000000000000000000000000000000000..78ebb47afd77754c040bd20971478487235e5cbb
--- /dev/null
+++ b/frontend/nextjs/postcss.config.mjs
@@ -0,0 +1,8 @@
+/** @type {import('postcss-load-config').Config} */
+const config = {
+ plugins: {
+ tailwindcss: {},
+ },
+};
+
+export default config;
diff --git a/frontend/nextjs/public/favicon.ico b/frontend/nextjs/public/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..0fcac8674eb61c7ed97594696cce8dd65e93e730
Binary files /dev/null and b/frontend/nextjs/public/favicon.ico differ
diff --git a/frontend/nextjs/public/img/F.svg b/frontend/nextjs/public/img/F.svg
new file mode 100644
index 0000000000000000000000000000000000000000..088de425245d355fed248047d2b106facc8a6616
--- /dev/null
+++ b/frontend/nextjs/public/img/F.svg
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/Info.svg b/frontend/nextjs/public/img/Info.svg
new file mode 100644
index 0000000000000000000000000000000000000000..5a1971f7ddb1ab5777c9254b8680f3dabe37ee1c
--- /dev/null
+++ b/frontend/nextjs/public/img/Info.svg
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/W.svg b/frontend/nextjs/public/img/W.svg
new file mode 100644
index 0000000000000000000000000000000000000000..c34f0ed58ae9f5c0d1639fe8f2b36fdb6d06a57c
--- /dev/null
+++ b/frontend/nextjs/public/img/W.svg
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/agents/academicResearchAgentAvatar.png b/frontend/nextjs/public/img/agents/academicResearchAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..6577d8131a150f882ce6874769dc8081b8b3e9c3
--- /dev/null
+++ b/frontend/nextjs/public/img/agents/academicResearchAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:973db7494afb6143526ae2e47db1376286ffae67c16e85c5e3866df3840d7bd7
+size 250609
diff --git a/frontend/nextjs/public/img/agents/businessAnalystAgentAvatar.png b/frontend/nextjs/public/img/agents/businessAnalystAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..1ec0bed00a2264702ffe614b8cf723b2ed1d50d0
--- /dev/null
+++ b/frontend/nextjs/public/img/agents/businessAnalystAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65fbb3bbd3f17a101574519bb84078398df008ee5dd783b0a6579c1f09d4cb99
+size 231610
diff --git a/frontend/nextjs/public/img/agents/computerSecurityanalystAvatar.png b/frontend/nextjs/public/img/agents/computerSecurityanalystAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..52b6d222789147eb3fc51eb2a44ccae523565ffc
--- /dev/null
+++ b/frontend/nextjs/public/img/agents/computerSecurityanalystAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea4da6fe48e41f95cadea74e66ac66a087d9c17e3c3544010c3ec8acf53f5723
+size 187238
diff --git a/frontend/nextjs/public/img/agents/defaultAgentAvatar.JPG b/frontend/nextjs/public/img/agents/defaultAgentAvatar.JPG
new file mode 100644
index 0000000000000000000000000000000000000000..64ea9d9e81bc5934c897486f3e6ee12114a38f47
Binary files /dev/null and b/frontend/nextjs/public/img/agents/defaultAgentAvatar.JPG differ
diff --git a/frontend/nextjs/public/img/agents/financeAgentAvatar.png b/frontend/nextjs/public/img/agents/financeAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..a492cb15dfce14010030caade5e5f19a0868c572
--- /dev/null
+++ b/frontend/nextjs/public/img/agents/financeAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0403d996ca0301365c7ab7c12f1d4f8cf50261196c27d295e5e90ef0f104a99e
+size 238786
diff --git a/frontend/nextjs/public/img/agents/mathAgentAvatar.png b/frontend/nextjs/public/img/agents/mathAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..db9bf7827a916249e0c4b1d5de26669c2f1a8a15
--- /dev/null
+++ b/frontend/nextjs/public/img/agents/mathAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19034bf870cfbe98a62df80db0f9579616f4749525403a7036f5c33e4ef8dac8
+size 229464
diff --git a/frontend/nextjs/public/img/agents/travelAgentAvatar.png b/frontend/nextjs/public/img/agents/travelAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..f265aac8f1d2a08634bbb0bc4ad1087945ba6716
--- /dev/null
+++ b/frontend/nextjs/public/img/agents/travelAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e27aa79f62fc3b466fe6635c80c0034a8bc326f2f2758ee114de4dd83e6bb24
+size 226893
diff --git a/frontend/nextjs/public/img/arrow-circle-up-right.svg b/frontend/nextjs/public/img/arrow-circle-up-right.svg
new file mode 100644
index 0000000000000000000000000000000000000000..30024f7cac9c8f5b7b8f337fa841d7c113fd8197
--- /dev/null
+++ b/frontend/nextjs/public/img/arrow-circle-up-right.svg
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/arrow-narrow-right.svg b/frontend/nextjs/public/img/arrow-narrow-right.svg
new file mode 100644
index 0000000000000000000000000000000000000000..230c83edfb736bc3a55c0e35b283adc7ab6c6920
--- /dev/null
+++ b/frontend/nextjs/public/img/arrow-narrow-right.svg
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/browser.svg b/frontend/nextjs/public/img/browser.svg
new file mode 100644
index 0000000000000000000000000000000000000000..2fc00d577ef58c4acd607f26e9d0e6e107b42e3a
--- /dev/null
+++ b/frontend/nextjs/public/img/browser.svg
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/chat-check.svg b/frontend/nextjs/public/img/chat-check.svg
new file mode 100644
index 0000000000000000000000000000000000000000..b0e16373fd2ea1b0a9fdec668d77738ca5b505c6
--- /dev/null
+++ b/frontend/nextjs/public/img/chat-check.svg
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/chat.svg b/frontend/nextjs/public/img/chat.svg
new file mode 100644
index 0000000000000000000000000000000000000000..c1651b5b24a06a03e198886b580fdcb065fcbc0d
--- /dev/null
+++ b/frontend/nextjs/public/img/chat.svg
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/copy-white.svg b/frontend/nextjs/public/img/copy-white.svg
new file mode 100644
index 0000000000000000000000000000000000000000..6d57bef6d10f38e669e429d0e0260d868716ac50
--- /dev/null
+++ b/frontend/nextjs/public/img/copy-white.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/copy.svg b/frontend/nextjs/public/img/copy.svg
new file mode 100644
index 0000000000000000000000000000000000000000..f072d77fe8709ea14a4c9cff09fa83736d36fa02
--- /dev/null
+++ b/frontend/nextjs/public/img/copy.svg
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/dinosaur.svg b/frontend/nextjs/public/img/dinosaur.svg
new file mode 100644
index 0000000000000000000000000000000000000000..908bb3e048243bdbb1b4be231b9095d476124dd3
--- /dev/null
+++ b/frontend/nextjs/public/img/dinosaur.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/discord.svg b/frontend/nextjs/public/img/discord.svg
new file mode 100644
index 0000000000000000000000000000000000000000..9dc8e6753a08313f54c6ddba5e7bba33d1269d19
--- /dev/null
+++ b/frontend/nextjs/public/img/discord.svg
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/docker-blue.svg b/frontend/nextjs/public/img/docker-blue.svg
new file mode 100644
index 0000000000000000000000000000000000000000..871a7a280a0c8dfb2a78b54ae49606f405a02bd7
--- /dev/null
+++ b/frontend/nextjs/public/img/docker-blue.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/docker.svg b/frontend/nextjs/public/img/docker.svg
new file mode 100644
index 0000000000000000000000000000000000000000..ab6025bdfae45ffc9ef1c28604b889fd6cc601c2
--- /dev/null
+++ b/frontend/nextjs/public/img/docker.svg
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/dunk.svg b/frontend/nextjs/public/img/dunk.svg
new file mode 100644
index 0000000000000000000000000000000000000000..29dc148d1c49aaa2758dbc2c16b90d7e402afe54
--- /dev/null
+++ b/frontend/nextjs/public/img/dunk.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/github-blue.svg b/frontend/nextjs/public/img/github-blue.svg
new file mode 100644
index 0000000000000000000000000000000000000000..fe1d560c14e3492667883ebd82439fd6935aadb1
--- /dev/null
+++ b/frontend/nextjs/public/img/github-blue.svg
@@ -0,0 +1 @@
+ Github-color Created with Sketch.
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/github-footer.svg b/frontend/nextjs/public/img/github-footer.svg
new file mode 100644
index 0000000000000000000000000000000000000000..57ab7dedf7ae3cbfeffa327d98fe214fdbbc947c
--- /dev/null
+++ b/frontend/nextjs/public/img/github-footer.svg
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/github.svg b/frontend/nextjs/public/img/github.svg
new file mode 100644
index 0000000000000000000000000000000000000000..ccc45219450422bd28d6437ece0ff2da1f2e45e2
--- /dev/null
+++ b/frontend/nextjs/public/img/github.svg
@@ -0,0 +1,7 @@
+
+
+
+
+
+
github [#142] Created with Sketch.
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/globe.svg b/frontend/nextjs/public/img/globe.svg
new file mode 100644
index 0000000000000000000000000000000000000000..69bf3896bd8b4413db3707bb0ce75d3f9f6876cc
--- /dev/null
+++ b/frontend/nextjs/public/img/globe.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/gptr-logo.png b/frontend/nextjs/public/img/gptr-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..722557636c225a88511954c68887319b3dedce96
--- /dev/null
+++ b/frontend/nextjs/public/img/gptr-logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f435810648e5a47fca5b959ddbd28ecf99334c5b62df5ae1671d527d82ed6c59
+size 221134
diff --git a/frontend/nextjs/public/img/hiker.svg b/frontend/nextjs/public/img/hiker.svg
new file mode 100644
index 0000000000000000000000000000000000000000..7bcfef73ceafa980ee495371b9db32b87547e58b
--- /dev/null
+++ b/frontend/nextjs/public/img/hiker.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/icon _atom_.svg b/frontend/nextjs/public/img/icon _atom_.svg
new file mode 100644
index 0000000000000000000000000000000000000000..d6b5822ad107cd4f45aeff9516e2144daa4dd4f9
--- /dev/null
+++ b/frontend/nextjs/public/img/icon _atom_.svg
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/icon _dumbell_.svg b/frontend/nextjs/public/img/icon _dumbell_.svg
new file mode 100644
index 0000000000000000000000000000000000000000..186dcd59fae9ed60bfca2df3c327306223d5fb7d
--- /dev/null
+++ b/frontend/nextjs/public/img/icon _dumbell_.svg
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/icon _leaf_.svg b/frontend/nextjs/public/img/icon _leaf_.svg
new file mode 100644
index 0000000000000000000000000000000000000000..26c2881453b8d92d519ccd87059dc94eea5b8b1c
--- /dev/null
+++ b/frontend/nextjs/public/img/icon _leaf_.svg
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/image.svg b/frontend/nextjs/public/img/image.svg
new file mode 100644
index 0000000000000000000000000000000000000000..33f6fab74e7ca63704b5897ebdeaf75e83704031
--- /dev/null
+++ b/frontend/nextjs/public/img/image.svg
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/indeed.svg b/frontend/nextjs/public/img/indeed.svg
new file mode 100644
index 0000000000000000000000000000000000000000..02d55330db6a0573f350afb6eb47065e11a330cc
--- /dev/null
+++ b/frontend/nextjs/public/img/indeed.svg
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/link.svg b/frontend/nextjs/public/img/link.svg
new file mode 100644
index 0000000000000000000000000000000000000000..5cddec941534e3123b5b7e501c16e53647269788
--- /dev/null
+++ b/frontend/nextjs/public/img/link.svg
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/message-question-circle.svg b/frontend/nextjs/public/img/message-question-circle.svg
new file mode 100644
index 0000000000000000000000000000000000000000..4795c26b41b36dbf154f30deca03033e38dba637
--- /dev/null
+++ b/frontend/nextjs/public/img/message-question-circle.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/news.svg b/frontend/nextjs/public/img/news.svg
new file mode 100644
index 0000000000000000000000000000000000000000..324d3ed323f61bb03ea321fdb2905ca1f6d6f33f
--- /dev/null
+++ b/frontend/nextjs/public/img/news.svg
@@ -0,0 +1,35 @@
+
+
+
+ News Icon
+ This is shape (source) for Clarity vector icon theme for gtk
+
+
+
+ News Icon
+ This is shape (source) for Clarity vector icon theme for gtk
+
+
+ Jakub Jankiewicz
+
+
+
+
+ Jakub Jankiewicz
+
+
+ 2010
+ image/svg+xml
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/search.svg b/frontend/nextjs/public/img/search.svg
new file mode 100644
index 0000000000000000000000000000000000000000..e0d67382fdcb7d443be63b967f5009bcecf9d1ec
--- /dev/null
+++ b/frontend/nextjs/public/img/search.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/share.svg b/frontend/nextjs/public/img/share.svg
new file mode 100644
index 0000000000000000000000000000000000000000..e5fefe3f81fed5531b7c004a088416d100335131
--- /dev/null
+++ b/frontend/nextjs/public/img/share.svg
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/similarTopics.svg b/frontend/nextjs/public/img/similarTopics.svg
new file mode 100644
index 0000000000000000000000000000000000000000..209e9d809d5e9f97f5e8f62ef9ba8a3c05e1967d
--- /dev/null
+++ b/frontend/nextjs/public/img/similarTopics.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/frontend/nextjs/public/img/sources.svg b/frontend/nextjs/public/img/sources.svg
new file mode 100644
index 0000000000000000000000000000000000000000..90b32a552c36b5fb8b1b18d76fb03bec44f1a8c6
--- /dev/null
+++ b/frontend/nextjs/public/img/sources.svg
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
diff --git a/frontend/nextjs/public/img/stock.svg b/frontend/nextjs/public/img/stock.svg
new file mode 100644
index 0000000000000000000000000000000000000000..6e28420ad33e31f993d7c6511b1fa64726a61d72
--- /dev/null
+++ b/frontend/nextjs/public/img/stock.svg
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/stock2.svg b/frontend/nextjs/public/img/stock2.svg
new file mode 100644
index 0000000000000000000000000000000000000000..90c6695ffd536a243686f7106592a4a457e69d15
--- /dev/null
+++ b/frontend/nextjs/public/img/stock2.svg
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/thinking.svg b/frontend/nextjs/public/img/thinking.svg
new file mode 100644
index 0000000000000000000000000000000000000000..f291b7c71b27c48515e0ced821b78d5da1b585cf
--- /dev/null
+++ b/frontend/nextjs/public/img/thinking.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/white-books.svg b/frontend/nextjs/public/img/white-books.svg
new file mode 100644
index 0000000000000000000000000000000000000000..a62b5efcf326ff617288fc444f0ce4e2bc316f67
--- /dev/null
+++ b/frontend/nextjs/public/img/white-books.svg
@@ -0,0 +1 @@
+ books
\ No newline at end of file
diff --git a/frontend/nextjs/public/img/x.svg b/frontend/nextjs/public/img/x.svg
new file mode 100644
index 0000000000000000000000000000000000000000..a0b6afd39ed29741d5125a051a5d058d0dabe887
--- /dev/null
+++ b/frontend/nextjs/public/img/x.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/frontend/nextjs/public/next.svg b/frontend/nextjs/public/next.svg
new file mode 100644
index 0000000000000000000000000000000000000000..5174b28c565c285e3e312ec5178be64fbeca8398
--- /dev/null
+++ b/frontend/nextjs/public/next.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/public/vercel.svg b/frontend/nextjs/public/vercel.svg
new file mode 100644
index 0000000000000000000000000000000000000000..d2f84222734f27b623d1c80dda3561b04d1284af
--- /dev/null
+++ b/frontend/nextjs/public/vercel.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/nextjs/styles/markdown.css b/frontend/nextjs/styles/markdown.css
new file mode 100644
index 0000000000000000000000000000000000000000..0461f56a20e712d0f31794a1131b78f530c024f0
--- /dev/null
+++ b/frontend/nextjs/styles/markdown.css
@@ -0,0 +1,111 @@
+.markdown-content {
+ /* Base styles */
+ color: white;
+ font-family: Georgia, 'Times New Roman', Times, serif;
+ font-size: 18px;
+ line-height: 1.6;
+
+ /* Headings */
+ h1, h2, h3, h4, h5, h6 {
+ line-height: 1.2;
+ font-weight: 500;
+ }
+
+ h1 { font-size: 2.5em; }
+ h2 { font-size: 2em; }
+ h3 { font-size: 1.5em; }
+ h4 { font-size: 1.2em; }
+ h5 { font-size: 1.1em; }
+ h6 { font-size: 1em; }
+
+ /* Paragraphs and spacing */
+ p {
+ margin: 0;
+ line-height: 1.6;
+ }
+
+ /* Text formatting */
+ strong, b {
+ font-weight: 600;
+ }
+
+ /* Lists */
+ ul, ol {
+ margin: 0;
+ padding-left: 2em;
+ line-height: 1.6;
+ }
+
+ li {
+ margin: 0;
+ padding-left: 0.5em;
+ }
+
+ ul li {
+ list-style-type: disc;
+ }
+
+ ol li {
+ list-style-type: decimal;
+ }
+
+ /* Links */
+ a {
+ color: rgb(168 85 247);
+ text-decoration: underline;
+ font-weight: 500;
+
+ &:hover {
+ opacity: 0.8;
+ }
+ }
+
+ /* Code blocks */
+ pre {
+ background-color: #1e1e1e;
+ padding: 1em;
+ border-radius: 4px;
+ overflow-x: auto;
+ margin: 1em 0;
+ }
+
+ code {
+ font-family: 'Courier New', Courier, monospace;
+ font-size: 0.9em;
+ padding: 0 0.4em;
+ background-color: #1e1e1e;
+ border-radius: 3px;
+ }
+
+ /* Blockquotes */
+ blockquote {
+ border-left: 4px solid rgb(168 85 247);
+ margin: 0;
+ padding-left: 1em;
+ font-style: italic;
+ }
+
+ /* Tables */
+ table {
+ border-collapse: collapse;
+ width: 100%;
+ margin: 1em 0;
+ }
+
+ th, td {
+ border: 1px solid #444;
+ padding: 0.5em;
+ text-align: left;
+ }
+
+ th {
+ background-color: #333;
+ }
+
+ /* Horizontal rule */
+ hr {
+ border: 0;
+ border-top: 1px solid #444;
+ margin: 1em 0;
+ }
+}
\ No newline at end of file
diff --git a/frontend/nextjs/tailwind.config.ts b/frontend/nextjs/tailwind.config.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6f4bf73c3d603c1e1fd421ecba3187a17d33c690
--- /dev/null
+++ b/frontend/nextjs/tailwind.config.ts
@@ -0,0 +1,31 @@
+import type { Config } from 'tailwindcss';
+
+const config: Config = {
+ content: [
+ './pages/**/*.{js,ts,jsx,tsx,mdx}',
+ './components/**/*.{js,ts,jsx,tsx,mdx}',
+ './app/**/*.{js,ts,jsx,tsx,mdx}',
+ ],
+ theme: {
+ screens: {
+ sm: '640px',
+ md: '768px',
+ lg: '898px',
+ // xl:"1024px"
+ },
+ container: {
+ center: true,
+ },
+ extend: {
+ backgroundImage: {
+ 'gradient-radial': 'radial-gradient(var(--tw-gradient-stops))',
+ 'custom-gradient':
+ 'linear-gradient(150deg, #1B1B16 1.28%, #565646 90.75%)',
+ 'gradient-conic':
+ 'conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))',
+ },
+ },
+ },
+ plugins: [],
+};
+export default config;
diff --git a/frontend/nextjs/tsconfig.json b/frontend/nextjs/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..f5dfef49b8fcb8ff3beae0e978274aaa8f41ee8d
--- /dev/null
+++ b/frontend/nextjs/tsconfig.json
@@ -0,0 +1,26 @@
+{
+ "compilerOptions": {
+ "lib": ["dom", "dom.iterable", "esnext"],
+ "allowJs": true,
+ "skipLibCheck": true,
+ "strict": true,
+ "noEmit": true,
+ "esModuleInterop": true,
+ "module": "esnext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "jsx": "preserve",
+ "incremental": true,
+ "plugins": [
+ {
+ "name": "next"
+ }
+ ],
+ "paths": {
+ "@/*": ["./*"]
+ }
+ },
+ "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts", "components/Task/ImagesCarousel.jsx"],
+ "exclude": ["node_modules"]
+}
diff --git a/frontend/nextjs/types/data.ts b/frontend/nextjs/types/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..43a20bcd9172a37f3974be2eebb2ebd403c35895
--- /dev/null
+++ b/frontend/nextjs/types/data.ts
@@ -0,0 +1,37 @@
+export interface BaseData {
+ type: string;
+}
+
+export interface BasicData extends BaseData {
+ type: 'basic';
+ content: string;
+}
+
+export interface LanggraphButtonData extends BaseData {
+ type: 'langgraphButton';
+ link: string;
+}
+
+export interface DifferencesData extends BaseData {
+ type: 'differences';
+ content: string;
+ output: string;
+}
+
+export interface QuestionData extends BaseData {
+ type: 'question';
+ content: string;
+}
+
+export interface ChatData extends BaseData {
+ type: 'chat';
+ content: string;
+}
+
+export type Data = BasicData | LanggraphButtonData | DifferencesData | QuestionData | ChatData;
+
+export interface ChatBoxSettings {
+ report_source: string;
+ report_type: string;
+ tone: string;
+}
\ No newline at end of file
diff --git a/frontend/nextjs/utils/consolidateBlocks.ts b/frontend/nextjs/utils/consolidateBlocks.ts
new file mode 100644
index 0000000000000000000000000000000000000000..5b9b2669b43aa9bf194c6739e3f69af5a2a19d43
--- /dev/null
+++ b/frontend/nextjs/utils/consolidateBlocks.ts
@@ -0,0 +1,35 @@
+export const consolidateSourceAndImageBlocks = (groupedData: any[]) => {
+ // Consolidate sourceBlocks
+ const consolidatedSourceBlock = {
+ type: 'sourceBlock',
+ items: groupedData
+ .filter(item => item.type === 'sourceBlock')
+ .flatMap(block => block.items || [])
+ .filter((item, index, self) =>
+ index === self.findIndex(t => t.url === item.url)
+ )
+ };
+
+ // Consolidate imageBlocks
+ const consolidatedImageBlock = {
+ type: 'imagesBlock',
+ metadata: groupedData
+ .filter(item => item.type === 'imagesBlock')
+ .flatMap(block => block.metadata || [])
+ };
+
+ // Remove all existing sourceBlocks and imageBlocks
+ groupedData = groupedData.filter(item =>
+ item.type !== 'sourceBlock' && item.type !== 'imagesBlock'
+ );
+
+ // Add consolidated blocks if they have items
+ if (consolidatedSourceBlock.items.length > 0) {
+ groupedData.push(consolidatedSourceBlock);
+ }
+ if (consolidatedImageBlock.metadata.length > 0) {
+ groupedData.push(consolidatedImageBlock);
+ }
+
+ return groupedData;
+};
\ No newline at end of file
diff --git a/frontend/nextjs/utils/dataProcessing.ts b/frontend/nextjs/utils/dataProcessing.ts
new file mode 100644
index 0000000000000000000000000000000000000000..b7c88332f8c6778a4b6088e7bc291941180c58db
--- /dev/null
+++ b/frontend/nextjs/utils/dataProcessing.ts
@@ -0,0 +1,121 @@
+import { Data } from '../types/data';
+import { consolidateSourceAndImageBlocks } from './consolidateBlocks';
+
+export const preprocessOrderedData = (data: Data[]) => {
+ let groupedData: any[] = [];
+ let currentAccordionGroup: any = null;
+ let currentSourceGroup: any = null;
+ let currentReportGroup: any = null;
+ let finalReportGroup: any = null;
+ let sourceBlockEncountered = false;
+ let lastSubqueriesIndex = -1;
+ const seenUrls = new Set();
+ console.log('websocket data before its processed',data)
+
+ data.forEach((item: any) => {
+ const { type, content, metadata, output, link } = item;
+
+ if (type === 'question') {
+ groupedData.push({ type: 'question', content });
+ } else if (type === 'report') {
+ // Start a new report group if we don't have one
+ if (!currentReportGroup) {
+ currentReportGroup = { type: 'reportBlock', content: '' };
+ groupedData.push(currentReportGroup);
+ }
+ currentReportGroup.content += output;
+ } else if (content === 'selected_images') {
+ groupedData.push({ type: 'imagesBlock', metadata });
+ } else if (type === 'logs' && content === 'research_report') {
+ if (!finalReportGroup) {
+ finalReportGroup = { type: 'reportBlock', content: '' };
+ groupedData.push(finalReportGroup);
+ }
+ finalReportGroup.content += output.report;
+ } else if (type === 'langgraphButton') {
+ groupedData.push({ type: 'langgraphButton', link });
+ } else if (type === 'chat') {
+ groupedData.push({ type: 'chat', content: content });
+ } else {
+ if (currentReportGroup) {
+ currentReportGroup = null;
+ }
+
+ if (content === 'subqueries') {
+ if (currentAccordionGroup) {
+ currentAccordionGroup = null;
+ }
+ if (currentSourceGroup) {
+ groupedData.push(currentSourceGroup);
+ currentSourceGroup = null;
+ }
+ groupedData.push(item);
+ lastSubqueriesIndex = groupedData.length - 1;
+ } else if (type === 'sourceBlock') {
+ currentSourceGroup = item;
+ if (lastSubqueriesIndex !== -1) {
+ groupedData.splice(lastSubqueriesIndex + 1, 0, currentSourceGroup);
+ lastSubqueriesIndex = -1;
+ } else {
+ groupedData.push(currentSourceGroup);
+ }
+ sourceBlockEncountered = true;
+ currentSourceGroup = null;
+ } else if (content === 'added_source_url') {
+ if (!currentSourceGroup) {
+ currentSourceGroup = { type: 'sourceBlock', items: [] };
+ }
+
+ if (!seenUrls.has(metadata)) {
+ seenUrls.add(metadata);
+ let hostname = "";
+ try {
+ if (typeof metadata === 'string') {
+ hostname = new URL(metadata).hostname.replace('www.', '');
+ }
+ } catch (e) {
+ hostname = "unknown";
+ }
+ currentSourceGroup.items.push({ name: hostname, url: metadata });
+ }
+
+ // Add this block to ensure the source group is added to groupedData
+ if (currentSourceGroup.items.length > 0 && !groupedData.includes(currentSourceGroup)) {
+ groupedData.push(currentSourceGroup);
+ sourceBlockEncountered = true;
+ }
+ } else if (type !== 'path' && content !== '') {
+ if (sourceBlockEncountered) {
+ if (!currentAccordionGroup) {
+ currentAccordionGroup = { type: 'accordionBlock', items: [] };
+ groupedData.push(currentAccordionGroup);
+ }
+ currentAccordionGroup.items.push(item);
+ } else {
+ groupedData.push(item);
+ }
+ } else {
+ if (currentAccordionGroup) {
+ currentAccordionGroup = null;
+ }
+ if (currentSourceGroup) {
+ currentSourceGroup = null;
+ }
+ if (currentReportGroup) {
+ // Find and remove the previous reportBlock
+ const reportBlockIndex = groupedData.findIndex(
+ item => item === currentReportGroup
+ );
+ if (reportBlockIndex !== -1) {
+ groupedData.splice(reportBlockIndex, 1);
+ }
+ currentReportGroup = null; // Reset the current report group
+ }
+ groupedData.push(item);
+ }
+ }
+ });
+
+ groupedData = consolidateSourceAndImageBlocks(groupedData);
+ return groupedData;
+};
\ No newline at end of file
diff --git a/frontend/pdf_styles.css b/frontend/pdf_styles.css
new file mode 100644
index 0000000000000000000000000000000000000000..d2743e7b0a1e5fbdf4c5cdc21d9622a9216eebed
--- /dev/null
+++ b/frontend/pdf_styles.css
@@ -0,0 +1,53 @@
+body {
+ font-family: 'Libre Baskerville', serif;
+ font-size: 12pt; /* standard size for academic papers */
+ line-height: 1.6; /* for readability */
+ color: #333; /* softer on the eyes than black */
+ background-color: #fff; /* white background */
+ margin: 0;
+ padding: 0;
+}
+
+h1, h2, h3, h4, h5, h6 {
+ font-family: 'Libre Baskerville', serif;
+ color: #000; /* darker than the body text */
+ margin-top: 1em; /* space above headers */
+}
+
+h1 {
+ font-size: 2em; /* make h1 twice the size of the body text */
+}
+
+h2 {
+ font-size: 1.5em;
+}
+
+/* Add some space between paragraphs */
+p {
+ margin-bottom: 1em;
+}
+
+/* Style for blockquotes, often used in academic papers */
+blockquote {
+ font-style: italic;
+ margin: 1em 0;
+ padding: 1em;
+ background-color: #f9f9f9; /* a light grey background */
+}
+
+/* You might want to style tables, figures, etc. too */
+table {
+ border-collapse: collapse;
+ width: 100%;
+}
+
+table, th, td {
+ border: 1px solid #ddd;
+ text-align: left;
+ padding: 8px;
+}
+
+th {
+ background-color: #f2f2f2;
+ color: black;
+}
\ No newline at end of file
diff --git a/frontend/scripts.js b/frontend/scripts.js
new file mode 100644
index 0000000000000000000000000000000000000000..f0c875f12aea4ebf115f0866595138c98b433537
--- /dev/null
+++ b/frontend/scripts.js
@@ -0,0 +1,277 @@
+const GPTResearcher = (() => {
+ const init = () => {
+ // Not sure, but I think it would be better to add event handlers here instead of in the HTML
+ //document.getElementById("startResearch").addEventListener("click", startResearch);
+ document
+ .getElementById('copyToClipboard')
+ .addEventListener('click', copyToClipboard)
+
+ updateState('initial')
+ }
+
+ const changeSource = () => {
+ const report_source = document.querySelector('select[name="report_source"]').value
+ if (report_source === 'sources') {
+ document.getElementById('sources').style.display = 'block'
+ } else {
+ document.getElementById('sources').style.display = 'none'
+ }
+ }
+
+ const startResearch = () => {
+ document.getElementById('output').innerHTML = ''
+ document.getElementById('reportContainer').innerHTML = ''
+
+ const imageContainer = document.getElementById('selectedImagesContainer')
+ imageContainer.innerHTML = ''
+ imageContainer.style.display = 'none'
+
+ updateState('in_progress')
+
+ addAgentResponse({
+ output: '🤔 Thinking about research questions for the task...',
+ })
+
+ listenToSockEvents()
+ }
+
+ const listenToSockEvents = () => {
+ const { protocol, host, pathname } = window.location
+ const ws_uri = `${
+ protocol === 'https:' ? 'wss:' : 'ws:'
+ }//${host}${pathname}ws`
+ const converter = new showdown.Converter()
+ const socket = new WebSocket(ws_uri)
+
+ socket.onmessage = (event) => {
+ const data = JSON.parse(event.data)
+ console.log("Received message:", data); // Debug log
+ if (data.type === 'logs') {
+ addAgentResponse(data)
+ } else if (data.type === 'images') {
+ console.log("Received images:", data); // Debug log
+ displaySelectedImages(data)
+ } else if (data.type === 'report') {
+ writeReport(data, converter)
+ } else if (data.type === 'path') {
+ updateState('finished')
+ updateDownloadLink(data)
+ }
+ }
+
+ socket.onopen = (event) => {
+ const task = document.querySelector('input[name="task"]').value
+ const report_type = document.querySelector(
+ 'select[name="report_type"]'
+ ).value
+ const report_source = document.querySelector(
+ 'select[name="report_source"]'
+ ).value
+ const tone = document.querySelector('select[name="tone"]').value
+ const agent = document.querySelector('input[name="agent"]:checked').value
+ let source_urls = tags
+
+ if (report_source !== 'sources' && source_urls.length > 0) {
+ source_urls = source_urls.slice(0, source_urls.length - 1)
+ }
+
+ const requestData = {
+ task: task,
+ report_type: report_type,
+ report_source: report_source,
+ source_urls: source_urls,
+ tone: tone,
+ agent: agent,
+ }
+
+ socket.send(`start ${JSON.stringify(requestData)}`)
+ }
+ }
+
+ const addAgentResponse = (data) => {
+ const output = document.getElementById('output')
+ output.innerHTML += '' + data.output + '
'
+ output.scrollTop = output.scrollHeight
+ output.style.display = 'block'
+ updateScroll()
+ }
+
+ const writeReport = (data, converter) => {
+ const reportContainer = document.getElementById('reportContainer')
+ const markdownOutput = converter.makeHtml(data.output)
+ reportContainer.innerHTML += markdownOutput
+ updateScroll()
+ }
+
+ const updateDownloadLink = (data) => {
+ if (!data.output) {
+ console.error('No output data received');
+ return;
+ }
+
+ const { pdf, docx, md, json } = data.output;
+ console.log('Received paths:', { pdf, docx, md, json });
+
+ // Helper function to safely update link
+ const updateLink = (id, path) => {
+ const element = document.getElementById(id);
+ if (element && path) {
+ console.log(`Setting ${id} href to:`, path);
+ element.setAttribute('href', path);
+ element.classList.remove('disabled');
+ } else {
+ console.warn(`Either element ${id} not found or path not provided`);
+ }
+ };
+
+ updateLink('downloadLink', pdf);
+ updateLink('downloadLinkWord', docx);
+ updateLink('downloadLinkMd', md);
+ updateLink('downloadLinkJson', json);
+ }
+
+ const updateScroll = () => {
+ window.scrollTo(0, document.body.scrollHeight)
+ }
+
+ const copyToClipboard = () => {
+ const textarea = document.createElement('textarea')
+ textarea.id = 'temp_element'
+ textarea.style.height = 0
+ document.body.appendChild(textarea)
+ textarea.value = document.getElementById('reportContainer').innerText
+ const selector = document.querySelector('#temp_element')
+ selector.select()
+ document.execCommand('copy')
+ document.body.removeChild(textarea)
+ }
+
+ const updateState = (state) => {
+ var status = ''
+ switch (state) {
+ case 'in_progress':
+ status = 'Research in progress...'
+ setReportActionsStatus('disabled')
+ break
+ case 'finished':
+ status = 'Research finished!'
+ setReportActionsStatus('enabled')
+ break
+ case 'error':
+ status = 'Research failed!'
+ setReportActionsStatus('disabled')
+ break
+ case 'initial':
+ status = ''
+ setReportActionsStatus('hidden')
+ break
+ default:
+ setReportActionsStatus('disabled')
+ }
+ document.getElementById('status').innerHTML = status
+ if (document.getElementById('status').innerHTML == '') {
+ document.getElementById('status').style.display = 'none'
+ } else {
+ document.getElementById('status').style.display = 'block'
+ }
+ }
+
+ /**
+ * Shows or hides the download and copy buttons
+ * @param {str} status Kind of hacky. Takes "enabled", "disabled", or "hidden". "Hidden is same as disabled but also hides the div"
+ */
+ const setReportActionsStatus = (status) => {
+ const reportActions = document.getElementById('reportActions')
+ // Disable everything in reportActions until research is finished
+
+ if (status == 'enabled') {
+ reportActions.querySelectorAll('a').forEach((link) => {
+ link.classList.remove('disabled')
+ link.removeAttribute('onclick')
+ reportActions.style.display = 'block'
+ })
+ } else {
+ reportActions.querySelectorAll('a').forEach((link) => {
+ link.classList.add('disabled')
+ link.setAttribute('onclick', 'return false;')
+ })
+ if (status == 'hidden') {
+ reportActions.style.display = 'none'
+ }
+ }
+ }
+
+ const tagsInput = document.getElementById('tags-input');
+ const input = document.getElementById('custom_source');
+
+ const tags = [];
+
+ const addTag = (url) => {
+ if (tags.includes(url)) return;
+ tags.push(url);
+
+ const tagElement = document.createElement('span');
+ tagElement.className = 'tag';
+ tagElement.textContent = url;
+
+ const removeButton = document.createElement('span');
+ removeButton.className = 'remove-tag';
+ removeButton.textContent = 'x';
+ removeButton.onclick = function () {
+ tagsInput.removeChild(tagElement);
+ tags.splice(tags.indexOf(url), 1);
+ };
+
+ tagElement.appendChild(removeButton);
+ tagsInput.insertBefore(tagElement, input);
+ }
+
+ const displaySelectedImages = (data) => {
+ const imageContainer = document.getElementById('selectedImagesContainer')
+ //imageContainer.innerHTML = 'Selected Images '
+ const images = JSON.parse(data.output)
+ console.log("Received images:", images); // Debug log
+ if (images && images.length > 0) {
+ images.forEach(imageUrl => {
+ const imgElement = document.createElement('img')
+ imgElement.src = imageUrl
+ imgElement.alt = 'Research Image'
+ imgElement.style.maxWidth = '200px'
+ imgElement.style.margin = '5px'
+ imgElement.style.cursor = 'pointer'
+ imgElement.onclick = () => showImageDialog(imageUrl)
+ imageContainer.appendChild(imgElement)
+ })
+ imageContainer.style.display = 'block'
+ } else {
+ imageContainer.innerHTML += 'No images found for this research.
'
+ }
+ }
+
+ const showImageDialog = (imageUrl) => {
+ const dialog = document.createElement('div');
+ dialog.className = 'image-dialog';
+
+ const img = document.createElement('img');
+ img.src = imageUrl;
+ img.alt = 'Full-size Research Image';
+
+ const closeBtn = document.createElement('button');
+ closeBtn.textContent = 'Close';
+ closeBtn.onclick = () => document.body.removeChild(dialog);
+
+ dialog.appendChild(img);
+ dialog.appendChild(closeBtn);
+ document.body.appendChild(dialog);
+ }
+
+ document.addEventListener('DOMContentLoaded', init)
+ return {
+ startResearch,
+ copyToClipboard,
+ changeSource,
+ addTag,
+ displaySelectedImages,
+ showImageDialog,
+ }
+})()
diff --git a/frontend/static/academicResearchAgentAvatar.png b/frontend/static/academicResearchAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..6577d8131a150f882ce6874769dc8081b8b3e9c3
--- /dev/null
+++ b/frontend/static/academicResearchAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:973db7494afb6143526ae2e47db1376286ffae67c16e85c5e3866df3840d7bd7
+size 250609
diff --git a/frontend/static/businessAnalystAgentAvatar.png b/frontend/static/businessAnalystAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..1ec0bed00a2264702ffe614b8cf723b2ed1d50d0
--- /dev/null
+++ b/frontend/static/businessAnalystAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65fbb3bbd3f17a101574519bb84078398df008ee5dd783b0a6579c1f09d4cb99
+size 231610
diff --git a/frontend/static/computerSecurityanalystAvatar.png b/frontend/static/computerSecurityanalystAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..52b6d222789147eb3fc51eb2a44ccae523565ffc
--- /dev/null
+++ b/frontend/static/computerSecurityanalystAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea4da6fe48e41f95cadea74e66ac66a087d9c17e3c3544010c3ec8acf53f5723
+size 187238
diff --git a/frontend/static/defaultAgentAvatar.JPG b/frontend/static/defaultAgentAvatar.JPG
new file mode 100644
index 0000000000000000000000000000000000000000..64ea9d9e81bc5934c897486f3e6ee12114a38f47
Binary files /dev/null and b/frontend/static/defaultAgentAvatar.JPG differ
diff --git a/frontend/static/favicon.ico b/frontend/static/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..0fcac8674eb61c7ed97594696cce8dd65e93e730
Binary files /dev/null and b/frontend/static/favicon.ico differ
diff --git a/frontend/static/financeAgentAvatar.png b/frontend/static/financeAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..a492cb15dfce14010030caade5e5f19a0868c572
--- /dev/null
+++ b/frontend/static/financeAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0403d996ca0301365c7ab7c12f1d4f8cf50261196c27d295e5e90ef0f104a99e
+size 238786
diff --git a/frontend/static/gptr-logo.png b/frontend/static/gptr-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..76ec0b5e92c20a788103416236cbf37bc7829be9
Binary files /dev/null and b/frontend/static/gptr-logo.png differ
diff --git a/frontend/static/mathAgentAvatar.png b/frontend/static/mathAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..db9bf7827a916249e0c4b1d5de26669c2f1a8a15
--- /dev/null
+++ b/frontend/static/mathAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19034bf870cfbe98a62df80db0f9579616f4749525403a7036f5c33e4ef8dac8
+size 229464
diff --git a/frontend/static/travelAgentAvatar.png b/frontend/static/travelAgentAvatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..f265aac8f1d2a08634bbb0bc4ad1087945ba6716
--- /dev/null
+++ b/frontend/static/travelAgentAvatar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e27aa79f62fc3b466fe6635c80c0034a8bc326f2f2758ee114de4dd83e6bb24
+size 226893
diff --git a/frontend/styles.css b/frontend/styles.css
new file mode 100644
index 0000000000000000000000000000000000000000..1eef58eff87f17a8f3b261d563a69658a1acf89b
--- /dev/null
+++ b/frontend/styles.css
@@ -0,0 +1,261 @@
+@keyframes gradientBG {
+ 0% {
+ background-position: 0 50%;
+ }
+ 50% {
+ background-position: 100% 50%;
+ }
+ 100% {
+ background-position: 0 50%;
+ }
+}
+
+html {
+ scroll-behavior: smooth;
+}
+
+body {
+ font-family: 'Montserrat', sans-serif;
+ color: #fff;
+ line-height: 1.6;
+ background-color: #1e272e;
+}
+
+.landing {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ height: 100vh;
+ text-align: center;
+}
+
+.landing h1 {
+ font-size: 3.5rem;
+ font-weight: 700;
+ margin-bottom: 2rem;
+}
+
+.landing p {
+ font-size: 1.5rem;
+ font-weight: 400;
+ max-width: 1000px;
+ padding: 0 25px 0 25px;
+ margin: auto auto 2rem auto;
+}
+
+.container {
+ padding: 20px;
+ background-color: rgba(255, 255, 255, 0.1);
+ border-radius: 12px;
+ box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
+ transition: all .3s ease-in-out;
+ margin: auto auto 180px auto;
+}
+
+.container:hover {
+ transform: scale(1.01);
+ box-shadow: 0 15px 30px rgba(0, 0, 0, 0.2);
+}
+
+input, select, #output, #reportContainer {
+ background-color: rgba(255, 255, 255, 0.1);
+ border: none;
+ color: #fff;
+ transition: all .3s ease-in-out;
+}
+
+input:hover, input:focus, select:hover, select:focus {
+ background-color: #dfe4ea;
+ border: 1px solid rgba(255, 255, 255, 0.5);
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+ transition: all 0.3s ease-in-out;
+}
+
+.btn-primary {
+ background: linear-gradient(to right, #0062cc, #007bff);
+ border: none;
+ transition: all .3s ease-in-out;
+}
+
+.btn-secondary {
+ background: linear-gradient(to right, #6c757d, #6c757d);
+ border: none;
+ transition: all .3s ease-in-out;
+}
+
+.btn:hover {
+ opacity: 0.8;
+ transform: scale(1.1);
+ box-shadow: 0 10px 20px rgba(0, 0, 0, 0.3);
+}
+
+.agent_question {
+ font-size: 1.4rem;
+ font-weight: 500;
+ margin-bottom: 0.2rem;
+}
+
+footer {
+ position: fixed;
+ left: 0;
+ bottom: 0;
+ width: 100%;
+ color: white;
+ text-align: center;
+ padding: 10px 0;
+}
+
+footer p {
+ margin-top: 5px;
+ margin-bottom: 0;
+}
+
+.margin-div {
+ margin-top: 20px;
+ margin-bottom: 20px;
+ padding: 25px;
+}
+
+.images_div {
+ padding: 0 25px 0 25px;
+}
+
+.agent_response {
+ background-color: #747d8c;
+ margin: 10px;
+ padding: 10px;
+ border-radius: 12px;
+}
+
+#output {
+ height: 300px;
+ overflow: auto;
+ padding: 10px;
+ margin-bottom: 10px;
+ margin-top: 10px;
+ border-radius: 12px;
+}
+
+#reportContainer {
+ font-family: 'Georgia', 'Times New Roman', Times, "Courier New", serif;
+ font-size: 18px !important;
+ background-color: rgba(255, 255, 255, 0.1);
+ font-family: 'Times New Roman', Times, "Courier New", serif;
+ border: none;
+ color: #fff;
+ transition: all .3s ease-in-out;
+ padding: 25px;
+ border-radius: 12px;
+}
+
+.tags-input {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 5px;
+ border: 1px solid #ccc;
+ padding: 5px;
+ border-radius: 5px;
+}
+
+.tag {
+ background-color: #007bff;
+ color: white;
+ padding: 5px 10px;
+ border-radius: 3px;
+ display: flex;
+ align-items: center;
+}
+
+.tag .remove-tag {
+ margin-left: 10px;
+ cursor: pointer;
+ font-weight: bold;
+}
+
+.tag-input {
+ border: none;
+ outline: none;
+ flex-grow: 1;
+}
+
+footer a {
+ color: #ffffff;
+ font-weight: bold;
+ text-decoration: none;
+}
+
+a:hover {
+ text-decoration: underline;
+}
+
+/* Add or modify these styles at the end of the file */
+#selectedImagesContainer {
+ background-color: rgba(255, 255, 255, 0.1);
+ border-radius: 12px;
+ padding: 15px;
+ margin-bottom: 20px;
+ color: #fff;
+ display: flex;
+ flex-wrap: wrap;
+ gap: 10px;
+ justify-content: center;
+}
+
+#selectedImagesContainer h3 {
+ width: 100%;
+ margin-top: 0;
+ margin-bottom: 10px;
+ color: #fff;
+}
+
+#selectedImagesContainer img {
+ width: 150px;
+ height: 150px;
+ object-fit: cover;
+ cursor: pointer;
+ transition: transform 0.3s ease, box-shadow 0.3s ease;
+ border-radius: 8px;
+}
+
+#selectedImagesContainer img:hover {
+ transform: scale(1.05);
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
+}
+
+.image-dialog {
+ position: fixed;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ background-color: rgba(0, 0, 0, 0.8);
+ display: flex;
+ flex-direction: column;
+ justify-content: center;
+ align-items: center;
+ z-index: 1000;
+}
+
+.image-dialog img {
+ max-width: 90%;
+ max-height: 80%;
+ object-fit: contain;
+ border-radius: 8px;
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
+}
+
+.image-dialog button {
+ margin-top: 20px;
+ padding: 10px 20px;
+ background-color: #007bff;
+ color: white;
+ border: none;
+ border-radius: 5px;
+ cursor: pointer;
+ font-size: 16px;
+ transition: background-color 0.3s ease;
+}
+
+.image-dialog button:hover {
+ background-color: #0056b3;
+}
diff --git a/gpt_researcher/README.md b/gpt_researcher/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..421228ed792a74dd5882ed1e32222bde4cc28c3f
--- /dev/null
+++ b/gpt_researcher/README.md
@@ -0,0 +1,86 @@
+# 🔎 GPT Researcher
+[](https://gptr.dev)
+[](https://discord.com/invite/QgZXvJAccX)
+
+[](https://github.com/assafelovic/gpt-researcher)
+[](https://twitter.com/tavilyai)
+[](https://badge.fury.io/py/gpt-researcher)
+
+**GPT Researcher is an autonomous agent designed for comprehensive online research on a variety of tasks.**
+
+The agent can produce detailed, factual and unbiased research reports, with customization options for focusing on relevant resources, outlines, and lessons. Inspired by the recent [Plan-and-Solve](https://arxiv.org/abs/2305.04091) and [RAG](https://arxiv.org/abs/2005.11401) papers, GPT Researcher addresses issues of speed, determinism and reliability, offering a more stable performance and increased speed through parallelized agent work, as opposed to synchronous operations.
+
+**Our mission is to empower individuals and organizations with accurate, unbiased, and factual information by leveraging the power of AI.**
+
+#### PIP Package
+> **Step 0** - Install Python 3.11 or later. [See here](https://www.tutorialsteacher.com/python/install-python) for a step-by-step guide.
+> **Step 1** - install GPT Researcher package [PyPI page](https://pypi.org/project/gpt-researcher/)
+```bash
+$ pip install gpt-researcher
+```
+> **Step 2** - Create .env file with your OpenAI Key and Tavily API key or simply export it
+```bash
+$ export OPENAI_API_KEY={Your OpenAI API Key here}
+```
+```bash
+$ export TAVILY_API_KEY={Your Tavily API Key here}
+```
+> **Step 3** - Start Coding using GPT Researcher in your own code, example:
+```python
+from gpt_researcher import GPTResearcher
+import asyncio
+
+
+async def get_report(query: str, report_type: str) -> str:
+ researcher = GPTResearcher(query, report_type)
+ report = await researcher.run()
+ return report
+
+if __name__ == "__main__":
+ query = "what team may win the NBA finals?"
+ report_type = "research_report"
+
+ report = asyncio.run(get_report(query, report_type))
+ print(report)
+
+```
+
+### Customize the configuration (optional)
+This will override the default settings with your custom configuration. You can find all available configuration options in the [GPT Researcher documentation](https://docs.gptr.dev/docs/gpt-researcher/gptr/config).
+
+
+#### Using a Custom JSON Configuration
+
+If you want to modify the default configuration of GPT Researcher, you can create a custom JSON configuration file. This allows you to tailor the researcher's behavior to your specific needs. Here's how to do it:
+
+a. Create a JSON file (e.g., `your_config.json`) with your desired settings:
+
+```json
+{
+ "retrievers": ["google"],
+ "fast_llm": "cohere:command",
+ "smart_llm": "cohere:command-nightly",
+ "max_iterations": 3,
+ "max_subtopics": 1
+}
+```
+
+b. When initializing the GPTResearcher, pass the path to your custom configuration file:
+
+```python
+researcher = GPTResearcher(query, report_type, config_path="your_config.json")
+```
+
+#### Using Environment Variables
+
+Alternatively, you can set up the same configuration using environment variables instead of a JSON file. Here's how the example from Part 1 would look in your `.env` file:
+
+```
+RETRIEVERS=google
+FAST_LLM=cohere:command
+SMART_LLM=cohere:command-nightly
+MAX_ITERATIONS=3
+MAX_SUBTOPICS=1
+```
+
+Simply add these lines to your `.env` file, and GPT Researcher will use the environment variables to configure its behavior. This approach provides flexibility when deploying in different environments.
\ No newline at end of file
diff --git a/gpt_researcher/__init__.py b/gpt_researcher/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f749cf7e60a1e83bad167d135d72e5c402bec29
--- /dev/null
+++ b/gpt_researcher/__init__.py
@@ -0,0 +1,3 @@
+from .agent import GPTResearcher
+
+__all__ = ['GPTResearcher']
\ No newline at end of file
diff --git a/gpt_researcher/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7485a14de182febf30a932f0e589e5174574d14f
Binary files /dev/null and b/gpt_researcher/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/__pycache__/agent.cpython-312.pyc b/gpt_researcher/__pycache__/agent.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a717d59d8740647eb3bfd82ffad65be14866c6d8
Binary files /dev/null and b/gpt_researcher/__pycache__/agent.cpython-312.pyc differ
diff --git a/gpt_researcher/__pycache__/prompts.cpython-312.pyc b/gpt_researcher/__pycache__/prompts.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45dd1cb1cd841878ec28a01e2b7c2b34b7c0e98b
Binary files /dev/null and b/gpt_researcher/__pycache__/prompts.cpython-312.pyc differ
diff --git a/gpt_researcher/actions/__init__.py b/gpt_researcher/actions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fba61c105b19be2252fb1fccd9360be2d3c48833
--- /dev/null
+++ b/gpt_researcher/actions/__init__.py
@@ -0,0 +1,26 @@
+from .retriever import get_retriever, get_retrievers
+from .query_processing import plan_research_outline
+from .agent_creator import extract_json_with_regex, choose_agent
+from .web_scraping import scrape_urls
+from .report_generation import write_conclusion, summarize_url, generate_draft_section_titles, generate_report, write_report_introduction
+from .markdown_processing import extract_headers, extract_sections, table_of_contents, add_references
+from .utils import stream_output
+
+__all__ = [
+ "get_retriever",
+ "get_retrievers",
+ "plan_research_outline",
+ "extract_json_with_regex",
+ "scrape_urls",
+ "write_conclusion",
+ "summarize_url",
+ "generate_draft_section_titles",
+ "generate_report",
+ "write_report_introduction",
+ "extract_headers",
+ "extract_sections",
+ "table_of_contents",
+ "add_references",
+ "stream_output",
+ "choose_agent"
+]
\ No newline at end of file
diff --git a/gpt_researcher/actions/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/actions/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2eea5cd2fed34024ed6c2dfb4e0f7ee543f50ab8
Binary files /dev/null and b/gpt_researcher/actions/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/actions/__pycache__/agent_creator.cpython-312.pyc b/gpt_researcher/actions/__pycache__/agent_creator.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20ef45fbf77a8e65c2a938c5bb9ed870118e881a
Binary files /dev/null and b/gpt_researcher/actions/__pycache__/agent_creator.cpython-312.pyc differ
diff --git a/gpt_researcher/actions/__pycache__/markdown_processing.cpython-312.pyc b/gpt_researcher/actions/__pycache__/markdown_processing.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3856ba7f976830f7342719b686d4e7ad8c7d2453
Binary files /dev/null and b/gpt_researcher/actions/__pycache__/markdown_processing.cpython-312.pyc differ
diff --git a/gpt_researcher/actions/__pycache__/query_processing.cpython-312.pyc b/gpt_researcher/actions/__pycache__/query_processing.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91449f90b5951590f86af6494b4039cc4e0a7b28
Binary files /dev/null and b/gpt_researcher/actions/__pycache__/query_processing.cpython-312.pyc differ
diff --git a/gpt_researcher/actions/__pycache__/report_generation.cpython-312.pyc b/gpt_researcher/actions/__pycache__/report_generation.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a810a39b1f2d18cdb803253eadeb6290629ce33
Binary files /dev/null and b/gpt_researcher/actions/__pycache__/report_generation.cpython-312.pyc differ
diff --git a/gpt_researcher/actions/__pycache__/retriever.cpython-312.pyc b/gpt_researcher/actions/__pycache__/retriever.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63d754d933cf36ff576ca12e0503ba0175f21847
Binary files /dev/null and b/gpt_researcher/actions/__pycache__/retriever.cpython-312.pyc differ
diff --git a/gpt_researcher/actions/__pycache__/utils.cpython-312.pyc b/gpt_researcher/actions/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f16b076c4bc238e432dd578c04f53addad0e53a5
Binary files /dev/null and b/gpt_researcher/actions/__pycache__/utils.cpython-312.pyc differ
diff --git a/gpt_researcher/actions/__pycache__/web_scraping.cpython-312.pyc b/gpt_researcher/actions/__pycache__/web_scraping.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b302779e0ca64c58d5066d07071fe879468761fa
Binary files /dev/null and b/gpt_researcher/actions/__pycache__/web_scraping.cpython-312.pyc differ
diff --git a/gpt_researcher/actions/agent_creator.py b/gpt_researcher/actions/agent_creator.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a23e02922f383a1646b033eb56ede63e0b210ab
--- /dev/null
+++ b/gpt_researcher/actions/agent_creator.py
@@ -0,0 +1,74 @@
+import json
+import re
+import json_repair
+from ..utils.llm import create_chat_completion
+from ..prompts import auto_agent_instructions
+
+async def choose_agent(
+ query, cfg, parent_query=None, cost_callback: callable = None, headers=None
+):
+ """
+ Chooses the agent automatically
+ Args:
+ parent_query: In some cases the research is conducted on a subtopic from the main query.
+ The parent query allows the agent to know the main context for better reasoning.
+ query: original query
+ cfg: Config
+ cost_callback: callback for calculating llm costs
+
+ Returns:
+ agent: Agent name
+ agent_role_prompt: Agent role prompt
+ """
+ query = f"{parent_query} - {query}" if parent_query else f"{query}"
+ response = None # Initialize response to ensure it's defined
+
+ try:
+ response = await create_chat_completion(
+ model=cfg.smart_llm_model,
+ messages=[
+ {"role": "system", "content": f"{auto_agent_instructions()}"},
+ {"role": "user", "content": f"task: {query}"},
+ ],
+ temperature=0.15,
+ llm_provider=cfg.smart_llm_provider,
+ llm_kwargs=cfg.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+
+ agent_dict = json.loads(response)
+ return agent_dict["server"], agent_dict["agent_role_prompt"]
+
+ except Exception as e:
+ print("⚠️ Error in reading JSON, attempting to repair JSON")
+ return await handle_json_error(response)
+
+
+async def handle_json_error(response):
+ try:
+ agent_dict = json_repair.loads(response)
+ if agent_dict.get("server") and agent_dict.get("agent_role_prompt"):
+ return agent_dict["server"], agent_dict["agent_role_prompt"]
+ except Exception as e:
+ print(f"Error using json_repair: {e}")
+
+ json_string = extract_json_with_regex(response)
+ if json_string:
+ try:
+ json_data = json.loads(json_string)
+ return json_data["server"], json_data["agent_role_prompt"]
+ except json.JSONDecodeError as e:
+ print(f"Error decoding JSON: {e}")
+
+ print("No JSON found in the string. Falling back to Default Agent.")
+ return "Default Agent", (
+ "You are an AI critical thinker research assistant. Your sole purpose is to write well written, "
+ "critically acclaimed, objective and structured reports on given text."
+ )
+
+
+def extract_json_with_regex(response):
+ json_match = re.search(r"{.*?}", response, re.DOTALL)
+ if json_match:
+ return json_match.group(0)
+ return None
\ No newline at end of file
diff --git a/gpt_researcher/actions/markdown_processing.py b/gpt_researcher/actions/markdown_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..92c65c413ca9c3761f305f9cf007ff294ab413ae
--- /dev/null
+++ b/gpt_researcher/actions/markdown_processing.py
@@ -0,0 +1,112 @@
+import re
+import markdown
+from typing import List, Dict
+
+def extract_headers(markdown_text: str) -> List[Dict]:
+ """
+ Extract headers from markdown text.
+
+ Args:
+ markdown_text (str): The markdown text to process.
+
+ Returns:
+ List[Dict]: A list of dictionaries representing the header structure.
+ """
+ headers = []
+ parsed_md = markdown.markdown(markdown_text)
+ lines = parsed_md.split("\n")
+
+ stack = []
+ for line in lines:
+ if line.startswith(" 2 and line[2].isdigit():
+ level = int(line[2])
+ header_text = line[line.index(">") + 1 : line.rindex("<")]
+
+ while stack and stack[-1]["level"] >= level:
+ stack.pop()
+
+ header = {
+ "level": level,
+ "text": header_text,
+ }
+ if stack:
+ stack[-1].setdefault("children", []).append(header)
+ else:
+ headers.append(header)
+
+ stack.append(header)
+
+ return headers
+
+def extract_sections(markdown_text: str) -> List[Dict[str, str]]:
+ """
+ Extract all written sections from subtopic report.
+
+ Args:
+ markdown_text (str): Subtopic report text.
+
+ Returns:
+ List[Dict[str, str]]: List of sections, each section is a dictionary containing
+ 'section_title' and 'written_content'.
+ """
+ sections = []
+ parsed_md = markdown.markdown(markdown_text)
+
+ pattern = r'(.*?) (.*?)(?=|$)'
+ matches = re.findall(pattern, parsed_md, re.DOTALL)
+
+ for title, content in matches:
+ clean_content = re.sub(r'<.*?>', '', content).strip()
+ if clean_content:
+ sections.append({
+ "section_title": title.strip(),
+ "written_content": clean_content
+ })
+
+ return sections
+
+def table_of_contents(markdown_text: str) -> str:
+ """
+ Generate a table of contents for the given markdown text.
+
+ Args:
+ markdown_text (str): The markdown text to process.
+
+ Returns:
+ str: The generated table of contents.
+ """
+ def generate_table_of_contents(headers, indent_level=0):
+ toc = ""
+ for header in headers:
+ toc += " " * (indent_level * 4) + "- " + header["text"] + "\n"
+ if "children" in header:
+ toc += generate_table_of_contents(header["children"], indent_level + 1)
+ return toc
+
+ try:
+ headers = extract_headers(markdown_text)
+ toc = "## Table of Contents\n\n" + generate_table_of_contents(headers)
+ return toc
+ except Exception as e:
+ print("table_of_contents Exception : ", e)
+ return markdown_text
+
+def add_references(report_markdown: str, visited_urls: set) -> str:
+ """
+ Add references to the markdown report.
+
+ Args:
+ report_markdown (str): The existing markdown report.
+ visited_urls (set): A set of URLs that have been visited during research.
+
+ Returns:
+ str: The updated markdown report with added references.
+ """
+ try:
+ url_markdown = "\n\n\n## References\n\n"
+ url_markdown += "".join(f"- [{url}]({url})\n" for url in visited_urls)
+ updated_markdown_report = report_markdown + url_markdown
+ return updated_markdown_report
+ except Exception as e:
+ print(f"Encountered exception in adding source urls : {e}")
+ return report_markdown
\ No newline at end of file
diff --git a/gpt_researcher/actions/query_processing.py b/gpt_researcher/actions/query_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..320a4b44265d43618b01506f2084cd351a082906
--- /dev/null
+++ b/gpt_researcher/actions/query_processing.py
@@ -0,0 +1,128 @@
+import json_repair
+from ..utils.llm import create_chat_completion
+from ..prompts import generate_search_queries_prompt
+from typing import Any, List, Dict
+from ..config import Config
+import logging
+
+logger = logging.getLogger(__name__)
+
+async def get_search_results(query: str, retriever: Any) -> List[Dict[str, Any]]:
+ """
+ Get web search results for a given query.
+
+ Args:
+ query: The search query
+ retriever: The retriever instance
+
+ Returns:
+ A list of search results
+ """
+ search_retriever = retriever(query)
+ return search_retriever.search()
+
+async def generate_sub_queries(
+ query: str,
+ parent_query: str,
+ report_type: str,
+ context: List[Dict[str, Any]],
+ cfg: Config,
+ cost_callback: callable = None
+) -> List[str]:
+ """
+ Generate sub-queries using the specified LLM model.
+
+ Args:
+ query: The original query
+ parent_query: The parent query
+ report_type: The type of report
+ max_iterations: Maximum number of research iterations
+ context: Search results context
+ cfg: Configuration object
+ cost_callback: Callback for cost calculation
+
+ Returns:
+ A list of sub-queries
+ """
+ gen_queries_prompt = generate_search_queries_prompt(
+ query,
+ parent_query,
+ report_type,
+ max_iterations=cfg.max_iterations or 1,
+ context=context
+ )
+
+ try:
+ response = await create_chat_completion(
+ model=cfg.strategic_llm_model,
+ messages=[{"role": "user", "content": gen_queries_prompt}],
+ temperature=1,
+ llm_provider=cfg.strategic_llm_provider,
+ max_tokens=None,
+ llm_kwargs=cfg.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+ except Exception as e:
+ logger.warning(f"Error with strategic LLM: {e}. Retrying with max_tokens={cfg.strategic_token_limit}.")
+ logger.warning(f"See https://github.com/assafelovic/gpt-researcher/issues/1022")
+ try:
+ response = await create_chat_completion(
+ model=cfg.strategic_llm_model,
+ messages=[{"role": "user", "content": gen_queries_prompt}],
+ temperature=1,
+ llm_provider=cfg.strategic_llm_provider,
+ max_tokens=cfg.strategic_token_limit,
+ llm_kwargs=cfg.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+ logger.warning(f"Retrying with max_tokens={cfg.strategic_token_limit} successful.")
+ except Exception as e:
+ logger.warning(f"Retrying with max_tokens={cfg.strategic_token_limit} failed.")
+ logger.warning(f"Error with strategic LLM: {e}. Falling back to smart LLM.")
+ response = await create_chat_completion(
+ model=cfg.smart_llm_model,
+ messages=[{"role": "user", "content": gen_queries_prompt}],
+ temperature=cfg.temperature,
+ max_tokens=cfg.smart_token_limit,
+ llm_provider=cfg.smart_llm_provider,
+ llm_kwargs=cfg.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+
+ return json_repair.loads(response)
+
+async def plan_research_outline(
+ query: str,
+ search_results: List[Dict[str, Any]],
+ agent_role_prompt: str,
+ cfg: Config,
+ parent_query: str,
+ report_type: str,
+ cost_callback: callable = None,
+) -> List[str]:
+ """
+ Plan the research outline by generating sub-queries.
+
+ Args:
+ query: Original query
+ retriever: Retriever instance
+ agent_role_prompt: Agent role prompt
+ cfg: Configuration object
+ parent_query: Parent query
+ report_type: Report type
+ cost_callback: Callback for cost calculation
+
+ Returns:
+ A list of sub-queries
+ """
+
+ sub_queries = await generate_sub_queries(
+ query,
+ parent_query,
+ report_type,
+ search_results,
+ cfg,
+ cost_callback
+ )
+
+ return sub_queries
diff --git a/gpt_researcher/actions/report_generation.py b/gpt_researcher/actions/report_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a34db268e087990c383b331f17cb254c3623e99
--- /dev/null
+++ b/gpt_researcher/actions/report_generation.py
@@ -0,0 +1,266 @@
+import asyncio
+from typing import List, Dict, Any
+from ..config.config import Config
+from ..utils.llm import create_chat_completion
+from ..utils.logger import get_formatted_logger
+from ..prompts import (
+ generate_report_introduction,
+ generate_draft_titles_prompt,
+ generate_report_conclusion,
+ get_prompt_by_report_type,
+)
+from ..utils.enum import Tone
+
+logger = get_formatted_logger()
+
+
+async def write_report_introduction(
+ query: str,
+ context: str,
+ agent_role_prompt: str,
+ config: Config,
+ websocket=None,
+ cost_callback: callable = None
+) -> str:
+ """
+ Generate an introduction for the report.
+
+ Args:
+ query (str): The research query.
+ context (str): Context for the report.
+ role (str): The role of the agent.
+ config (Config): Configuration object.
+ websocket: WebSocket connection for streaming output.
+ cost_callback (callable, optional): Callback for calculating LLM costs.
+
+ Returns:
+ str: The generated introduction.
+ """
+ try:
+ introduction = await create_chat_completion(
+ model=config.smart_llm_model,
+ messages=[
+ {"role": "system", "content": f"{agent_role_prompt}"},
+ {"role": "user", "content": generate_report_introduction(
+ query, context)},
+ ],
+ temperature=0.25,
+ llm_provider=config.smart_llm_provider,
+ stream=True,
+ websocket=websocket,
+ max_tokens=config.smart_token_limit,
+ llm_kwargs=config.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+ return introduction
+ except Exception as e:
+ logger.error(f"Error in generating report introduction: {e}")
+ return ""
+
+
+async def write_conclusion(
+ query: str,
+ context: str,
+ agent_role_prompt: str,
+ config: Config,
+ websocket=None,
+ cost_callback: callable = None
+) -> str:
+ """
+ Write a conclusion for the report.
+
+ Args:
+ query (str): The research query.
+ context (str): Context for the report.
+ role (str): The role of the agent.
+ config (Config): Configuration object.
+ websocket: WebSocket connection for streaming output.
+ cost_callback (callable, optional): Callback for calculating LLM costs.
+
+ Returns:
+ str: The generated conclusion.
+ """
+ try:
+ conclusion = await create_chat_completion(
+ model=config.smart_llm_model,
+ messages=[
+ {"role": "system", "content": f"{agent_role_prompt}"},
+ {"role": "user", "content": generate_report_conclusion(query, context)},
+ ],
+ temperature=0.25,
+ llm_provider=config.smart_llm_provider,
+ stream=True,
+ websocket=websocket,
+ max_tokens=config.smart_token_limit,
+ llm_kwargs=config.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+ return conclusion
+ except Exception as e:
+ logger.error(f"Error in writing conclusion: {e}")
+ return ""
+
+
+async def summarize_url(
+ url: str,
+ content: str,
+ role: str,
+ config: Config,
+ websocket=None,
+ cost_callback: callable = None
+) -> str:
+ """
+ Summarize the content of a URL.
+
+ Args:
+ url (str): The URL to summarize.
+ content (str): The content of the URL.
+ role (str): The role of the agent.
+ config (Config): Configuration object.
+ websocket: WebSocket connection for streaming output.
+ cost_callback (callable, optional): Callback for calculating LLM costs.
+
+ Returns:
+ str: The summarized content.
+ """
+ try:
+ summary = await create_chat_completion(
+ model=config.smart_llm_model,
+ messages=[
+ {"role": "system", "content": f"{role}"},
+ {"role": "user", "content": f"Summarize the following content from {url}:\n\n{content}"},
+ ],
+ temperature=0.25,
+ llm_provider=config.smart_llm_provider,
+ stream=True,
+ websocket=websocket,
+ max_tokens=config.smart_token_limit,
+ llm_kwargs=config.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+ return summary
+ except Exception as e:
+ logger.error(f"Error in summarizing URL: {e}")
+ return ""
+
+
+async def generate_draft_section_titles(
+ query: str,
+ current_subtopic: str,
+ context: str,
+ role: str,
+ config: Config,
+ websocket=None,
+ cost_callback: callable = None
+) -> List[str]:
+ """
+ Generate draft section titles for the report.
+
+ Args:
+ query (str): The research query.
+ context (str): Context for the report.
+ role (str): The role of the agent.
+ config (Config): Configuration object.
+ websocket: WebSocket connection for streaming output.
+ cost_callback (callable, optional): Callback for calculating LLM costs.
+
+ Returns:
+ List[str]: A list of generated section titles.
+ """
+ try:
+ section_titles = await create_chat_completion(
+ model=config.smart_llm_model,
+ messages=[
+ {"role": "system", "content": f"{role}"},
+ {"role": "user", "content": generate_draft_titles_prompt(
+ current_subtopic, query, context)},
+ ],
+ temperature=0.25,
+ llm_provider=config.smart_llm_provider,
+ stream=True,
+ websocket=None,
+ max_tokens=config.smart_token_limit,
+ llm_kwargs=config.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+ return section_titles.split("\n")
+ except Exception as e:
+ logger.error(f"Error in generating draft section titles: {e}")
+ return []
+
+
+async def generate_report(
+ query: str,
+ context,
+ agent_role_prompt: str,
+ report_type: str,
+ tone: Tone,
+ report_source: str,
+ websocket,
+ cfg,
+ main_topic: str = "",
+ existing_headers: list = [],
+ relevant_written_contents: list = [],
+ cost_callback: callable = None,
+ headers=None,
+):
+ """
+ generates the final report
+ Args:
+ query:
+ context:
+ agent_role_prompt:
+ report_type:
+ websocket:
+ tone:
+ cfg:
+ main_topic:
+ existing_headers:
+ relevant_written_contents:
+ cost_callback:
+
+ Returns:
+ report:
+
+ """
+ generate_prompt = get_prompt_by_report_type(report_type)
+ report = ""
+
+ if report_type == "subtopic_report":
+ content = f"{generate_prompt(query, existing_headers, relevant_written_contents, main_topic, context, report_format=cfg.report_format, tone=tone, total_words=cfg.total_words, language=cfg.language)}"
+ else:
+ content = f"{generate_prompt(query, context, report_source, report_format=cfg.report_format, tone=tone, total_words=cfg.total_words, language=cfg.language)}"
+ try:
+ report = await create_chat_completion(
+ model=cfg.smart_llm_model,
+ messages=[
+ {"role": "system", "content": f"{agent_role_prompt}"},
+ {"role": "user", "content": content},
+ ],
+ temperature=0.35,
+ llm_provider=cfg.smart_llm_provider,
+ stream=True,
+ websocket=websocket,
+ max_tokens=cfg.smart_token_limit,
+ llm_kwargs=cfg.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+ except:
+ try:
+ report = await create_chat_completion(
+ model=cfg.smart_llm_model,
+ messages=[
+ {"role": "user", "content": f"{agent_role_prompt}\n\n{content}"},
+ ],
+ temperature=0.35,
+ llm_provider=cfg.smart_llm_provider,
+ stream=True,
+ websocket=websocket,
+ max_tokens=cfg.smart_token_limit,
+ llm_kwargs=cfg.llm_kwargs,
+ cost_callback=cost_callback,
+ )
+ except Exception as e:
+ print(f"Error in generate_report: {e}")
+
+ return report
diff --git a/gpt_researcher/actions/retriever.py b/gpt_researcher/actions/retriever.py
new file mode 100644
index 0000000000000000000000000000000000000000..fffbdb9a9766a6a4a84e8d93ec355c8afcc63f73
--- /dev/null
+++ b/gpt_researcher/actions/retriever.py
@@ -0,0 +1,109 @@
+from typing import List, Type
+from ..config.config import Config
+
+def get_retriever(retriever):
+ """
+ Gets the retriever
+ Args:
+ retriever: retriever name
+
+ Returns:
+ retriever: Retriever class
+
+ """
+ match retriever:
+ case "google":
+ from gpt_researcher.retrievers import GoogleSearch
+
+ retriever = GoogleSearch
+ case "searx":
+ from gpt_researcher.retrievers import SearxSearch
+
+ retriever = SearxSearch
+ case "searchapi":
+ from gpt_researcher.retrievers import SearchApiSearch
+
+ retriever = SearchApiSearch
+ case "serpapi":
+ from gpt_researcher.retrievers import SerpApiSearch
+
+ retriever = SerpApiSearch
+ case "serper":
+ from gpt_researcher.retrievers import SerperSearch
+
+ retriever = SerperSearch
+ case "duckduckgo":
+ from gpt_researcher.retrievers import Duckduckgo
+
+ retriever = Duckduckgo
+ case "bing":
+ from gpt_researcher.retrievers import BingSearch
+
+ retriever = BingSearch
+ case "arxiv":
+ from gpt_researcher.retrievers import ArxivSearch
+
+ retriever = ArxivSearch
+ case "tavily":
+ from gpt_researcher.retrievers import TavilySearch
+
+ retriever = TavilySearch
+ case "exa":
+ from gpt_researcher.retrievers import ExaSearch
+
+ retriever = ExaSearch
+ case "semantic_scholar":
+ from gpt_researcher.retrievers import SemanticScholarSearch
+
+ retriever = SemanticScholarSearch
+ case "pubmed_central":
+ from gpt_researcher.retrievers import PubMedCentralSearch
+
+ retriever = PubMedCentralSearch
+ case "custom":
+ from gpt_researcher.retrievers import CustomRetriever
+
+ retriever = CustomRetriever
+
+ case _:
+ retriever = None
+
+ return retriever
+
+
+def get_retrievers(headers, cfg):
+ """
+ Determine which retriever(s) to use based on headers, config, or default.
+
+ Args:
+ headers (dict): The headers dictionary
+ cfg (Config): The configuration object
+
+ Returns:
+ list: A list of retriever classes to be used for searching.
+ """
+ # Check headers first for multiple retrievers
+ if headers.get("retrievers"):
+ retrievers = headers.get("retrievers").split(",")
+ # If not found, check headers for a single retriever
+ elif headers.get("retriever"):
+ retrievers = [headers.get("retriever")]
+ # If not in headers, check config for multiple retrievers
+ elif cfg.retrievers:
+ retrievers = cfg.retrievers
+ # If not found, check config for a single retriever
+ elif cfg.retriever:
+ retrievers = [cfg.retriever]
+ # If still not set, use default retriever
+ else:
+ retrievers = [get_default_retriever().__name__]
+
+ # Convert retriever names to actual retriever classes
+ # Use get_default_retriever() as a fallback for any invalid retriever names
+ return [get_retriever(r) or get_default_retriever() for r in retrievers]
+
+
+def get_default_retriever(retriever):
+ from gpt_researcher.retrievers import TavilySearch
+
+ return TavilySearch
\ No newline at end of file
diff --git a/gpt_researcher/actions/utils.py b/gpt_researcher/actions/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9eb398d600c621f9a4f6b12dcce918507cfdcd10
--- /dev/null
+++ b/gpt_researcher/actions/utils.py
@@ -0,0 +1,149 @@
+from typing import Dict, Any, Callable
+from ..utils.logger import get_formatted_logger
+
+logger = get_formatted_logger()
+
+
+async def stream_output(
+ type, content, output, websocket=None, output_log=True, metadata=None
+):
+ """
+ Streams output to the websocket
+ Args:
+ type:
+ content:
+ output:
+
+ Returns:
+ None
+ """
+ if (not websocket or output_log) and type != "images":
+ try:
+ logger.info(f"{output}")
+ except UnicodeEncodeError:
+ # Option 1: Replace problematic characters with a placeholder
+ logger.error(output.encode(
+ 'cp1252', errors='replace').decode('cp1252'))
+
+ if websocket:
+ await websocket.send_json(
+ {"type": type, "content": content,
+ "output": output, "metadata": metadata}
+ )
+
+
+async def safe_send_json(websocket: Any, data: Dict[str, Any]) -> None:
+ """
+ Safely send JSON data through a WebSocket connection.
+
+ Args:
+ websocket (WebSocket): The WebSocket connection to send data through.
+ data (Dict[str, Any]): The data to send as JSON.
+
+ Returns:
+ None
+ """
+ try:
+ await websocket.send_json(data)
+ except Exception as e:
+ logger.error(f"Error sending JSON through WebSocket: {e}")
+
+
+def calculate_cost(
+ prompt_tokens: int,
+ completion_tokens: int,
+ model: str
+) -> float:
+ """
+ Calculate the cost of API usage based on the number of tokens and the model used.
+
+ Args:
+ prompt_tokens (int): Number of tokens in the prompt.
+ completion_tokens (int): Number of tokens in the completion.
+ model (str): The model used for the API call.
+
+ Returns:
+ float: The calculated cost in USD.
+ """
+ # Define cost per 1k tokens for different models
+ costs = {
+ "gpt-3.5-turbo": 0.002,
+ "gpt-4": 0.03,
+ "gpt-4-32k": 0.06,
+ # Add more models and their costs as needed
+ }
+
+ model = model.lower()
+ if model not in costs:
+ logger.warning(
+ f"Unknown model: {model}. Cost calculation may be inaccurate.")
+ return 0.0
+
+ cost_per_1k = costs[model]
+ total_tokens = prompt_tokens + completion_tokens
+ return (total_tokens / 1000) * cost_per_1k
+
+
+def format_token_count(count: int) -> str:
+ """
+ Format the token count with commas for better readability.
+
+ Args:
+ count (int): The token count to format.
+
+ Returns:
+ str: The formatted token count.
+ """
+ return f"{count:,}"
+
+
+async def update_cost(
+ prompt_tokens: int,
+ completion_tokens: int,
+ model: str,
+ websocket: Any
+) -> None:
+ """
+ Update and send the cost information through the WebSocket.
+
+ Args:
+ prompt_tokens (int): Number of tokens in the prompt.
+ completion_tokens (int): Number of tokens in the completion.
+ model (str): The model used for the API call.
+ websocket (WebSocket): The WebSocket connection to send data through.
+
+ Returns:
+ None
+ """
+ cost = calculate_cost(prompt_tokens, completion_tokens, model)
+ total_tokens = prompt_tokens + completion_tokens
+
+ await safe_send_json(websocket, {
+ "type": "cost",
+ "data": {
+ "total_tokens": format_token_count(total_tokens),
+ "prompt_tokens": format_token_count(prompt_tokens),
+ "completion_tokens": format_token_count(completion_tokens),
+ "total_cost": f"${cost:.4f}"
+ }
+ })
+
+
+def create_cost_callback(websocket: Any) -> Callable:
+ """
+ Create a callback function for updating costs.
+
+ Args:
+ websocket (WebSocket): The WebSocket connection to send data through.
+
+ Returns:
+ Callable: A callback function that can be used to update costs.
+ """
+ async def cost_callback(
+ prompt_tokens: int,
+ completion_tokens: int,
+ model: str
+ ) -> None:
+ await update_cost(prompt_tokens, completion_tokens, model, websocket)
+
+ return cost_callback
diff --git a/gpt_researcher/actions/web_scraping.py b/gpt_researcher/actions/web_scraping.py
new file mode 100644
index 0000000000000000000000000000000000000000..971e198927c3d49f46eb0b34ef725658c675acca
--- /dev/null
+++ b/gpt_researcher/actions/web_scraping.py
@@ -0,0 +1,95 @@
+from typing import List, Dict, Any, Tuple
+from colorama import Fore, Style
+from ..scraper import Scraper
+from ..config.config import Config
+from ..utils.logger import get_formatted_logger
+
+logger = get_formatted_logger()
+
+def scrape_urls(urls, cfg=None) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
+ """
+ Scrapes the urls
+ Args:
+ urls: List of urls
+ cfg: Config (optional)
+
+ Returns:
+ Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]: Tuple containing scraped content and images
+
+ """
+ scraped_data = []
+ images = []
+ user_agent = (
+ cfg.user_agent
+ if cfg
+ else "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
+ )
+
+ try:
+ scraper = Scraper(urls, user_agent, cfg.scraper)
+ scraped_data = scraper.run()
+ for item in scraped_data:
+ if 'image_urls' in item:
+ images.extend([img for img in item['image_urls']])
+ except Exception as e:
+ print(f"{Fore.RED}Error in scrape_urls: {e}{Style.RESET_ALL}")
+
+ return scraped_data, images
+
+async def filter_urls(urls: List[str], config: Config) -> List[str]:
+ """
+ Filter URLs based on configuration settings.
+
+ Args:
+ urls (List[str]): List of URLs to filter.
+ config (Config): Configuration object.
+
+ Returns:
+ List[str]: Filtered list of URLs.
+ """
+ filtered_urls = []
+ for url in urls:
+ # Add your filtering logic here
+ # For example, you might want to exclude certain domains or URL patterns
+ if not any(excluded in url for excluded in config.excluded_domains):
+ filtered_urls.append(url)
+ return filtered_urls
+
+async def extract_main_content(html_content: str) -> str:
+ """
+ Extract the main content from HTML.
+
+ Args:
+ html_content (str): Raw HTML content.
+
+ Returns:
+ str: Extracted main content.
+ """
+ # Implement content extraction logic here
+ # This could involve using libraries like BeautifulSoup or custom parsing logic
+ # For now, we'll just return the raw HTML as a placeholder
+ return html_content
+
+async def process_scraped_data(scraped_data: List[Dict[str, Any]], config: Config) -> List[Dict[str, Any]]:
+ """
+ Process the scraped data to extract and clean the main content.
+
+ Args:
+ scraped_data (List[Dict[str, Any]]): List of dictionaries containing scraped data.
+ config (Config): Configuration object.
+
+ Returns:
+ List[Dict[str, Any]]: Processed scraped data.
+ """
+ processed_data = []
+ for item in scraped_data:
+ if item['status'] == 'success':
+ main_content = await extract_main_content(item['content'])
+ processed_data.append({
+ 'url': item['url'],
+ 'content': main_content,
+ 'status': 'success'
+ })
+ else:
+ processed_data.append(item)
+ return processed_data
diff --git a/gpt_researcher/agent.py b/gpt_researcher/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..0faf426b88d27d983c51cf4f42daaf6577aa17ba
--- /dev/null
+++ b/gpt_researcher/agent.py
@@ -0,0 +1,240 @@
+from typing import Optional, List, Dict, Any, Set
+import json
+
+from .config import Config
+from .memory import Memory
+from .utils.enum import ReportSource, ReportType, Tone
+from .llm_provider import GenericLLMProvider
+from .vector_store import VectorStoreWrapper
+
+# Research skills
+from .skills.researcher import ResearchConductor
+from .skills.writer import ReportGenerator
+from .skills.context_manager import ContextManager
+from .skills.browser import BrowserManager
+from .skills.curator import SourceCurator
+
+from .actions import (
+ add_references,
+ extract_headers,
+ extract_sections,
+ table_of_contents,
+ get_retrievers,
+ choose_agent
+)
+
+
+class GPTResearcher:
+ def __init__(
+ self,
+ query: str,
+ report_type: str = ReportType.ResearchReport.value,
+ report_format: str = "markdown",
+ report_source: str = ReportSource.Web.value,
+ tone: Tone = Tone.Objective,
+ source_urls=None,
+ document_urls=None,
+ complement_source_urls=False,
+ documents=None,
+ vector_store=None,
+ vector_store_filter=None,
+ config_path=None,
+ websocket=None,
+ agent=None,
+ role=None,
+ parent_query: str = "",
+ subtopics: list = [],
+ visited_urls: set = set(),
+ verbose: bool = True,
+ context=[],
+ headers: dict = None,
+ max_subtopics: int = 5,
+ log_handler=None,
+ ):
+ self.query = query
+ self.report_type = report_type
+ self.cfg = Config(config_path)
+ self.llm = GenericLLMProvider(self.cfg)
+ self.report_source = report_source if report_source else getattr(self.cfg, 'report_source', None)
+ self.report_format = report_format
+ self.max_subtopics = max_subtopics
+ self.tone = tone if isinstance(tone, Tone) else Tone.Objective
+ self.source_urls = source_urls
+ self.document_urls = document_urls
+ self.complement_source_urls: bool = complement_source_urls
+ self.research_sources = [] # The list of scraped sources including title, content and images
+ self.research_images = [] # The list of selected research images
+ self.documents = documents
+ self.vector_store = VectorStoreWrapper(vector_store) if vector_store else None
+ self.vector_store_filter = vector_store_filter
+ self.websocket = websocket
+ self.agent = agent
+ self.role = role
+ self.parent_query = parent_query
+ self.subtopics = subtopics
+ self.visited_urls = visited_urls
+ self.verbose = verbose
+ self.context = context
+ self.headers = headers or {}
+ self.research_costs = 0.0
+ self.retrievers = get_retrievers(self.headers, self.cfg)
+ self.memory = Memory(
+ self.cfg.embedding_provider, self.cfg.embedding_model, **self.cfg.embedding_kwargs
+ )
+ self.log_handler = log_handler
+
+ # Initialize components
+ self.research_conductor: ResearchConductor = ResearchConductor(self)
+ self.report_generator: ReportGenerator = ReportGenerator(self)
+ self.context_manager: ContextManager = ContextManager(self)
+ self.scraper_manager: BrowserManager = BrowserManager(self)
+ self.source_curator: SourceCurator = SourceCurator(self)
+
+ async def _log_event(self, event_type: str, **kwargs):
+ """Helper method to handle logging events"""
+ if self.log_handler:
+ try:
+ if event_type == "tool":
+ await self.log_handler.on_tool_start(kwargs.get('tool_name', ''), **kwargs)
+ elif event_type == "action":
+ await self.log_handler.on_agent_action(kwargs.get('action', ''), **kwargs)
+ elif event_type == "research":
+ await self.log_handler.on_research_step(kwargs.get('step', ''), kwargs.get('details', {}))
+
+ # Add direct logging as backup
+ import logging
+ research_logger = logging.getLogger('research')
+ research_logger.info(f"{event_type}: {json.dumps(kwargs, default=str)}")
+
+ except Exception as e:
+ import logging
+ logging.getLogger('research').error(f"Error in _log_event: {e}", exc_info=True)
+
+ async def conduct_research(self):
+ await self._log_event("research", step="start", details={
+ "query": self.query,
+ "report_type": self.report_type,
+ "agent": self.agent,
+ "role": self.role
+ })
+
+ if not (self.agent and self.role):
+ await self._log_event("action", action="choose_agent")
+ self.agent, self.role = await choose_agent(
+ query=self.query,
+ cfg=self.cfg,
+ parent_query=self.parent_query,
+ cost_callback=self.add_costs,
+ headers=self.headers,
+ )
+ await self._log_event("action", action="agent_selected", details={
+ "agent": self.agent,
+ "role": self.role
+ })
+
+ await self._log_event("research", step="conducting_research", details={
+ "agent": self.agent,
+ "role": self.role
+ })
+ self.context = await self.research_conductor.conduct_research()
+
+ await self._log_event("research", step="research_completed", details={
+ "context_length": len(self.context)
+ })
+ return self.context
+
+ async def write_report(self, existing_headers: list = [], relevant_written_contents: list = [], ext_context=None) -> str:
+ await self._log_event("research", step="writing_report", details={
+ "existing_headers": existing_headers,
+ "context_source": "external" if ext_context else "internal"
+ })
+
+ report = await self.report_generator.write_report(
+ existing_headers,
+ relevant_written_contents,
+ ext_context or self.context
+ )
+
+ await self._log_event("research", step="report_completed", details={
+ "report_length": len(report)
+ })
+ return report
+
+ async def write_report_conclusion(self, report_body: str) -> str:
+ await self._log_event("research", step="writing_conclusion")
+ conclusion = await self.report_generator.write_report_conclusion(report_body)
+ await self._log_event("research", step="conclusion_completed")
+ return conclusion
+
+ async def write_introduction(self):
+ await self._log_event("research", step="writing_introduction")
+ intro = await self.report_generator.write_introduction()
+ await self._log_event("research", step="introduction_completed")
+ return intro
+
+ async def get_subtopics(self):
+ return await self.report_generator.get_subtopics()
+
+ async def get_draft_section_titles(self, current_subtopic: str):
+ return await self.report_generator.get_draft_section_titles(current_subtopic)
+
+ async def get_similar_written_contents_by_draft_section_titles(
+ self,
+ current_subtopic: str,
+ draft_section_titles: List[str],
+ written_contents: List[Dict],
+ max_results: int = 10
+ ) -> List[str]:
+ return await self.context_manager.get_similar_written_contents_by_draft_section_titles(
+ current_subtopic,
+ draft_section_titles,
+ written_contents,
+ max_results
+ )
+
+ # Utility methods
+ def get_research_images(self, top_k=10) -> List[Dict[str, Any]]:
+ return self.research_images[:top_k]
+
+ def add_research_images(self, images: List[Dict[str, Any]]) -> None:
+ self.research_images.extend(images)
+
+ def get_research_sources(self) -> List[Dict[str, Any]]:
+ return self.research_sources
+
+ def add_research_sources(self, sources: List[Dict[str, Any]]) -> None:
+ self.research_sources.extend(sources)
+
+ def add_references(self, report_markdown: str, visited_urls: set) -> str:
+ return add_references(report_markdown, visited_urls)
+
+ def extract_headers(self, markdown_text: str) -> List[Dict]:
+ return extract_headers(markdown_text)
+
+ def extract_sections(self, markdown_text: str) -> List[Dict]:
+ return extract_sections(markdown_text)
+
+ def table_of_contents(self, markdown_text: str) -> str:
+ return table_of_contents(markdown_text)
+
+ def get_source_urls(self) -> list:
+ return list(self.visited_urls)
+
+ def get_research_context(self) -> list:
+ return self.context
+
+ def get_costs(self) -> float:
+ return self.research_costs
+
+ def set_verbose(self, verbose: bool):
+ self.verbose = verbose
+
+ def add_costs(self, cost: float) -> None:
+ if not isinstance(cost, (float, int)):
+ raise ValueError("Cost must be an integer or float")
+ self.research_costs += cost
+ if self.log_handler:
+ self._log_event("research", step="cost_update", details={
+ "cost": cost,
+ "total_cost": self.research_costs
+ })
diff --git a/gpt_researcher/config/__init__.py b/gpt_researcher/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..df38f95385d6b5662b88ef8c4f8f43f2b044465a
--- /dev/null
+++ b/gpt_researcher/config/__init__.py
@@ -0,0 +1,5 @@
+from .config import Config
+from .variables.base import BaseConfig
+from .variables.default import DEFAULT_CONFIG as DefaultConfig
+
+__all__ = ["Config", "BaseConfig", "DefaultConfig"]
diff --git a/gpt_researcher/config/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/config/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d650f39ee005c651c981d31d9c40a6b425d623af
Binary files /dev/null and b/gpt_researcher/config/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/config/__pycache__/config.cpython-312.pyc b/gpt_researcher/config/__pycache__/config.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fffbc507b6570d45ce5c35dfb1b7036abb1797ad
Binary files /dev/null and b/gpt_researcher/config/__pycache__/config.cpython-312.pyc differ
diff --git a/gpt_researcher/config/config.py b/gpt_researcher/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fc59bdb772907422a4e4780f0cb72414cbdeee4
--- /dev/null
+++ b/gpt_researcher/config/config.py
@@ -0,0 +1,224 @@
+import json
+import os
+import warnings
+from typing import Dict, Any, List, Union, Type, get_origin, get_args
+from .variables.default import DEFAULT_CONFIG
+from .variables.base import BaseConfig
+from ..retrievers.utils import get_all_retriever_names
+
+
+class Config:
+ """Config class for GPT Researcher."""
+
+ CONFIG_DIR = os.path.join(os.path.dirname(__file__), "variables")
+
+ def __init__(self, config_path: str | None = None):
+ """Initialize the config class."""
+ self.config_path = config_path
+ self.llm_kwargs: Dict[str, Any] = {}
+ self.embedding_kwargs: Dict[str, Any] = {}
+
+ config_to_use = self.load_config(config_path)
+ self._set_attributes(config_to_use)
+ self._set_embedding_attributes()
+ self._set_llm_attributes()
+ self._handle_deprecated_attributes()
+ self._set_doc_path(config_to_use)
+
+ def _set_attributes(self, config: Dict[str, Any]) -> None:
+ for key, value in config.items():
+ env_value = os.getenv(key)
+ if env_value is not None:
+ value = self.convert_env_value(key, env_value, BaseConfig.__annotations__[key])
+ setattr(self, key.lower(), value)
+
+ # Handle RETRIEVER with default value
+ retriever_env = os.environ.get("RETRIEVER", config.get("RETRIEVER", "tavily"))
+ try:
+ self.retrievers = self.parse_retrievers(retriever_env)
+ except ValueError as e:
+ print(f"Warning: {str(e)}. Defaulting to 'tavily' retriever.")
+ self.retrievers = ["tavily"]
+
+ def _set_embedding_attributes(self) -> None:
+ self.embedding_provider, self.embedding_model = self.parse_embedding(
+ self.embedding
+ )
+
+ def _set_llm_attributes(self) -> None:
+ self.fast_llm_provider, self.fast_llm_model = self.parse_llm(self.fast_llm)
+ self.smart_llm_provider, self.smart_llm_model = self.parse_llm(self.smart_llm)
+ self.strategic_llm_provider, self.strategic_llm_model = self.parse_llm(self.strategic_llm)
+
+ def _handle_deprecated_attributes(self) -> None:
+ if os.getenv("EMBEDDING_PROVIDER") is not None:
+ warnings.warn(
+ "EMBEDDING_PROVIDER is deprecated and will be removed soon. Use EMBEDDING instead.",
+ FutureWarning,
+ stacklevel=2,
+ )
+ self.embedding_provider = (
+ os.environ["EMBEDDING_PROVIDER"] or self.embedding_provider
+ )
+
+ match os.environ["EMBEDDING_PROVIDER"]:
+ case "ollama":
+ self.embedding_model = os.environ["OLLAMA_EMBEDDING_MODEL"]
+ case "custom":
+ self.embedding_model = os.getenv("OPENAI_EMBEDDING_MODEL", "custom")
+ case "openai":
+ self.embedding_model = "text-embedding-3-large"
+ case "azure_openai":
+ self.embedding_model = "text-embedding-3-large"
+ case "huggingface":
+ self.embedding_model = "sentence-transformers/all-MiniLM-L6-v2"
+ case _:
+ raise Exception("Embedding provider not found.")
+
+ _deprecation_warning = (
+ "LLM_PROVIDER, FAST_LLM_MODEL and SMART_LLM_MODEL are deprecated and "
+ "will be removed soon. Use FAST_LLM and SMART_LLM instead."
+ )
+ if os.getenv("LLM_PROVIDER") is not None:
+ warnings.warn(_deprecation_warning, FutureWarning, stacklevel=2)
+ self.fast_llm_provider = (
+ os.environ["LLM_PROVIDER"] or self.fast_llm_provider
+ )
+ self.smart_llm_provider = (
+ os.environ["LLM_PROVIDER"] or self.smart_llm_provider
+ )
+ if os.getenv("FAST_LLM_MODEL") is not None:
+ warnings.warn(_deprecation_warning, FutureWarning, stacklevel=2)
+ self.fast_llm_model = os.environ["FAST_LLM_MODEL"] or self.fast_llm_model
+ if os.getenv("SMART_LLM_MODEL") is not None:
+ warnings.warn(_deprecation_warning, FutureWarning, stacklevel=2)
+ self.smart_llm_model = os.environ["SMART_LLM_MODEL"] or self.smart_llm_model
+
+ def _set_doc_path(self, config: Dict[str, Any]) -> None:
+ self.doc_path = config['DOC_PATH']
+ if self.doc_path:
+ try:
+ self.validate_doc_path()
+ except Exception as e:
+ print(f"Warning: Error validating doc_path: {str(e)}. Using default doc_path.")
+ self.doc_path = DEFAULT_CONFIG['DOC_PATH']
+
+ @classmethod
+ def load_config(cls, config_path: str | None) -> Dict[str, Any]:
+ """Load a configuration by name."""
+ if config_path is None:
+ return DEFAULT_CONFIG
+
+ # config_path = os.path.join(cls.CONFIG_DIR, config_path)
+ if not os.path.exists(config_path):
+ if config_path and config_path != "default":
+ print(f"Warning: Configuration not found at '{config_path}'. Using default configuration.")
+ if not config_path.endswith(".json"):
+ print(f"Do you mean '{config_path}.json'?")
+ return DEFAULT_CONFIG
+
+ with open(config_path, "r") as f:
+ custom_config = json.load(f)
+
+ # Merge with default config to ensure all keys are present
+ merged_config = DEFAULT_CONFIG.copy()
+ merged_config.update(custom_config)
+ return merged_config
+
+ @classmethod
+ def list_available_configs(cls) -> List[str]:
+ """List all available configuration names."""
+ configs = ["default"]
+ for file in os.listdir(cls.CONFIG_DIR):
+ if file.endswith(".json"):
+ configs.append(file[:-5]) # Remove .json extension
+ return configs
+
+ def parse_retrievers(self, retriever_str: str) -> List[str]:
+ """Parse the retriever string into a list of retrievers and validate them."""
+ retrievers = [retriever.strip()
+ for retriever in retriever_str.split(",")]
+ valid_retrievers = get_all_retriever_names() or []
+ invalid_retrievers = [r for r in retrievers if r not in valid_retrievers]
+ if invalid_retrievers:
+ raise ValueError(
+ f"Invalid retriever(s) found: {', '.join(invalid_retrievers)}. "
+ f"Valid options are: {', '.join(valid_retrievers)}."
+ )
+ return retrievers
+
+ @staticmethod
+ def parse_llm(llm_str: str | None) -> tuple[str | None, str | None]:
+ """Parse llm string into (llm_provider, llm_model)."""
+ from gpt_researcher.llm_provider.generic.base import _SUPPORTED_PROVIDERS
+
+ if llm_str is None:
+ return None, None
+ try:
+ llm_provider, llm_model = llm_str.split(":", 1)
+ assert llm_provider in _SUPPORTED_PROVIDERS, (
+ f"Unsupported {llm_provider}.\nSupported llm providers are: "
+ + ", ".join(_SUPPORTED_PROVIDERS)
+ )
+ return llm_provider, llm_model
+ except ValueError:
+ raise ValueError(
+ "Set SMART_LLM or FAST_LLM = ':' "
+ "Eg 'openai:gpt-4o-mini'"
+ )
+
+ @staticmethod
+ def parse_embedding(embedding_str: str | None) -> tuple[str | None, str | None]:
+ """Parse embedding string into (embedding_provider, embedding_model)."""
+ from gpt_researcher.memory.embeddings import _SUPPORTED_PROVIDERS
+
+ if embedding_str is None:
+ return None, None
+ try:
+ embedding_provider, embedding_model = embedding_str.split(":", 1)
+ assert embedding_provider in _SUPPORTED_PROVIDERS, (
+ f"Unsupported {embedding_provider}.\nSupported embedding providers are: "
+ + ", ".join(_SUPPORTED_PROVIDERS)
+ )
+ return embedding_provider, embedding_model
+ except ValueError:
+ raise ValueError(
+ "Set EMBEDDING = ':' "
+ "Eg 'openai:text-embedding-3-large'"
+ )
+
+ def validate_doc_path(self):
+ """Ensure that the folder exists at the doc path"""
+ os.makedirs(self.doc_path, exist_ok=True)
+
+ @staticmethod
+ def convert_env_value(key: str, env_value: str, type_hint: Type) -> Any:
+ """Convert environment variable to the appropriate type based on the type hint."""
+ origin = get_origin(type_hint)
+ args = get_args(type_hint)
+
+ if origin is Union:
+ # Handle Union types (e.g., Union[str, None])
+ for arg in args:
+ if arg is type(None):
+ if env_value.lower() in ("none", "null", ""):
+ return None
+ else:
+ try:
+ return Config.convert_env_value(key, env_value, arg)
+ except ValueError:
+ continue
+ raise ValueError(f"Cannot convert {env_value} to any of {args}")
+
+ if type_hint is bool:
+ return env_value.lower() in ("true", "1", "yes", "on")
+ elif type_hint is int:
+ return int(env_value)
+ elif type_hint is float:
+ return float(env_value)
+ elif type_hint in (str, Any):
+ return env_value
+ elif origin is list or origin is List:
+ return json.loads(env_value)
+ else:
+ raise ValueError(f"Unsupported type {type_hint} for key {key}")
diff --git a/gpt_researcher/config/variables/__init__.py b/gpt_researcher/config/variables/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/config/variables/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/config/variables/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae636945dacc05de07956e63715405f93050c6bc
Binary files /dev/null and b/gpt_researcher/config/variables/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/config/variables/__pycache__/base.cpython-312.pyc b/gpt_researcher/config/variables/__pycache__/base.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5e30b2e4e59e79cdd69dedf2a25b385884b8d1d
Binary files /dev/null and b/gpt_researcher/config/variables/__pycache__/base.cpython-312.pyc differ
diff --git a/gpt_researcher/config/variables/__pycache__/default.cpython-312.pyc b/gpt_researcher/config/variables/__pycache__/default.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab8f9bc0b58830ae4c3ca932eb6ea1c0d2b51483
Binary files /dev/null and b/gpt_researcher/config/variables/__pycache__/default.cpython-312.pyc differ
diff --git a/gpt_researcher/config/variables/base.py b/gpt_researcher/config/variables/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ed7f98b8fa46362c2c20428740c5cb5611a82f3
--- /dev/null
+++ b/gpt_researcher/config/variables/base.py
@@ -0,0 +1,31 @@
+from typing import Union
+from typing_extensions import TypedDict
+
+
+class BaseConfig(TypedDict):
+ RETRIEVER: str
+ EMBEDDING: str
+ SIMILARITY_THRESHOLD: float
+ FAST_LLM: str
+ SMART_LLM: str
+ STRATEGIC_LLM: str
+ FAST_TOKEN_LIMIT: int
+ SMART_TOKEN_LIMIT: int
+ STRATEGIC_TOKEN_LIMIT: int
+ BROWSE_CHUNK_MAX_LENGTH: int
+ SUMMARY_TOKEN_LIMIT: int
+ TEMPERATURE: float
+ LLM_TEMPERATURE: float
+ USER_AGENT: str
+ MAX_SEARCH_RESULTS_PER_QUERY: int
+ MEMORY_BACKEND: str
+ TOTAL_WORDS: int
+ REPORT_FORMAT: str
+ CURATE_SOURCES: bool
+ MAX_ITERATIONS: int
+ LANGUAGE: str
+ AGENT_ROLE: Union[str, None]
+ SCRAPER: str
+ MAX_SUBTOPICS: int
+ REPORT_SOURCE: Union[str, None]
+ DOC_PATH: str
diff --git a/gpt_researcher/config/variables/default.py b/gpt_researcher/config/variables/default.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d12ee7196350564bcc175163c002c2ac028b1e9
--- /dev/null
+++ b/gpt_researcher/config/variables/default.py
@@ -0,0 +1,30 @@
+from .base import BaseConfig
+
+DEFAULT_CONFIG: BaseConfig = {
+ "RETRIEVER": "tavily",
+ "EMBEDDING": "openai:text-embedding-3-small",
+ "SIMILARITY_THRESHOLD": 0.42,
+ "FAST_LLM": "openai:gpt-4o-mini",
+ "SMART_LLM": "openai:gpt-4o-2024-11-20",
+ "STRATEGIC_LLM": "openai:gpt-4o", # Can be used with gpt-o1
+ "FAST_TOKEN_LIMIT": 2000,
+ "SMART_TOKEN_LIMIT": 4000,
+ "STRATEGIC_TOKEN_LIMIT": 4000,
+ "BROWSE_CHUNK_MAX_LENGTH": 8192,
+ "CURATE_SOURCES": False,
+ "SUMMARY_TOKEN_LIMIT": 700,
+ "TEMPERATURE": 0.4,
+ "LLM_TEMPERATURE": 0.55,
+ "USER_AGENT": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0",
+ "MAX_SEARCH_RESULTS_PER_QUERY": 5,
+ "MEMORY_BACKEND": "local",
+ "TOTAL_WORDS": 1000,
+ "REPORT_FORMAT": "APA",
+ "MAX_ITERATIONS": 4,
+ "AGENT_ROLE": None,
+ "SCRAPER": "bs",
+ "MAX_SUBTOPICS": 3,
+ "LANGUAGE": "english",
+ "REPORT_SOURCE": "web",
+ "DOC_PATH": "./my-docs"
+}
diff --git a/gpt_researcher/config/variables/test_local.json b/gpt_researcher/config/variables/test_local.json
new file mode 100644
index 0000000000000000000000000000000000000000..1104fa4d7cc495949d53e8ff80885420dc40136f
--- /dev/null
+++ b/gpt_researcher/config/variables/test_local.json
@@ -0,0 +1,3 @@
+{
+ "DOC_PATH": "tests/docs"
+}
diff --git a/gpt_researcher/context/__init__.py b/gpt_researcher/context/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..06a0cb523c23c3c60b3594042655a5b52688e006
--- /dev/null
+++ b/gpt_researcher/context/__init__.py
@@ -0,0 +1,4 @@
+from .compression import ContextCompressor
+from .retriever import SearchAPIRetriever
+
+__all__ = ['ContextCompressor', 'SearchAPIRetriever']
diff --git a/gpt_researcher/context/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/context/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..43c657ec6b0a8b9bfe914ba524f6e3ea3cedd9c1
Binary files /dev/null and b/gpt_researcher/context/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/context/__pycache__/compression.cpython-312.pyc b/gpt_researcher/context/__pycache__/compression.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..16c9e0f32d0b8be79513bcdf524d0ce4aa923d1a
Binary files /dev/null and b/gpt_researcher/context/__pycache__/compression.cpython-312.pyc differ
diff --git a/gpt_researcher/context/__pycache__/retriever.cpython-312.pyc b/gpt_researcher/context/__pycache__/retriever.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..451d0db6872a93fa67c5e23a3ac5116e159a3361
Binary files /dev/null and b/gpt_researcher/context/__pycache__/retriever.cpython-312.pyc differ
diff --git a/gpt_researcher/context/compression.py b/gpt_researcher/context/compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb3f6339c253495b6722a22a20046e9b98a26c14
--- /dev/null
+++ b/gpt_researcher/context/compression.py
@@ -0,0 +1,105 @@
+import os
+import asyncio
+from typing import Optional
+from .retriever import SearchAPIRetriever, SectionRetriever
+from langchain.retrievers import (
+ ContextualCompressionRetriever,
+)
+from langchain.retrievers.document_compressors import (
+ DocumentCompressorPipeline,
+ EmbeddingsFilter,
+)
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from ..vector_store import VectorStoreWrapper
+from ..utils.costs import estimate_embedding_cost
+from ..memory.embeddings import OPENAI_EMBEDDING_MODEL
+
+
+class VectorstoreCompressor:
+ def __init__(self, vector_store: VectorStoreWrapper, max_results:int = 7, filter: Optional[dict] = None, **kwargs):
+
+ self.vector_store = vector_store
+ self.max_results = max_results
+ self.filter = filter
+ self.kwargs = kwargs
+
+ def __pretty_print_docs(self, docs):
+ return f"\n".join(f"Source: {d.metadata.get('source')}\n"
+ f"Title: {d.metadata.get('title')}\n"
+ f"Content: {d.page_content}\n"
+ for d in docs)
+
+ async def async_get_context(self, query, max_results=5):
+ """Get relevant context from vector store"""
+ results = await self.vector_store.asimilarity_search(query=query, k=max_results, filter=self.filter)
+ return self.__pretty_print_docs(results)
+
+
+class ContextCompressor:
+ def __init__(self, documents, embeddings, max_results=5, **kwargs):
+ self.max_results = max_results
+ self.documents = documents
+ self.kwargs = kwargs
+ self.embeddings = embeddings
+ self.similarity_threshold = os.environ.get("SIMILARITY_THRESHOLD", 0.35)
+
+ def __get_contextual_retriever(self):
+ splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
+ relevance_filter = EmbeddingsFilter(embeddings=self.embeddings,
+ similarity_threshold=self.similarity_threshold)
+ pipeline_compressor = DocumentCompressorPipeline(
+ transformers=[splitter, relevance_filter]
+ )
+ base_retriever = SearchAPIRetriever(
+ pages=self.documents
+ )
+ contextual_retriever = ContextualCompressionRetriever(
+ base_compressor=pipeline_compressor, base_retriever=base_retriever
+ )
+ return contextual_retriever
+
+ def __pretty_print_docs(self, docs, top_n):
+ return f"\n".join(f"Source: {d.metadata.get('source')}\n"
+ f"Title: {d.metadata.get('title')}\n"
+ f"Content: {d.page_content}\n"
+ for i, d in enumerate(docs) if i < top_n)
+
+ async def async_get_context(self, query, max_results=5, cost_callback=None):
+ compressed_docs = self.__get_contextual_retriever()
+ if cost_callback:
+ cost_callback(estimate_embedding_cost(model=OPENAI_EMBEDDING_MODEL, docs=self.documents))
+ relevant_docs = await asyncio.to_thread(compressed_docs.invoke, query)
+ return self.__pretty_print_docs(relevant_docs, max_results)
+
+
+class WrittenContentCompressor:
+ def __init__(self, documents, embeddings, similarity_threshold, **kwargs):
+ self.documents = documents
+ self.kwargs = kwargs
+ self.embeddings = embeddings
+ self.similarity_threshold = similarity_threshold
+
+ def __get_contextual_retriever(self):
+ splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
+ relevance_filter = EmbeddingsFilter(embeddings=self.embeddings,
+ similarity_threshold=self.similarity_threshold)
+ pipeline_compressor = DocumentCompressorPipeline(
+ transformers=[splitter, relevance_filter]
+ )
+ base_retriever = SectionRetriever(
+ sections=self.documents
+ )
+ contextual_retriever = ContextualCompressionRetriever(
+ base_compressor=pipeline_compressor, base_retriever=base_retriever
+ )
+ return contextual_retriever
+
+ def __pretty_docs_list(self, docs, top_n):
+ return [f"Title: {d.metadata.get('section_title')}\nContent: {d.page_content}\n" for i, d in enumerate(docs) if i < top_n]
+
+ async def async_get_context(self, query, max_results=5, cost_callback=None):
+ compressed_docs = self.__get_contextual_retriever()
+ if cost_callback:
+ cost_callback(estimate_embedding_cost(model=OPENAI_EMBEDDING_MODEL, docs=self.documents))
+ relevant_docs = await asyncio.to_thread(compressed_docs.invoke, query)
+ return self.__pretty_docs_list(relevant_docs, max_results)
diff --git a/gpt_researcher/context/retriever.py b/gpt_researcher/context/retriever.py
new file mode 100644
index 0000000000000000000000000000000000000000..4403ac145e320f3a476872a7de5324ae1b58e193
--- /dev/null
+++ b/gpt_researcher/context/retriever.py
@@ -0,0 +1,62 @@
+import os
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+from langchain.callbacks.manager import CallbackManagerForRetrieverRun
+from langchain.schema import Document
+from langchain.schema.retriever import BaseRetriever
+
+
+class SearchAPIRetriever(BaseRetriever):
+ """Search API retriever."""
+ pages: List[Dict] = []
+
+ def _get_relevant_documents(
+ self, query: str, *, run_manager: CallbackManagerForRetrieverRun
+ ) -> List[Document]:
+
+ docs = [
+ Document(
+ page_content=page.get("raw_content", ""),
+ metadata={
+ "title": page.get("title", ""),
+ "source": page.get("url", ""),
+ },
+ )
+ for page in self.pages
+ ]
+
+ return docs
+
+class SectionRetriever(BaseRetriever):
+ """
+ SectionRetriever:
+ This class is used to retrieve sections while avoiding redundant subtopics.
+ """
+ sections: List[Dict] = []
+ """
+ sections example:
+ [
+ {
+ "section_title": "Example Title",
+ "written_content": "Example content"
+ },
+ ...
+ ]
+ """
+
+ def _get_relevant_documents(
+ self, query: str, *, run_manager: CallbackManagerForRetrieverRun
+ ) -> List[Document]:
+
+ docs = [
+ Document(
+ page_content=page.get("written_content", ""),
+ metadata={
+ "section_title": page.get("section_title", ""),
+ },
+ )
+ for page in self.sections # Changed 'self.pages' to 'self.sections'
+ ]
+
+ return docs
\ No newline at end of file
diff --git a/gpt_researcher/document/__init__.py b/gpt_researcher/document/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e3760092d5a402639330426a7b8a7f841055184
--- /dev/null
+++ b/gpt_researcher/document/__init__.py
@@ -0,0 +1,5 @@
+from .document import DocumentLoader
+from .online_document import OnlineDocumentLoader
+from .langchain_document import LangChainDocumentLoader
+
+__all__ = ['DocumentLoader', 'OnlineDocumentLoader', 'LangChainDocumentLoader']
diff --git a/gpt_researcher/document/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/document/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae40d86a6f4467e873b63a7b7669b1d100d50ee7
Binary files /dev/null and b/gpt_researcher/document/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/document/__pycache__/document.cpython-312.pyc b/gpt_researcher/document/__pycache__/document.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67866b3c6b23f6866191bd024c156f2260f11597
Binary files /dev/null and b/gpt_researcher/document/__pycache__/document.cpython-312.pyc differ
diff --git a/gpt_researcher/document/__pycache__/langchain_document.cpython-312.pyc b/gpt_researcher/document/__pycache__/langchain_document.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f9c2439fabd22266bc6bfe39a46fd945864bbd7
Binary files /dev/null and b/gpt_researcher/document/__pycache__/langchain_document.cpython-312.pyc differ
diff --git a/gpt_researcher/document/__pycache__/online_document.cpython-312.pyc b/gpt_researcher/document/__pycache__/online_document.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b1c6715d319a016385ae5a1b0528972db7c54a9
Binary files /dev/null and b/gpt_researcher/document/__pycache__/online_document.cpython-312.pyc differ
diff --git a/gpt_researcher/document/document.py b/gpt_researcher/document/document.py
new file mode 100644
index 0000000000000000000000000000000000000000..092904cec7420f8114a61e0e12572468eb8bc014
--- /dev/null
+++ b/gpt_researcher/document/document.py
@@ -0,0 +1,66 @@
+import asyncio
+import os
+
+from langchain_community.document_loaders import (
+ PyMuPDFLoader,
+ TextLoader,
+ UnstructuredCSVLoader,
+ UnstructuredExcelLoader,
+ UnstructuredMarkdownLoader,
+ UnstructuredPowerPointLoader,
+ UnstructuredWordDocumentLoader
+)
+
+
+class DocumentLoader:
+
+ def __init__(self, path):
+ self.path = path
+
+ async def load(self) -> list:
+ tasks = []
+ for root, dirs, files in os.walk(self.path):
+ for file in files:
+ file_path = os.path.join(root, file)
+ file_name, file_extension_with_dot = os.path.splitext(file_path)
+ file_extension = file_extension_with_dot.strip(".")
+ tasks.append(self._load_document(file_path, file_extension))
+
+ docs = []
+ for pages in await asyncio.gather(*tasks):
+ for page in pages:
+ if page.page_content:
+ docs.append({
+ "raw_content": page.page_content,
+ "url": os.path.basename(page.metadata['source'])
+ })
+
+ if not docs:
+ raise ValueError("🤷 Failed to load any documents!")
+
+ return docs
+
+ async def _load_document(self, file_path: str, file_extension: str) -> list:
+ ret_data = []
+ try:
+ loader_dict = {
+ "pdf": PyMuPDFLoader(file_path),
+ "txt": TextLoader(file_path),
+ "doc": UnstructuredWordDocumentLoader(file_path),
+ "docx": UnstructuredWordDocumentLoader(file_path),
+ "pptx": UnstructuredPowerPointLoader(file_path),
+ "csv": UnstructuredCSVLoader(file_path, mode="elements"),
+ "xls": UnstructuredExcelLoader(file_path, mode="elements"),
+ "xlsx": UnstructuredExcelLoader(file_path, mode="elements"),
+ "md": UnstructuredMarkdownLoader(file_path)
+ }
+
+ loader = loader_dict.get(file_extension, None)
+ if loader:
+ ret_data = loader.load()
+
+ except Exception as e:
+ print(f"Failed to load document : {file_path}")
+ print(e)
+
+ return ret_data
diff --git a/gpt_researcher/document/langchain_document.py b/gpt_researcher/document/langchain_document.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c3433b7ea5199892841062f4a27a1eb6929a75a
--- /dev/null
+++ b/gpt_researcher/document/langchain_document.py
@@ -0,0 +1,24 @@
+import asyncio
+import os
+
+from langchain_core.documents import Document
+from typing import List, Dict
+
+
+# Supports the base Document class from langchain
+# - https://github.com/langchain-ai/langchain/blob/master/libs/core/langchain_core/documents/base.py
+class LangChainDocumentLoader:
+
+ def __init__(self, documents: List[Document]):
+ self.documents = documents
+
+ async def load(self, metadata_source_index="title") -> List[Dict[str, str]]:
+ docs = []
+ for document in self.documents:
+ docs.append(
+ {
+ "raw_content": document.page_content,
+ "url": document.metadata.get(metadata_source_index, ""),
+ }
+ )
+ return docs
diff --git a/gpt_researcher/document/online_document.py b/gpt_researcher/document/online_document.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecb33d5933f9fb11257959be36364bb8f25d734b
--- /dev/null
+++ b/gpt_researcher/document/online_document.py
@@ -0,0 +1,88 @@
+import os
+import aiohttp
+import tempfile
+from langchain_community.document_loaders import (
+ PyMuPDFLoader,
+ TextLoader,
+ UnstructuredCSVLoader,
+ UnstructuredExcelLoader,
+ UnstructuredMarkdownLoader,
+ UnstructuredPowerPointLoader,
+ UnstructuredWordDocumentLoader
+)
+
+
+class OnlineDocumentLoader:
+
+ def __init__(self, urls):
+ self.urls = urls
+
+ async def load(self) -> list:
+ docs = []
+ for url in self.urls:
+ pages = await self._download_and_process(url)
+ for page in pages:
+ if page.page_content:
+ docs.append({
+ "raw_content": page.page_content,
+ "url": page.metadata.get("source")
+ })
+
+ if not docs:
+ raise ValueError("🤷 Failed to load any documents!")
+
+ return docs
+
+ async def _download_and_process(self, url: str) -> list:
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.get(url, timeout=6) as response:
+ if response.status != 200:
+ print(f"Failed to download {url}: HTTP {response.status}")
+ return []
+
+ content = await response.read()
+ with tempfile.NamedTemporaryFile(delete=False, suffix=self._get_extension(url)) as tmp_file:
+ tmp_file.write(content)
+ tmp_file_path = tmp_file.name
+
+ return await self._load_document(tmp_file_path, self._get_extension(url).strip('.'))
+ except aiohttp.ClientError as e:
+ print(f"Failed to process {url}")
+ print(e)
+ return []
+ except Exception as e:
+ print(f"Unexpected error processing {url}")
+ print(e)
+ return []
+
+ async def _load_document(self, file_path: str, file_extension: str) -> list:
+ ret_data = []
+ try:
+ loader_dict = {
+ "pdf": PyMuPDFLoader(file_path),
+ "txt": TextLoader(file_path),
+ "doc": UnstructuredWordDocumentLoader(file_path),
+ "docx": UnstructuredWordDocumentLoader(file_path),
+ "pptx": UnstructuredPowerPointLoader(file_path),
+ "csv": UnstructuredCSVLoader(file_path, mode="elements"),
+ "xls": UnstructuredExcelLoader(file_path, mode="elements"),
+ "xlsx": UnstructuredExcelLoader(file_path, mode="elements"),
+ "md": UnstructuredMarkdownLoader(file_path)
+ }
+
+ loader = loader_dict.get(file_extension, None)
+ if loader:
+ ret_data = loader.load()
+
+ except Exception as e:
+ print(f"Failed to load document : {file_path}")
+ print(e)
+ finally:
+ os.remove(file_path) # 删除临时文件
+
+ return ret_data
+
+ @staticmethod
+ def _get_extension(url: str) -> str:
+ return os.path.splitext(url.split("?")[0])[1]
diff --git a/gpt_researcher/llm_provider/__init__.py b/gpt_researcher/llm_provider/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..31e9e1d8189a85b81d52885e7677cbfe69ccdb94
--- /dev/null
+++ b/gpt_researcher/llm_provider/__init__.py
@@ -0,0 +1,5 @@
+from .generic import GenericLLMProvider
+
+__all__ = [
+ "GenericLLMProvider",
+]
diff --git a/gpt_researcher/llm_provider/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/llm_provider/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..274fcec02a2170a6a8ff3960a70547fd070de97d
Binary files /dev/null and b/gpt_researcher/llm_provider/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/llm_provider/generic/__init__.py b/gpt_researcher/llm_provider/generic/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b60409f6fcfa1d0bef08a6dca28b35eafb56ac5f
--- /dev/null
+++ b/gpt_researcher/llm_provider/generic/__init__.py
@@ -0,0 +1,3 @@
+from .base import GenericLLMProvider
+
+__all__ = ["GenericLLMProvider"]
\ No newline at end of file
diff --git a/gpt_researcher/llm_provider/generic/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/llm_provider/generic/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9458de290b6b64cfb59d0ea8b69f74276ac73b5
Binary files /dev/null and b/gpt_researcher/llm_provider/generic/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/llm_provider/generic/__pycache__/base.cpython-312.pyc b/gpt_researcher/llm_provider/generic/__pycache__/base.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f23054bbcebca2e3f985b81405cb41e62b2f6bae
Binary files /dev/null and b/gpt_researcher/llm_provider/generic/__pycache__/base.cpython-312.pyc differ
diff --git a/gpt_researcher/llm_provider/generic/base.py b/gpt_researcher/llm_provider/generic/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c074ae2c16faf3ff3f174adc645e606f44454bf
--- /dev/null
+++ b/gpt_researcher/llm_provider/generic/base.py
@@ -0,0 +1,185 @@
+import importlib
+from typing import Any
+from colorama import Fore, Style, init
+import os
+
+_SUPPORTED_PROVIDERS = {
+ "openai",
+ "anthropic",
+ "azure_openai",
+ "cohere",
+ "google_vertexai",
+ "google_genai",
+ "fireworks",
+ "ollama",
+ "together",
+ "mistralai",
+ "huggingface",
+ "groq",
+ "bedrock",
+ "dashscope",
+ "xai",
+ "deepseek",
+ "litellm",
+}
+
+
+class GenericLLMProvider:
+
+ def __init__(self, llm):
+ self.llm = llm
+
+ @classmethod
+ def from_provider(cls, provider: str, **kwargs: Any):
+ if provider == "openai":
+ _check_pkg("langchain_openai")
+ from langchain_openai import ChatOpenAI
+
+ llm = ChatOpenAI(**kwargs)
+ elif provider == "anthropic":
+ _check_pkg("langchain_anthropic")
+ from langchain_anthropic import ChatAnthropic
+
+ llm = ChatAnthropic(**kwargs)
+ elif provider == "azure_openai":
+ _check_pkg("langchain_openai")
+ from langchain_openai import AzureChatOpenAI
+
+ if "model" in kwargs:
+ model_name = kwargs.get("model", None)
+ kwargs = {"azure_deployment": model_name, **kwargs}
+
+ llm = AzureChatOpenAI(**kwargs)
+ elif provider == "cohere":
+ _check_pkg("langchain_cohere")
+ from langchain_cohere import ChatCohere
+
+ llm = ChatCohere(**kwargs)
+ elif provider == "google_vertexai":
+ _check_pkg("langchain_google_vertexai")
+ from langchain_google_vertexai import ChatVertexAI
+
+ llm = ChatVertexAI(**kwargs)
+ elif provider == "google_genai":
+ _check_pkg("langchain_google_genai")
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ llm = ChatGoogleGenerativeAI(**kwargs)
+ elif provider == "fireworks":
+ _check_pkg("langchain_fireworks")
+ from langchain_fireworks import ChatFireworks
+
+ llm = ChatFireworks(**kwargs)
+ elif provider == "ollama":
+ _check_pkg("langchain_community")
+ from langchain_ollama import ChatOllama
+
+ llm = ChatOllama(base_url=os.environ["OLLAMA_BASE_URL"], **kwargs)
+ elif provider == "together":
+ _check_pkg("langchain_together")
+ from langchain_together import ChatTogether
+
+ llm = ChatTogether(**kwargs)
+ elif provider == "mistralai":
+ _check_pkg("langchain_mistralai")
+ from langchain_mistralai import ChatMistralAI
+
+ llm = ChatMistralAI(**kwargs)
+ elif provider == "huggingface":
+ _check_pkg("langchain_huggingface")
+ from langchain_huggingface import ChatHuggingFace
+
+ if "model" in kwargs or "model_name" in kwargs:
+ model_id = kwargs.pop("model", None) or kwargs.pop("model_name", None)
+ kwargs = {"model_id": model_id, **kwargs}
+ llm = ChatHuggingFace(**kwargs)
+ elif provider == "groq":
+ _check_pkg("langchain_groq")
+ from langchain_groq import ChatGroq
+
+ llm = ChatGroq(**kwargs)
+ elif provider == "bedrock":
+ _check_pkg("langchain_aws")
+ from langchain_aws import ChatBedrock
+
+ if "model" in kwargs or "model_name" in kwargs:
+ model_id = kwargs.pop("model", None) or kwargs.pop("model_name", None)
+ kwargs = {"model_id": model_id, "model_kwargs": kwargs}
+ llm = ChatBedrock(**kwargs)
+ elif provider == "dashscope":
+ _check_pkg("langchain_dashscope")
+ from langchain_dashscope import ChatDashScope
+
+ llm = ChatDashScope(**kwargs)
+ elif provider == "xai":
+ _check_pkg("langchain_xai")
+ from langchain_xai import ChatXAI
+
+ llm = ChatXAI(**kwargs)
+ elif provider == "deepseek":
+ _check_pkg("langchain_openai")
+ from langchain_openai import ChatOpenAI
+
+ llm = ChatOpenAI(openai_api_base='https://api.deepseek.com',
+ openai_api_key=os.environ["DEEPSEEK_API_KEY"],
+ **kwargs
+ )
+ elif provider == "litellm":
+ _check_pkg("langchain_community")
+ from langchain_community.chat_models.litellm import ChatLiteLLM
+
+ llm = ChatLiteLLM(**kwargs)
+ else:
+ supported = ", ".join(_SUPPORTED_PROVIDERS)
+ raise ValueError(
+ f"Unsupported {provider}.\n\nSupported model providers are: {supported}"
+ )
+ return cls(llm)
+
+
+ async def get_chat_response(self, messages, stream, websocket=None):
+ if not stream:
+ # Getting output from the model chain using ainvoke for asynchronous invoking
+ output = await self.llm.ainvoke(messages)
+
+ return output.content
+
+ else:
+ return await self.stream_response(messages, websocket)
+
+ async def stream_response(self, messages, websocket=None):
+ paragraph = ""
+ response = ""
+
+ # Streaming the response using the chain astream method from langchain
+ async for chunk in self.llm.astream(messages):
+ content = chunk.content
+ if content is not None:
+ response += content
+ paragraph += content
+ if "\n" in paragraph:
+ await self._send_output(paragraph, websocket)
+ paragraph = ""
+
+ if paragraph:
+ await self._send_output(paragraph, websocket)
+
+ return response
+
+ async def _send_output(self, content, websocket=None):
+ if websocket is not None:
+ await websocket.send_json({"type": "report", "output": content})
+ else:
+ print(f"{Fore.GREEN}{content}{Style.RESET_ALL}")
+
+
+def _check_pkg(pkg: str) -> None:
+ if not importlib.util.find_spec(pkg):
+ pkg_kebab = pkg.replace("_", "-")
+ # Import colorama and initialize it
+ init(autoreset=True)
+ # Use Fore.RED to color the error message
+ raise ImportError(
+ Fore.RED + f"Unable to import {pkg_kebab}. Please install with "
+ f"`pip install -U {pkg_kebab}`"
+ )
diff --git a/gpt_researcher/memory/__init__.py b/gpt_researcher/memory/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccd6546f57590e6514ccdcb83f48775408021282
--- /dev/null
+++ b/gpt_researcher/memory/__init__.py
@@ -0,0 +1 @@
+from .embeddings import Memory
diff --git a/gpt_researcher/memory/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/memory/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e13b9d21ea690dacef7287d187c78854b438258
Binary files /dev/null and b/gpt_researcher/memory/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/memory/__pycache__/embeddings.cpython-312.pyc b/gpt_researcher/memory/__pycache__/embeddings.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..77f4a2e4c96e4cd66c76b26905ed811f375229dd
Binary files /dev/null and b/gpt_researcher/memory/__pycache__/embeddings.cpython-312.pyc differ
diff --git a/gpt_researcher/memory/embeddings.py b/gpt_researcher/memory/embeddings.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c0ebd63a09f5e3c396ddd9930b5c4c84326f49a
--- /dev/null
+++ b/gpt_researcher/memory/embeddings.py
@@ -0,0 +1,121 @@
+import os
+from typing import Any
+
+OPENAI_EMBEDDING_MODEL = os.environ.get(
+ "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small"
+)
+
+_SUPPORTED_PROVIDERS = {
+ "openai",
+ "azure_openai",
+ "cohere",
+ "google_vertexai",
+ "google_genai",
+ "fireworks",
+ "ollama",
+ "together",
+ "mistralai",
+ "huggingface",
+ "nomic",
+ "voyageai",
+ "dashscope",
+ "custom",
+ "bedrock",
+}
+
+
+class Memory:
+ def __init__(self, embedding_provider: str, model: str, **embdding_kwargs: Any):
+ _embeddings = None
+ match embedding_provider:
+ case "custom":
+ from langchain_openai import OpenAIEmbeddings
+
+ _embeddings = OpenAIEmbeddings(
+ model=model,
+ openai_api_key=os.getenv("OPENAI_API_KEY", "custom"),
+ openai_api_base=os.getenv(
+ "OPENAI_BASE_URL", "http://localhost:1234/v1"
+ ), # default for lmstudio
+ check_embedding_ctx_length=False,
+ **embdding_kwargs,
+ ) # quick fix for lmstudio
+ case "openai":
+ from langchain_openai import OpenAIEmbeddings
+
+ _embeddings = OpenAIEmbeddings(model=model, **embdding_kwargs)
+ case "azure_openai":
+ from langchain_openai import AzureOpenAIEmbeddings
+
+ _embeddings = AzureOpenAIEmbeddings(
+ model=model,
+ azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
+ openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
+ openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
+ **embdding_kwargs,
+ )
+ case "cohere":
+ from langchain_cohere import CohereEmbeddings
+
+ _embeddings = CohereEmbeddings(model=model, **embdding_kwargs)
+ case "google_vertexai":
+ from langchain_google_vertexai import VertexAIEmbeddings
+
+ _embeddings = VertexAIEmbeddings(model=model, **embdding_kwargs)
+ case "google_genai":
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
+
+ _embeddings = GoogleGenerativeAIEmbeddings(
+ model=model, **embdding_kwargs
+ )
+ case "fireworks":
+ from langchain_fireworks import FireworksEmbeddings
+
+ _embeddings = FireworksEmbeddings(model=model, **embdding_kwargs)
+ case "ollama":
+ from langchain_ollama import OllamaEmbeddings
+
+ _embeddings = OllamaEmbeddings(
+ model=model,
+ base_url=os.environ["OLLAMA_BASE_URL"],
+ **embdding_kwargs,
+ )
+ case "together":
+ from langchain_together import TogetherEmbeddings
+
+ _embeddings = TogetherEmbeddings(model=model, **embdding_kwargs)
+ case "mistralai":
+ from langchain_mistralai import MistralAIEmbeddings
+
+ _embeddings = MistralAIEmbeddings(model=model, **embdding_kwargs)
+ case "huggingface":
+ from langchain_huggingface import HuggingFaceEmbeddings
+
+ _embeddings = HuggingFaceEmbeddings(model_name=model, **embdding_kwargs)
+ case "nomic":
+ from langchain_nomic import NomicEmbeddings
+
+ _embeddings = NomicEmbeddings(model=model, **embdding_kwargs)
+ case "voyageai":
+ from langchain_voyageai import VoyageAIEmbeddings
+
+ _embeddings = VoyageAIEmbeddings(
+ voyage_api_key=os.environ["VOYAGE_API_KEY"],
+ model=model,
+ **embdding_kwargs,
+ )
+ case "dashscope":
+ from langchain_community.embeddings import DashScopeEmbeddings
+
+ _embeddings = DashScopeEmbeddings(model=model, **embdding_kwargs)
+ case "bedrock":
+ from langchain_aws.embeddings import BedrockEmbeddings
+
+ _embeddings = BedrockEmbeddings(model_id=model, **embdding_kwargs)
+ case _:
+ raise Exception("Embedding not found.")
+
+ self._embeddings = _embeddings
+
+ def get_embeddings(self):
+ return self._embeddings
diff --git a/gpt_researcher/prompts.py b/gpt_researcher/prompts.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d24842a9162d3e7798dbfad8b1e6cb5f2fb159c
--- /dev/null
+++ b/gpt_researcher/prompts.py
@@ -0,0 +1,460 @@
+import warnings
+from datetime import date, datetime, timezone
+
+from .utils.enum import ReportSource, ReportType, Tone
+from typing import List, Dict, Any
+
+
+def generate_search_queries_prompt(
+ question: str,
+ parent_query: str,
+ report_type: str,
+ max_iterations: int = 3,
+ context: List[Dict[str, Any]] = [],
+):
+ """Generates the search queries prompt for the given question.
+ Args:
+ question (str): The question to generate the search queries prompt for
+ parent_query (str): The main question (only relevant for detailed reports)
+ report_type (str): The report type
+ max_iterations (int): The maximum number of search queries to generate
+ context (str): Context for better understanding of the task with realtime web information
+
+ Returns: str: The search queries prompt for the given question
+ """
+
+ if (
+ report_type == ReportType.DetailedReport.value
+ or report_type == ReportType.SubtopicReport.value
+ ):
+ task = f"{parent_query} - {question}"
+ else:
+ task = question
+
+ context_prompt = f"""
+You are a seasoned research assistant tasked with generating search queries to find relevant information for the following task: "{task}".
+Context: {context}
+
+Use this context to inform and refine your search queries. The context provides real-time web information that can help you generate more specific and relevant queries. Consider any current events, recent developments, or specific details mentioned in the context that could enhance the search queries.
+""" if context else ""
+
+ dynamic_example = ", ".join([f'"query {i+1}"' for i in range(max_iterations)])
+
+ return f"""Write {max_iterations} google search queries to search online that form an objective opinion from the following task: "{task}"
+
+Assume the current date is {datetime.now(timezone.utc).strftime('%B %d, %Y')} if required.
+
+{context_prompt}
+You must respond with a list of strings in the following format: [{dynamic_example}].
+The response should contain ONLY the list.
+"""
+
+
+def generate_report_prompt(
+ question: str,
+ context,
+ report_source: str,
+ report_format="apa",
+ total_words=1000,
+ tone=None,
+ language="english",
+):
+ """Generates the report prompt for the given question and research summary.
+ Args: question (str): The question to generate the report prompt for
+ research_summary (str): The research summary to generate the report prompt for
+ Returns: str: The report prompt for the given question and research summary
+ """
+
+ reference_prompt = ""
+ if report_source == ReportSource.Web.value:
+ reference_prompt = f"""
+You MUST write all used source urls at the end of the report as references, and make sure to not add duplicated sources, but only one reference for each.
+Every url should be hyperlinked: [url website](url)
+Additionally, you MUST include hyperlinks to the relevant URLs wherever they are referenced in the report:
+
+eg: Author, A. A. (Year, Month Date). Title of web page. Website Name. [url website](url)
+"""
+ else:
+ reference_prompt = f"""
+You MUST write all used source document names at the end of the report as references, and make sure to not add duplicated sources, but only one reference for each."
+"""
+
+ tone_prompt = f"Write the report in a {tone.value} tone." if tone else ""
+
+ return f"""
+Information: "{context}"
+---
+Using the above information, answer the following query or task: "{question}" in a detailed report --
+The report should focus on the answer to the query, should be well structured, informative,
+in-depth, and comprehensive, with facts and numbers if available and at least {total_words} words.
+You should strive to write the report as long as you can using all relevant and necessary information provided.
+
+Please follow all of the following guidelines in your report:
+- You MUST determine your own concrete and valid opinion based on the given information. Do NOT defer to general and meaningless conclusions.
+- You MUST write the report with markdown syntax and {report_format} format.
+- You MUST prioritize the relevance, reliability, and significance of the sources you use. Choose trusted sources over less reliable ones.
+- You must also prioritize new articles over older articles if the source can be trusted.
+- Use in-text citation references in {report_format} format and make it with markdown hyperlink placed at the end of the sentence or paragraph that references them like this: ([in-text citation](url)).
+- Don't forget to add a reference list at the end of the report in {report_format} format and full url links without hyperlinks.
+- {reference_prompt}
+- {tone_prompt}
+
+You MUST write the report in the following language: {language}.
+Please do your best, this is very important to my career.
+Assume that the current date is {date.today()}.
+"""
+
+def curate_sources(query, sources, max_results=10):
+ return f"""Your goal is to evaluate and curate the provided scraped content for the research task: "{query}"
+ while prioritizing the inclusion of relevant and high-quality information, especially sources containing statistics, numbers, or concrete data.
+
+The final curated list will be used as context for creating a research report, so prioritize:
+- Retaining as much original information as possible, with extra emphasis on sources featuring quantitative data or unique insights
+- Including a wide range of perspectives and insights
+- Filtering out only clearly irrelevant or unusable content
+
+EVALUATION GUIDELINES:
+1. Assess each source based on:
+ - Relevance: Include sources directly or partially connected to the research query. Err on the side of inclusion.
+ - Credibility: Favor authoritative sources but retain others unless clearly untrustworthy.
+ - Currency: Prefer recent information unless older data is essential or valuable.
+ - Objectivity: Retain sources with bias if they provide a unique or complementary perspective.
+ - Quantitative Value: Give higher priority to sources with statistics, numbers, or other concrete data.
+2. Source Selection:
+ - Include as many relevant sources as possible, up to {max_results}, focusing on broad coverage and diversity.
+ - Prioritize sources with statistics, numerical data, or verifiable facts.
+ - Overlapping content is acceptable if it adds depth, especially when data is involved.
+ - Exclude sources only if they are entirely irrelevant, severely outdated, or unusable due to poor content quality.
+3. Content Retention:
+ - DO NOT rewrite, summarize, or condense any source content.
+ - Retain all usable information, cleaning up only clear garbage or formatting issues.
+ - Keep marginally relevant or incomplete sources if they contain valuable data or insights.
+
+SOURCES LIST TO EVALUATE:
+{sources}
+
+You MUST return your response in the EXACT sources JSON list format as the original sources.
+The response MUST not contain any markdown format or additional text (like ```json), just the JSON list!
+"""
+
+
+
+
+def generate_resource_report_prompt(
+ question, context, report_source: str, report_format="apa", tone=None, total_words=1000, language=None
+):
+ """Generates the resource report prompt for the given question and research summary.
+
+ Args:
+ question (str): The question to generate the resource report prompt for.
+ context (str): The research summary to generate the resource report prompt for.
+
+ Returns:
+ str: The resource report prompt for the given question and research summary.
+ """
+
+ reference_prompt = ""
+ if report_source == ReportSource.Web.value:
+ reference_prompt = f"""
+ You MUST include all relevant source urls.
+ Every url should be hyperlinked: [url website](url)
+ """
+ else:
+ reference_prompt = f"""
+ You MUST write all used source document names at the end of the report as references, and make sure to not add duplicated sources, but only one reference for each."
+ """
+
+ return (
+ f'"""{context}"""\n\nBased on the above information, generate a bibliography recommendation report for the following'
+ f' question or topic: "{question}". The report should provide a detailed analysis of each recommended resource,'
+ " explaining how each source can contribute to finding answers to the research question.\n"
+ "Focus on the relevance, reliability, and significance of each source.\n"
+ "Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax.\n"
+ "Include relevant facts, figures, and numbers whenever available.\n"
+ f"The report should have a minimum length of {total_words} words.\n"
+ "You MUST include all relevant source urls."
+ "Every url should be hyperlinked: [url website](url)"
+ f"{reference_prompt}"
+ )
+
+
+def generate_custom_report_prompt(
+ query_prompt, context, report_source: str, report_format="apa", tone=None, total_words=1000, language: str = "english"
+):
+ return f'"{context}"\n\n{query_prompt}'
+
+
+def generate_outline_report_prompt(
+ question, context, report_source: str, report_format="apa", tone=None, total_words=1000
+):
+ """Generates the outline report prompt for the given question and research summary.
+ Args: question (str): The question to generate the outline report prompt for
+ research_summary (str): The research summary to generate the outline report prompt for
+ Returns: str: The outline report prompt for the given question and research summary
+ """
+
+ return (
+ f'"""{context}""" Using the above information, generate an outline for a research report in Markdown syntax'
+ f' for the following question or topic: "{question}". The outline should provide a well-structured framework'
+ " for the research report, including the main sections, subsections, and key points to be covered."
+ f" The research report should be detailed, informative, in-depth, and a minimum of {total_words} words."
+ " Use appropriate Markdown syntax to format the outline and ensure readability."
+ )
+
+
+def get_report_by_type(report_type: str):
+ report_type_mapping = {
+ ReportType.ResearchReport.value: generate_report_prompt,
+ ReportType.ResourceReport.value: generate_resource_report_prompt,
+ ReportType.OutlineReport.value: generate_outline_report_prompt,
+ ReportType.CustomReport.value: generate_custom_report_prompt,
+ ReportType.SubtopicReport.value: generate_subtopic_report_prompt,
+ }
+ return report_type_mapping[report_type]
+
+
+def auto_agent_instructions():
+ return """
+This task involves researching a given topic, regardless of its complexity or the availability of a definitive answer. The research is conducted by a specific server, defined by its type and role, with each server requiring distinct instructions.
+Agent
+The server is determined by the field of the topic and the specific name of the server that could be utilized to research the topic provided. Agents are categorized by their area of expertise, and each server type is associated with a corresponding emoji.
+
+examples:
+task: "should I invest in apple stocks?"
+response:
+{
+ "server": "💰 Finance Agent",
+ "agent_role_prompt: "You are a seasoned finance analyst AI assistant. Your primary goal is to compose comprehensive, astute, impartial, and methodically arranged financial reports based on provided data and trends."
+}
+task: "could reselling sneakers become profitable?"
+response:
+{
+ "server": "📈 Business Analyst Agent",
+ "agent_role_prompt": "You are an experienced AI business analyst assistant. Your main objective is to produce comprehensive, insightful, impartial, and systematically structured business reports based on provided business data, market trends, and strategic analysis."
+}
+task: "what are the most interesting sites in Tel Aviv?"
+response:
+{
+ "server: "🌍 Travel Agent",
+ "agent_role_prompt": "You are a world-travelled AI tour guide assistant. Your main purpose is to draft engaging, insightful, unbiased, and well-structured travel reports on given locations, including history, attractions, and cultural insights."
+}
+"""
+
+
+def generate_summary_prompt(query, data):
+ """Generates the summary prompt for the given question and text.
+ Args: question (str): The question to generate the summary prompt for
+ text (str): The text to generate the summary prompt for
+ Returns: str: The summary prompt for the given question and text
+ """
+
+ return (
+ f'{data}\n Using the above text, summarize it based on the following task or query: "{query}".\n If the '
+ f"query cannot be answered using the text, YOU MUST summarize the text in short.\n Include all factual "
+ f"information such as numbers, stats, quotes, etc if available. "
+ )
+
+
+################################################################################################
+
+# DETAILED REPORT PROMPTS
+
+
+def generate_subtopics_prompt() -> str:
+ return """
+Provided the main topic:
+
+{task}
+
+and research data:
+
+{data}
+
+- Construct a list of subtopics which indicate the headers of a report document to be generated on the task.
+- These are a possible list of subtopics : {subtopics}.
+- There should NOT be any duplicate subtopics.
+- Limit the number of subtopics to a maximum of {max_subtopics}
+- Finally order the subtopics by their tasks, in a relevant and meaningful order which is presentable in a detailed report
+
+"IMPORTANT!":
+- Every subtopic MUST be relevant to the main topic and provided research data ONLY!
+
+{format_instructions}
+"""
+
+
+def generate_subtopic_report_prompt(
+ current_subtopic,
+ existing_headers: list,
+ relevant_written_contents: list,
+ main_topic: str,
+ context,
+ report_format: str = "apa",
+ max_subsections=5,
+ total_words=800,
+ tone: Tone = Tone.Objective,
+ language: str = "english",
+) -> str:
+ return f"""
+Context:
+"{context}"
+
+Main Topic and Subtopic:
+Using the latest information available, construct a detailed report on the subtopic: {current_subtopic} under the main topic: {main_topic}.
+You must limit the number of subsections to a maximum of {max_subsections}.
+
+Content Focus:
+- The report should focus on answering the question, be well-structured, informative, in-depth, and include facts and numbers if available.
+- Use markdown syntax and follow the {report_format.upper()} format.
+
+IMPORTANT:Content and Sections Uniqueness:
+- This part of the instructions is crucial to ensure the content is unique and does not overlap with existing reports.
+- Carefully review the existing headers and existing written contents provided below before writing any new subsections.
+- Prevent any content that is already covered in the existing written contents.
+- Do not use any of the existing headers as the new subsection headers.
+- Do not repeat any information already covered in the existing written contents or closely related variations to avoid duplicates.
+- If you have nested subsections, ensure they are unique and not covered in the existing written contents.
+- Ensure that your content is entirely new and does not overlap with any information already covered in the previous subtopic reports.
+
+"Existing Subtopic Reports":
+- Existing subtopic reports and their section headers:
+
+ {existing_headers}
+
+- Existing written contents from previous subtopic reports:
+
+ {relevant_written_contents}
+
+"Structure and Formatting":
+- As this sub-report will be part of a larger report, include only the main body divided into suitable subtopics without any introduction or conclusion section.
+
+- You MUST include markdown hyperlinks to relevant source URLs wherever referenced in the report, for example:
+
+ ### Section Header
+
+ This is a sample text. ([url website](url))
+
+- Use H2 for the main subtopic header (##) and H3 for subsections (###).
+- Use smaller Markdown headers (e.g., H2 or H3) for content structure, avoiding the largest header (H1) as it will be used for the larger report's heading.
+- Organize your content into distinct sections that complement but do not overlap with existing reports.
+- When adding similar or identical subsections to your report, you should clearly indicate the differences between and the new content and the existing written content from previous subtopic reports. For example:
+
+ ### New header (similar to existing header)
+
+ While the previous section discussed [topic A], this section will explore [topic B]."
+
+"Date":
+Assume the current date is {datetime.now(timezone.utc).strftime('%B %d, %Y')} if required.
+
+"IMPORTANT!":
+- You MUST write the report in the following language: {language}.
+- The focus MUST be on the main topic! You MUST Leave out any information un-related to it!
+- Must NOT have any introduction, conclusion, summary or reference section.
+- You MUST include hyperlinks with markdown syntax ([url website](url)) related to the sentences wherever necessary.
+- You MUST mention the difference between the existing content and the new content in the report if you are adding the similar or same subsections wherever necessary.
+- The report should have a minimum length of {total_words} words.
+- Use an {tone.value} tone throughout the report.
+
+Do NOT add a conclusion section.
+"""
+
+
+def generate_draft_titles_prompt(
+ current_subtopic: str,
+ main_topic: str,
+ context: str,
+ max_subsections: int = 5
+) -> str:
+ return f"""
+"Context":
+"{context}"
+
+"Main Topic and Subtopic":
+Using the latest information available, construct a draft section title headers for a detailed report on the subtopic: {current_subtopic} under the main topic: {main_topic}.
+
+"Task":
+1. Create a list of draft section title headers for the subtopic report.
+2. Each header should be concise and relevant to the subtopic.
+3. The header should't be too high level, but detailed enough to cover the main aspects of the subtopic.
+4. Use markdown syntax for the headers, using H3 (###) as H1 and H2 will be used for the larger report's heading.
+5. Ensure the headers cover main aspects of the subtopic.
+
+"Structure and Formatting":
+Provide the draft headers in a list format using markdown syntax, for example:
+
+### Header 1
+### Header 2
+### Header 3
+
+"IMPORTANT!":
+- The focus MUST be on the main topic! You MUST Leave out any information un-related to it!
+- Must NOT have any introduction, conclusion, summary or reference section.
+- Focus solely on creating headers, not content.
+"""
+
+
+def generate_report_introduction(question: str, research_summary: str = "") -> str:
+ return f"""{research_summary}\n
+Using the above latest information, Prepare a detailed report introduction on the topic -- {question}.
+- The introduction should be succinct, well-structured, informative with markdown syntax.
+- As this introduction will be part of a larger report, do NOT include any other sections, which are generally present in a report.
+- The introduction should be preceded by an H1 heading with a suitable topic for the entire report.
+- You must include hyperlinks with markdown syntax ([url website](url)) related to the sentences wherever necessary.
+Assume that the current date is {datetime.now(timezone.utc).strftime('%B %d, %Y')} if required.
+"""
+
+
+def generate_report_conclusion(query: str, report_content: str) -> str:
+ """
+ Generate a concise conclusion summarizing the main findings and implications of a research report.
+
+ Args:
+ report_content (str): The content of the research report.
+
+ Returns:
+ str: A concise conclusion summarizing the report's main findings and implications.
+ """
+ prompt = f"""
+ Based on the research report below and research task, please write a concise conclusion that summarizes the main findings and their implications:
+
+ Research task: {query}
+
+ Research Report: {report_content}
+
+ Your conclusion should:
+ 1. Recap the main points of the research
+ 2. Highlight the most important findings
+ 3. Discuss any implications or next steps
+ 4. Be approximately 2-3 paragraphs long
+
+ If there is no "## Conclusion" section title written at the end of the report, please add it to the top of your conclusion.
+ You must include hyperlinks with markdown syntax ([url website](url)) related to the sentences wherever necessary.
+
+ Write the conclusion:
+ """
+
+ return prompt
+
+
+report_type_mapping = {
+ ReportType.ResearchReport.value: generate_report_prompt,
+ ReportType.ResourceReport.value: generate_resource_report_prompt,
+ ReportType.OutlineReport.value: generate_outline_report_prompt,
+ ReportType.CustomReport.value: generate_custom_report_prompt,
+ ReportType.SubtopicReport.value: generate_subtopic_report_prompt,
+}
+
+
+def get_prompt_by_report_type(report_type):
+ prompt_by_type = report_type_mapping.get(report_type)
+ default_report_type = ReportType.ResearchReport.value
+ if not prompt_by_type:
+ warnings.warn(
+ f"Invalid report type: {report_type}.\n"
+ f"Please use one of the following: {', '.join([enum_value for enum_value in report_type_mapping.keys()])}\n"
+ f"Using default report type: {default_report_type} prompt.",
+ UserWarning,
+ )
+ prompt_by_type = report_type_mapping.get(default_report_type)
+ return prompt_by_type
+
diff --git a/gpt_researcher/retrievers/__init__.py b/gpt_researcher/retrievers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea41ed13673699031c9ebedcaa9db5bf6bd0564a
--- /dev/null
+++ b/gpt_researcher/retrievers/__init__.py
@@ -0,0 +1,29 @@
+from .arxiv.arxiv import ArxivSearch
+from .bing.bing import BingSearch
+from .custom.custom import CustomRetriever
+from .duckduckgo.duckduckgo import Duckduckgo
+from .google.google import GoogleSearch
+from .pubmed_central.pubmed_central import PubMedCentralSearch
+from .searx.searx import SearxSearch
+from .semantic_scholar.semantic_scholar import SemanticScholarSearch
+from .searchapi.searchapi import SearchApiSearch
+from .serpapi.serpapi import SerpApiSearch
+from .serper.serper import SerperSearch
+from .tavily.tavily_search import TavilySearch
+from .exa.exa import ExaSearch
+
+__all__ = [
+ "TavilySearch",
+ "CustomRetriever",
+ "Duckduckgo",
+ "SearchApiSearch",
+ "SerperSearch",
+ "SerpApiSearch",
+ "GoogleSearch",
+ "SearxSearch",
+ "BingSearch",
+ "ArxivSearch",
+ "SemanticScholarSearch",
+ "PubMedCentralSearch",
+ "ExaSearch"
+]
diff --git a/gpt_researcher/retrievers/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a454918e29ce06f76b27075eb7a13384ada0fff
Binary files /dev/null and b/gpt_researcher/retrievers/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/__pycache__/utils.cpython-312.pyc b/gpt_researcher/retrievers/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67b652f70eb892bda722035af39100930f28cebe
Binary files /dev/null and b/gpt_researcher/retrievers/__pycache__/utils.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/arxiv/__init__.py b/gpt_researcher/retrievers/arxiv/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/arxiv/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/arxiv/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67848faea5441eee115dd2a5b8d91dce8f5ddc09
Binary files /dev/null and b/gpt_researcher/retrievers/arxiv/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/arxiv/__pycache__/arxiv.cpython-312.pyc b/gpt_researcher/retrievers/arxiv/__pycache__/arxiv.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e988f30f06c1ac8eeb690a393a36bf02344b0e97
Binary files /dev/null and b/gpt_researcher/retrievers/arxiv/__pycache__/arxiv.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/arxiv/arxiv.py b/gpt_researcher/retrievers/arxiv/arxiv.py
new file mode 100644
index 0000000000000000000000000000000000000000..4febc33294f0e2f417bff780b20023adda4a8bbd
--- /dev/null
+++ b/gpt_researcher/retrievers/arxiv/arxiv.py
@@ -0,0 +1,40 @@
+import arxiv
+
+
+class ArxivSearch:
+ """
+ Arxiv API Retriever
+ """
+ def __init__(self, query, sort='Relevance'):
+ self.arxiv = arxiv
+ self.query = query
+ assert sort in ['Relevance', 'SubmittedDate'], "Invalid sort criterion"
+ self.sort = arxiv.SortCriterion.SubmittedDate if sort == 'SubmittedDate' else arxiv.SortCriterion.Relevance
+
+
+ def search(self, max_results=5):
+ """
+ Performs the search
+ :param query:
+ :param max_results:
+ :return:
+ """
+
+ arxiv_gen = list(arxiv.Client().results(
+ self.arxiv.Search(
+ query= self.query, #+
+ max_results=max_results,
+ sort_by=self.sort,
+ )))
+
+ search_result = []
+
+ for result in arxiv_gen:
+
+ search_result.append({
+ "title": result.title,
+ "href": result.pdf_url,
+ "body": result.summary,
+ })
+
+ return search_result
\ No newline at end of file
diff --git a/gpt_researcher/retrievers/bing/__init__.py b/gpt_researcher/retrievers/bing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/bing/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/bing/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0507db0166af799e3ee88da475180519a48b857f
Binary files /dev/null and b/gpt_researcher/retrievers/bing/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/bing/__pycache__/bing.cpython-312.pyc b/gpt_researcher/retrievers/bing/__pycache__/bing.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf61c0f6bb5b1565b4dfb0d7c737f58ad4fc11f6
Binary files /dev/null and b/gpt_researcher/retrievers/bing/__pycache__/bing.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/bing/bing.py b/gpt_researcher/retrievers/bing/bing.py
new file mode 100644
index 0000000000000000000000000000000000000000..edf7c4d2568d77dd95b069d73cfebea8d003971e
--- /dev/null
+++ b/gpt_researcher/retrievers/bing/bing.py
@@ -0,0 +1,93 @@
+# Bing Search Retriever
+
+# libraries
+import os
+import requests
+import json
+import logging
+
+
+class BingSearch():
+ """
+ Bing Search Retriever
+ """
+
+ def __init__(self, query):
+ """
+ Initializes the BingSearch object
+ Args:
+ query:
+ """
+ self.query = query
+ self.api_key = self.get_api_key()
+ self.logger = logging.getLogger(__name__)
+
+ def get_api_key(self):
+ """
+ Gets the Bing API key
+ Returns:
+
+ """
+ try:
+ api_key = os.environ["BING_API_KEY"]
+ except:
+ raise Exception(
+ "Bing API key not found. Please set the BING_API_KEY environment variable.")
+ return api_key
+
+ def search(self, max_results=7) -> list[dict[str]]:
+ """
+ Searches the query
+ Returns:
+
+ """
+ print("Searching with query {0}...".format(self.query))
+ """Useful for general internet search queries using the Bing API."""
+
+ # Search the query
+ url = "https://api.bing.microsoft.com/v7.0/search"
+
+ headers = {
+ 'Ocp-Apim-Subscription-Key': self.api_key,
+ 'Content-Type': 'application/json'
+ }
+ params = {
+ "responseFilter": "Webpages",
+ "q": self.query,
+ "count": max_results,
+ "setLang": "en-GB",
+ "textDecorations": False,
+ "textFormat": "HTML",
+ "safeSearch": "Strict"
+ }
+
+ resp = requests.get(url, headers=headers, params=params)
+
+ # Preprocess the results
+ if resp is None:
+ return []
+ try:
+ search_results = json.loads(resp.text)
+ results = search_results["webPages"]["value"]
+ except Exception as e:
+ self.logger.error(
+ f"Error parsing Bing search results: {e}. Resulting in empty response.")
+ return []
+ if search_results is None:
+ self.logger.warning(f"No search results found for query: {self.query}")
+ return []
+ search_results = []
+
+ # Normalize the results to match the format of the other search APIs
+ for result in results:
+ # skip youtube results
+ if "youtube.com" in result["url"]:
+ continue
+ search_result = {
+ "title": result["name"],
+ "href": result["url"],
+ "body": result["snippet"],
+ }
+ search_results.append(search_result)
+
+ return search_results
diff --git a/gpt_researcher/retrievers/custom/__init__.py b/gpt_researcher/retrievers/custom/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/custom/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/custom/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9004d317856eae07399e6f93ac7e5530b3c70105
Binary files /dev/null and b/gpt_researcher/retrievers/custom/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/custom/__pycache__/custom.cpython-312.pyc b/gpt_researcher/retrievers/custom/__pycache__/custom.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e418ac75495fed2fd31890e7baaecbe0cc9319a
Binary files /dev/null and b/gpt_researcher/retrievers/custom/__pycache__/custom.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/custom/custom.py b/gpt_researcher/retrievers/custom/custom.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e9ae15a1f456f125ce9eee2c80736b1f3eaf224
--- /dev/null
+++ b/gpt_researcher/retrievers/custom/custom.py
@@ -0,0 +1,52 @@
+from typing import Any, Dict, List, Optional
+import requests
+import os
+
+
+class CustomRetriever:
+ """
+ Custom API Retriever
+ """
+
+ def __init__(self, query: str):
+ self.endpoint = os.getenv('RETRIEVER_ENDPOINT')
+ if not self.endpoint:
+ raise ValueError("RETRIEVER_ENDPOINT environment variable not set")
+
+ self.params = self._populate_params()
+ self.query = query
+
+ def _populate_params(self) -> Dict[str, Any]:
+ """
+ Populates parameters from environment variables prefixed with 'RETRIEVER_ARG_'
+ """
+ return {
+ key[len('RETRIEVER_ARG_'):].lower(): value
+ for key, value in os.environ.items()
+ if key.startswith('RETRIEVER_ARG_')
+ }
+
+ def search(self, max_results: int = 5) -> Optional[List[Dict[str, Any]]]:
+ """
+ Performs the search using the custom retriever endpoint.
+
+ :param max_results: Maximum number of results to return (not currently used)
+ :return: JSON response in the format:
+ [
+ {
+ "url": "http://example.com/page1",
+ "raw_content": "Content of page 1"
+ },
+ {
+ "url": "http://example.com/page2",
+ "raw_content": "Content of page 2"
+ }
+ ]
+ """
+ try:
+ response = requests.get(self.endpoint, params={**self.params, 'query': self.query})
+ response.raise_for_status()
+ return response.json()
+ except requests.RequestException as e:
+ print(f"Failed to retrieve search results: {e}")
+ return None
\ No newline at end of file
diff --git a/gpt_researcher/retrievers/duckduckgo/__init__.py b/gpt_researcher/retrievers/duckduckgo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/duckduckgo/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/duckduckgo/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2df08b658eb9b1235434fa9d2c6c7951714295b1
Binary files /dev/null and b/gpt_researcher/retrievers/duckduckgo/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/duckduckgo/__pycache__/duckduckgo.cpython-312.pyc b/gpt_researcher/retrievers/duckduckgo/__pycache__/duckduckgo.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69ff055a13b5c6b11173c490ddd4cc1d9cc98678
Binary files /dev/null and b/gpt_researcher/retrievers/duckduckgo/__pycache__/duckduckgo.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/duckduckgo/duckduckgo.py b/gpt_researcher/retrievers/duckduckgo/duckduckgo.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecf2d9173d73763da2fb55e879ff0255d25ad78f
--- /dev/null
+++ b/gpt_researcher/retrievers/duckduckgo/duckduckgo.py
@@ -0,0 +1,27 @@
+from itertools import islice
+from ..utils import check_pkg
+
+
+class Duckduckgo:
+ """
+ Duckduckgo API Retriever
+ """
+ def __init__(self, query):
+ check_pkg('duckduckgo_search')
+ from duckduckgo_search import DDGS
+ self.ddg = DDGS()
+ self.query = query
+
+ def search(self, max_results=5):
+ """
+ Performs the search
+ :param query:
+ :param max_results:
+ :return:
+ """
+ try:
+ search_response = self.ddg.text(self.query, region='wt-wt', max_results=max_results)
+ except Exception as e:
+ print(f"Error: {e}. Failed fetching sources. Resulting in empty response.")
+ search_response = []
+ return search_response
\ No newline at end of file
diff --git a/gpt_researcher/retrievers/exa/__init__.py b/gpt_researcher/retrievers/exa/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/exa/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/exa/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ec15609267c5078f58ea760812f552d08f295c0
Binary files /dev/null and b/gpt_researcher/retrievers/exa/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/exa/__pycache__/exa.cpython-312.pyc b/gpt_researcher/retrievers/exa/__pycache__/exa.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f9f12aa935388dfc5f58453b8088f45c4d43d2a
Binary files /dev/null and b/gpt_researcher/retrievers/exa/__pycache__/exa.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/exa/exa.py b/gpt_researcher/retrievers/exa/exa.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd4d3332baa3962e254f926c741fd6072fe58d5c
--- /dev/null
+++ b/gpt_researcher/retrievers/exa/exa.py
@@ -0,0 +1,99 @@
+import os
+from ..utils import check_pkg
+
+
+class ExaSearch:
+ """
+ Exa API Retriever
+ """
+
+ def __init__(self, query):
+ """
+ Initializes the ExaSearch object.
+ Args:
+ query: The search query.
+ """
+ # This validation is necessary since exa_py is optional
+ check_pkg("exa_py")
+ from exa_py import Exa
+ self.query = query
+ self.api_key = self._retrieve_api_key()
+ self.client = Exa(api_key=self.api_key)
+
+ def _retrieve_api_key(self):
+ """
+ Retrieves the Exa API key from environment variables.
+ Returns:
+ The API key.
+ Raises:
+ Exception: If the API key is not found.
+ """
+ try:
+ api_key = os.environ["EXA_API_KEY"]
+ except KeyError:
+ raise Exception(
+ "Exa API key not found. Please set the EXA_API_KEY environment variable. "
+ "You can obtain your key from https://exa.ai/"
+ )
+ return api_key
+
+ def search(
+ self, max_results=10, use_autoprompt=False, search_type="neural", **filters
+ ):
+ """
+ Searches the query using the Exa API.
+ Args:
+ max_results: The maximum number of results to return.
+ use_autoprompt: Whether to use autoprompting.
+ search_type: The type of search (e.g., "neural", "keyword").
+ **filters: Additional filters (e.g., date range, domains).
+ Returns:
+ A list of search results.
+ """
+ results = self.client.search(
+ self.query,
+ type=search_type,
+ use_autoprompt=use_autoprompt,
+ num_results=max_results,
+ **filters
+ )
+
+ search_response = [
+ {"href": result.url, "body": result.text} for result in results.results
+ ]
+ return search_response
+
+ def find_similar(self, url, exclude_source_domain=False, **filters):
+ """
+ Finds similar documents to the provided URL using the Exa API.
+ Args:
+ url: The URL to find similar documents for.
+ exclude_source_domain: Whether to exclude the source domain in the results.
+ **filters: Additional filters.
+ Returns:
+ A list of similar documents.
+ """
+ results = self.client.find_similar(
+ url, exclude_source_domain=exclude_source_domain, **filters
+ )
+
+ similar_response = [
+ {"href": result.url, "body": result.text} for result in results.results
+ ]
+ return similar_response
+
+ def get_contents(self, ids, **options):
+ """
+ Retrieves the contents of the specified IDs using the Exa API.
+ Args:
+ ids: The IDs of the documents to retrieve.
+ **options: Additional options for content retrieval.
+ Returns:
+ A list of document contents.
+ """
+ results = self.client.get_contents(ids, **options)
+
+ contents_response = [
+ {"id": result.id, "content": result.text} for result in results.results
+ ]
+ return contents_response
diff --git a/gpt_researcher/retrievers/google/__init__.py b/gpt_researcher/retrievers/google/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/google/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/google/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ff9650558c4b095ab5f7ba7d5e978a119d0ce77
Binary files /dev/null and b/gpt_researcher/retrievers/google/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/google/__pycache__/google.cpython-312.pyc b/gpt_researcher/retrievers/google/__pycache__/google.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab9f3288def12f75e3d006cab0c40989fc9b4a57
Binary files /dev/null and b/gpt_researcher/retrievers/google/__pycache__/google.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/google/google.py b/gpt_researcher/retrievers/google/google.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b9ea199a6051de1210f1e6918d97546e8edb400
--- /dev/null
+++ b/gpt_researcher/retrievers/google/google.py
@@ -0,0 +1,93 @@
+# Tavily API Retriever
+
+# libraries
+import os
+import requests
+import json
+
+
+class GoogleSearch:
+ """
+ Google API Retriever
+ """
+ def __init__(self, query, headers=None):
+ """
+ Initializes the TavilySearch object
+ Args:
+ query:
+ """
+ self.query = query
+ self.headers = headers or {}
+ self.api_key = self.headers.get("google_api_key") or self.get_api_key() # Use the passed api_key or fallback to environment variable
+ self.cx_key = self.headers.get("google_cx_key") or self.get_cx_key() # Use the passed cx_key or fallback to environment variable
+
+ def get_api_key(self):
+ """
+ Gets the Google API key
+ Returns:
+
+ """
+ # Get the API key
+ try:
+ api_key = os.environ["GOOGLE_API_KEY"]
+ except:
+ raise Exception("Google API key not found. Please set the GOOGLE_API_KEY environment variable. "
+ "You can get a key at https://developers.google.com/custom-search/v1/overview")
+ return api_key
+
+ def get_cx_key(self):
+ """
+ Gets the Google CX key
+ Returns:
+
+ """
+ # Get the API key
+ try:
+ api_key = os.environ["GOOGLE_CX_KEY"]
+ except:
+ raise Exception("Google CX key not found. Please set the GOOGLE_CX_KEY environment variable. "
+ "You can get a key at https://developers.google.com/custom-search/v1/overview")
+ return api_key
+
+ def search(self, max_results=7):
+ """
+ Searches the query
+ Returns:
+
+ """
+ """Useful for general internet search queries using the Google API."""
+ print("Searching with query {0}...".format(self.query))
+ url = f"https://www.googleapis.com/customsearch/v1?key={self.api_key}&cx={self.cx_key}&q={self.query}&start=1"
+ resp = requests.get(url)
+
+ if resp.status_code < 200 or resp.status_code >= 300:
+ print("Google search: unexpected response status: ", resp.status_code)
+
+ if resp is None:
+ return
+ try:
+ search_results = json.loads(resp.text)
+ except Exception:
+ return
+ if search_results is None:
+ return
+
+ results = search_results.get("items", [])
+ search_results = []
+
+ # Normalizing results to match the format of the other search APIs
+ for result in results:
+ # skip youtube results
+ if "youtube.com" in result["link"]:
+ continue
+ try:
+ search_result = {
+ "title": result["title"],
+ "href": result["link"],
+ "body": result["snippet"],
+ }
+ except:
+ continue
+ search_results.append(search_result)
+
+ return search_results[:max_results]
diff --git a/gpt_researcher/retrievers/pubmed_central/__init__.py b/gpt_researcher/retrievers/pubmed_central/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/pubmed_central/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/pubmed_central/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..beb87aaf3240dd1895cc2e9e2ade2fbb36477dc5
Binary files /dev/null and b/gpt_researcher/retrievers/pubmed_central/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/pubmed_central/__pycache__/pubmed_central.cpython-312.pyc b/gpt_researcher/retrievers/pubmed_central/__pycache__/pubmed_central.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56b8402f2937a069e2ff835fd5875b11879d340c
Binary files /dev/null and b/gpt_researcher/retrievers/pubmed_central/__pycache__/pubmed_central.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/pubmed_central/pubmed_central.py b/gpt_researcher/retrievers/pubmed_central/pubmed_central.py
new file mode 100644
index 0000000000000000000000000000000000000000..014fced5206d0d778681ddb872cecee6b31172d5
--- /dev/null
+++ b/gpt_researcher/retrievers/pubmed_central/pubmed_central.py
@@ -0,0 +1,174 @@
+import os
+import xml.etree.ElementTree as ET
+
+import requests
+
+
+class PubMedCentralSearch:
+ """
+ PubMed Central API Retriever
+ """
+
+ def __init__(self, query):
+ """
+ Initializes the PubMedCentralSearch object.
+ Args:
+ query: The search query.
+ """
+ self.query = query
+ self.api_key = self._retrieve_api_key()
+
+ def _retrieve_api_key(self):
+ """
+ Retrieves the NCBI API key from environment variables.
+ Returns:
+ The API key.
+ Raises:
+ Exception: If the API key is not found.
+ """
+ try:
+ api_key = os.environ["NCBI_API_KEY"]
+ except KeyError:
+ raise Exception(
+ "NCBI API key not found. Please set the NCBI_API_KEY environment variable. "
+ "You can obtain your key from https://www.ncbi.nlm.nih.gov/account/"
+ )
+ return api_key
+
+ def search(self, max_results=10):
+ """
+ Searches the query using the PubMed Central API.
+ Args:
+ max_results: The maximum number of results to return.
+ Returns:
+ A list of search results.
+ """
+ base_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi"
+ params = {
+ "db": "pmc",
+ "term": f"{self.query} AND free fulltext[filter]",
+ "retmax": max_results,
+ "usehistory": "y",
+ "api_key": self.api_key,
+ "retmode": "json",
+ "sort": "relevance"
+ }
+ response = requests.get(base_url, params=params)
+
+ if response.status_code != 200:
+ raise Exception(
+ f"Failed to retrieve data: {response.status_code} - {response.text}"
+ )
+
+ results = response.json()
+ ids = results["esearchresult"]["idlist"]
+
+ search_response = []
+ for article_id in ids:
+ xml_content = self.fetch([article_id])
+ if self.has_body_content(xml_content):
+ article_data = self.parse_xml(xml_content)
+ if article_data:
+ search_response.append(
+ {
+ "href": f"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC{article_id}/",
+ "body": f"{article_data['title']}\n\n{article_data['abstract']}\n\n{article_data['body'][:500]}...",
+ }
+ )
+
+ if len(search_response) >= max_results:
+ break
+
+ return search_response
+
+ def fetch(self, ids):
+ """
+ Fetches the full text content for given article IDs.
+ Args:
+ ids: List of article IDs.
+ Returns:
+ XML content of the articles.
+ """
+ base_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi"
+ params = {
+ "db": "pmc",
+ "id": ",".join(ids),
+ "retmode": "xml",
+ "api_key": self.api_key,
+ }
+ response = requests.get(base_url, params=params)
+
+ if response.status_code != 200:
+ raise Exception(
+ f"Failed to retrieve data: {response.status_code} - {response.text}"
+ )
+
+ return response.text
+
+ def has_body_content(self, xml_content):
+ """
+ Checks if the XML content has a body section.
+ Args:
+ xml_content: XML content of the article.
+ Returns:
+ Boolean indicating presence of body content.
+ """
+ root = ET.fromstring(xml_content)
+ ns = {
+ "mml": "http://www.w3.org/1998/Math/MathML",
+ "xlink": "http://www.w3.org/1999/xlink",
+ }
+ article = root.find("article", ns)
+ if article is None:
+ return False
+
+ body_elem = article.find(".//body", namespaces=ns)
+ if body_elem is not None:
+ return True
+ else:
+ for sec in article.findall(".//sec", namespaces=ns):
+ for p in sec.findall(".//p", namespaces=ns):
+ if p.text:
+ return True
+ return False
+
+ def parse_xml(self, xml_content):
+ """
+ Parses the XML content to extract title, abstract, and body.
+ Args:
+ xml_content: XML content of the article.
+ Returns:
+ Dictionary containing title, abstract, and body text.
+ """
+ root = ET.fromstring(xml_content)
+ ns = {
+ "mml": "http://www.w3.org/1998/Math/MathML",
+ "xlink": "http://www.w3.org/1999/xlink",
+ }
+
+ article = root.find("article", ns)
+ if article is None:
+ return None
+
+ title = article.findtext(
+ ".//title-group/article-title", default="", namespaces=ns
+ )
+
+ abstract = article.find(".//abstract", namespaces=ns)
+ abstract_text = (
+ "".join(abstract.itertext()).strip() if abstract is not None else ""
+ )
+
+ body = []
+ body_elem = article.find(".//body", namespaces=ns)
+ if body_elem is not None:
+ for p in body_elem.findall(".//p", namespaces=ns):
+ if p.text:
+ body.append(p.text.strip())
+ else:
+ for sec in article.findall(".//sec", namespaces=ns):
+ for p in sec.findall(".//p", namespaces=ns):
+ if p.text:
+ body.append(p.text.strip())
+
+ return {"title": title, "abstract": abstract_text, "body": "\n".join(body)}
diff --git a/gpt_researcher/retrievers/searchapi/__init__.py b/gpt_researcher/retrievers/searchapi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/searchapi/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/searchapi/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4504174a6104da63b91b0b28e9a226b1dd09b53
Binary files /dev/null and b/gpt_researcher/retrievers/searchapi/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/searchapi/__pycache__/searchapi.cpython-312.pyc b/gpt_researcher/retrievers/searchapi/__pycache__/searchapi.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab584d42b67a200e47fe5b37c8c6dc11043a6b42
Binary files /dev/null and b/gpt_researcher/retrievers/searchapi/__pycache__/searchapi.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/searchapi/searchapi.py b/gpt_researcher/retrievers/searchapi/searchapi.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4b5a25e674d65a17dfecfbd43e425b39693b616
--- /dev/null
+++ b/gpt_researcher/retrievers/searchapi/searchapi.py
@@ -0,0 +1,84 @@
+# SearchApi Retriever
+
+# libraries
+import os
+import requests
+import urllib.parse
+
+
+class SearchApiSearch():
+ """
+ SearchApi Retriever
+ """
+ def __init__(self, query):
+ """
+ Initializes the SearchApiSearch object
+ Args:
+ query:
+ """
+ self.query = query
+ self.api_key = self.get_api_key()
+
+ def get_api_key(self):
+ """
+ Gets the SearchApi API key
+ Returns:
+
+ """
+ try:
+ api_key = os.environ["SEARCHAPI_API_KEY"]
+ except:
+ raise Exception("SearchApi key not found. Please set the SEARCHAPI_API_KEY environment variable. "
+ "You can get a key at https://www.searchapi.io/")
+ return api_key
+
+ def search(self, max_results=7):
+ """
+ Searches the query
+ Returns:
+
+ """
+ print("SearchApiSearch: Searching with query {0}...".format(self.query))
+ """Useful for general internet search queries using SearchApi."""
+
+
+ url = "https://www.searchapi.io/api/v1/search"
+ params = {
+ "q": self.query,
+ "engine": "google",
+ }
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Authorization': f'Bearer {self.api_key}',
+ 'X-SearchApi-Source': 'gpt-researcher'
+ }
+
+ encoded_url = url + "?" + urllib.parse.urlencode(params)
+ search_response = []
+
+ try:
+ response = requests.get(encoded_url, headers=headers, timeout=20)
+ if response.status_code == 200:
+ search_results = response.json()
+ if search_results:
+ results = search_results["organic_results"]
+ results_processed = 0
+ for result in results:
+ # skip youtube results
+ if "youtube.com" in result["link"]:
+ continue
+ if results_processed >= max_results:
+ break
+ search_result = {
+ "title": result["title"],
+ "href": result["link"],
+ "body": result["snippet"],
+ }
+ search_response.append(search_result)
+ results_processed += 1
+ except Exception as e:
+ print(f"Error: {e}. Failed fetching sources. Resulting in empty response.")
+ search_response = []
+
+ return search_response
diff --git a/gpt_researcher/retrievers/searx/__init__.py b/gpt_researcher/retrievers/searx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/searx/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/searx/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f2935b4c6d5190df3e7232282f210783efcc319
Binary files /dev/null and b/gpt_researcher/retrievers/searx/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/searx/__pycache__/searx.cpython-312.pyc b/gpt_researcher/retrievers/searx/__pycache__/searx.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f47df1b20e535a9da45398080210e3d3d182f66
Binary files /dev/null and b/gpt_researcher/retrievers/searx/__pycache__/searx.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/searx/searx.py b/gpt_researcher/retrievers/searx/searx.py
new file mode 100644
index 0000000000000000000000000000000000000000..81df7400e8635f246247f7e6235c4212ed099aa1
--- /dev/null
+++ b/gpt_researcher/retrievers/searx/searx.py
@@ -0,0 +1,77 @@
+import os
+import json
+import requests
+from typing import List, Dict
+from urllib.parse import urljoin
+
+
+class SearxSearch():
+ """
+ SearxNG API Retriever
+ """
+ def __init__(self, query: str):
+ """
+ Initializes the SearxSearch object
+ Args:
+ query: Search query string
+ """
+ self.query = query
+ self.base_url = self.get_searxng_url()
+
+ def get_searxng_url(self) -> str:
+ """
+ Gets the SearxNG instance URL from environment variables
+ Returns:
+ str: Base URL of SearxNG instance
+ """
+ try:
+ base_url = os.environ["SEARX_URL"]
+ if not base_url.endswith('/'):
+ base_url += '/'
+ return base_url
+ except KeyError:
+ raise Exception(
+ "SearxNG URL not found. Please set the SEARX_URL environment variable. "
+ "You can find public instances at https://searx.space/"
+ )
+
+ def search(self, max_results: int = 10) -> List[Dict[str, str]]:
+ """
+ Searches the query using SearxNG API
+ Args:
+ max_results: Maximum number of results to return
+ Returns:
+ List of dictionaries containing search results
+ """
+ search_url = urljoin(self.base_url, "search")
+
+ params = {
+ # The search query.
+ 'q': self.query,
+ # Output format of results. Format needs to be activated in searxng config.
+ 'format': 'json'
+ }
+
+ try:
+ response = requests.get(
+ search_url,
+ params=params,
+ headers={'Accept': 'application/json'}
+ )
+ response.raise_for_status()
+ results = response.json()
+
+ # Normalize results to match the expected format
+ search_response = []
+ for result in results.get('results', [])[:max_results]:
+ search_response.append({
+ "href": result.get('url', ''),
+ "body": result.get('content', '')
+ })
+
+ return search_response
+
+ except requests.exceptions.RequestException as e:
+ raise Exception(f"Error querying SearxNG: {str(e)}")
+ except json.JSONDecodeError:
+ raise Exception("Error parsing SearxNG response")
diff --git a/gpt_researcher/retrievers/semantic_scholar/__init__.py b/gpt_researcher/retrievers/semantic_scholar/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/semantic_scholar/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/semantic_scholar/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..78532989d415f00cb050d72ab3c7bf43ff8bdc81
Binary files /dev/null and b/gpt_researcher/retrievers/semantic_scholar/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/semantic_scholar/__pycache__/semantic_scholar.cpython-312.pyc b/gpt_researcher/retrievers/semantic_scholar/__pycache__/semantic_scholar.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8522d04dbf15d09df6a41d5e2f3406e4d1ee8b48
Binary files /dev/null and b/gpt_researcher/retrievers/semantic_scholar/__pycache__/semantic_scholar.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/semantic_scholar/semantic_scholar.py b/gpt_researcher/retrievers/semantic_scholar/semantic_scholar.py
new file mode 100644
index 0000000000000000000000000000000000000000..81ef807b8c2f743b7d547708b9181bf791280ca6
--- /dev/null
+++ b/gpt_researcher/retrievers/semantic_scholar/semantic_scholar.py
@@ -0,0 +1,59 @@
+from typing import Dict, List
+
+import requests
+
+
+class SemanticScholarSearch:
+ """
+ Semantic Scholar API Retriever
+ """
+
+ BASE_URL = "https://api.semanticscholar.org/graph/v1/paper/search"
+ VALID_SORT_CRITERIA = ["relevance", "citationCount", "publicationDate"]
+
+ def __init__(self, query: str, sort: str = "relevance"):
+ """
+ Initialize the SemanticScholarSearch class with a query and sort criterion.
+
+ :param query: Search query string
+ :param sort: Sort criterion ('relevance', 'citationCount', 'publicationDate')
+ """
+ self.query = query
+ assert sort in self.VALID_SORT_CRITERIA, "Invalid sort criterion"
+ self.sort = sort.lower()
+
+ def search(self, max_results: int = 20) -> List[Dict[str, str]]:
+ """
+ Perform the search on Semantic Scholar and return results.
+
+ :param max_results: Maximum number of results to retrieve
+ :return: List of dictionaries containing title, href, and body of each paper
+ """
+ params = {
+ "query": self.query,
+ "limit": max_results,
+ "fields": "title,abstract,url,venue,year,authors,isOpenAccess,openAccessPdf",
+ "sort": self.sort,
+ }
+
+ try:
+ response = requests.get(self.BASE_URL, params=params)
+ response.raise_for_status()
+ except requests.RequestException as e:
+ print(f"An error occurred while accessing Semantic Scholar API: {e}")
+ return []
+
+ results = response.json().get("data", [])
+ search_result = []
+
+ for result in results:
+ if result.get("isOpenAccess") and result.get("openAccessPdf"):
+ search_result.append(
+ {
+ "title": result.get("title", "No Title"),
+ "href": result["openAccessPdf"].get("url", "No URL"),
+ "body": result.get("abstract", "Abstract not available"),
+ }
+ )
+
+ return search_result
diff --git a/gpt_researcher/retrievers/serpapi/__init__.py b/gpt_researcher/retrievers/serpapi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/serpapi/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/serpapi/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a694d23e4118e1b20820b8d557e8895842c4c6fb
Binary files /dev/null and b/gpt_researcher/retrievers/serpapi/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/serpapi/__pycache__/serpapi.cpython-312.pyc b/gpt_researcher/retrievers/serpapi/__pycache__/serpapi.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d227cf6e83018e91469111d2575bfe3c104718ce
Binary files /dev/null and b/gpt_researcher/retrievers/serpapi/__pycache__/serpapi.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/serpapi/serpapi.py b/gpt_researcher/retrievers/serpapi/serpapi.py
new file mode 100644
index 0000000000000000000000000000000000000000..f86e6f65789a311dd386d08d1cedf35711416f39
--- /dev/null
+++ b/gpt_researcher/retrievers/serpapi/serpapi.py
@@ -0,0 +1,76 @@
+# SerpApi Retriever
+
+# libraries
+import os
+import requests
+import urllib.parse
+
+
+class SerpApiSearch():
+ """
+ SerpApi Retriever
+ """
+ def __init__(self, query):
+ """
+ Initializes the SerpApiSearch object
+ Args:
+ query:
+ """
+ self.query = query
+ self.api_key = self.get_api_key()
+
+ def get_api_key(self):
+ """
+ Gets the SerpApi API key
+ Returns:
+
+ """
+ try:
+ api_key = os.environ["SERPAPI_API_KEY"]
+ except:
+ raise Exception("SerpApi API key not found. Please set the SERPAPI_API_KEY environment variable. "
+ "You can get a key at https://serpapi.com/")
+ return api_key
+
+ def search(self, max_results=7):
+ """
+ Searches the query
+ Returns:
+
+ """
+ print("SerpApiSearch: Searching with query {0}...".format(self.query))
+ """Useful for general internet search queries using SerpApi."""
+
+
+ url = "https://serpapi.com/search.json"
+ params = {
+ "q": self.query,
+ "api_key": self.api_key
+ }
+ encoded_url = url + "?" + urllib.parse.urlencode(params)
+ search_response = []
+ try:
+ response = requests.get(encoded_url, timeout=10)
+ if response.status_code == 200:
+ search_results = response.json()
+ if search_results:
+ results = search_results["organic_results"]
+ results_processed = 0
+ for result in results:
+ # skip youtube results
+ if "youtube.com" in result["link"]:
+ continue
+ if results_processed >= max_results:
+ break
+ search_result = {
+ "title": result["title"],
+ "href": result["link"],
+ "body": result["snippet"],
+ }
+ search_response.append(search_result)
+ results_processed += 1
+ except Exception as e:
+ print(f"Error: {e}. Failed fetching sources. Resulting in empty response.")
+ search_response = []
+
+ return search_response
diff --git a/gpt_researcher/retrievers/serper/__init__.py b/gpt_researcher/retrievers/serper/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/serper/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/serper/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b44768aad9af2f72451a114cf2e2ba0c367b4d6
Binary files /dev/null and b/gpt_researcher/retrievers/serper/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/serper/__pycache__/serper.cpython-312.pyc b/gpt_researcher/retrievers/serper/__pycache__/serper.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e86ac2765bfa403dad5cedd19d3fb930aa1bc1f1
Binary files /dev/null and b/gpt_researcher/retrievers/serper/__pycache__/serper.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/serper/serper.py b/gpt_researcher/retrievers/serper/serper.py
new file mode 100644
index 0000000000000000000000000000000000000000..f81a9840307dc01d685cbf22c4d44883682340b5
--- /dev/null
+++ b/gpt_researcher/retrievers/serper/serper.py
@@ -0,0 +1,81 @@
+# Google Serper Retriever
+
+# libraries
+import os
+import requests
+import json
+
+
+class SerperSearch():
+ """
+ Google Serper Retriever
+ """
+ def __init__(self, query):
+ """
+ Initializes the SerperSearch object
+ Args:
+ query:
+ """
+ self.query = query
+ self.api_key = self.get_api_key()
+
+ def get_api_key(self):
+ """
+ Gets the Serper API key
+ Returns:
+
+ """
+ try:
+ api_key = os.environ["SERPER_API_KEY"]
+ except:
+ raise Exception("Serper API key not found. Please set the SERPER_API_KEY environment variable. "
+ "You can get a key at https://serper.dev/")
+ return api_key
+
+ def search(self, max_results=7):
+ """
+ Searches the query
+ Returns:
+
+ """
+ print("Searching with query {0}...".format(self.query))
+ """Useful for general internet search queries using the Serp API."""
+
+
+ # Search the query (see https://serper.dev/playground for the format)
+ url = "https://google.serper.dev/search"
+
+ headers = {
+ 'X-API-KEY': self.api_key,
+ 'Content-Type': 'application/json'
+ }
+ data = json.dumps({"q": self.query, "num": max_results})
+
+ resp = requests.request("POST", url, timeout=10, headers=headers, data=data)
+
+ # Preprocess the results
+ if resp is None:
+ return
+ try:
+ search_results = json.loads(resp.text)
+ except Exception:
+ return
+ if search_results is None:
+ return
+
+ results = search_results["organic"]
+ search_results = []
+
+ # Normalize the results to match the format of the other search APIs
+ for result in results:
+ # skip youtube results
+ if "youtube.com" in result["link"]:
+ continue
+ search_result = {
+ "title": result["title"],
+ "href": result["link"],
+ "body": result["snippet"],
+ }
+ search_results.append(search_result)
+
+ return search_results
diff --git a/gpt_researcher/retrievers/tavily/__init__.py b/gpt_researcher/retrievers/tavily/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/retrievers/tavily/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/retrievers/tavily/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f931be11c5fa099b4d48ed614df06a594fbbf4b7
Binary files /dev/null and b/gpt_researcher/retrievers/tavily/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/tavily/__pycache__/tavily_search.cpython-312.pyc b/gpt_researcher/retrievers/tavily/__pycache__/tavily_search.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6298917a8f22d7e1c36f3ad22af3a69ab6c84d4e
Binary files /dev/null and b/gpt_researcher/retrievers/tavily/__pycache__/tavily_search.cpython-312.pyc differ
diff --git a/gpt_researcher/retrievers/tavily/tavily_search.py b/gpt_researcher/retrievers/tavily/tavily_search.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a112d3a6ead6b76929f95e6fd9403ec6a967a7f
--- /dev/null
+++ b/gpt_researcher/retrievers/tavily/tavily_search.py
@@ -0,0 +1,106 @@
+# Tavily API Retriever
+
+# libraries
+import os
+from typing import Literal, Sequence, Optional
+import requests
+import json
+
+
+class TavilySearch():
+ """
+ Tavily API Retriever
+ """
+
+ def __init__(self, query, headers=None, topic="general"):
+ """
+ Initializes the TavilySearch object
+ Args:
+ query:
+ """
+ self.query = query
+ self.headers = headers or {}
+ self.topic = topic
+ self.base_url = "https://api.tavily.com/search"
+ self.api_key = self.get_api_key()
+ self.headers = {
+ "Content-Type": "application/json",
+ }
+
+ def get_api_key(self):
+ """
+ Gets the Tavily API key
+ Returns:
+
+ """
+ api_key = self.headers.get("tavily_api_key")
+ if not api_key:
+ try:
+ api_key = os.environ["TAVILY_API_KEY"]
+ except KeyError:
+ raise Exception(
+ "Tavily API key not found. Please set the TAVILY_API_KEY environment variable.")
+ return api_key
+
+ def _search(self,
+ query: str,
+ search_depth: Literal["basic", "advanced"] = "basic",
+ topic: str = "general",
+ days: int = 2,
+ max_results: int = 5,
+ include_domains: Sequence[str] = None,
+ exclude_domains: Sequence[str] = None,
+ include_answer: bool = False,
+ include_raw_content: bool = False,
+ include_images: bool = False,
+ use_cache: bool = True,
+ ) -> dict:
+ """
+ Internal search method to send the request to the API.
+ """
+
+ data = {
+ "query": query,
+ "search_depth": search_depth,
+ "topic": topic,
+ "days": days,
+ "include_answer": include_answer,
+ "include_raw_content": include_raw_content,
+ "max_results": max_results,
+ "include_domains": include_domains,
+ "exclude_domains": exclude_domains,
+ "include_images": include_images,
+ "api_key": self.api_key,
+ "use_cache": use_cache,
+ }
+
+ response = requests.post(self.base_url, data=json.dumps(
+ data), headers=self.headers, timeout=100)
+
+ if response.status_code == 200:
+ return response.json()
+ else:
+ # Raises a HTTPError if the HTTP request returned an unsuccessful status code
+ response.raise_for_status()
+
+ def search(self, max_results=7):
+ """
+ Searches the query
+ Returns:
+
+ """
+ try:
+ # Search the query
+ results = self._search(
+ self.query, search_depth="basic", max_results=max_results, topic=self.topic)
+ sources = results.get("results", [])
+ if not sources:
+ raise Exception("No results found with Tavily API search.")
+ # Return the results
+ search_response = [{"href": obj["url"],
+ "body": obj["content"]} for obj in sources]
+ except Exception as e:
+ print(
+ f"Error: {e}. Failed fetching sources. Resulting in empty response.")
+ search_response = []
+ return search_response
diff --git a/gpt_researcher/retrievers/utils.py b/gpt_researcher/retrievers/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbed92fe69334101a29fcaf3e7086f2e5da030d3
--- /dev/null
+++ b/gpt_researcher/retrievers/utils.py
@@ -0,0 +1,42 @@
+import importlib.util
+import os
+
+VALID_RETRIEVERS = [
+ "arxiv",
+ "bing",
+ "custom",
+ "duckduckgo",
+ "exa",
+ "google",
+ "searchapi",
+ "searx",
+ "semantic_scholar",
+ "serpapi",
+ "serper",
+ "tavily",
+ "pubmed_central",
+]
+
+
+def check_pkg(pkg: str) -> None:
+ if not importlib.util.find_spec(pkg):
+ pkg_kebab = pkg.replace("_", "-")
+ raise ImportError(
+ f"Unable to import {pkg_kebab}. Please install with "
+ f"`pip install -U {pkg_kebab}`"
+ )
+
+# Get a list of all retriever names to be used as validators for supported retrievers
+def get_all_retriever_names() -> list:
+ try:
+ current_dir = os.path.dirname(__file__)
+
+ all_items = os.listdir(current_dir)
+
+ # Filter out only the directories, excluding __pycache__
+ retrievers = [item for item in all_items if os.path.isdir(os.path.join(current_dir, item))]
+ except Exception as e:
+ print(f"Error in get_all_retriever_names: {e}")
+ retrievers = VALID_RETRIEVERS
+
+ return retrievers
diff --git a/gpt_researcher/scraper/__init__.py b/gpt_researcher/scraper/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..672c26a05a7a5d00e3eada6a8fb0bf823fcd9aff
--- /dev/null
+++ b/gpt_researcher/scraper/__init__.py
@@ -0,0 +1,18 @@
+
+from .beautiful_soup.beautiful_soup import BeautifulSoupScraper
+from .web_base_loader.web_base_loader import WebBaseLoaderScraper
+from .arxiv.arxiv import ArxivScraper
+from .pymupdf.pymupdf import PyMuPDFScraper
+from .browser.browser import BrowserScraper
+from .tavily_extract.tavily_extract import TavilyExtract
+from .scraper import Scraper
+
+__all__ = [
+ "BeautifulSoupScraper",
+ "WebBaseLoaderScraper",
+ "ArxivScraper",
+ "PyMuPDFScraper",
+ "BrowserScraper",
+ "TavilyExtract",
+ "Scraper"
+]
\ No newline at end of file
diff --git a/gpt_researcher/scraper/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/scraper/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e28f7215d607f04c31a5839cf95a69c80bc2fad
Binary files /dev/null and b/gpt_researcher/scraper/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/__pycache__/scraper.cpython-312.pyc b/gpt_researcher/scraper/__pycache__/scraper.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c641a4c2035d991d472b31e144ee0528a0d650ef
Binary files /dev/null and b/gpt_researcher/scraper/__pycache__/scraper.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/__pycache__/utils.cpython-312.pyc b/gpt_researcher/scraper/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7dfff7d3bb60e70804712a220ed9ee830a7fc3b5
Binary files /dev/null and b/gpt_researcher/scraper/__pycache__/utils.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/arxiv/__init__.py b/gpt_researcher/scraper/arxiv/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/scraper/arxiv/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/scraper/arxiv/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7982e9ca9e438b7891b84648af8655e0ed379396
Binary files /dev/null and b/gpt_researcher/scraper/arxiv/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/arxiv/__pycache__/arxiv.cpython-312.pyc b/gpt_researcher/scraper/arxiv/__pycache__/arxiv.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3cf484439b243fca33b7d4f94dc30087fca815dc
Binary files /dev/null and b/gpt_researcher/scraper/arxiv/__pycache__/arxiv.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/arxiv/arxiv.py b/gpt_researcher/scraper/arxiv/arxiv.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d8582e413be63734d6f75de5909885a6e7a8435
--- /dev/null
+++ b/gpt_researcher/scraper/arxiv/arxiv.py
@@ -0,0 +1,22 @@
+from langchain_community.retrievers import ArxivRetriever
+
+
+class ArxivScraper:
+
+ def __init__(self, link, session=None):
+ self.link = link
+ self.session = session
+
+ def scrape(self):
+ """
+ The function scrapes relevant documents from Arxiv based on a given link and returns the content
+ of the first document.
+
+ Returns:
+ The code is returning the page content of the first document retrieved by the ArxivRetriever
+ for a given query extracted from the link.
+ """
+ query = self.link.split("/")[-1]
+ retriever = ArxivRetriever(load_max_docs=2, doc_content_chars_max=None)
+ docs = retriever.invoke(query=query)
+ return docs[0].page_content
diff --git a/gpt_researcher/scraper/beautiful_soup/__init__.py b/gpt_researcher/scraper/beautiful_soup/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/scraper/beautiful_soup/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/scraper/beautiful_soup/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2a5619e01c608a19d27de5c9a9736c464811b78
Binary files /dev/null and b/gpt_researcher/scraper/beautiful_soup/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/beautiful_soup/__pycache__/beautiful_soup.cpython-312.pyc b/gpt_researcher/scraper/beautiful_soup/__pycache__/beautiful_soup.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..249ac1049ee4a9c51a99d19efef3df17357d440b
Binary files /dev/null and b/gpt_researcher/scraper/beautiful_soup/__pycache__/beautiful_soup.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/beautiful_soup/beautiful_soup.py b/gpt_researcher/scraper/beautiful_soup/beautiful_soup.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1e324c7d4e4120bdc0cd163b22654491de047cd
--- /dev/null
+++ b/gpt_researcher/scraper/beautiful_soup/beautiful_soup.py
@@ -0,0 +1,74 @@
+from bs4 import BeautifulSoup
+from urllib.parse import urljoin
+
+from ..utils import get_relevant_images, extract_title
+
+class BeautifulSoupScraper:
+
+ def __init__(self, link, session=None):
+ self.link = link
+ self.session = session
+
+ def scrape(self):
+ """
+ This function scrapes content from a webpage by making a GET request, parsing the HTML using
+ BeautifulSoup, and extracting script and style elements before returning the cleaned content.
+
+ Returns:
+ The `scrape` method is returning the cleaned and extracted content from the webpage specified
+ by the `self.link` attribute. The method fetches the webpage content, removes script and style
+ tags, extracts the text content, and returns the cleaned content as a string. If any exception
+ occurs during the process, an error message is printed and an empty string is returned.
+ """
+ try:
+ response = self.session.get(self.link, timeout=4)
+ soup = BeautifulSoup(
+ response.content, "lxml", from_encoding=response.encoding
+ )
+
+ for script_or_style in soup(["script", "style"]):
+ script_or_style.extract()
+
+ raw_content = self.get_content_from_url(soup)
+ lines = (line.strip() for line in raw_content.splitlines())
+ chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
+ content = "\n".join(chunk for chunk in chunks if chunk)
+
+ image_urls = get_relevant_images(soup, self.link)
+
+ # Extract the title using the utility function
+ title = extract_title(soup)
+
+ return content, image_urls, title
+
+ except Exception as e:
+ print("Error! : " + str(e))
+ return "", [], ""
+
+ def get_content_from_url(self, soup: BeautifulSoup) -> str:
+ """Get the relevant text from the soup with improved filtering"""
+ text_elements = []
+ tags = ["h1", "h2", "h3", "h4", "h5", "p", "li", "div", "span"]
+
+ for element in soup.find_all(tags):
+ # Skip empty elements
+ if not element.text.strip():
+ continue
+
+ # Skip elements with very short text (likely buttons or links)
+ if len(element.text.split()) < 3:
+ continue
+
+ # Check if the element is likely to be navigation or a menu
+ parent_classes = element.parent.get('class', [])
+ if any(cls in ['nav', 'menu', 'sidebar', 'footer'] for cls in parent_classes):
+ continue
+
+ # Remove excess whitespace and join lines
+ cleaned_text = ' '.join(element.text.split())
+
+ # Add the cleaned text to our list of elements
+ text_elements.append(cleaned_text)
+
+ # Join all text elements with newlines
+ return '\n\n'.join(text_elements)
diff --git a/gpt_researcher/scraper/browser/__init__.py b/gpt_researcher/scraper/browser/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/scraper/browser/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/scraper/browser/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cab329e1169a8d1c7682f10086dbf9793014d2e3
Binary files /dev/null and b/gpt_researcher/scraper/browser/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/browser/__pycache__/browser.cpython-312.pyc b/gpt_researcher/scraper/browser/__pycache__/browser.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67bcb854c80cb6ded7364e6a7418ef478fd8cec8
Binary files /dev/null and b/gpt_researcher/scraper/browser/__pycache__/browser.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/browser/browser.py b/gpt_researcher/scraper/browser/browser.py
new file mode 100644
index 0000000000000000000000000000000000000000..4aa234c8d8dca2ad5efeca25447c3caec0d14188
--- /dev/null
+++ b/gpt_researcher/scraper/browser/browser.py
@@ -0,0 +1,272 @@
+from __future__ import annotations
+
+import traceback
+import pickle
+from pathlib import Path
+from sys import platform
+import time
+import random
+import string
+import os
+
+from bs4 import BeautifulSoup
+
+from .processing.scrape_skills import (scrape_pdf_with_pymupdf,
+ scrape_pdf_with_arxiv)
+
+from urllib.parse import urljoin
+
+FILE_DIR = Path(__file__).parent.parent
+
+from ..utils import get_relevant_images, extract_title
+
+
+class BrowserScraper:
+ def __init__(self, url: str, session=None):
+ self.url = url
+ self.session = session
+ self.selenium_web_browser = "chrome"
+ self.headless = False
+ self.user_agent = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
+ "AppleWebKit/537.36 (KHTML, like Gecko) "
+ "Chrome/128.0.0.0 Safari/537.36")
+ self.driver = None
+ self.use_browser_cookies = False
+ self._import_selenium() # Import only if used to avoid unnecessary dependencies
+ self.cookie_filename = f"{self._generate_random_string(8)}.pkl"
+
+ def scrape(self) -> tuple:
+ if not self.url:
+ print("URL not specified")
+ return "A URL was not specified, cancelling request to browse website.", [], ""
+
+ try:
+ self.setup_driver()
+ self._visit_google_and_save_cookies()
+ self._load_saved_cookies()
+ self._add_header()
+
+ text, image_urls, title = self.scrape_text_with_selenium()
+ return text, image_urls, title
+ except Exception as e:
+ print(f"An error occurred during scraping: {str(e)}")
+ print("Full stack trace:")
+ print(traceback.format_exc())
+ return f"An error occurred: {str(e)}\n\nStack trace:\n{traceback.format_exc()}", [], ""
+ finally:
+ if self.driver:
+ self.driver.quit()
+ self._cleanup_cookie_file()
+
+ def _import_selenium(self):
+ try:
+ global webdriver, By, EC, WebDriverWait, TimeoutException, WebDriverException
+ from selenium import webdriver
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.wait import WebDriverWait
+ from selenium.common.exceptions import TimeoutException, WebDriverException
+
+ global ChromeOptions, FirefoxOptions, SafariOptions
+ from selenium.webdriver.chrome.options import Options as ChromeOptions
+ from selenium.webdriver.firefox.options import Options as FirefoxOptions
+ from selenium.webdriver.safari.options import Options as SafariOptions
+ except ImportError as e:
+ print(f"Failed to import Selenium: {str(e)}")
+ print("Please install Selenium and its dependencies to use BrowserScraper.")
+ print("You can install Selenium using pip:")
+ print(" pip install selenium")
+ print("If you're using a virtual environment, make sure it's activated.")
+ raise ImportError(
+ "Selenium is required but not installed. See error message above for installation instructions.") from e
+
+ def setup_driver(self) -> None:
+ # print(f"Setting up {self.selenium_web_browser} driver...")
+
+ options_available = {
+ "chrome": ChromeOptions,
+ "firefox": FirefoxOptions,
+ "safari": SafariOptions,
+ }
+
+ options = options_available[self.selenium_web_browser]()
+ options.add_argument(f"user-agent={self.user_agent}")
+ if self.headless:
+ options.add_argument("--headless")
+ options.add_argument("--enable-javascript")
+
+ try:
+ if self.selenium_web_browser == "firefox":
+ self.driver = webdriver.Firefox(options=options)
+ elif self.selenium_web_browser == "safari":
+ self.driver = webdriver.Safari(options=options)
+ else: # chrome
+ if platform == "linux" or platform == "linux2":
+ options.add_argument("--disable-dev-shm-usage")
+ options.add_argument("--remote-debugging-port=9222")
+ options.add_argument("--no-sandbox")
+ options.add_experimental_option("prefs", {"download_restrictions": 3})
+ self.driver = webdriver.Chrome(options=options)
+
+ if self.use_browser_cookies:
+ self._load_browser_cookies()
+
+ # print(f"{self.selenium_web_browser.capitalize()} driver set up successfully.")
+ except Exception as e:
+ print(f"Failed to set up {self.selenium_web_browser} driver: {str(e)}")
+ print("Full stack trace:")
+ print(traceback.format_exc())
+ raise
+
+ def _load_saved_cookies(self):
+ """Load saved cookies before visiting the target URL"""
+ cookie_file = Path(self.cookie_filename)
+ if cookie_file.exists():
+ cookies = pickle.load(open(self.cookie_filename, "rb"))
+ for cookie in cookies:
+ self.driver.add_cookie(cookie)
+ else:
+ print("No saved cookies found.")
+
+ def _load_browser_cookies(self):
+ """Load cookies directly from the browser"""
+ try:
+ import browser_cookie3
+ except ImportError:
+ print("browser_cookie3 is not installed. Please install it using: pip install browser_cookie3")
+ return
+
+ if self.selenium_web_browser == "chrome":
+ cookies = browser_cookie3.chrome()
+ elif self.selenium_web_browser == "firefox":
+ cookies = browser_cookie3.firefox()
+ else:
+ print(f"Cookie loading not supported for {self.selenium_web_browser}")
+ return
+
+ for cookie in cookies:
+ self.driver.add_cookie({'name': cookie.name, 'value': cookie.value, 'domain': cookie.domain})
+
+ def _cleanup_cookie_file(self):
+ """Remove the cookie file"""
+ cookie_file = Path(self.cookie_filename)
+ if cookie_file.exists():
+ try:
+ os.remove(self.cookie_filename)
+ except Exception as e:
+ print(f"Failed to remove cookie file: {str(e)}")
+ else:
+ print("No cookie file found to remove.")
+
+ def _generate_random_string(self, length):
+ """Generate a random string of specified length"""
+ return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
+
+ def _get_domain(self):
+ """Extract domain from URL"""
+ from urllib.parse import urlparse
+ """Get domain from URL, removing 'www' if present"""
+ domain = urlparse(self.url).netloc
+ return domain[4:] if domain.startswith('www.') else domain
+
+ def _visit_google_and_save_cookies(self):
+ """Visit Google and save cookies before navigating to the target URL"""
+ try:
+ self.driver.get("https://www.google.com")
+ time.sleep(2) # Wait for cookies to be set
+
+ # Save cookies to a file
+ cookies = self.driver.get_cookies()
+ pickle.dump(cookies, open(self.cookie_filename, "wb"))
+
+ # print("Google cookies saved successfully.")
+ except Exception as e:
+ print(f"Failed to visit Google and save cookies: {str(e)}")
+ print("Full stack trace:")
+ print(traceback.format_exc())
+
+ def scrape_text_with_selenium(self) -> tuple:
+ self.driver.get(self.url)
+
+ try:
+ WebDriverWait(self.driver, 20).until(
+ EC.presence_of_element_located((By.TAG_NAME, "body"))
+ )
+ except TimeoutException as e:
+ print("Timed out waiting for page to load")
+ print(f"Full stack trace:\n{traceback.format_exc()}")
+ return "Page load timed out", [], ""
+
+ self._scroll_to_bottom()
+
+ if self.url.endswith(".pdf"):
+ text = scrape_pdf_with_pymupdf(self.url)
+ return text, [], ""
+ elif "arxiv" in self.url:
+ doc_num = self.url.split("/")[-1]
+ text = scrape_pdf_with_arxiv(doc_num)
+ return text, [], ""
+ else:
+ page_source = self.driver.execute_script("return document.body.outerHTML;")
+ soup = BeautifulSoup(page_source, "html.parser")
+
+ for script in soup(["script", "style"]):
+ script.extract()
+
+ text = self.get_text(soup)
+ image_urls = get_relevant_images(soup, self.url)
+ title = extract_title(soup)
+
+ lines = (line.strip() for line in text.splitlines())
+ chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
+ text = "\n".join(chunk for chunk in chunks if chunk)
+ return text, image_urls, title
+
+ def get_text(self, soup: BeautifulSoup) -> str:
+ """Get the relevant text from the soup with improved filtering"""
+ text_elements = []
+ tags = ["h1", "h2", "h3", "h4", "h5", "p", "li", "div", "span"]
+
+ for element in soup.find_all(tags):
+ # Skip empty elements
+ if not element.text.strip():
+ continue
+
+ # Skip elements with very short text (likely buttons or links)
+ if len(element.text.split()) < 3:
+ continue
+
+ # Check if the element is likely to be navigation or a menu
+ parent_classes = element.parent.get('class', [])
+ if any(cls in ['nav', 'menu', 'sidebar', 'footer'] for cls in parent_classes):
+ continue
+
+ # Remove excess whitespace and join lines
+ cleaned_text = ' '.join(element.text.split())
+
+ # Add the cleaned text to our list of elements
+ text_elements.append(cleaned_text)
+
+ # Join all text elements with newlines
+ return '\n\n'.join(text_elements)
+
+ def _scroll_to_bottom(self):
+ """Scroll to the bottom of the page to load all content"""
+ last_height = self.driver.execute_script("return document.body.scrollHeight")
+ while True:
+ self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
+ time.sleep(2) # Wait for content to load
+ new_height = self.driver.execute_script("return document.body.scrollHeight")
+ if new_height == last_height:
+ break
+ last_height = new_height
+
+ def _scroll_to_percentage(self, ratio: float) -> None:
+ """Scroll to a percentage of the page"""
+ if ratio < 0 or ratio > 1:
+ raise ValueError("Percentage should be between 0 and 1")
+ self.driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")
+
+ def _add_header(self) -> None:
+ """Add a header to the website"""
+ self.driver.execute_script(open(f"{FILE_DIR}/browser/js/overlay.js", "r").read())
diff --git a/gpt_researcher/scraper/browser/js/overlay.js b/gpt_researcher/scraper/browser/js/overlay.js
new file mode 100644
index 0000000000000000000000000000000000000000..1110b6d5210c029aaa0720ab65f03d5d5c0f45d8
--- /dev/null
+++ b/gpt_researcher/scraper/browser/js/overlay.js
@@ -0,0 +1,29 @@
+const overlay = document.createElement('div');
+Object.assign(overlay.style, {
+ position: 'fixed',
+ zIndex: 999999,
+ top: 0,
+ left: 0,
+ width: '100%',
+ height: '100%',
+ background: 'rgba(0, 0, 0, 0.7)',
+ color: '#fff',
+ fontSize: '24px',
+ fontWeight: 'bold',
+ display: 'flex',
+ justifyContent: 'center',
+ alignItems: 'center',
+});
+const textContent = document.createElement('div');
+Object.assign(textContent.style, {
+ textAlign: 'center',
+});
+textContent.textContent = 'GPT Researcher: Analyzing Page';
+overlay.appendChild(textContent);
+document.body.append(overlay);
+document.body.style.overflow = 'hidden';
+let dotCount = 0;
+setInterval(() => {
+ textContent.textContent = 'GPT Researcher: Analyzing Page' + '.'.repeat(dotCount);
+ dotCount = (dotCount + 1) % 4;
+}, 1000);
diff --git a/gpt_researcher/scraper/browser/processing/__init__.py b/gpt_researcher/scraper/browser/processing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/scraper/browser/processing/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/scraper/browser/processing/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28abedea595ce3931c304ffe4c48cb8ad6eb6079
Binary files /dev/null and b/gpt_researcher/scraper/browser/processing/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/browser/processing/__pycache__/scrape_skills.cpython-312.pyc b/gpt_researcher/scraper/browser/processing/__pycache__/scrape_skills.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e6348c819a40826b44d7612e581f11c545e0e1e
Binary files /dev/null and b/gpt_researcher/scraper/browser/processing/__pycache__/scrape_skills.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/browser/processing/html.py b/gpt_researcher/scraper/browser/processing/html.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dfc626a1b82b22c749f803d55255356c2349919
--- /dev/null
+++ b/gpt_researcher/scraper/browser/processing/html.py
@@ -0,0 +1,33 @@
+"""HTML processing functions"""
+from __future__ import annotations
+
+from bs4 import BeautifulSoup
+from requests.compat import urljoin
+
+
+def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
+ """Extract hyperlinks from a BeautifulSoup object
+
+ Args:
+ soup (BeautifulSoup): The BeautifulSoup object
+ base_url (str): The base URL
+
+ Returns:
+ List[Tuple[str, str]]: The extracted hyperlinks
+ """
+ return [
+ (link.text, urljoin(base_url, link["href"]))
+ for link in soup.find_all("a", href=True)
+ ]
+
+
+def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
+ """Format hyperlinks to be displayed to the user
+
+ Args:
+ hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
+
+ Returns:
+ List[str]: The formatted hyperlinks
+ """
+ return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
diff --git a/gpt_researcher/scraper/browser/processing/scrape_skills.py b/gpt_researcher/scraper/browser/processing/scrape_skills.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c0f497293be4d1dfbe5f30702797ba806ddd845
--- /dev/null
+++ b/gpt_researcher/scraper/browser/processing/scrape_skills.py
@@ -0,0 +1,31 @@
+from langchain_community.document_loaders import PyMuPDFLoader
+from langchain_community.retrievers import ArxivRetriever
+
+
+def scrape_pdf_with_pymupdf(url) -> str:
+ """Scrape a pdf with pymupdf
+
+ Args:
+ url (str): The url of the pdf to scrape
+
+ Returns:
+ str: The text scraped from the pdf
+ """
+ loader = PyMuPDFLoader(url)
+ doc = loader.load()
+ return str(doc)
+
+
+def scrape_pdf_with_arxiv(query) -> str:
+ """Scrape a pdf with arxiv
+ default document length of 70000 about ~15 pages or None for no limit
+
+ Args:
+ query (str): The query to search for
+
+ Returns:
+ str: The text scraped from the pdf
+ """
+ retriever = ArxivRetriever(load_max_docs=2, doc_content_chars_max=None)
+ docs = retriever.get_relevant_documents(query=query)
+ return docs[0].page_content
\ No newline at end of file
diff --git a/gpt_researcher/scraper/pymupdf/__init__.py b/gpt_researcher/scraper/pymupdf/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/scraper/pymupdf/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/scraper/pymupdf/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b4bd660fe400781e02c4c1c10ae446afa4d72f3
Binary files /dev/null and b/gpt_researcher/scraper/pymupdf/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/pymupdf/__pycache__/pymupdf.cpython-312.pyc b/gpt_researcher/scraper/pymupdf/__pycache__/pymupdf.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c305150c4e8fe55f4b4ef790c0549ffb25bfaebf
Binary files /dev/null and b/gpt_researcher/scraper/pymupdf/__pycache__/pymupdf.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/pymupdf/pymupdf.py b/gpt_researcher/scraper/pymupdf/pymupdf.py
new file mode 100644
index 0000000000000000000000000000000000000000..37faed8040c53e1961978f173bcf058bb53c2a13
--- /dev/null
+++ b/gpt_researcher/scraper/pymupdf/pymupdf.py
@@ -0,0 +1,65 @@
+import os
+import requests
+import tempfile
+from urllib.parse import urlparse
+from langchain_community.document_loaders import PyMuPDFLoader
+
+
+class PyMuPDFScraper:
+
+ def __init__(self, link, session=None):
+ """
+ Initialize the scraper with a link and an optional session.
+
+ Args:
+ link (str): The URL or local file path of the PDF document.
+ session (requests.Session, optional): An optional session for making HTTP requests.
+ """
+ self.link = link
+ self.session = session
+
+ def is_url(self) -> bool:
+ """
+ Check if the provided `link` is a valid URL.
+
+ Returns:
+ bool: True if the link is a valid URL, False otherwise.
+ """
+ try:
+ result = urlparse(self.link)
+ return all([result.scheme, result.netloc]) # Check for valid scheme and network location
+ except Exception:
+ return False
+
+ def scrape(self) -> str:
+ """
+ The `scrape` function uses PyMuPDFLoader to load a document from the provided link (either URL or local file)
+ and returns the document as a string.
+
+ Returns:
+ str: A string representation of the loaded document.
+ """
+ try:
+ if self.is_url():
+ response = requests.get(self.link, timeout=5, stream=True)
+ response.raise_for_status()
+
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
+ temp_filename = temp_file.name # Get the temporary file name
+ for chunk in response.iter_content(chunk_size=8192):
+ temp_file.write(chunk) # Write the downloaded content to the temporary file
+
+ loader = PyMuPDFLoader(temp_filename)
+ doc = loader.load()
+
+ os.remove(temp_filename)
+ else:
+ loader = PyMuPDFLoader(self.link)
+ doc = loader.load()
+
+ return str(doc)
+
+ except requests.exceptions.Timeout:
+ print(f"Download timed out. Please check the link : {self.link}")
+ except Exception as e:
+ print(f"Error loading PDF : {self.link} {e}")
diff --git a/gpt_researcher/scraper/scraper.py b/gpt_researcher/scraper/scraper.py
new file mode 100644
index 0000000000000000000000000000000000000000..51d1ad3a3cf81ade028b3ea0b4de4d5e6b2afbed
--- /dev/null
+++ b/gpt_researcher/scraper/scraper.py
@@ -0,0 +1,127 @@
+from concurrent.futures.thread import ThreadPoolExecutor
+from functools import partial
+from colorama import Fore, init
+
+import requests
+import subprocess
+import sys
+import importlib
+
+from . import (
+ ArxivScraper,
+ BeautifulSoupScraper,
+ PyMuPDFScraper,
+ WebBaseLoaderScraper,
+ BrowserScraper,
+ TavilyExtract
+)
+
+
+class Scraper:
+ """
+ Scraper class to extract the content from the links
+ """
+
+ def __init__(self, urls, user_agent, scraper):
+ """
+ Initialize the Scraper class.
+ Args:
+ urls:
+ """
+ self.urls = urls
+ self.session = requests.Session()
+ self.session.headers.update({"User-Agent": user_agent})
+ self.scraper = scraper
+ if self.scraper == "tavily_extract":
+ self._check_pkg(self.scraper)
+
+ def run(self):
+ """
+ Extracts the content from the links
+ """
+ partial_extract = partial(self.extract_data_from_url, session=self.session)
+ with ThreadPoolExecutor(max_workers=20) as executor:
+ contents = executor.map(partial_extract, self.urls)
+ res = [content for content in contents if content["raw_content"] is not None]
+ return res
+
+ def _check_pkg(self, scrapper_name : str) -> None:
+ """
+ Checks and ensures required Python packages are available for scrapers that need
+ dependencies beyond requirements.txt. When adding a new scraper to the repo, update `pkg_map`
+ with its required information and call check_pkg() during initialization.
+ """
+ pkg_map = {
+ "tavily_extract": {"package_installation_name": "tavily-python",
+ "import_name": "tavily"},
+ }
+ pkg = pkg_map[scrapper_name]
+ if not importlib.util.find_spec(pkg["import_name"]):
+ pkg_inst_name = pkg["package_installation_name"]
+ init(autoreset=True)
+ print(Fore.YELLOW + f"{pkg_inst_name} not found. Attempting to install...")
+ try:
+ subprocess.check_call([sys.executable, "-m", "pip", "install", pkg_inst_name])
+ print(Fore.GREEN + f"{pkg_inst_name} installed successfully.")
+ except subprocess.CalledProcessError:
+ raise ImportError(
+ Fore.RED + f"Unable to install {pkg_inst_name}. Please install manually with "
+ f"`pip install -U {pkg_inst_name}`"
+ )
+
+ def extract_data_from_url(self, link, session):
+ """
+ Extracts the data from the link
+ """
+ try:
+ Scraper = self.get_scraper(link)
+ scraper = Scraper(link, session)
+ content, image_urls, title = scraper.scrape()
+
+ if len(content) < 100:
+ return {"url": link, "raw_content": None, "image_urls": [], "title": ""}
+
+ return {"url": link, "raw_content": content, "image_urls": image_urls, "title": title}
+ except Exception as e:
+ return {"url": link, "raw_content": None, "image_urls": [], "title": ""}
+
+ def get_scraper(self, link):
+ """
+ The function `get_scraper` determines the appropriate scraper class based on the provided link
+ or a default scraper if none matches.
+
+ Args:
+ link: The `get_scraper` method takes a `link` parameter which is a URL link to a webpage or a
+ PDF file. Based on the type of content the link points to, the method determines the appropriate
+ scraper class to use for extracting data from that content.
+
+ Returns:
+ The `get_scraper` method returns the scraper class based on the provided link. The method
+ checks the link to determine the appropriate scraper class to use based on predefined mappings
+ in the `SCRAPER_CLASSES` dictionary. If the link ends with ".pdf", it selects the
+ `PyMuPDFScraper` class. If the link contains "arxiv.org", it selects the `ArxivScraper
+ """
+
+ SCRAPER_CLASSES = {
+ "pdf": PyMuPDFScraper,
+ "arxiv": ArxivScraper,
+ "bs": BeautifulSoupScraper,
+ "web_base_loader": WebBaseLoaderScraper,
+ "browser": BrowserScraper,
+ "tavily_extract": TavilyExtract
+ }
+
+ scraper_key = None
+
+ if link.endswith(".pdf"):
+ scraper_key = "pdf"
+ elif "arxiv.org" in link:
+ scraper_key = "arxiv"
+ else:
+ scraper_key = self.scraper
+
+ scraper_class = SCRAPER_CLASSES.get(scraper_key)
+ if scraper_class is None:
+ raise Exception("Scraper not found.")
+
+ return scraper_class
diff --git a/gpt_researcher/scraper/tavily_extract/__init__.py b/gpt_researcher/scraper/tavily_extract/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/scraper/tavily_extract/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/scraper/tavily_extract/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a6fb2d95c36a60c3cfe162a80d7d5a95246adfa
Binary files /dev/null and b/gpt_researcher/scraper/tavily_extract/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/tavily_extract/__pycache__/tavily_extract.cpython-312.pyc b/gpt_researcher/scraper/tavily_extract/__pycache__/tavily_extract.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e43c209d0634fdd92f1937e5ab73c13078338b1d
Binary files /dev/null and b/gpt_researcher/scraper/tavily_extract/__pycache__/tavily_extract.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/tavily_extract/tavily_extract.py b/gpt_researcher/scraper/tavily_extract/tavily_extract.py
new file mode 100644
index 0000000000000000000000000000000000000000..018bf468129c7e620b1f2269f8f039070c07035e
--- /dev/null
+++ b/gpt_researcher/scraper/tavily_extract/tavily_extract.py
@@ -0,0 +1,62 @@
+from bs4 import BeautifulSoup
+import os
+from ..utils import get_relevant_images, extract_title
+
+class TavilyExtract:
+
+ def __init__(self, link, session=None):
+ self.link = link
+ self.session = session
+ from tavily import TavilyClient
+ self.tavily_client = TavilyClient(api_key=self.get_api_key())
+
+ def get_api_key(self) -> str:
+ """
+ Gets the Tavily API key
+ Returns:
+ Api key (str)
+ """
+ try:
+ api_key = os.environ["TAVILY_API_KEY"]
+ except KeyError:
+ raise Exception(
+ "Tavily API key not found. Please set the TAVILY_API_KEY environment variable.")
+ return api_key
+
+ def scrape(self) -> tuple:
+ """
+ This function extracts content from a specified link using the Tavily Python SDK, the title and
+ images from the link are extracted using the functions from `gpt_researcher/scraper/utils.py`.
+
+ Returns:
+ The `scrape` method returns a tuple containing the extracted content, a list of image URLs, and
+ the title of the webpage specified by the `self.link` attribute. It uses the Tavily Python SDK to
+ extract and clean content from the webpage. If any exception occurs during the process, an error
+ message is printed and an empty result is returned.
+ """
+
+ try:
+ response = self.tavily_client.extract(urls=self.link)
+ if response['failed_results']:
+ return "", [], ""
+
+ # Parse the HTML content of the response to create a BeautifulSoup object for the utility functions
+ response_bs = self.session.get(self.link, timeout=4)
+ soup = BeautifulSoup(
+ response_bs.content, "lxml", from_encoding=response_bs.encoding
+ )
+
+ # Since only a single link is provided to tavily_client, the results will contain only one entry.
+ content = response['results'][0]['raw_content']
+
+ # Get relevant images using the utility function
+ image_urls = get_relevant_images(soup, self.link)
+
+ # Extract the title using the utility function
+ title = extract_title(soup)
+
+ return content, image_urls, title
+
+ except Exception as e:
+ print("Error! : " + str(e))
+ return "", [], ""
\ No newline at end of file
diff --git a/gpt_researcher/scraper/utils.py b/gpt_researcher/scraper/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e18df8cbe9e1f0e83fb61f38381d826839892bb0
--- /dev/null
+++ b/gpt_researcher/scraper/utils.py
@@ -0,0 +1,86 @@
+from bs4 import BeautifulSoup
+from urllib.parse import urljoin, urlparse, parse_qs
+import logging
+import hashlib
+
+def get_relevant_images(soup: BeautifulSoup, url: str) -> list:
+ """Extract relevant images from the page"""
+ image_urls = []
+
+ try:
+ # Find all img tags with src attribute
+ all_images = soup.find_all('img', src=True)
+
+ for img in all_images:
+ img_src = urljoin(url, img['src'])
+ if img_src.startswith(('http://', 'https://')):
+ score = 0
+ # Check for relevant classes
+ if any(cls in img.get('class', []) for cls in ['header', 'featured', 'hero', 'thumbnail', 'main', 'content']):
+ score = 4 # Higher score
+ # Check for size attributes
+ elif img.get('width') and img.get('height'):
+ width = parse_dimension(img['width'])
+ height = parse_dimension(img['height'])
+ if width and height:
+ if width >= 2000 and height >= 1000:
+ score = 3 # Medium score (very large images)
+ elif width >= 1600 or height >= 800:
+ score = 2 # Lower score
+ elif width >= 800 or height >= 500:
+ score = 1 # Lowest score
+ elif width >= 500 or height >= 300:
+ score = 0 # Lowest score
+ else:
+ continue # Skip small images
+
+ image_urls.append({'url': img_src, 'score': score})
+
+ # Sort images by score (highest first)
+ sorted_images = sorted(image_urls, key=lambda x: x['score'], reverse=True)
+
+ # Select all images with score 3 and 2, then add score 1 images up to a total of 10
+ high_score_images = [img for img in sorted_images if img['score'] in [3, 2]]
+ low_score_images = [img for img in sorted_images if img['score'] == 1]
+
+ result = high_score_images + low_score_images[:max(0, 10 - len(high_score_images))]
+ return result[:10] # Ensure we don't return more than 10 images in total
+
+ except Exception as e:
+ logging.error(f"Error in get_relevant_images: {e}")
+ return []
+
+def parse_dimension(value: str) -> int:
+ """Parse dimension value, handling px units"""
+ if value.lower().endswith('px'):
+ value = value[:-2] # Remove 'px' suffix
+ try:
+ return int(value) # Convert to float first to handle decimal values
+ except ValueError as e:
+ print(f"Error parsing dimension value {value}: {e}")
+ return None
+
+def extract_title(soup: BeautifulSoup) -> str:
+ """Extract the title from the BeautifulSoup object"""
+ return soup.title.string if soup.title else ""
+
+def get_image_hash(image_url: str) -> str:
+ """Calculate a simple hash based on the image filename and essential query parameters"""
+ try:
+ parsed_url = urlparse(image_url)
+
+ # Extract the filename
+ filename = parsed_url.path.split('/')[-1]
+
+ # Extract essential query parameters (e.g., 'url' for CDN-served images)
+ query_params = parse_qs(parsed_url.query)
+ essential_params = query_params.get('url', [])
+
+ # Combine filename and essential parameters
+ image_identifier = filename + ''.join(essential_params)
+
+ # Calculate hash
+ return hashlib.md5(image_identifier.encode()).hexdigest()
+ except Exception as e:
+ logging.error(f"Error calculating image hash for {image_url}: {e}")
+ return None
diff --git a/gpt_researcher/scraper/web_base_loader/__init__.py b/gpt_researcher/scraper/web_base_loader/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/scraper/web_base_loader/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/scraper/web_base_loader/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66fda005d6eb1364ca6a518a1bd875cc039c7ec5
Binary files /dev/null and b/gpt_researcher/scraper/web_base_loader/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/web_base_loader/__pycache__/web_base_loader.cpython-312.pyc b/gpt_researcher/scraper/web_base_loader/__pycache__/web_base_loader.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c7e0a86672aa9ba7d44f65fe4150f778c3762eca
Binary files /dev/null and b/gpt_researcher/scraper/web_base_loader/__pycache__/web_base_loader.cpython-312.pyc differ
diff --git a/gpt_researcher/scraper/web_base_loader/web_base_loader.py b/gpt_researcher/scraper/web_base_loader/web_base_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..61b4c5782db3b655ba7d6de88abdc09e75892c0c
--- /dev/null
+++ b/gpt_researcher/scraper/web_base_loader/web_base_loader.py
@@ -0,0 +1,43 @@
+from bs4 import BeautifulSoup
+from urllib.parse import urljoin
+import requests
+from ..utils import get_relevant_images, extract_title
+
+class WebBaseLoaderScraper:
+
+ def __init__(self, link, session=None):
+ self.link = link
+ self.session = session or requests.Session()
+
+ def scrape(self) -> tuple:
+ """
+ This Python function scrapes content from a webpage using a WebBaseLoader object and returns the
+ concatenated page content.
+
+ Returns:
+ The `scrape` method is returning a string variable named `content` which contains the
+ concatenated page content from the documents loaded by the `WebBaseLoader`. If an exception
+ occurs during the process, an error message is printed and an empty string is returned.
+ """
+ try:
+ from langchain_community.document_loaders import WebBaseLoader
+ loader = WebBaseLoader(self.link)
+ loader.requests_kwargs = {"verify": False}
+ docs = loader.load()
+ content = ""
+
+ for doc in docs:
+ content += doc.page_content
+
+ response = self.session.get(self.link)
+ soup = BeautifulSoup(response.content, 'html.parser')
+ image_urls = get_relevant_images(soup, self.link)
+
+ # Extract the title using the utility function
+ title = extract_title(soup)
+
+ return content, image_urls, title
+
+ except Exception as e:
+ print("Error! : " + str(e))
+ return "", [], ""
diff --git a/gpt_researcher/skills/__init__.py b/gpt_researcher/skills/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb9364d984ca20fc2ce58e05330ba05a19783133
--- /dev/null
+++ b/gpt_researcher/skills/__init__.py
@@ -0,0 +1,13 @@
+from .context_manager import ContextManager
+from .researcher import ResearchConductor
+from .writer import ReportGenerator
+from .browser import BrowserManager
+from .curator import SourceCurator
+
+__all__ = [
+ 'ResearchConductor',
+ 'ReportGenerator',
+ 'ContextManager',
+ 'BrowserManager',
+ 'SourceCurator'
+]
diff --git a/gpt_researcher/skills/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/skills/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a8dd86ea37842486bcb4093ccca7a65fc2cf3ee
Binary files /dev/null and b/gpt_researcher/skills/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/skills/__pycache__/browser.cpython-312.pyc b/gpt_researcher/skills/__pycache__/browser.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a365d3e3e1ef833f7deb5daf45ceb4994e9ffe12
Binary files /dev/null and b/gpt_researcher/skills/__pycache__/browser.cpython-312.pyc differ
diff --git a/gpt_researcher/skills/__pycache__/context_manager.cpython-312.pyc b/gpt_researcher/skills/__pycache__/context_manager.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4720c72ce7bd80eba93285fe7d818f152d54702d
Binary files /dev/null and b/gpt_researcher/skills/__pycache__/context_manager.cpython-312.pyc differ
diff --git a/gpt_researcher/skills/__pycache__/curator.cpython-312.pyc b/gpt_researcher/skills/__pycache__/curator.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..feb814a16be09416144bc962fd4ad39f33722ca6
Binary files /dev/null and b/gpt_researcher/skills/__pycache__/curator.cpython-312.pyc differ
diff --git a/gpt_researcher/skills/__pycache__/researcher.cpython-312.pyc b/gpt_researcher/skills/__pycache__/researcher.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48a272331f54cc2713f4d0fc5aa4aac8a5a6c01b
Binary files /dev/null and b/gpt_researcher/skills/__pycache__/researcher.cpython-312.pyc differ
diff --git a/gpt_researcher/skills/__pycache__/writer.cpython-312.pyc b/gpt_researcher/skills/__pycache__/writer.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42bd79ab28eafca6899e547474833fc53875b441
Binary files /dev/null and b/gpt_researcher/skills/__pycache__/writer.cpython-312.pyc differ
diff --git a/gpt_researcher/skills/browser.py b/gpt_researcher/skills/browser.py
new file mode 100644
index 0000000000000000000000000000000000000000..492987633b08f8bcf375ac3b6dd368f9e7415b2a
--- /dev/null
+++ b/gpt_researcher/skills/browser.py
@@ -0,0 +1,88 @@
+from typing import List, Dict
+
+from ..actions.utils import stream_output
+from ..actions.web_scraping import scrape_urls
+from ..scraper.utils import get_image_hash # Add this import
+
+
+class BrowserManager:
+ """Manages context for the researcher agent."""
+
+ def __init__(self, researcher):
+ self.researcher = researcher
+
+ async def browse_urls(self, urls: List[str]) -> List[Dict]:
+ """
+ Scrape content from a list of URLs.
+
+ Args:
+ urls (List[str]): List of URLs to scrape.
+
+ Returns:
+ List[Dict]: List of scraped content results.
+ """
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "scraping_urls",
+ f"🌐 Scraping content from {len(urls)} URLs...",
+ self.researcher.websocket,
+ )
+
+ scraped_content, images = scrape_urls(urls, self.researcher.cfg)
+ self.researcher.add_research_sources(scraped_content)
+ new_images = self.select_top_images(images, k=4) # Select top 2 images
+ self.researcher.add_research_images(new_images)
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "scraping_content",
+ f"📄 Scraped {len(scraped_content)} pages of content",
+ self.researcher.websocket,
+ )
+ await stream_output(
+ "logs",
+ "scraping_images",
+ f"🖼️ Selected {len(new_images)} new images from {len(images)} total images",
+ self.researcher.websocket,
+ True,
+ new_images
+ )
+ await stream_output(
+ "logs",
+ "scraping_complete",
+ f"🌐 Scraping complete",
+ self.researcher.websocket,
+ )
+
+ return scraped_content
+
+ def select_top_images(self, images: List[Dict], k: int = 2) -> List[str]:
+ """
+ Select most relevant images and remove duplicates based on image content.
+
+ Args:
+ images (List[Dict]): List of image dictionaries with 'url' and 'score' keys.
+ k (int): Number of top images to select if no high-score images are found.
+
+ Returns:
+ List[str]: List of selected image URLs.
+ """
+ unique_images = []
+ seen_hashes = set()
+ current_research_images = self.researcher.get_research_images()
+
+ # First, select all score 2 and 3 images
+ high_score_images = [img for img in images if img['score'] >= 2]
+
+ for img in high_score_images + images: # Process high-score images first, then all images
+ img_hash = get_image_hash(img['url'])
+ if img_hash and img_hash not in seen_hashes and img['url'] not in current_research_images:
+ seen_hashes.add(img_hash)
+ unique_images.append(img['url'])
+
+ if len(unique_images) == k:
+ break
+
+ return unique_images
diff --git a/gpt_researcher/skills/context_manager.py b/gpt_researcher/skills/context_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbff03a7e2a27ec4381d54e1803e7222077ee80c
--- /dev/null
+++ b/gpt_researcher/skills/context_manager.py
@@ -0,0 +1,86 @@
+import asyncio
+from typing import List, Dict, Optional, Set
+
+from ..context.compression import ContextCompressor, WrittenContentCompressor, VectorstoreCompressor
+from ..actions.utils import stream_output
+
+
+class ContextManager:
+ """Manages context for the researcher agent."""
+
+ def __init__(self, researcher):
+ self.researcher = researcher
+
+ async def get_similar_content_by_query(self, query, pages):
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "fetching_query_content",
+ f"📚 Getting relevant content based on query: {query}...",
+ self.researcher.websocket,
+ )
+
+ context_compressor = ContextCompressor(
+ documents=pages, embeddings=self.researcher.memory.get_embeddings()
+ )
+ return await context_compressor.async_get_context(
+ query=query, max_results=10, cost_callback=self.researcher.add_costs
+ )
+
+ async def get_similar_content_by_query_with_vectorstore(self, query, filter):
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "fetching_query_format",
+ f" Getting relevant content based on query: {query}...",
+ self.researcher.websocket,
+ )
+ vectorstore_compressor = VectorstoreCompressor(self.researcher.vector_store, filter)
+ return await vectorstore_compressor.async_get_context(query=query, max_results=8)
+
+ async def get_similar_written_contents_by_draft_section_titles(
+ self,
+ current_subtopic: str,
+ draft_section_titles: List[str],
+ written_contents: List[Dict],
+ max_results: int = 10
+ ) -> List[str]:
+ all_queries = [current_subtopic] + draft_section_titles
+
+ async def process_query(query: str) -> Set[str]:
+ return set(await self.__get_similar_written_contents_by_query(query, written_contents))
+
+ results = await asyncio.gather(*[process_query(query) for query in all_queries])
+ relevant_contents = set().union(*results)
+ relevant_contents = list(relevant_contents)[:max_results]
+
+ if relevant_contents and self.researcher.verbose:
+ prettier_contents = "\n".join(relevant_contents)
+ await stream_output(
+ "logs", "relevant_contents_context", f"📃 {prettier_contents}", self.researcher.websocket
+ )
+
+ return relevant_contents
+
+ async def __get_similar_written_contents_by_query(self,
+ query: str,
+ written_contents: List[Dict],
+ similarity_threshold: float = 0.5,
+ max_results: int = 10
+ ) -> List[str]:
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "fetching_relevant_written_content",
+ f"🔎 Getting relevant written content based on query: {query}...",
+ self.researcher.websocket,
+ )
+
+ written_content_compressor = WrittenContentCompressor(
+ documents=written_contents,
+ embeddings=self.researcher.memory.get_embeddings(),
+ similarity_threshold=similarity_threshold
+ )
+ return await written_content_compressor.async_get_context(
+ query=query, max_results=max_results, cost_callback=self.researcher.add_costs
+ )
diff --git a/gpt_researcher/skills/curator.py b/gpt_researcher/skills/curator.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2374e5f41d3d162e2fa849d7e85265b803eb4f9
--- /dev/null
+++ b/gpt_researcher/skills/curator.py
@@ -0,0 +1,78 @@
+from typing import Dict, Optional, List
+import json
+from ..config.config import Config
+from ..utils.llm import create_chat_completion
+from ..prompts import curate_sources as rank_sources_prompt
+from ..actions import stream_output
+
+
+class SourceCurator:
+ """Ranks sources and curates data based on their relevance, credibility and reliability."""
+
+ def __init__(self, researcher):
+ self.researcher = researcher
+
+ async def curate_sources(
+ self,
+ source_data: List,
+ max_results: int = 10,
+ ) -> List:
+ """
+ Rank sources based on research data and guidelines.
+
+ Args:
+ query: The research query/task
+ source_data: List of source documents to rank
+ max_results: Maximum number of top sources to return
+
+ Returns:
+ str: Ranked list of source URLs with reasoning
+ """
+ print(f"\n\nCurating {len(source_data)} sources: {source_data}")
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "research_plan",
+ f"⚖️ Evaluating and curating sources by credibility and relevance...",
+ self.researcher.websocket,
+ )
+
+ response = ""
+ try:
+ response = await create_chat_completion(
+ model=self.researcher.cfg.smart_llm_model,
+ messages=[
+ {"role": "system", "content": f"{self.researcher.role}"},
+ {"role": "user", "content": rank_sources_prompt(
+ self.researcher.query, source_data, max_results)},
+ ],
+ temperature=0.2,
+ max_tokens=8000,
+ llm_provider=self.researcher.cfg.smart_llm_provider,
+ llm_kwargs=self.researcher.cfg.llm_kwargs,
+ cost_callback=self.researcher.add_costs,
+ )
+
+ curated_sources = json.loads(response)
+ print(f"\n\nFinal Curated sources {len(source_data)} sources: {curated_sources}")
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "research_plan",
+ f"🏅 Verified and ranked top {len(curated_sources)} most reliable sources",
+ self.researcher.websocket,
+ )
+
+ return curated_sources
+
+ except Exception as e:
+ print(f"Error in curate_sources from LLM response: {response}")
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "research_plan",
+ f"🚫 Source verification failed: {str(e)}",
+ self.researcher.websocket,
+ )
+ return source_data
diff --git a/gpt_researcher/skills/researcher.py b/gpt_researcher/skills/researcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..60a11415042b9996b07616e924668e7a8ac5d2d8
--- /dev/null
+++ b/gpt_researcher/skills/researcher.py
@@ -0,0 +1,402 @@
+import asyncio
+import random
+import json
+from typing import Dict, Optional
+import logging
+
+from ..actions.utils import stream_output
+from ..actions.query_processing import plan_research_outline, get_search_results
+from ..document import DocumentLoader, OnlineDocumentLoader, LangChainDocumentLoader
+from ..utils.enum import ReportSource, ReportType, Tone
+from ..utils.logging_config import get_json_handler, get_research_logger
+
+
+class ResearchConductor:
+ """Manages and coordinates the research process."""
+
+ def __init__(self, researcher):
+ self.researcher = researcher
+ self.logger = logging.getLogger('research')
+ self.json_handler = get_json_handler()
+
+ async def plan_research(self, query):
+ self.logger.info(f"Planning research for query: {query}")
+
+ await stream_output(
+ "logs",
+ "planning_research",
+ f"🌐 Browsing the web to learn more about the task: {query}...",
+ self.researcher.websocket,
+ )
+
+ search_results = await get_search_results(query, self.researcher.retrievers[0])
+ self.logger.info(f"Initial search results obtained: {len(search_results)} results")
+
+ await stream_output(
+ "logs",
+ "planning_research",
+ f"🤔 Planning the research strategy and subtasks...",
+ self.researcher.websocket,
+ )
+
+ outline = await plan_research_outline(
+ query=query,
+ search_results=search_results,
+ agent_role_prompt=self.researcher.role,
+ cfg=self.researcher.cfg,
+ parent_query=self.researcher.parent_query,
+ report_type=self.researcher.report_type,
+ cost_callback=self.researcher.add_costs,
+ )
+ self.logger.info(f"Research outline planned: {outline}")
+ return outline
+
+ async def conduct_research(self):
+ """Runs the GPT Researcher to conduct research"""
+ if self.json_handler:
+ self.json_handler.update_content("query", self.researcher.query)
+
+ self.logger.info(f"Starting research for query: {self.researcher.query}")
+
+ # Reset visited_urls and source_urls at the start of each research task
+ self.researcher.visited_urls.clear()
+ research_data = []
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "starting_research",
+ f"🔍 Starting the research task for '{self.researcher.query}'...",
+ self.researcher.websocket,
+ )
+
+ if self.researcher.verbose:
+ await stream_output("logs", "agent_generated", self.researcher.agent, self.researcher.websocket)
+
+ # Research for relevant sources based on source types below
+ if self.researcher.source_urls:
+ self.logger.info("Using provided source URLs")
+ research_data = await self._get_context_by_urls(self.researcher.source_urls)
+ if research_data and len(research_data) == 0 and self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "answering_from_memory",
+ f"🧐 I was unable to find relevant context in the provided sources...",
+ self.researcher.websocket,
+ )
+ if self.researcher.complement_source_urls:
+ self.logger.info("Complementing with web search")
+ additional_research = await self._get_context_by_web_search(self.researcher.query)
+ research_data += ' '.join(additional_research)
+
+ elif self.researcher.report_source == ReportSource.Web.value:
+ self.logger.info("Using web search")
+ research_data = await self._get_context_by_web_search(self.researcher.query)
+
+ # ... rest of the conditions ...
+ elif self.researcher.report_source == ReportSource.Local.value:
+ self.logger.info("Using local search")
+ document_data = await DocumentLoader(self.researcher.cfg.doc_path).load()
+ self.logger.info(f"Loaded {len(document_data)} documents")
+ if self.researcher.vector_store:
+ self.researcher.vector_store.load(document_data)
+
+ research_data = await self._get_context_by_web_search(self.researcher.query, document_data)
+
+ # Hybrid search including both local documents and web sources
+ elif self.researcher.report_source == ReportSource.Hybrid.value:
+ if self.researcher.document_urls:
+ document_data = await OnlineDocumentLoader(self.researcher.document_urls).load()
+ else:
+ document_data = await DocumentLoader(self.researcher.cfg.doc_path).load()
+ if self.researcher.vector_store:
+ self.researcher.vector_store.load(document_data)
+ docs_context = await self._get_context_by_web_search(self.researcher.query, document_data)
+ web_context = await self._get_context_by_web_search(self.researcher.query)
+ research_data = f"Context from local documents: {docs_context}\n\nContext from web sources: {web_context}"
+
+ elif self.researcher.report_source == ReportSource.LangChainDocuments.value:
+ langchain_documents_data = await LangChainDocumentLoader(
+ self.researcher.documents
+ ).load()
+ if self.researcher.vector_store:
+ self.researcher.vector_store.load(langchain_documents_data)
+ research_data = await self._get_context_by_web_search(
+ self.researcher.query, langchain_documents_data
+ )
+
+ elif self.researcher.report_source == ReportSource.LangChainVectorStore.value:
+ research_data = await self._get_context_by_vectorstore(self.researcher.query, self.researcher.vector_store_filter)
+
+ # Rank and curate the sources
+ self.researcher.context = research_data
+ if self.researcher.cfg.curate_sources:
+ self.logger.info("Curating sources")
+ self.researcher.context = await self.researcher.source_curator.curate_sources(research_data)
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "research_step_finalized",
+ f"Finalized research step.\n💸 Total Research Costs: ${self.researcher.get_costs()}",
+ self.researcher.websocket,
+ )
+ if self.json_handler:
+ self.json_handler.update_content("costs", self.researcher.get_costs())
+ self.json_handler.update_content("context", self.researcher.context)
+
+ self.logger.info(f"Research completed. Context size: {len(str(self.researcher.context))}")
+ return self.researcher.context
+
+ async def _get_context_by_urls(self, urls):
+ """Scrapes and compresses the context from the given urls"""
+ self.logger.info(f"Getting context from URLs: {urls}")
+
+ new_search_urls = await self._get_new_urls(urls)
+ self.logger.info(f"New URLs to process: {new_search_urls}")
+
+ scraped_content = await self.researcher.scraper_manager.browse_urls(new_search_urls)
+ self.logger.info(f"Scraped content from {len(scraped_content)} URLs")
+
+ if self.researcher.vector_store:
+ self.logger.info("Loading content into vector store")
+ self.researcher.vector_store.load(scraped_content)
+
+ context = await self.researcher.context_manager.get_similar_content_by_query(
+ self.researcher.query, scraped_content
+ )
+ self.logger.info(f"Generated context length: {len(context)}")
+ return context
+
+ # Add logging to other methods similarly...
+
+ async def _get_context_by_vectorstore(self, query, filter: Optional[dict] = None):
+ """
+ Generates the context for the research task by searching the vectorstore
+ Returns:
+ context: List of context
+ """
+ context = []
+ # Generate Sub-Queries including original query
+ sub_queries = await self.plan_research(query)
+ # If this is not part of a sub researcher, add original query to research for better results
+ if self.researcher.report_type != "subtopic_report":
+ sub_queries.append(query)
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "subqueries",
+ f"🗂️ I will conduct my research based on the following queries: {sub_queries}...",
+ self.researcher.websocket,
+ True,
+ sub_queries,
+ )
+
+ # Using asyncio.gather to process the sub_queries asynchronously
+ context = await asyncio.gather(
+ *[
+ self._process_sub_query_with_vectorstore(sub_query, filter)
+ for sub_query in sub_queries
+ ]
+ )
+ return context
+
+ async def _get_context_by_web_search(self, query, scraped_data: list = []):
+ """
+ Generates the context for the research task by searching the query and scraping the results
+ Returns:
+ context: List of context
+ """
+ self.logger.info(f"Starting web search for query: {query}")
+
+ # Generate Sub-Queries including original query
+ sub_queries = await self.plan_research(query)
+ self.logger.info(f"Generated sub-queries: {sub_queries}")
+
+ # If this is not part of a sub researcher, add original query to research for better results
+ if self.researcher.report_type != "subtopic_report":
+ sub_queries.append(query)
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "subqueries",
+ f"🗂️ I will conduct my research based on the following queries: {sub_queries}...",
+ self.researcher.websocket,
+ True,
+ sub_queries,
+ )
+
+ # Using asyncio.gather to process the sub_queries asynchronously
+ try:
+ context = await asyncio.gather(
+ *[
+ self._process_sub_query(sub_query, scraped_data)
+ for sub_query in sub_queries
+ ]
+ )
+ self.logger.info(f"Gathered context from {len(context)} sub-queries")
+ # Filter out empty results and join the context
+ context = [c for c in context if c]
+ if context:
+ combined_context = " ".join(context)
+ self.logger.info(f"Combined context size: {len(combined_context)}")
+ return combined_context
+ return []
+ except Exception as e:
+ self.logger.error(f"Error during web search: {e}", exc_info=True)
+ return []
+
+ async def _process_sub_query(self, sub_query: str, scraped_data: list = []):
+ """Takes in a sub query and scrapes urls based on it and gathers context."""
+ if self.json_handler:
+ self.json_handler.log_event("sub_query", {
+ "query": sub_query,
+ "scraped_data_size": len(scraped_data)
+ })
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "running_subquery_research",
+ f"\n🔍 Running research for '{sub_query}'...",
+ self.researcher.websocket,
+ )
+
+ try:
+ if not scraped_data:
+ scraped_data = await self._scrape_data_by_urls(sub_query)
+ self.logger.info(f"Scraped data size: {len(scraped_data)}")
+
+ content = await self.researcher.context_manager.get_similar_content_by_query(sub_query, scraped_data)
+ self.logger.info(f"Content found for sub-query: {len(str(content)) if content else 0} chars")
+
+ if content and self.researcher.verbose:
+ await stream_output(
+ "logs", "subquery_context_window", f"📃 {content}", self.researcher.websocket
+ )
+ elif self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "subquery_context_not_found",
+ f"🤷 No content found for '{sub_query}'...",
+ self.researcher.websocket,
+ )
+ if content:
+ if self.json_handler:
+ self.json_handler.log_event("content_found", {
+ "sub_query": sub_query,
+ "content_size": len(content)
+ })
+ return content
+ except Exception as e:
+ self.logger.error(f"Error processing sub-query {sub_query}: {e}", exc_info=True)
+ return ""
+
+ async def _process_sub_query_with_vectorstore(self, sub_query: str, filter: Optional[dict] = None):
+ """Takes in a sub query and gathers context from the user provided vector store
+
+ Args:
+ sub_query (str): The sub-query generated from the original query
+
+ Returns:
+ str: The context gathered from search
+ """
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "running_subquery_with_vectorstore_research",
+ f"\n🔍 Running research for '{sub_query}'...",
+ self.researcher.websocket,
+ )
+
+ content = await self.researcher.context_manager.get_similar_content_by_query_with_vectorstore(sub_query, filter)
+
+ if content and self.researcher.verbose:
+ await stream_output(
+ "logs", "subquery_context_window", f"📃 {content}", self.researcher.websocket
+ )
+ elif self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "subquery_context_not_found",
+ f"🤷 No content found for '{sub_query}'...",
+ self.researcher.websocket,
+ )
+ return content
+
+ async def _get_new_urls(self, url_set_input):
+ """Gets the new urls from the given url set.
+ Args: url_set_input (set[str]): The url set to get the new urls from
+ Returns: list[str]: The new urls from the given url set
+ """
+
+ new_urls = []
+ for url in url_set_input:
+ if url not in self.researcher.visited_urls:
+ self.researcher.visited_urls.add(url)
+ new_urls.append(url)
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "added_source_url",
+ f"✅ Added source url to research: {url}\n",
+ self.researcher.websocket,
+ True,
+ url,
+ )
+
+ return new_urls
+
+ async def _search_relevant_source_urls(self, query):
+ new_search_urls = []
+
+ # Iterate through all retrievers
+ for retriever_class in self.researcher.retrievers:
+ # Instantiate the retriever with the sub-query
+ retriever = retriever_class(query)
+
+ # Perform the search using the current retriever
+ search_results = await asyncio.to_thread(
+ retriever.search, max_results=self.researcher.cfg.max_search_results_per_query
+ )
+
+ # Collect new URLs from search results
+ search_urls = [url.get("href") for url in search_results]
+ new_search_urls.extend(search_urls)
+
+ # Get unique URLs
+ new_search_urls = await self._get_new_urls(new_search_urls)
+ random.shuffle(new_search_urls)
+
+ return new_search_urls
+
+ async def _scrape_data_by_urls(self, sub_query):
+ """
+ Runs a sub-query across multiple retrievers and scrapes the resulting URLs.
+
+ Args:
+ sub_query (str): The sub-query to search for.
+
+ Returns:
+ list: A list of scraped content results.
+ """
+ new_search_urls = await self._search_relevant_source_urls(sub_query)
+
+ # Log the research process if verbose mode is on
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "researching",
+ f"🤔 Researching for relevant information across multiple sources...\n",
+ self.researcher.websocket,
+ )
+
+ # Scrape the new URLs
+ scraped_content = await self.researcher.scraper_manager.browse_urls(new_search_urls)
+
+ if self.researcher.vector_store:
+ self.researcher.vector_store.load(scraped_content)
+
+ return scraped_content
diff --git a/gpt_researcher/skills/writer.py b/gpt_researcher/skills/writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..690489d1dd17f3a2c139c0f455ce1eb9518ca3f0
--- /dev/null
+++ b/gpt_researcher/skills/writer.py
@@ -0,0 +1,209 @@
+from typing import Dict, Optional
+import json
+
+from ..utils.llm import construct_subtopics
+from ..actions import (
+ stream_output,
+ generate_report,
+ generate_draft_section_titles,
+ write_report_introduction,
+ write_conclusion
+)
+
+
+class ReportGenerator:
+ """Generates reports based on research data."""
+
+ def __init__(self, researcher):
+ self.researcher = researcher
+ self.research_params = {
+ "query": self.researcher.query,
+ "agent_role_prompt": self.researcher.cfg.agent_role or self.researcher.role,
+ "report_type": self.researcher.report_type,
+ "report_source": self.researcher.report_source,
+ "tone": self.researcher.tone,
+ "websocket": self.researcher.websocket,
+ "cfg": self.researcher.cfg,
+ "headers": self.researcher.headers,
+ }
+
+ async def write_report(self, existing_headers: list = [], relevant_written_contents: list = [], ext_context=None) -> str:
+ """
+ Write a report based on existing headers and relevant contents.
+
+ Args:
+ existing_headers (list): List of existing headers.
+ relevant_written_contents (list): List of relevant written contents.
+ ext_context (Optional): External context, if any.
+
+ Returns:
+ str: The generated report.
+ """
+ # send the selected images prior to writing report
+ research_images = self.researcher.get_research_images()
+ if research_images:
+ await stream_output(
+ "images",
+ "selected_images",
+ json.dumps(research_images),
+ self.researcher.websocket,
+ True,
+ research_images
+ )
+
+ context = ext_context or self.researcher.context
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "writing_report",
+ f"✍️ Writing report for '{self.researcher.query}'...",
+ self.researcher.websocket,
+ )
+
+ report_params = self.research_params.copy()
+ report_params["context"] = context
+
+ if self.researcher.report_type == "subtopic_report":
+ report_params.update({
+ "main_topic": self.researcher.parent_query,
+ "existing_headers": existing_headers,
+ "relevant_written_contents": relevant_written_contents,
+ "cost_callback": self.researcher.add_costs,
+ })
+ else:
+ report_params["cost_callback"] = self.researcher.add_costs
+
+ report = await generate_report(**report_params)
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "report_written",
+ f"📝 Report written for '{self.researcher.query}'",
+ self.researcher.websocket,
+ )
+
+ return report
+
+ async def write_report_conclusion(self, report_content: str) -> str:
+ """
+ Write the conclusion for the report.
+
+ Args:
+ report_content (str): The content of the report.
+
+ Returns:
+ str: The generated conclusion.
+ """
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "writing_conclusion",
+ f"✍️ Writing conclusion for '{self.researcher.query}'...",
+ self.researcher.websocket,
+ )
+
+ conclusion = await write_conclusion(
+ query=self.researcher.query,
+ context=report_content,
+ config=self.researcher.cfg,
+ agent_role_prompt=self.researcher.cfg.agent_role or self.researcher.role,
+ cost_callback=self.researcher.add_costs,
+ websocket=self.researcher.websocket,
+ )
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "conclusion_written",
+ f"📝 Conclusion written for '{self.researcher.query}'",
+ self.researcher.websocket,
+ )
+
+ return conclusion
+
+ async def write_introduction(self):
+ """Write the introduction section of the report."""
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "writing_introduction",
+ f"✍️ Writing introduction for '{self.researcher.query}'...",
+ self.researcher.websocket,
+ )
+
+ introduction = await write_report_introduction(
+ query=self.researcher.query,
+ context=self.researcher.context,
+ agent_role_prompt=self.researcher.cfg.agent_role or self.researcher.role,
+ config=self.researcher.cfg,
+ websocket=self.researcher.websocket,
+ cost_callback=self.researcher.add_costs,
+ )
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "introduction_written",
+ f"📝 Introduction written for '{self.researcher.query}'",
+ self.researcher.websocket,
+ )
+
+ return introduction
+
+ async def get_subtopics(self):
+ """Retrieve subtopics for the research."""
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "generating_subtopics",
+ f"🌳 Generating subtopics for '{self.researcher.query}'...",
+ self.researcher.websocket,
+ )
+
+ subtopics = await construct_subtopics(
+ task=self.researcher.query,
+ data=self.researcher.context,
+ config=self.researcher.cfg,
+ subtopics=self.researcher.subtopics,
+ )
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "subtopics_generated",
+ f"📊 Subtopics generated for '{self.researcher.query}'",
+ self.researcher.websocket,
+ )
+
+ return subtopics
+
+ async def get_draft_section_titles(self, current_subtopic: str):
+ """Generate draft section titles for the report."""
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "generating_draft_sections",
+ f"📑 Generating draft section titles for '{self.researcher.query}'...",
+ self.researcher.websocket,
+ )
+
+ draft_section_titles = await generate_draft_section_titles(
+ query=self.researcher.query,
+ current_subtopic=current_subtopic,
+ context=self.researcher.context,
+ role=self.researcher.cfg.agent_role or self.researcher.role,
+ websocket=self.researcher.websocket,
+ config=self.researcher.cfg,
+ cost_callback=self.researcher.add_costs,
+ )
+
+ if self.researcher.verbose:
+ await stream_output(
+ "logs",
+ "draft_sections_generated",
+ f"🗂️ Draft section titles generated for '{self.researcher.query}'",
+ self.researcher.websocket,
+ )
+
+ return draft_section_titles
diff --git a/gpt_researcher/utils/__init__.py b/gpt_researcher/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_researcher/utils/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/utils/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9830a25581d709b6b9d4edc48c88d080c0cd9d74
Binary files /dev/null and b/gpt_researcher/utils/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/utils/__pycache__/costs.cpython-312.pyc b/gpt_researcher/utils/__pycache__/costs.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b0cda3a05273ad1644bba61b07c6f3f9676dab1
Binary files /dev/null and b/gpt_researcher/utils/__pycache__/costs.cpython-312.pyc differ
diff --git a/gpt_researcher/utils/__pycache__/enum.cpython-312.pyc b/gpt_researcher/utils/__pycache__/enum.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..905fb1821d973b3cc3bd5b25043eaf0eece9e0b5
Binary files /dev/null and b/gpt_researcher/utils/__pycache__/enum.cpython-312.pyc differ
diff --git a/gpt_researcher/utils/__pycache__/llm.cpython-312.pyc b/gpt_researcher/utils/__pycache__/llm.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4d0bdff59eb72a6fa2a0256d526666cb2f61ad0
Binary files /dev/null and b/gpt_researcher/utils/__pycache__/llm.cpython-312.pyc differ
diff --git a/gpt_researcher/utils/__pycache__/logger.cpython-312.pyc b/gpt_researcher/utils/__pycache__/logger.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4693a59b3f116353e872e7e7fe3dd8e410a9df54
Binary files /dev/null and b/gpt_researcher/utils/__pycache__/logger.cpython-312.pyc differ
diff --git a/gpt_researcher/utils/__pycache__/logging_config.cpython-312.pyc b/gpt_researcher/utils/__pycache__/logging_config.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85a63f9300591c80bbb9bead8783683f2d915614
Binary files /dev/null and b/gpt_researcher/utils/__pycache__/logging_config.cpython-312.pyc differ
diff --git a/gpt_researcher/utils/__pycache__/validators.cpython-312.pyc b/gpt_researcher/utils/__pycache__/validators.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f5e8c013e3899256261821a8b29b4a5296296f0
Binary files /dev/null and b/gpt_researcher/utils/__pycache__/validators.cpython-312.pyc differ
diff --git a/gpt_researcher/utils/costs.py b/gpt_researcher/utils/costs.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ddb32b3bdd70a229007fa656b802edc4c24f18a
--- /dev/null
+++ b/gpt_researcher/utils/costs.py
@@ -0,0 +1,25 @@
+import tiktoken
+
+# Per OpenAI Pricing Page: https://openai.com/api/pricing/
+ENCODING_MODEL = "o200k_base"
+INPUT_COST_PER_TOKEN = 0.000005
+OUTPUT_COST_PER_TOKEN = 0.000015
+IMAGE_INFERENCE_COST = 0.003825
+EMBEDDING_COST = 0.02 / 1000000 # Assumes new ada-3-small
+
+
+# Cost estimation is via OpenAI libraries and models. May vary for other models
+def estimate_llm_cost(input_content: str, output_content: str) -> float:
+ encoding = tiktoken.get_encoding(ENCODING_MODEL)
+ input_tokens = encoding.encode(input_content)
+ output_tokens = encoding.encode(output_content)
+ input_costs = len(input_tokens) * INPUT_COST_PER_TOKEN
+ output_costs = len(output_tokens) * OUTPUT_COST_PER_TOKEN
+ return input_costs + output_costs
+
+
+def estimate_embedding_cost(model, docs):
+ encoding = tiktoken.encoding_for_model(model)
+ total_tokens = sum(len(encoding.encode(str(doc))) for doc in docs)
+ return total_tokens * EMBEDDING_COST
+
diff --git a/gpt_researcher/utils/enum.py b/gpt_researcher/utils/enum.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bdb44d63b4e436e3ff0e0084c452edef707c306
--- /dev/null
+++ b/gpt_researcher/utils/enum.py
@@ -0,0 +1,49 @@
+from enum import Enum
+
+
+class ReportType(Enum):
+ ResearchReport = "research_report"
+ ResourceReport = "resource_report"
+ OutlineReport = "outline_report"
+ CustomReport = "custom_report"
+ DetailedReport = "detailed_report"
+ SubtopicReport = "subtopic_report"
+
+
+class ReportSource(Enum):
+ Web = "web"
+ Local = "local"
+ LangChainDocuments = "langchain_documents"
+ LangChainVectorStore = "langchain_vectorstore"
+ Static = "static"
+ Hybrid = "hybrid"
+
+
+class Tone(Enum):
+ Objective = "Objective (impartial and unbiased presentation of facts and findings)"
+ Formal = "Formal (adheres to academic standards with sophisticated language and structure)"
+ Analytical = (
+ "Analytical (critical evaluation and detailed examination of data and theories)"
+ )
+ Persuasive = (
+ "Persuasive (convincing the audience of a particular viewpoint or argument)"
+ )
+ Informative = (
+ "Informative (providing clear and comprehensive information on a topic)"
+ )
+ Explanatory = "Explanatory (clarifying complex concepts and processes)"
+ Descriptive = (
+ "Descriptive (detailed depiction of phenomena, experiments, or case studies)"
+ )
+ Critical = "Critical (judging the validity and relevance of the research and its conclusions)"
+ Comparative = "Comparative (juxtaposing different theories, data, or methods to highlight differences and similarities)"
+ Speculative = "Speculative (exploring hypotheses and potential implications or future research directions)"
+ Reflective = "Reflective (considering the research process and personal insights or experiences)"
+ Narrative = (
+ "Narrative (telling a story to illustrate research findings or methodologies)"
+ )
+ Humorous = "Humorous (light-hearted and engaging, usually to make the content more relatable)"
+ Optimistic = "Optimistic (highlighting positive findings and potential benefits)"
+ Pessimistic = (
+ "Pessimistic (focusing on limitations, challenges, or negative outcomes)"
+ )
diff --git a/gpt_researcher/utils/llm.py b/gpt_researcher/utils/llm.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c17d42ab007d13606f379386281c9de38c4a461
--- /dev/null
+++ b/gpt_researcher/utils/llm.py
@@ -0,0 +1,123 @@
+# libraries
+from __future__ import annotations
+
+import json
+import logging
+from typing import Optional, Any, Dict
+
+from colorama import Fore, Style
+from langchain.output_parsers import PydanticOutputParser
+from langchain.prompts import PromptTemplate
+
+from ..prompts import generate_subtopics_prompt
+from .costs import estimate_llm_cost
+from .validators import Subtopics
+
+
+def get_llm(llm_provider, **kwargs):
+ from gpt_researcher.llm_provider import GenericLLMProvider
+ return GenericLLMProvider.from_provider(llm_provider, **kwargs)
+
+
+async def create_chat_completion(
+ messages: list, # type: ignore
+ model: Optional[str] = None,
+ temperature: Optional[float] = 0.4,
+ max_tokens: Optional[int] = 4000,
+ llm_provider: Optional[str] = None,
+ stream: Optional[bool] = False,
+ websocket: Any | None = None,
+ llm_kwargs: Dict[str, Any] | None = None,
+ cost_callback: callable = None
+) -> str:
+ """Create a chat completion using the OpenAI API
+ Args:
+ messages (list[dict[str, str]]): The messages to send to the chat completion
+ model (str, optional): The model to use. Defaults to None.
+ temperature (float, optional): The temperature to use. Defaults to 0.4.
+ max_tokens (int, optional): The max tokens to use. Defaults to 4000.
+ stream (bool, optional): Whether to stream the response. Defaults to False.
+ llm_provider (str, optional): The LLM Provider to use.
+ webocket (WebSocket): The websocket used in the currect request,
+ cost_callback: Callback function for updating cost
+ Returns:
+ str: The response from the chat completion
+ """
+ # validate input
+ if model is None:
+ raise ValueError("Model cannot be None")
+ if max_tokens is not None and max_tokens > 16001:
+ raise ValueError(
+ f"Max tokens cannot be more than 16,000, but got {max_tokens}")
+
+ # Get the provider from supported providers
+ provider = get_llm(llm_provider, model=model, temperature=temperature,
+ max_tokens=max_tokens, **(llm_kwargs or {}))
+
+ response = ""
+ # create response
+ for _ in range(10): # maximum of 10 attempts
+ response = await provider.get_chat_response(
+ messages, stream, websocket
+ )
+
+ if cost_callback:
+ llm_costs = estimate_llm_cost(str(messages), response)
+ cost_callback(llm_costs)
+
+ return response
+
+ logging.error(f"Failed to get response from {llm_provider} API")
+ raise RuntimeError(f"Failed to get response from {llm_provider} API")
+
+
+async def construct_subtopics(task: str, data: str, config, subtopics: list = []) -> list:
+ """
+ Construct subtopics based on the given task and data.
+
+ Args:
+ task (str): The main task or topic.
+ data (str): Additional data for context.
+ config: Configuration settings.
+ subtopics (list, optional): Existing subtopics. Defaults to [].
+
+ Returns:
+ list: A list of constructed subtopics.
+ """
+ try:
+ parser = PydanticOutputParser(pydantic_object=Subtopics)
+
+ prompt = PromptTemplate(
+ template=generate_subtopics_prompt(),
+ input_variables=["task", "data", "subtopics", "max_subtopics"],
+ partial_variables={
+ "format_instructions": parser.get_format_instructions()},
+ )
+
+ print(f"\n🤖 Calling {config.smart_llm_model}...\n")
+
+ temperature = config.temperature
+ # temperature = 0 # Note: temperature throughout the code base is currently set to Zero
+ provider = get_llm(
+ config.smart_llm_provider,
+ model=config.smart_llm_model,
+ temperature=temperature,
+ max_tokens=config.smart_token_limit,
+ **config.llm_kwargs,
+ )
+ model = provider.llm
+
+ chain = prompt | model | parser
+
+ output = chain.invoke({
+ "task": task,
+ "data": data,
+ "subtopics": subtopics,
+ "max_subtopics": config.max_subtopics
+ })
+
+ return output
+
+ except Exception as e:
+ print("Exception in parsing subtopics : ", e)
+ return subtopics
diff --git a/gpt_researcher/utils/logger.py b/gpt_researcher/utils/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..31ba0dc04b2f02303100531b23afa5a1c852aa04
--- /dev/null
+++ b/gpt_researcher/utils/logger.py
@@ -0,0 +1,96 @@
+import logging
+import sys
+from copy import copy
+from typing import Literal
+
+import click
+
+TRACE_LOG_LEVEL = 5
+
+
+def get_formatted_logger():
+ """Return a formatted logger."""
+ logger = logging.getLogger("scraper")
+ # Set the logging level
+ logger.setLevel(logging.INFO)
+
+ # Check if the logger already has handlers to avoid duplicates
+ if not logger.handlers:
+ # Create a handler
+ handler = logging.StreamHandler()
+
+ # Create a formatter using DefaultFormatter
+ formatter = DefaultFormatter(
+ "%(levelprefix)s [%(asctime)s] %(message)s",
+ datefmt="%H:%M:%S"
+ )
+
+ # Set the formatter for the handler
+ handler.setFormatter(formatter)
+
+ # Add the handler to the logger
+ logger.addHandler(handler)
+
+ # Disable propagation to prevent duplicate logging from parent loggers
+ logger.propagate = False
+
+ return logger
+
+
+class ColourizedFormatter(logging.Formatter):
+ """
+ A custom log formatter class that:
+
+ * Outputs the LOG_LEVEL with an appropriate color.
+ * If a log call includes an `extras={"color_message": ...}` it will be used
+ for formatting the output, instead of the plain text message.
+ """
+
+ level_name_colors = {
+ TRACE_LOG_LEVEL: lambda level_name: click.style(str(level_name), fg="blue"),
+ logging.DEBUG: lambda level_name: click.style(str(level_name), fg="cyan"),
+ logging.INFO: lambda level_name: click.style(str(level_name), fg="green"),
+ logging.WARNING: lambda level_name: click.style(str(level_name), fg="yellow"),
+ logging.ERROR: lambda level_name: click.style(str(level_name), fg="red"),
+ logging.CRITICAL: lambda level_name: click.style(str(level_name), fg="bright_red"),
+ }
+
+ def __init__(
+ self,
+ fmt: str | None = None,
+ datefmt: str | None = None,
+ style: Literal["%", "{", "$"] = "%",
+ use_colors: bool | None = None,
+ ):
+ if use_colors in (True, False):
+ self.use_colors = use_colors
+ else:
+ self.use_colors = sys.stdout.isatty()
+ super().__init__(fmt=fmt, datefmt=datefmt, style=style)
+
+ def color_level_name(self, level_name: str, level_no: int) -> str:
+ def default(level_name: str) -> str:
+ return str(level_name) # pragma: no cover
+
+ func = self.level_name_colors.get(level_no, default)
+ return func(level_name)
+
+ def should_use_colors(self) -> bool:
+ return True # pragma: no cover
+
+ def formatMessage(self, record: logging.LogRecord) -> str:
+ recordcopy = copy(record)
+ levelname = recordcopy.levelname
+ seperator = " " * (8 - len(recordcopy.levelname))
+ if self.use_colors:
+ levelname = self.color_level_name(levelname, recordcopy.levelno)
+ if "color_message" in recordcopy.__dict__:
+ recordcopy.msg = recordcopy.__dict__["color_message"]
+ recordcopy.__dict__["message"] = recordcopy.getMessage()
+ recordcopy.__dict__["levelprefix"] = levelname + ":" + seperator
+ return super().formatMessage(recordcopy)
+
+
+class DefaultFormatter(ColourizedFormatter):
+ def should_use_colors(self) -> bool:
+ return sys.stderr.isatty() # pragma: no cover
diff --git a/gpt_researcher/utils/logging_config.py b/gpt_researcher/utils/logging_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..03bf1723294d3a516000e945e699fa2f442830f3
--- /dev/null
+++ b/gpt_researcher/utils/logging_config.py
@@ -0,0 +1,82 @@
+import logging
+import json
+import os
+from datetime import datetime
+from pathlib import Path
+
+class JSONResearchHandler:
+ def __init__(self, json_file):
+ self.json_file = json_file
+ self.research_data = {
+ "timestamp": datetime.now().isoformat(),
+ "events": [],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0
+ }
+ }
+
+ def log_event(self, event_type: str, data: dict):
+ self.research_data["events"].append({
+ "timestamp": datetime.now().isoformat(),
+ "type": event_type,
+ "data": data
+ })
+ self._save_json()
+
+ def update_content(self, key: str, value):
+ self.research_data["content"][key] = value
+ self._save_json()
+
+ def _save_json(self):
+ with open(self.json_file, 'w') as f:
+ json.dump(self.research_data, f, indent=2)
+
+def setup_research_logging():
+ # Create logs directory if it doesn't exist
+ logs_dir = Path("logs")
+ logs_dir.mkdir(exist_ok=True)
+
+ # Generate timestamp for log files
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+ # Create log file paths
+ log_file = logs_dir / f"research_{timestamp}.log"
+ json_file = logs_dir / f"research_{timestamp}.json"
+
+ # Configure file handler for research logs
+ file_handler = logging.FileHandler(log_file)
+ file_handler.setLevel(logging.INFO)
+ file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+
+ # Get research logger and configure it
+ research_logger = logging.getLogger('research')
+ research_logger.setLevel(logging.INFO)
+
+ # Remove any existing handlers to avoid duplicates
+ research_logger.handlers.clear()
+
+ # Add file handler
+ research_logger.addHandler(file_handler)
+
+ # Add stream handler for console output
+ console_handler = logging.StreamHandler()
+ console_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+ research_logger.addHandler(console_handler)
+
+ # Prevent propagation to root logger to avoid duplicate logs
+ research_logger.propagate = False
+
+ # Create JSON handler
+ json_handler = JSONResearchHandler(json_file)
+
+ return str(log_file), str(json_file), research_logger, json_handler
+
+def get_research_logger():
+ return logging.getLogger('research')
+
+def get_json_handler():
+ return getattr(logging.getLogger('research'), 'json_handler', None)
diff --git a/gpt_researcher/utils/validators.py b/gpt_researcher/utils/validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dcce6fbd19b2cfe6155728ac1f81dfad5dcd130
--- /dev/null
+++ b/gpt_researcher/utils/validators.py
@@ -0,0 +1,9 @@
+from typing import List
+
+from pydantic import BaseModel, Field
+
+class Subtopic(BaseModel):
+ task: str = Field(description="Task name", min_length=1)
+
+class Subtopics(BaseModel):
+ subtopics: List[Subtopic] = []
diff --git a/gpt_researcher/vector_store/__init__.py b/gpt_researcher/vector_store/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a00561e621a9bb2e1f3272cb88092f69f4e018ee
--- /dev/null
+++ b/gpt_researcher/vector_store/__init__.py
@@ -0,0 +1,3 @@
+from .vector_store import VectorStoreWrapper
+
+__all__ = ['VectorStoreWrapper']
\ No newline at end of file
diff --git a/gpt_researcher/vector_store/__pycache__/__init__.cpython-312.pyc b/gpt_researcher/vector_store/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36494849ab009f96ce945678aeff7d89db2a8cd9
Binary files /dev/null and b/gpt_researcher/vector_store/__pycache__/__init__.cpython-312.pyc differ
diff --git a/gpt_researcher/vector_store/__pycache__/vector_store.cpython-312.pyc b/gpt_researcher/vector_store/__pycache__/vector_store.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ae4d745223ecf34ee1ce47d6f9d640274cdd1cf
Binary files /dev/null and b/gpt_researcher/vector_store/__pycache__/vector_store.cpython-312.pyc differ
diff --git a/gpt_researcher/vector_store/vector_store.py b/gpt_researcher/vector_store/vector_store.py
new file mode 100644
index 0000000000000000000000000000000000000000..eacffb03f756150fb78659355fb80d31e4b6a8e2
--- /dev/null
+++ b/gpt_researcher/vector_store/vector_store.py
@@ -0,0 +1,43 @@
+"""
+Wrapper for langchain vector store
+"""
+from typing import List, Dict
+
+from langchain.docstore.document import Document
+from langchain.vectorstores import VectorStore
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+
+class VectorStoreWrapper:
+ """
+ A Wrapper for LangchainVectorStore to handle GPT-Researcher Document Type
+ """
+ def __init__(self, vector_store : VectorStore):
+ self.vector_store = vector_store
+
+ def load(self, documents):
+ """
+ Load the documents into vector_store
+ Translate to langchain doc type, split to chunks then load
+ """
+ langchain_documents = self._create_langchain_documents(documents)
+ splitted_documents = self._split_documents(langchain_documents)
+ self.vector_store.add_documents(splitted_documents)
+
+ def _create_langchain_documents(self, data: List[Dict[str, str]]) -> List[Document]:
+ """Convert GPT Researcher Document to Langchain Document"""
+ return [Document(page_content=item["raw_content"], metadata={"source": item["url"]}) for item in data]
+
+ def _split_documents(self, documents: List[Document], chunk_size: int = 1000, chunk_overlap: int = 200) -> List[Document]:
+ """
+ Split documents into smaller chunks
+ """
+ text_splitter = RecursiveCharacterTextSplitter(
+ chunk_size=chunk_size,
+ chunk_overlap=chunk_overlap,
+ )
+ return text_splitter.split_documents(documents)
+
+ async def asimilarity_search(self, query, k, filter):
+ """Return query by vector store"""
+ results = await self.vector_store.asimilarity_search(query=query, k=k, filter=filter)
+ return results
diff --git a/langgraph.json b/langgraph.json
new file mode 100644
index 0000000000000000000000000000000000000000..d8285d3f8606264704d07a4c2dda8f1ef3ee39a9
--- /dev/null
+++ b/langgraph.json
@@ -0,0 +1,10 @@
+{
+ "python_version": "3.11",
+ "dependencies": [
+ "./multi_agents"
+ ],
+ "graphs": {
+ "agent": "./multi_agents/agent.py:graph"
+ },
+ "env": ".env"
+}
\ No newline at end of file
diff --git a/logs/app.log b/logs/app.log
new file mode 100644
index 0000000000000000000000000000000000000000..d9ed149c4e6257a0f160b6bdde9d8926dc9bec00
--- /dev/null
+++ b/logs/app.log
@@ -0,0 +1,333 @@
+2025-01-20 01:39:03,815 - research - INFO - Starting research for query: How did KKR Real Estate perform during Q3 2024
+2025-01-20 01:39:03,833 - research - INFO - Using web search
+2025-01-20 01:39:03,834 - research - INFO - Starting web search for query: How did KKR Real Estate perform during Q3 2024
+2025-01-20 01:39:03,834 - research - INFO - Planning research for query: How did KKR Real Estate perform during Q3 2024
+2025-01-20 01:45:34,073 - research - INFO - Starting research for query: How did KKR Real Estate perform during Q3 2024
+2025-01-20 01:45:34,091 - research - INFO - Using web search
+2025-01-20 01:45:34,091 - research - INFO - Starting web search for query: How did KKR Real Estate perform during Q3 2024
+2025-01-20 01:45:34,091 - research - INFO - Planning research for query: How did KKR Real Estate perform during Q3 2024
+2025-01-20 01:45:38,522 - research - INFO - Initial search results obtained: 7 results
+2025-01-20 01:45:42,220 - research - INFO - Research outline planned: ['KKR Real Estate Finance Trust (KREF) Q3 2024 earnings report', 'KKR Real Estate Q3 2024 performance overview', "Analysis of KKR Real Estate's $4.5 billion 2024 US equity investments", 'KKR Real Estate Finance Trust (KREF) Q3 2024 financial results investor presentation']
+2025-01-20 01:45:42,220 - research - INFO - Generated sub-queries: ['KKR Real Estate Finance Trust (KREF) Q3 2024 earnings report', 'KKR Real Estate Q3 2024 performance overview', "Analysis of KKR Real Estate's $4.5 billion 2024 US equity investments", 'KKR Real Estate Finance Trust (KREF) Q3 2024 financial results investor presentation']
+2025-01-20 01:45:47,993 - research - INFO - Scraped data size: 5
+2025-01-20 01:45:51,906 - research - INFO - Scraped data size: 4
+2025-01-20 01:45:54,140 - research - INFO - Scraped data size: 4
+2025-01-20 01:45:56,452 - research - INFO - Scraped data size: 2
+2025-01-20 01:46:01,509 - research - INFO - Scraped data size: 1
+2025-01-20 01:46:09,111 - research - INFO - Content found for sub-query: 11485 chars
+2025-01-20 01:46:11,116 - research - INFO - Content found for sub-query: 10478 chars
+2025-01-20 01:46:13,632 - research - INFO - Content found for sub-query: 11596 chars
+2025-01-20 01:46:18,691 - research - INFO - Content found for sub-query: 11097 chars
+2025-01-20 01:46:21,107 - research - INFO - Content found for sub-query: 10857 chars
+2025-01-20 01:46:21,124 - research - INFO - Gathered context from 5 sub-queries
+2025-01-20 01:46:21,124 - research - INFO - Combined context size: 55517
+2025-01-20 01:46:21,139 - research - INFO - Research completed. Context size: 55517
+2025-01-20 01:47:02,796 - langchain_google_genai._function_utils - WARNING - Key 'title' is not supported in schema, ignoring
+2025-01-20 01:47:02,796 - langchain_google_genai._function_utils - WARNING - Key 'title' is not supported in schema, ignoring
+2025-01-20 01:57:12,262 - research - INFO - Starting research for query: What is the reason for rising dollar value and what role did Japan played into this?
+2025-01-20 01:57:12,297 - research - INFO - Using web search
+2025-01-20 01:57:12,297 - research - INFO - Starting web search for query: What is the reason for rising dollar value and what role did Japan played into this?
+2025-01-20 01:57:12,297 - research - INFO - Planning research for query: What is the reason for rising dollar value and what role did Japan played into this?
+2025-01-20 01:57:16,749 - research - INFO - Initial search results obtained: 7 results
+2025-01-20 01:57:20,133 - research - INFO - Research outline planned: ['Japanese yen and US dollar exchange rate trends 2022-2025', 'Impact of Japanese monetary policy on USD strength 2024-2025', 'Role of yen carry trade in recent dollar appreciation', 'Correlation between Japanese inflation and USD/JPY exchange rate']
+2025-01-20 01:57:20,134 - research - INFO - Generated sub-queries: ['Japanese yen and US dollar exchange rate trends 2022-2025', 'Impact of Japanese monetary policy on USD strength 2024-2025', 'Role of yen carry trade in recent dollar appreciation', 'Correlation between Japanese inflation and USD/JPY exchange rate']
+2025-01-20 01:57:25,822 - research - INFO - Scraped data size: 5
+2025-01-20 01:57:27,467 - research - INFO - Scraped data size: 4
+2025-01-20 01:57:30,994 - research - INFO - Scraped data size: 5
+2025-01-20 01:57:33,880 - research - INFO - Scraped data size: 4
+2025-01-20 01:57:35,187 - research - INFO - Scraped data size: 4
+2025-01-20 01:57:46,227 - research - INFO - Content found for sub-query: 11148 chars
+2025-01-20 01:57:52,378 - research - INFO - Content found for sub-query: 10490 chars
+2025-01-20 01:58:00,759 - research - INFO - Content found for sub-query: 11984 chars
+2025-01-20 01:58:12,833 - research - INFO - Content found for sub-query: 12076 chars
+2025-01-20 01:58:15,352 - research - INFO - Content found for sub-query: 12023 chars
+2025-01-20 01:58:15,371 - research - INFO - Gathered context from 5 sub-queries
+2025-01-20 01:58:15,371 - research - INFO - Combined context size: 57725
+2025-01-20 01:58:15,387 - research - INFO - Research completed. Context size: 57725
+2025-01-20 01:58:32,033 - research - INFO - Starting research for query: The Influence of Diverging Monetary Policies
+2025-01-20 01:58:32,066 - research - INFO - Using web search
+2025-01-20 01:58:32,066 - research - INFO - Starting web search for query: The Influence of Diverging Monetary Policies
+2025-01-20 01:58:32,066 - research - INFO - Planning research for query: The Influence of Diverging Monetary Policies
+2025-01-20 01:58:36,111 - research - INFO - Initial search results obtained: 7 results
+2025-01-20 01:58:39,542 - research - INFO - Research outline planned: ['US dollar appreciation and Japanese monetary policy divergence', 'Impact of Bank of Japan policies on USD/JPY exchange rate', 'Diverging monetary policies USA Japan effect on dollar strength 2023 2024 2025', 'Relationship between US dollar strength and Japanese Yen weakness since 2023']
+2025-01-20 01:58:39,543 - research - INFO - Generated sub-queries: ['US dollar appreciation and Japanese monetary policy divergence', 'Impact of Bank of Japan policies on USD/JPY exchange rate', 'Diverging monetary policies USA Japan effect on dollar strength 2023 2024 2025', 'Relationship between US dollar strength and Japanese Yen weakness since 2023']
+2025-01-20 01:58:44,393 - research - INFO - Scraped data size: 2
+2025-01-20 01:58:48,931 - research - INFO - Scraped data size: 4
+2025-01-20 01:58:52,489 - research - INFO - Scraped data size: 5
+2025-01-20 01:58:55,234 - research - INFO - Scraped data size: 4
+2025-01-20 01:58:57,805 - research - INFO - Content found for sub-query: 11837 chars
+2025-01-20 01:59:17,018 - research - INFO - Content found for sub-query: 11381 chars
+2025-01-20 01:59:19,361 - research - INFO - Content found for sub-query: 11275 chars
+2025-01-20 01:59:20,263 - research - INFO - Content found for sub-query: 11058 chars
+2025-01-20 01:59:20,281 - research - INFO - Gathered context from 4 sub-queries
+2025-01-20 01:59:20,283 - research - INFO - Combined context size: 45554
+2025-01-20 01:59:20,299 - research - INFO - Research completed. Context size: 45554
+2025-01-20 01:59:57,839 - research - INFO - Starting research for query: The Role of Japan's Economic Conditions and Policies
+2025-01-20 01:59:57,873 - research - INFO - Using web search
+2025-01-20 01:59:57,873 - research - INFO - Starting web search for query: The Role of Japan's Economic Conditions and Policies
+2025-01-20 01:59:57,873 - research - INFO - Planning research for query: The Role of Japan's Economic Conditions and Policies
+2025-01-20 02:00:02,555 - research - INFO - Initial search results obtained: 7 results
+2025-01-20 02:00:04,437 - research - INFO - Research outline planned: ["Japan's economic policies contributing to rising dollar value", 'Impact of Japanese monetary policy on USD/JPY exchange rate', "Correlation between Japan's economic stagnation and dollar appreciation", "Role of Japan's foreign economic policies in strengthening the US dollar"]
+2025-01-20 02:00:04,438 - research - INFO - Generated sub-queries: ["Japan's economic policies contributing to rising dollar value", 'Impact of Japanese monetary policy on USD/JPY exchange rate', "Correlation between Japan's economic stagnation and dollar appreciation", "Role of Japan's foreign economic policies in strengthening the US dollar"]
+2025-01-20 02:00:08,846 - research - INFO - Scraped data size: 5
+2025-01-20 02:00:26,779 - research - INFO - Scraped data size: 3
+2025-01-20 02:00:28,457 - research - INFO - Scraped data size: 4
+2025-01-20 02:00:32,911 - research - INFO - Scraped data size: 2
+2025-01-20 02:00:33,036 - research - INFO - Content found for sub-query: 10213 chars
+2025-01-20 02:00:36,322 - research - INFO - Content found for sub-query: 11034 chars
+2025-01-20 02:00:41,859 - research - INFO - Content found for sub-query: 10791 chars
+2025-01-20 02:00:55,306 - research - INFO - Content found for sub-query: 12793 chars
+2025-01-20 02:00:55,329 - research - INFO - Gathered context from 4 sub-queries
+2025-01-20 02:00:55,330 - research - INFO - Combined context size: 44834
+2025-01-20 02:00:55,351 - research - INFO - Research completed. Context size: 44834
+2025-01-20 02:01:35,726 - research - INFO - Starting research for query: Impact of Global Economic Factors and Carry Trade
+2025-01-20 02:01:35,766 - research - INFO - Using web search
+2025-01-20 02:01:35,767 - research - INFO - Starting web search for query: Impact of Global Economic Factors and Carry Trade
+2025-01-20 02:01:35,767 - research - INFO - Planning research for query: Impact of Global Economic Factors and Carry Trade
+2025-01-20 02:01:40,556 - research - INFO - Initial search results obtained: 7 results
+2025-01-20 02:01:43,596 - research - INFO - Research outline planned: ['rising dollar value 2025 global economic factors', 'Japan reverse carry trade impact on dollar 2025', 'yen carry trade unwinding effect on dollar appreciation', 'impact of global carry trades on USD exchange rates']
+2025-01-20 02:01:43,597 - research - INFO - Generated sub-queries: ['rising dollar value 2025 global economic factors', 'Japan reverse carry trade impact on dollar 2025', 'yen carry trade unwinding effect on dollar appreciation', 'impact of global carry trades on USD exchange rates']
+2025-01-20 02:01:49,316 - research - INFO - Scraped data size: 3
+2025-01-20 02:01:53,031 - research - INFO - Scraped data size: 5
+2025-01-20 02:01:55,273 - research - INFO - Scraped data size: 5
+2025-01-20 02:02:00,006 - research - INFO - Scraped data size: 4
+2025-01-20 02:02:09,055 - research - INFO - Content found for sub-query: 10885 chars
+2025-01-20 02:02:10,649 - research - INFO - Content found for sub-query: 11150 chars
+2025-01-20 02:02:23,024 - research - INFO - Content found for sub-query: 11257 chars
+2025-01-20 02:02:34,169 - research - INFO - Content found for sub-query: 11726 chars
+2025-01-20 02:02:34,193 - research - INFO - Gathered context from 4 sub-queries
+2025-01-20 02:02:34,193 - research - INFO - Combined context size: 45021
+2025-01-20 02:02:34,215 - research - INFO - Research completed. Context size: 45021
+2025-01-20 02:03:31,269 - langchain_google_genai._function_utils - WARNING - Key 'title' is not supported in schema, ignoring
+2025-01-20 02:03:31,270 - langchain_google_genai._function_utils - WARNING - Key 'title' is not supported in schema, ignoring
+2025-01-27 20:23:15,855 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-01-28 16:24:02,587 - research - INFO - Starting research for query: What is DeepSeek R1
+2025-01-28 16:24:02,615 - research - INFO - Using web search
+2025-01-28 16:24:02,615 - research - INFO - Starting web search for query: What is DeepSeek R1
+2025-01-28 16:24:02,615 - research - INFO - Planning research for query: What is DeepSeek R1
+2025-01-28 16:24:06,761 - research - INFO - Initial search results obtained: 7 results
+2025-01-28 16:24:09,866 - research - INFO - Research outline planned: ['DeepSeek R1 capabilities comparison OpenAI o1', 'DeepSeek R1 reinforcement learning training process XAI', 'DeepSeek R1 open-source license cost advantages', 'DeepSeek R1 performance benchmarks reasoning tasks datasets']
+2025-01-28 16:24:09,866 - research - INFO - Generated sub-queries: ['DeepSeek R1 capabilities comparison OpenAI o1', 'DeepSeek R1 reinforcement learning training process XAI', 'DeepSeek R1 open-source license cost advantages', 'DeepSeek R1 performance benchmarks reasoning tasks datasets']
+2025-01-28 16:24:13,848 - research - INFO - Scraped data size: 4
+2025-01-28 16:24:18,026 - research - INFO - Scraped data size: 5
+2025-01-28 16:24:19,732 - research - INFO - Scraped data size: 4
+2025-01-28 16:24:21,928 - research - INFO - Scraped data size: 3
+2025-01-28 16:24:23,515 - research - INFO - Scraped data size: 5
+2025-01-28 16:24:34,518 - research - INFO - Content found for sub-query: 11398 chars
+2025-01-28 16:24:37,685 - research - INFO - Content found for sub-query: 9270 chars
+2025-01-28 16:24:37,933 - research - INFO - Content found for sub-query: 11226 chars
+2025-01-28 16:24:39,478 - research - INFO - Content found for sub-query: 11818 chars
+2025-01-28 16:24:42,882 - research - INFO - Content found for sub-query: 11537 chars
+2025-01-28 16:24:42,903 - research - INFO - Gathered context from 5 sub-queries
+2025-01-28 16:24:42,904 - research - INFO - Combined context size: 55253
+2025-01-28 16:24:42,918 - research - INFO - Research completed. Context size: 55253
+2025-01-29 21:29:26,601 - research - INFO - Starting research for query: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.
+2025-01-29 21:29:26,628 - research - INFO - Using web search
+2025-01-29 21:29:26,640 - research - INFO - Starting web search for query: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.
+2025-01-29 21:29:26,642 - research - INFO - Planning research for query: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.
+2025-01-29 21:29:31,634 - research - INFO - Initial search results obtained: 7 results
+2025-01-29 21:29:35,487 - research - INFO - Research outline planned: ['"Deepseek v3 R1" model architecture and performance benchmarks', 'Impact of "Deepseek v3 R1" on AI industry competition and market share', '"Deepseek v3 R1" open-source code and community contributions analysis', '"Deepseek v3 R1" training costs and resource efficiency compared to competitors']
+2025-01-29 21:29:35,492 - research - INFO - Generated sub-queries: ['"Deepseek v3 R1" model architecture and performance benchmarks', 'Impact of "Deepseek v3 R1" on AI industry competition and market share', '"Deepseek v3 R1" open-source code and community contributions analysis', '"Deepseek v3 R1" training costs and resource efficiency compared to competitors']
+2025-01-29 21:29:40,573 - research - INFO - Scraped data size: 3
+2025-01-29 21:29:45,949 - research - INFO - Scraped data size: 3
+2025-01-29 21:29:53,049 - research - INFO - Scraped data size: 4
+2025-01-29 21:29:56,024 - research - INFO - Scraped data size: 2
+2025-01-29 21:30:00,716 - research - INFO - Scraped data size: 3
+2025-01-29 21:30:03,533 - research - INFO - Content found for sub-query: 11472 chars
+2025-01-29 21:30:03,910 - research - INFO - Content found for sub-query: 12051 chars
+2025-01-29 21:30:06,851 - research - INFO - Content found for sub-query: 11361 chars
+2025-01-29 21:30:13,783 - research - INFO - Content found for sub-query: 13910 chars
+2025-01-29 21:31:56,167 - research - INFO - Content found for sub-query: 11873 chars
+2025-01-29 21:31:56,222 - research - INFO - Gathered context from 5 sub-queries
+2025-01-29 21:31:56,222 - research - INFO - Combined context size: 60671
+2025-01-29 21:31:56,242 - research - INFO - Research completed. Context size: 60671
+2025-01-29 21:32:13,501 - research - INFO - Starting research for query: DeepSeek V3 R1: Architecture and Innovations
+2025-01-29 21:32:13,534 - research - INFO - Using web search
+2025-01-29 21:32:13,534 - research - INFO - Starting web search for query: DeepSeek V3 R1: Architecture and Innovations
+2025-01-29 21:32:13,534 - research - INFO - Planning research for query: DeepSeek V3 R1: Architecture and Innovations
+2025-01-29 21:32:17,736 - research - INFO - Initial search results obtained: 7 results
+2025-01-29 21:32:20,067 - research - INFO - Research outline planned: ['"Deepseek v3 R1" architecture innovations', '"Deepseek v3 R1" impact AI industry "Multi-Head Latent Attention"', '"Deepseek v3 R1" performance benchmarks comparison "open-source"', '"Deepseek v3 R1" training efficiency cost analysis "Mixture-of-Experts"']
+2025-01-29 21:32:20,067 - research - INFO - Generated sub-queries: ['"Deepseek v3 R1" architecture innovations', '"Deepseek v3 R1" impact AI industry "Multi-Head Latent Attention"', '"Deepseek v3 R1" performance benchmarks comparison "open-source"', '"Deepseek v3 R1" training efficiency cost analysis "Mixture-of-Experts"']
+2025-01-29 21:32:24,720 - research - INFO - Scraped data size: 2
+2025-01-29 21:32:27,581 - research - INFO - Scraped data size: 4
+2025-01-29 21:32:29,324 - research - INFO - Scraped data size: 3
+2025-01-29 21:32:31,533 - research - INFO - Scraped data size: 4
+2025-01-29 21:32:31,627 - research - INFO - Content found for sub-query: 11707 chars
+2025-01-29 21:32:44,344 - research - INFO - Content found for sub-query: 11659 chars
+2025-01-29 21:32:49,633 - research - INFO - Content found for sub-query: 11818 chars
+2025-01-29 21:33:39,143 - research - INFO - Content found for sub-query: 12249 chars
+2025-01-29 21:33:39,174 - research - INFO - Gathered context from 4 sub-queries
+2025-01-29 21:33:39,175 - research - INFO - Combined context size: 47436
+2025-01-29 21:33:39,195 - research - INFO - Research completed. Context size: 47436
+2025-01-29 21:33:44,826 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-01-29 21:33:49,891 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-01-29 21:33:54,779 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-01-29 21:33:57,618 - research - INFO - Starting research for query: Performance Benchmarks and Impact on the AI Industry
+2025-01-29 21:33:57,657 - research - INFO - Using web search
+2025-01-29 21:33:57,658 - research - INFO - Starting web search for query: Performance Benchmarks and Impact on the AI Industry
+2025-01-29 21:33:57,658 - research - INFO - Planning research for query: Performance Benchmarks and Impact on the AI Industry
+2025-01-29 21:34:05,653 - research - INFO - Initial search results obtained: 7 results
+2025-01-29 21:34:07,373 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-01-29 21:34:09,748 - gpt_researcher.actions.query_processing - WARNING - Error with strategic LLM: 429 Resource has been exhausted (e.g. check quota).. Retrying with max_tokens=4000.
+2025-01-29 21:34:09,748 - gpt_researcher.actions.query_processing - WARNING - See https://github.com/assafelovic/gpt-researcher/issues/1022
+2025-01-29 21:34:10,166 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-01-29 21:34:12,482 - gpt_researcher.actions.query_processing - WARNING - Retrying with max_tokens=4000 failed.
+2025-01-29 21:34:12,482 - gpt_researcher.actions.query_processing - WARNING - Error with strategic LLM: 429 Resource has been exhausted (e.g. check quota).. Falling back to smart LLM.
+2025-01-29 21:34:12,908 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-01-29 22:11:47,929 - research - INFO - Starting research for query: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.
+2025-01-29 22:11:47,969 - research - INFO - Using web search
+2025-01-29 22:11:47,969 - research - INFO - Starting web search for query: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.
+2025-01-29 22:11:47,970 - research - INFO - Planning research for query: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.
+2025-01-29 22:11:55,045 - research - INFO - Initial search results obtained: 7 results
+2025-01-29 22:11:57,377 - research - INFO - Research outline planned: ['Deepseek v3 R1 model architecture and performance benchmarks', "Impact of Deepseek's open-source model R1 on AI industry competition", 'Comparison of Deepseek v3 R1 with OpenAI GPT series and other large language models', 'Deepseek R1 cost analysis and implications for AI development accessibility']
+2025-01-29 22:11:57,379 - research - INFO - Generated sub-queries: ['Deepseek v3 R1 model architecture and performance benchmarks', "Impact of Deepseek's open-source model R1 on AI industry competition", 'Comparison of Deepseek v3 R1 with OpenAI GPT series and other large language models', 'Deepseek R1 cost analysis and implications for AI development accessibility']
+2025-01-29 22:12:00,482 - research - INFO - Scraped data size: 3
+2025-01-29 22:12:02,306 - research - INFO - Scraped data size: 5
+2025-01-29 22:12:02,685 - research - INFO - Scraped data size: 2
+2025-01-29 22:12:04,681 - research - INFO - Scraped data size: 5
+2025-01-29 22:12:11,250 - research - INFO - Scraped data size: 3
+2025-01-29 22:12:11,411 - research - INFO - Content found for sub-query: 11983 chars
+2025-01-29 22:12:12,340 - research - INFO - Content found for sub-query: 9910 chars
+2025-01-29 22:12:22,390 - research - INFO - Content found for sub-query: 10859 chars
+2025-01-29 22:12:30,714 - research - INFO - Content found for sub-query: 11472 chars
+2025-01-29 22:12:51,292 - research - INFO - Content found for sub-query: 10022 chars
+2025-01-29 22:12:51,315 - research - INFO - Gathered context from 5 sub-queries
+2025-01-29 22:12:51,316 - research - INFO - Combined context size: 54250
+2025-01-29 22:12:51,330 - research - INFO - Research completed. Context size: 54250
+2025-01-29 22:13:10,664 - research - INFO - Starting research for query: DeepSeek V3: Model Architecture and Performance
+2025-01-29 22:13:10,698 - research - INFO - Using web search
+2025-01-29 22:13:10,698 - research - INFO - Starting web search for query: DeepSeek V3: Model Architecture and Performance
+2025-01-29 22:13:10,699 - research - INFO - Planning research for query: DeepSeek V3: Model Architecture and Performance
+2025-01-29 22:13:15,835 - research - INFO - Initial search results obtained: 7 results
+2025-01-29 22:13:19,183 - research - INFO - Research outline planned: ['"DeepSeek V3 R1" architecture performance benchmarks', '"DeepSeek V3 R1" impact AI industry applications', '"DeepSeek V3 R1" training methodology multi-token prediction MoE', '"DeepSeek V3 R1" comparison GPT-4 Claude 3.5 cost efficiency']
+2025-01-29 22:13:19,183 - research - INFO - Generated sub-queries: ['"DeepSeek V3 R1" architecture performance benchmarks', '"DeepSeek V3 R1" impact AI industry applications', '"DeepSeek V3 R1" training methodology multi-token prediction MoE', '"DeepSeek V3 R1" comparison GPT-4 Claude 3.5 cost efficiency']
+2025-01-29 22:13:25,197 - research - INFO - Scraped data size: 3
+2025-01-29 22:13:27,000 - research - INFO - Scraped data size: 4
+2025-01-29 22:13:28,859 - research - INFO - Scraped data size: 4
+2025-01-29 22:13:30,812 - research - INFO - Scraped data size: 4
+2025-01-29 22:13:36,480 - research - INFO - Content found for sub-query: 12342 chars
+2025-01-29 22:13:41,994 - research - INFO - Content found for sub-query: 11200 chars
+2025-01-29 22:13:46,986 - research - INFO - Content found for sub-query: 10948 chars
+2025-01-29 22:13:49,496 - research - INFO - Content found for sub-query: 11475 chars
+2025-01-29 22:13:49,523 - research - INFO - Gathered context from 4 sub-queries
+2025-01-29 22:13:49,524 - research - INFO - Combined context size: 45968
+2025-01-29 22:13:49,544 - research - INFO - Research completed. Context size: 45968
+2025-01-29 22:14:32,652 - research - INFO - Starting research for query: Impact on the AI Industry: Cost Efficiency and Democratization
+2025-01-29 22:14:32,689 - research - INFO - Using web search
+2025-01-29 22:14:32,689 - research - INFO - Starting web search for query: Impact on the AI Industry: Cost Efficiency and Democratization
+2025-01-29 22:14:32,689 - research - INFO - Planning research for query: Impact on the AI Industry: Cost Efficiency and Democratization
+2025-01-29 22:14:37,170 - research - INFO - Initial search results obtained: 7 results
+2025-01-29 22:14:40,445 - research - INFO - Research outline planned: ['"Deepseek v3 R1" cost efficiency analysis', '"Deepseek v3 R1" democratization impact case studies', '"Deepseek v3 R1" comparison with other AI models cost and accessibility', '"Deepseek v3 R1" impact on AI development barriers']
+2025-01-29 22:14:40,445 - research - INFO - Generated sub-queries: ['"Deepseek v3 R1" cost efficiency analysis', '"Deepseek v3 R1" democratization impact case studies', '"Deepseek v3 R1" comparison with other AI models cost and accessibility', '"Deepseek v3 R1" impact on AI development barriers']
+2025-01-29 22:14:51,396 - research - INFO - Scraped data size: 3
+2025-01-29 22:14:54,923 - research - INFO - Scraped data size: 5
+2025-01-29 22:14:55,939 - research - INFO - Scraped data size: 2
+2025-01-29 22:14:58,986 - research - INFO - Scraped data size: 1
+2025-01-29 22:15:01,168 - research - INFO - Content found for sub-query: 10777 chars
+2025-01-29 22:15:07,937 - research - INFO - Content found for sub-query: 8621 chars
+2025-01-29 22:15:24,386 - research - INFO - Content found for sub-query: 11526 chars
+2025-01-29 22:15:48,087 - research - INFO - Content found for sub-query: 10174 chars
+2025-01-29 22:15:48,112 - research - INFO - Gathered context from 4 sub-queries
+2025-01-29 22:15:48,112 - research - INFO - Combined context size: 41101
+2025-01-29 22:15:48,140 - research - INFO - Research completed. Context size: 41101
+2025-01-29 22:16:21,633 - research - INFO - Starting research for query: Comparison with DeepSeek R1 and Other Models
+2025-01-29 22:16:21,677 - research - INFO - Using web search
+2025-01-29 22:16:21,678 - research - INFO - Starting web search for query: Comparison with DeepSeek R1 and Other Models
+2025-01-29 22:16:21,678 - research - INFO - Planning research for query: Comparison with DeepSeek R1 and Other Models
+2025-01-29 22:16:25,912 - research - INFO - Initial search results obtained: 7 results
+2025-01-29 22:16:29,686 - research - INFO - Research outline planned: ['Deepseek v3 R1 model specifications, benchmarks, capabilities', 'Deepseek R1 vs Deepseek v3 R1 comparison performance, cost, features', 'Deepseek v3 R1 vs OpenAI o1, Claude 3.5, other LLMs comparison reasoning, coding, math, cost', 'Deepseek v3 R1 impact AI industry applications, research, trends']
+2025-01-29 22:16:29,686 - research - INFO - Generated sub-queries: ['Deepseek v3 R1 model specifications, benchmarks, capabilities', 'Deepseek R1 vs Deepseek v3 R1 comparison performance, cost, features', 'Deepseek v3 R1 vs OpenAI o1, Claude 3.5, other LLMs comparison reasoning, coding, math, cost', 'Deepseek v3 R1 impact AI industry applications, research, trends']
+2025-01-29 22:16:33,100 - research - INFO - Scraped data size: 5
+2025-01-29 22:16:33,948 - research - INFO - Scraped data size: 5
+2025-01-29 22:16:35,453 - research - INFO - Scraped data size: 4
+2025-01-29 22:16:38,378 - research - INFO - Scraped data size: 4
+2025-01-29 22:16:55,055 - research - INFO - Content found for sub-query: 9041 chars
+2025-01-29 22:17:02,850 - research - INFO - Content found for sub-query: 10460 chars
+2025-01-29 22:17:04,426 - research - INFO - Content found for sub-query: 10801 chars
+2025-01-29 22:17:18,044 - research - INFO - Content found for sub-query: 9229 chars
+2025-01-29 22:17:18,101 - research - INFO - Gathered context from 4 sub-queries
+2025-01-29 22:17:18,102 - research - INFO - Combined context size: 39534
+2025-01-29 22:17:18,129 - research - INFO - Research completed. Context size: 39534
+2025-01-29 22:17:28,142 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-01-29 22:17:31,731 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-01-29 22:17:36,287 - langchain_google_genai.chat_models - WARNING - Retrying langchain_google_genai.chat_models._achat_with_retry.._achat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota)..
+2025-02-03 04:58:29,393 - research - INFO - Starting research for query: How did world acceptance corp perform during its december ending period Q3 2025
+2025-02-03 04:58:29,413 - research - INFO - Using web search
+2025-02-03 04:58:29,414 - research - INFO - Starting web search for query: How did world acceptance corp perform during its december ending period Q3 2025
+2025-02-03 04:58:29,414 - research - INFO - Planning research for query: How did world acceptance corp perform during its december ending period Q3 2025
+2025-02-03 04:58:33,566 - research - INFO - Initial search results obtained: 7 results
+2025-02-03 04:58:36,757 - research - INFO - Research outline planned: ['World Acceptance Corporation Q3 2025 earnings report', 'WRLD Q3 2025 financial results', 'World Acceptance Corp investor relations Q3 2025', 'World Acceptance Corporation Q3 2025 SEC filings']
+2025-02-03 04:58:36,757 - research - INFO - Generated sub-queries: ['World Acceptance Corporation Q3 2025 earnings report', 'WRLD Q3 2025 financial results', 'World Acceptance Corp investor relations Q3 2025', 'World Acceptance Corporation Q3 2025 SEC filings']
+2025-02-03 04:58:40,759 - research - INFO - Scraped data size: 5
+2025-02-03 04:58:43,073 - research - INFO - Scraped data size: 2
+2025-02-03 04:58:47,349 - research - INFO - Scraped data size: 2
+2025-02-03 04:58:50,963 - research - INFO - Scraped data size: 2
+2025-02-03 04:58:52,428 - research - INFO - Scraped data size: 1
+2025-02-03 04:58:54,832 - research - INFO - Content found for sub-query: 11016 chars
+2025-02-03 04:59:00,467 - research - INFO - Content found for sub-query: 11027 chars
+2025-02-03 04:59:09,197 - research - INFO - Content found for sub-query: 11681 chars
+2025-02-03 04:59:09,835 - research - INFO - Content found for sub-query: 10708 chars
+2025-02-03 04:59:28,071 - research - INFO - Content found for sub-query: 10206 chars
+2025-02-03 04:59:28,087 - research - INFO - Gathered context from 5 sub-queries
+2025-02-03 04:59:28,088 - research - INFO - Combined context size: 54642
+2025-02-03 04:59:28,099 - research - INFO - Research completed. Context size: 54642
+2025-02-03 11:03:43,287 - research - INFO - Starting research for query: What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed
+2025-02-03 11:03:43,305 - research - INFO - Using web search
+2025-02-03 11:03:43,306 - research - INFO - Starting web search for query: What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed
+2025-02-03 11:03:43,306 - research - INFO - Planning research for query: What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed
+2025-02-03 11:03:48,067 - research - INFO - Initial search results obtained: 7 results
+2025-02-03 11:03:51,487 - research - INFO - Research outline planned: ['distillation in large language models knowledge transfer techniques', 'deepseek r1 training process reinforcement learning from deepseek v3 base', 'deepseek r1 training cost analysis compared to openai models', 'flaws in arguments about ai industry panic due to deepseek r1 cost']
+2025-02-03 11:03:51,488 - research - INFO - Generated sub-queries: ['distillation in large language models knowledge transfer techniques', 'deepseek r1 training process reinforcement learning from deepseek v3 base', 'deepseek r1 training cost analysis compared to openai models', 'flaws in arguments about ai industry panic due to deepseek r1 cost']
+2025-02-03 11:03:58,620 - research - INFO - Scraped data size: 4
+2025-02-03 11:04:00,677 - research - INFO - Scraped data size: 5
+2025-02-03 11:04:02,028 - research - INFO - Scraped data size: 5
+2025-02-03 11:04:03,669 - research - INFO - Scraped data size: 4
+2025-02-03 11:04:04,249 - research - INFO - Scraped data size: 2
+2025-02-03 11:04:12,137 - research - INFO - Content found for sub-query: 12563 chars
+2025-02-03 11:04:14,327 - research - INFO - Content found for sub-query: 12242 chars
+2025-02-03 11:04:18,974 - research - INFO - Content found for sub-query: 10764 chars
+2025-02-03 11:04:23,186 - research - INFO - Content found for sub-query: 8931 chars
+2025-02-03 11:04:31,452 - research - INFO - Content found for sub-query: 12065 chars
+2025-02-03 11:04:31,469 - research - INFO - Gathered context from 5 sub-queries
+2025-02-03 11:04:31,470 - research - INFO - Combined context size: 56569
+2025-02-03 11:04:31,484 - research - INFO - Research completed. Context size: 56569
+2025-02-03 16:46:17,552 - research - INFO - Starting research for query: Whats the latest happening news in US economy and related to Banking sector
+2025-02-03 16:46:17,587 - research - INFO - Using web search
+2025-02-03 16:46:17,588 - research - INFO - Starting web search for query: Whats the latest happening news in US economy and related to Banking sector
+2025-02-03 16:46:17,588 - research - INFO - Planning research for query: Whats the latest happening news in US economy and related to Banking sector
+2025-02-03 16:46:21,777 - research - INFO - Initial search results obtained: 7 results
+2025-02-03 16:46:25,028 - research - INFO - Research outline planned: ['US economy news banking sector February 2025', 'Latest developments US banking sector interest rates February 2025', 'Impact of 2024 banking crisis on current US economy', 'Current state of major US banks financial performance']
+2025-02-03 16:46:25,029 - research - INFO - Generated sub-queries: ['US economy news banking sector February 2025', 'Latest developments US banking sector interest rates February 2025', 'Impact of 2024 banking crisis on current US economy', 'Current state of major US banks financial performance']
+2025-02-03 16:46:28,717 - research - INFO - Scraped data size: 4
+2025-02-03 16:46:31,239 - research - INFO - Scraped data size: 3
+2025-02-03 16:46:32,482 - research - INFO - Scraped data size: 3
+2025-02-03 16:46:38,166 - research - INFO - Scraped data size: 1
+2025-02-03 16:46:39,949 - research - INFO - Scraped data size: 3
+2025-02-03 16:46:41,469 - research - INFO - Content found for sub-query: 11838 chars
+2025-02-03 16:46:46,622 - research - INFO - Content found for sub-query: 10137 chars
+2025-02-03 16:46:52,035 - research - INFO - Content found for sub-query: 11190 chars
+2025-02-03 16:47:02,151 - research - INFO - Content found for sub-query: 10576 chars
+2025-02-03 16:47:33,847 - research - INFO - Content found for sub-query: 11663 chars
+2025-02-03 16:47:33,865 - research - INFO - Gathered context from 5 sub-queries
+2025-02-03 16:47:33,865 - research - INFO - Combined context size: 55408
+2025-02-03 16:47:33,880 - research - INFO - Research completed. Context size: 55408
+2025-02-03 16:58:15,900 - research - INFO - Starting research for query: Whats the latest happening news in US economy and related to Banking sector
+2025-02-03 16:58:15,920 - research - INFO - Using web search
+2025-02-03 16:58:15,920 - research - INFO - Starting web search for query: Whats the latest happening news in US economy and related to Banking sector
+2025-02-03 16:58:15,922 - research - INFO - Planning research for query: Whats the latest happening news in US economy and related to Banking sector
+2025-02-03 16:58:18,056 - research - INFO - Initial search results obtained: 7 results
+2025-02-03 16:58:20,007 - research - INFO - Research outline planned: ['US economy news February 2025 banking sector', 'latest developments US banking sector February 2025', 'Federal Reserve interest rate policy impact on banks 2025', 'US bank earnings reports Q1 2025']
+2025-02-03 16:58:20,007 - research - INFO - Generated sub-queries: ['US economy news February 2025 banking sector', 'latest developments US banking sector February 2025', 'Federal Reserve interest rate policy impact on banks 2025', 'US bank earnings reports Q1 2025']
+2025-02-03 16:58:24,311 - research - INFO - Scraped data size: 5
+2025-02-03 16:58:27,172 - research - INFO - Scraped data size: 5
+2025-02-03 16:58:31,113 - research - INFO - Scraped data size: 5
+2025-02-03 16:58:32,655 - research - INFO - Scraped data size: 3
+2025-02-03 16:58:34,494 - research - INFO - Scraped data size: 5
+2025-02-03 16:58:54,016 - research - INFO - Content found for sub-query: 10576 chars
+2025-02-03 16:58:57,591 - research - INFO - Content found for sub-query: 11261 chars
+2025-02-03 16:58:57,734 - research - INFO - Content found for sub-query: 12201 chars
+2025-02-03 16:58:58,316 - research - INFO - Content found for sub-query: 10809 chars
+2025-02-03 16:59:47,329 - research - INFO - Content found for sub-query: 11663 chars
+2025-02-03 16:59:47,349 - research - INFO - Gathered context from 5 sub-queries
+2025-02-03 16:59:47,349 - research - INFO - Combined context size: 56514
+2025-02-03 16:59:47,362 - research - INFO - Research completed. Context size: 56514
diff --git a/main.py b/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdcb3da0d4f8b59dad38b63b73c8d0e36247b177
--- /dev/null
+++ b/main.py
@@ -0,0 +1,37 @@
+from dotenv import load_dotenv
+import logging
+from pathlib import Path
+
+# Create logs directory if it doesn't exist
+logs_dir = Path("logs")
+logs_dir.mkdir(exist_ok=True)
+
+# Configure logging
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+ handlers=[
+ # File handler for general application logs
+ logging.FileHandler('logs/app.log'),
+ # Stream handler for console output
+ logging.StreamHandler()
+ ]
+)
+
+# Suppress verbose fontTools logging
+logging.getLogger('fontTools').setLevel(logging.WARNING)
+logging.getLogger('fontTools.subset').setLevel(logging.WARNING)
+logging.getLogger('fontTools.ttLib').setLevel(logging.WARNING)
+
+# Create logger instance
+logger = logging.getLogger(__name__)
+
+load_dotenv()
+
+from backend.server.server import app
+
+if __name__ == "__main__":
+ import uvicorn
+
+ logger.info("Starting server...")
+ uvicorn.run(app, host="0.0.0.0", port=8000)
\ No newline at end of file
diff --git a/multi_agents/README.md b/multi_agents/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..17d76ea306d9ec2efafa6d58d93ffbd6bea009ca
--- /dev/null
+++ b/multi_agents/README.md
@@ -0,0 +1,107 @@
+# LangGraph x GPT Researcher
+[LangGraph](https://python.langchain.com/docs/langgraph) is a library for building stateful, multi-actor applications with LLMs.
+This example uses Langgraph to automate the process of an in depth research on any given topic.
+
+## Use case
+By using Langgraph, the research process can be significantly improved in depth and quality by leveraging multiple agents with specialized skills.
+Inspired by the recent [STORM](https://arxiv.org/abs/2402.14207) paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication.
+
+An average run generates a 5-6 page research report in multiple formats such as PDF, Docx and Markdown.
+
+Please note: This example uses the OpenAI API only for optimized performance.
+
+## The Multi Agent Team
+The research team is made up of 8 agents:
+- **Human** - The human in the loop that oversees the process and provides feedback to the agents.
+- **Chief Editor** - Oversees the research process and manages the team. This is the "master" agent that coordinates the other agents using Langgraph.
+- **Researcher** (gpt-researcher) - A specialized autonomous agent that conducts in depth research on a given topic.
+- **Editor** - Responsible for planning the research outline and structure.
+- **Reviewer** - Validates the correctness of the research results given a set of criteria.
+- **Revisor** - Revises the research results based on the feedback from the reviewer.
+- **Writer** - Responsible for compiling and writing the final report.
+- **Publisher** - Responsible for publishing the final report in various formats.
+
+## How it works
+Generally, the process is based on the following stages:
+1. Planning stage
+2. Data collection and analysis
+3. Review and revision
+4. Writing and submission
+5. Publication
+
+### Architecture
+
+
+
+
+
+### Steps
+More specifically (as seen in the architecture diagram) the process is as follows:
+- Browser (gpt-researcher) - Browses the internet for initial research based on the given research task.
+- Editor - Plans the report outline and structure based on the initial research.
+- For each outline topic (in parallel):
+ - Researcher (gpt-researcher) - Runs an in depth research on the subtopics and writes a draft.
+ - Reviewer - Validates the correctness of the draft given a set of criteria and provides feedback.
+ - Revisor - Revises the draft until it is satisfactory based on the reviewer feedback.
+- Writer - Compiles and writes the final report including an introduction, conclusion and references section from the given research findings.
+- Publisher - Publishes the final report to multi formats such as PDF, Docx, Markdown, etc.
+
+## How to run
+1. Install required packages:
+ ```bash
+ pip install -r requirements.txt
+ ```
+3. Update env variables
+ ```bash
+ export OPENAI_API_KEY={Your OpenAI API Key here}
+ export TAVILY_API_KEY={Your Tavily API Key here}
+ ```
+2. Run the application:
+ ```bash
+ python main.py
+ ```
+
+## Usage
+To change the research query and customize the report, edit the `task.json` file in the main directory.
+#### Task.json contains the following fields:
+- `query` - The research query or task.
+- `model` - The OpenAI LLM to use for the agents.
+- `max_sections` - The maximum number of sections in the report. Each section is a subtopic of the research query.
+- `include_human_feedback` - If true, the user can provide feedback to the agents. If false, the agents will work autonomously.
+- `publish_formats` - The formats to publish the report in. The reports will be written in the `output` directory.
+- `source` - The location from which to conduct the research. Options: `web` or `local`. For local, please add `DOC_PATH` env var.
+- `follow_guidelines` - If true, the research report will follow the guidelines below. It will take longer to complete. If false, the report will be generated faster but may not follow the guidelines.
+- `guidelines` - A list of guidelines that the report must follow.
+- `verbose` - If true, the application will print detailed logs to the console.
+
+#### For example:
+```json
+{
+ "query": "Is AI in a hype cycle?",
+ "model": "gpt-4o",
+ "max_sections": 3,
+ "publish_formats": {
+ "markdown": true,
+ "pdf": true,
+ "docx": true
+ },
+ "include_human_feedback": false,
+ "source": "web",
+ "follow_guidelines": true,
+ "guidelines": [
+ "The report MUST fully answer the original question",
+ "The report MUST be written in apa format",
+ "The report MUST be written in english"
+ ],
+ "verbose": true
+}
+```
+
+## To Deploy
+
+```shell
+pip install langgraph-cli
+langgraph up
+```
+
+From there, see documentation [here](https://github.com/langchain-ai/langgraph-example) on how to use the streaming and async endpoints, as well as the playground.
diff --git a/multi_agents/__init__.py b/multi_agents/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c9ec03f5b878cc9997557a72f73ed8171d94ab6
--- /dev/null
+++ b/multi_agents/__init__.py
@@ -0,0 +1,27 @@
+# multi_agents/__init__.py
+
+from .agents import (
+ ResearchAgent,
+ WriterAgent,
+ PublisherAgent,
+ ReviserAgent,
+ ReviewerAgent,
+ EditorAgent,
+ ChiefEditorAgent
+)
+from .memory import (
+ DraftState,
+ ResearchState
+)
+
+__all__ = [
+ "ResearchAgent",
+ "WriterAgent",
+ "PublisherAgent",
+ "ReviserAgent",
+ "ReviewerAgent",
+ "EditorAgent",
+ "ChiefEditorAgent",
+ "DraftState",
+ "ResearchState"
+]
\ No newline at end of file
diff --git a/multi_agents/__pycache__/__init__.cpython-312.pyc b/multi_agents/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e02ccff7a0a73d21092cfe76735d0d36e473744e
Binary files /dev/null and b/multi_agents/__pycache__/__init__.cpython-312.pyc differ
diff --git a/multi_agents/__pycache__/main.cpython-312.pyc b/multi_agents/__pycache__/main.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a89cd2aec2e1b880ec8ff17a25cb4184a920adbd
Binary files /dev/null and b/multi_agents/__pycache__/main.cpython-312.pyc differ
diff --git a/multi_agents/agent.py b/multi_agents/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..9656116703fa460d9d8d7708e983ffb12f5bfe3d
--- /dev/null
+++ b/multi_agents/agent.py
@@ -0,0 +1,16 @@
+from multi_agents.agents import ChiefEditorAgent
+
+chief_editor = ChiefEditorAgent({
+ "query": "Is AI in a hype cycle?",
+ "max_sections": 3,
+ "follow_guidelines": False,
+ "model": "gpt-4o",
+ "guidelines": [
+ "The report MUST be written in APA format",
+ "Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section",
+ "The report MUST be written in spanish"
+ ],
+ "verbose": False
+}, websocket=None, stream_output=None)
+graph = chief_editor.init_research_team()
+graph = graph.compile()
\ No newline at end of file
diff --git a/multi_agents/agents/__init__.py b/multi_agents/agents/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..951033cfc71abab0ad0aaa61b1fa557d404a305d
--- /dev/null
+++ b/multi_agents/agents/__init__.py
@@ -0,0 +1,21 @@
+from .researcher import ResearchAgent
+from .writer import WriterAgent
+from .publisher import PublisherAgent
+from .reviser import ReviserAgent
+from .reviewer import ReviewerAgent
+from .editor import EditorAgent
+from .human import HumanAgent
+
+# Below import should remain last since it imports all of the above
+from .orchestrator import ChiefEditorAgent
+
+__all__ = [
+ "ChiefEditorAgent",
+ "ResearchAgent",
+ "WriterAgent",
+ "EditorAgent",
+ "PublisherAgent",
+ "ReviserAgent",
+ "ReviewerAgent",
+ "HumanAgent"
+]
diff --git a/multi_agents/agents/__pycache__/__init__.cpython-312.pyc b/multi_agents/agents/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64d0be41fa6a26756c69a719301639d9d6d53db0
Binary files /dev/null and b/multi_agents/agents/__pycache__/__init__.cpython-312.pyc differ
diff --git a/multi_agents/agents/__pycache__/editor.cpython-312.pyc b/multi_agents/agents/__pycache__/editor.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ababbb419db464c96ec55f69db184e7020e2a4f
Binary files /dev/null and b/multi_agents/agents/__pycache__/editor.cpython-312.pyc differ
diff --git a/multi_agents/agents/__pycache__/human.cpython-312.pyc b/multi_agents/agents/__pycache__/human.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..43d59b61e4fc7792ec1584500c15a77013e6231a
Binary files /dev/null and b/multi_agents/agents/__pycache__/human.cpython-312.pyc differ
diff --git a/multi_agents/agents/__pycache__/orchestrator.cpython-312.pyc b/multi_agents/agents/__pycache__/orchestrator.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1046858cff759dfd3c2b57218ef1392d4d87cd63
Binary files /dev/null and b/multi_agents/agents/__pycache__/orchestrator.cpython-312.pyc differ
diff --git a/multi_agents/agents/__pycache__/publisher.cpython-312.pyc b/multi_agents/agents/__pycache__/publisher.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9f30276321ed2c34c5b64a82f642968c49e7788
Binary files /dev/null and b/multi_agents/agents/__pycache__/publisher.cpython-312.pyc differ
diff --git a/multi_agents/agents/__pycache__/researcher.cpython-312.pyc b/multi_agents/agents/__pycache__/researcher.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff687b5c2cf71e072de844a989c292417e80be40
Binary files /dev/null and b/multi_agents/agents/__pycache__/researcher.cpython-312.pyc differ
diff --git a/multi_agents/agents/__pycache__/reviewer.cpython-312.pyc b/multi_agents/agents/__pycache__/reviewer.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba4b27f3fd078e75cd24afff4b0834ac9014dea6
Binary files /dev/null and b/multi_agents/agents/__pycache__/reviewer.cpython-312.pyc differ
diff --git a/multi_agents/agents/__pycache__/reviser.cpython-312.pyc b/multi_agents/agents/__pycache__/reviser.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68ab51f338292a191da14d0266c1e0ef5ca1e9b6
Binary files /dev/null and b/multi_agents/agents/__pycache__/reviser.cpython-312.pyc differ
diff --git a/multi_agents/agents/__pycache__/writer.cpython-312.pyc b/multi_agents/agents/__pycache__/writer.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c6ae9ca34f22ccee03b571715ba7a5388aa024c
Binary files /dev/null and b/multi_agents/agents/__pycache__/writer.cpython-312.pyc differ
diff --git a/multi_agents/agents/editor.py b/multi_agents/agents/editor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee2f2edaed5045744f2e105eb44ec7af9308f1a6
--- /dev/null
+++ b/multi_agents/agents/editor.py
@@ -0,0 +1,167 @@
+from datetime import datetime
+import asyncio
+from typing import Dict, List, Optional
+
+from langgraph.graph import StateGraph, END
+
+from .utils.views import print_agent_output
+from .utils.llms import call_model
+from ..memory.draft import DraftState
+from . import ResearchAgent, ReviewerAgent, ReviserAgent
+
+
+class EditorAgent:
+ """Agent responsible for editing and managing code."""
+
+ def __init__(self, websocket=None, stream_output=None, headers=None):
+ self.websocket = websocket
+ self.stream_output = stream_output
+ self.headers = headers or {}
+
+ async def plan_research(self, research_state: Dict[str, any]) -> Dict[str, any]:
+ """
+ Plan the research outline based on initial research and task parameters.
+
+ :param research_state: Dictionary containing research state information
+ :return: Dictionary with title, date, and planned sections
+ """
+ initial_research = research_state.get("initial_research")
+ task = research_state.get("task")
+ include_human_feedback = task.get("include_human_feedback")
+ human_feedback = research_state.get("human_feedback")
+ max_sections = task.get("max_sections")
+
+ prompt = self._create_planning_prompt(
+ initial_research, include_human_feedback, human_feedback, max_sections)
+
+ print_agent_output(
+ "Planning an outline layout based on initial research...", agent="EDITOR")
+ plan = await call_model(
+ prompt=prompt,
+ model=task.get("model"),
+ response_format="json",
+ )
+
+ return {
+ "title": plan.get("title"),
+ "date": plan.get("date"),
+ "sections": plan.get("sections"),
+ }
+
+ async def run_parallel_research(self, research_state: Dict[str, any]) -> Dict[str, List[str]]:
+ """
+ Execute parallel research tasks for each section.
+
+ :param research_state: Dictionary containing research state information
+ :return: Dictionary with research results
+ """
+ agents = self._initialize_agents()
+ workflow = self._create_workflow()
+ chain = workflow.compile()
+
+ queries = research_state.get("sections")
+ title = research_state.get("title")
+
+ self._log_parallel_research(queries)
+
+ final_drafts = [
+ chain.ainvoke(self._create_task_input(
+ research_state, query, title))
+ for query in queries
+ ]
+ research_results = [
+ result["draft"] for result in await asyncio.gather(*final_drafts)
+ ]
+
+ return {"research_data": research_results}
+
+ def _create_planning_prompt(self, initial_research: str, include_human_feedback: bool,
+ human_feedback: Optional[str], max_sections: int) -> List[Dict[str, str]]:
+ """Create the prompt for research planning."""
+ return [
+ {
+ "role": "system",
+ "content": "You are a research editor. Your goal is to oversee the research project "
+ "from inception to completion. Your main task is to plan the article section "
+ "layout based on an initial research summary.\n ",
+ },
+ {
+ "role": "user",
+ "content": self._format_planning_instructions(initial_research, include_human_feedback,
+ human_feedback, max_sections),
+ },
+ ]
+
+ def _format_planning_instructions(self, initial_research: str, include_human_feedback: bool,
+ human_feedback: Optional[str], max_sections: int) -> str:
+ """Format the instructions for research planning."""
+ today = datetime.now().strftime('%d/%m/%Y')
+ feedback_instruction = (
+ f"Human feedback: {human_feedback}. You must plan the sections based on the human feedback."
+ if include_human_feedback and human_feedback and human_feedback != 'no'
+ else ''
+ )
+
+ return f"""Today's date is {today}
+ Research summary report: '{initial_research}'
+ {feedback_instruction}
+ \nYour task is to generate an outline of sections headers for the research project
+ based on the research summary report above.
+ You must generate a maximum of {max_sections} section headers.
+ You must focus ONLY on related research topics for subheaders and do NOT include introduction, conclusion and references.
+ You must return nothing but a JSON with the fields 'title' (str) and
+ 'sections' (maximum {max_sections} section headers) with the following structure:
+ '{{title: string research title, date: today's date,
+ sections: ['section header 1', 'section header 2', 'section header 3' ...]}}'."""
+
+ def _initialize_agents(self) -> Dict[str, any]:
+ """Initialize the research, reviewer, and reviser skills."""
+ return {
+ "research": ResearchAgent(self.websocket, self.stream_output, self.headers),
+ "reviewer": ReviewerAgent(self.websocket, self.stream_output, self.headers),
+ "reviser": ReviserAgent(self.websocket, self.stream_output, self.headers),
+ }
+
+ def _create_workflow(self) -> StateGraph:
+ """Create the workflow for the research process."""
+ agents = self._initialize_agents()
+ workflow = StateGraph(DraftState)
+
+ workflow.add_node("researcher", agents["research"].run_depth_research)
+ workflow.add_node("reviewer", agents["reviewer"].run)
+ workflow.add_node("reviser", agents["reviser"].run)
+
+ workflow.set_entry_point("researcher")
+ workflow.add_edge("researcher", "reviewer")
+ workflow.add_edge("reviser", "reviewer")
+ workflow.add_conditional_edges(
+ "reviewer",
+ lambda draft: "accept" if draft["review"] is None else "revise",
+ {"accept": END, "revise": "reviser"},
+ )
+
+ return workflow
+
+ def _log_parallel_research(self, queries: List[str]) -> None:
+ """Log the start of parallel research tasks."""
+ if self.websocket and self.stream_output:
+ asyncio.create_task(self.stream_output(
+ "logs",
+ "parallel_research",
+ f"Running parallel research for the following queries: {queries}",
+ self.websocket,
+ ))
+ else:
+ print_agent_output(
+ f"Running the following research tasks in parallel: {queries}...",
+ agent="EDITOR",
+ )
+
+ def _create_task_input(self, research_state: Dict[str, any], query: str, title: str) -> Dict[str, any]:
+ """Create the input for a single research task."""
+ return {
+ "task": research_state.get("task"),
+ "topic": query,
+ "title": title,
+ "headers": self.headers,
+ }
diff --git a/multi_agents/agents/human.py b/multi_agents/agents/human.py
new file mode 100644
index 0000000000000000000000000000000000000000..81ebbb9b51ca43b89fd30f215f85d8f2a5b1b4ca
--- /dev/null
+++ b/multi_agents/agents/human.py
@@ -0,0 +1,51 @@
+import json
+
+
+class HumanAgent:
+ def __init__(self, websocket=None, stream_output=None, headers=None):
+ self.websocket = websocket
+ self.stream_output = stream_output
+ self.headers = headers or {}
+
+ async def review_plan(self, research_state: dict):
+ print(f"HumanAgent websocket: {self.websocket}")
+ print(f"HumanAgent stream_output: {self.stream_output}")
+ task = research_state.get("task")
+ layout = research_state.get("sections")
+
+ user_feedback = None
+
+ if task.get("include_human_feedback"):
+ # Stream response to the user if a websocket is provided (such as from web app)
+ if self.websocket and self.stream_output:
+ try:
+ await self.stream_output(
+ "human_feedback",
+ "request",
+ f"Any feedback on this plan of topics to research? {layout}? If not, please reply with 'no'.",
+ self.websocket,
+ )
+ response = await self.websocket.receive_text()
+ print(f"Received response: {response}", flush=True)
+ response_data = json.loads(response)
+ if response_data.get("type") == "human_feedback":
+ user_feedback = response_data.get("content")
+ else:
+ print(
+ f"Unexpected response type: {response_data.get('type')}",
+ flush=True,
+ )
+ except Exception as e:
+ print(f"Error receiving human feedback: {e}", flush=True)
+ # Otherwise, prompt the user for feedback in the console
+ else:
+ user_feedback = input(
+ f"Any feedback on this plan? {layout}? If not, please reply with 'no'.\n>> "
+ )
+
+ if user_feedback and "no" in user_feedback.strip().lower():
+ user_feedback = None
+
+ print(f"User feedback before return: {user_feedback}")
+
+ return {"human_feedback": user_feedback}
diff --git a/multi_agents/agents/orchestrator.py b/multi_agents/agents/orchestrator.py
new file mode 100644
index 0000000000000000000000000000000000000000..da7a326fe8c676471bcf8c5e0663121526a98b49
--- /dev/null
+++ b/multi_agents/agents/orchestrator.py
@@ -0,0 +1,118 @@
+import os
+import time
+import datetime
+from langgraph.graph import StateGraph, END
+# from langgraph.checkpoint.memory import MemorySaver
+from .utils.views import print_agent_output
+from ..memory.research import ResearchState
+from .utils.utils import sanitize_filename
+
+# Import agent classes
+from . import \
+ WriterAgent, \
+ EditorAgent, \
+ PublisherAgent, \
+ ResearchAgent, \
+ HumanAgent
+
+
+class ChiefEditorAgent:
+ """Agent responsible for managing and coordinating editing tasks."""
+
+ def __init__(self, task: dict, websocket=None, stream_output=None, tone=None, headers=None):
+ self.task = task
+ self.websocket = websocket
+ self.stream_output = stream_output
+ self.headers = headers or {}
+ self.tone = tone
+ self.task_id = self._generate_task_id()
+ self.output_dir = self._create_output_directory()
+
+ def _generate_task_id(self):
+ # Currently time based, but can be any unique identifier
+ return int(time.time())
+
+ def _create_output_directory(self):
+ output_dir = "./outputs/" + \
+ sanitize_filename(
+ f"run_{self.task_id}_{self.task.get('query')[0:40]}")
+
+ os.makedirs(output_dir, exist_ok=True)
+ return output_dir
+
+ def _initialize_agents(self):
+ return {
+ "writer": WriterAgent(self.websocket, self.stream_output, self.headers),
+ "editor": EditorAgent(self.websocket, self.stream_output, self.headers),
+ "research": ResearchAgent(self.websocket, self.stream_output, self.tone, self.headers),
+ "publisher": PublisherAgent(self.output_dir, self.websocket, self.stream_output, self.headers),
+ "human": HumanAgent(self.websocket, self.stream_output, self.headers)
+ }
+
+ def _create_workflow(self, agents):
+ workflow = StateGraph(ResearchState)
+
+ # Add nodes for each agent
+ workflow.add_node("browser", agents["research"].run_initial_research)
+ workflow.add_node("planner", agents["editor"].plan_research)
+ workflow.add_node("researcher", agents["editor"].run_parallel_research)
+ workflow.add_node("writer", agents["writer"].run)
+ workflow.add_node("publisher", agents["publisher"].run)
+ workflow.add_node("human", agents["human"].review_plan)
+
+ # Add edges
+ self._add_workflow_edges(workflow)
+
+ return workflow
+
+ def _add_workflow_edges(self, workflow):
+ workflow.add_edge('browser', 'planner')
+ workflow.add_edge('planner', 'human')
+ workflow.add_edge('researcher', 'writer')
+ workflow.add_edge('writer', 'publisher')
+ workflow.set_entry_point("browser")
+ workflow.add_edge('publisher', END)
+
+ # Add human in the loop
+ workflow.add_conditional_edges(
+ 'human',
+ lambda review: "accept" if review['human_feedback'] is None else "revise",
+ {"accept": "researcher", "revise": "planner"}
+ )
+
+ def init_research_team(self):
+ """Initialize and create a workflow for the research team."""
+ agents = self._initialize_agents()
+ return self._create_workflow(agents)
+
+ async def _log_research_start(self):
+ message = f"Starting the research process for query '{self.task.get('query')}'..."
+ if self.websocket and self.stream_output:
+ await self.stream_output("logs", "starting_research", message, self.websocket)
+ else:
+ print_agent_output(message, "MASTER")
+
+ async def run_research_task(self, task_id=None):
+ """
+ Run a research task with the initialized research team.
+
+ Args:
+ task_id (optional): The ID of the task to run.
+
+ Returns:
+ The result of the research task.
+ """
+ research_team = self.init_research_team()
+ chain = research_team.compile()
+
+ await self._log_research_start()
+
+ config = {
+ "configurable": {
+ "thread_id": task_id,
+ "thread_ts": datetime.datetime.utcnow()
+ }
+ }
+
+ result = await chain.ainvoke({"task": self.task}, config=config)
+ return result
diff --git a/multi_agents/agents/publisher.py b/multi_agents/agents/publisher.py
new file mode 100644
index 0000000000000000000000000000000000000000..71360fbf3398b29a2782a237c7a0206301e3f009
--- /dev/null
+++ b/multi_agents/agents/publisher.py
@@ -0,0 +1,63 @@
+from .utils.file_formats import \
+ write_md_to_pdf, \
+ write_md_to_word, \
+ write_text_to_md
+
+from .utils.views import print_agent_output
+
+
+class PublisherAgent:
+ def __init__(self, output_dir: str, websocket=None, stream_output=None, headers=None):
+ self.websocket = websocket
+ self.stream_output = stream_output
+ self.output_dir = output_dir
+ self.headers = headers or {}
+
+ async def publish_research_report(self, research_state: dict, publish_formats: dict):
+ layout = self.generate_layout(research_state)
+ await self.write_report_by_formats(layout, publish_formats)
+
+ return layout
+
+ def generate_layout(self, research_state: dict):
+ sections = '\n\n'.join(f"{value}"
+ for subheader in research_state.get("research_data")
+ for key, value in subheader.items())
+ references = '\n'.join(f"{reference}" for reference in research_state.get("sources"))
+ headers = research_state.get("headers")
+ layout = f"""# {headers.get('title')}
+#### {headers.get("date")}: {research_state.get('date')}
+
+## {headers.get("introduction")}
+{research_state.get('introduction')}
+
+## {headers.get("table_of_contents")}
+{research_state.get('table_of_contents')}
+
+{sections}
+
+## {headers.get("conclusion")}
+{research_state.get('conclusion')}
+
+## {headers.get("references")}
+{references}
+"""
+ return layout
+
+ async def write_report_by_formats(self, layout:str, publish_formats: dict):
+ if publish_formats.get("pdf"):
+ await write_md_to_pdf(layout, self.output_dir)
+ if publish_formats.get("docx"):
+ await write_md_to_word(layout, self.output_dir)
+ if publish_formats.get("markdown"):
+ await write_text_to_md(layout, self.output_dir)
+
+ async def run(self, research_state: dict):
+ task = research_state.get("task")
+ publish_formats = task.get("publish_formats")
+ if self.websocket and self.stream_output:
+ await self.stream_output("logs", "publishing", f"Publishing final research report based on retrieved data...", self.websocket)
+ else:
+ print_agent_output(output="Publishing final research report based on retrieved data...", agent="PUBLISHER")
+ final_research_report = await self.publish_research_report(research_state, publish_formats)
+ return {"report": final_research_report}
diff --git a/multi_agents/agents/researcher.py b/multi_agents/agents/researcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b8603c33d77eb92438b86dcced6029ee89f942a
--- /dev/null
+++ b/multi_agents/agents/researcher.py
@@ -0,0 +1,58 @@
+from gpt_researcher import GPTResearcher
+from colorama import Fore, Style
+from .utils.views import print_agent_output
+
+
+class ResearchAgent:
+ def __init__(self, websocket=None, stream_output=None, tone=None, headers=None):
+ self.websocket = websocket
+ self.stream_output = stream_output
+ self.headers = headers or {}
+ self.tone = tone
+
+ async def research(self, query: str, research_report: str = "research_report",
+ parent_query: str = "", verbose=True, source="web", tone=None, headers=None):
+ # Initialize the researcher
+ researcher = GPTResearcher(query=query, report_type=research_report, parent_query=parent_query,
+ verbose=verbose, report_source=source, tone=tone, websocket=self.websocket, headers=self.headers)
+ # Conduct research on the given query
+ await researcher.conduct_research()
+ # Write the report
+ report = await researcher.write_report()
+
+ return report
+
+ async def run_subtopic_research(self, parent_query: str, subtopic: str, verbose: bool = True, source="web", headers=None):
+ try:
+ report = await self.research(parent_query=parent_query, query=subtopic,
+ research_report="subtopic_report", verbose=verbose, source=source, tone=self.tone, headers=None)
+ except Exception as e:
+ print(f"{Fore.RED}Error in researching topic {subtopic}: {e}{Style.RESET_ALL}")
+ report = None
+ return {subtopic: report}
+
+ async def run_initial_research(self, research_state: dict):
+ task = research_state.get("task")
+ query = task.get("query")
+ source = task.get("source", "web")
+
+ if self.websocket and self.stream_output:
+ await self.stream_output("logs", "initial_research", f"Running initial research on the following query: {query}", self.websocket)
+ else:
+ print_agent_output(f"Running initial research on the following query: {query}", agent="RESEARCHER")
+ return {"task": task, "initial_research": await self.research(query=query, verbose=task.get("verbose"),
+ source=source, tone=self.tone, headers=self.headers)}
+
+ async def run_depth_research(self, draft_state: dict):
+ task = draft_state.get("task")
+ topic = draft_state.get("topic")
+ parent_query = task.get("query")
+ source = task.get("source", "web")
+ verbose = task.get("verbose")
+ if self.websocket and self.stream_output:
+ await self.stream_output("logs", "depth_research", f"Running in depth research on the following report topic: {topic}", self.websocket)
+ else:
+ print_agent_output(f"Running in depth research on the following report topic: {topic}", agent="RESEARCHER")
+ research_draft = await self.run_subtopic_research(parent_query=parent_query, subtopic=topic,
+ verbose=verbose, source=source, headers=self.headers)
+ return {"draft": research_draft}
\ No newline at end of file
diff --git a/multi_agents/agents/reviewer.py b/multi_agents/agents/reviewer.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8d536b3b4a105538dc5ec43fc265d5f499b9031
--- /dev/null
+++ b/multi_agents/agents/reviewer.py
@@ -0,0 +1,79 @@
+from .utils.views import print_agent_output
+from .utils.llms import call_model
+
+TEMPLATE = """You are an expert research article reviewer. \
+Your goal is to review research drafts and provide feedback to the reviser only based on specific guidelines. \
+"""
+
+
+class ReviewerAgent:
+ def __init__(self, websocket=None, stream_output=None, headers=None):
+ self.websocket = websocket
+ self.stream_output = stream_output
+ self.headers = headers or {}
+
+ async def review_draft(self, draft_state: dict):
+ """
+ Review a draft article
+ :param draft_state:
+ :return:
+ """
+ task = draft_state.get("task")
+ guidelines = "- ".join(guideline for guideline in task.get("guidelines"))
+ revision_notes = draft_state.get("revision_notes")
+
+ revise_prompt = f"""The reviser has already revised the draft based on your previous review notes with the following feedback:
+{revision_notes}\n
+Please provide additional feedback ONLY if critical since the reviser has already made changes based on your previous feedback.
+If you think the article is sufficient or that non critical revisions are required, please aim to return None.
+"""
+
+ review_prompt = f"""You have been tasked with reviewing the draft which was written by a non-expert based on specific guidelines.
+Please accept the draft if it is good enough to publish, or send it for revision, along with your notes to guide the revision.
+If not all of the guideline criteria are met, you should send appropriate revision notes.
+If the draft meets all the guidelines, please return None.
+{revise_prompt if revision_notes else ""}
+
+Guidelines: {guidelines}\nDraft: {draft_state.get("draft")}\n
+"""
+ prompt = [
+ {"role": "system", "content": TEMPLATE},
+ {"role": "user", "content": review_prompt},
+ ]
+
+ response = await call_model(prompt, model=task.get("model"))
+
+ if task.get("verbose"):
+ if self.websocket and self.stream_output:
+ await self.stream_output(
+ "logs",
+ "review_feedback",
+ f"Review feedback is: {response}...",
+ self.websocket,
+ )
+ else:
+ print_agent_output(
+ f"Review feedback is: {response}...", agent="REVIEWER"
+ )
+
+ if "None" in response:
+ return None
+ return response
+
+ async def run(self, draft_state: dict):
+ task = draft_state.get("task")
+ guidelines = task.get("guidelines")
+ to_follow_guidelines = task.get("follow_guidelines")
+ review = None
+ if to_follow_guidelines:
+ print_agent_output(f"Reviewing draft...", agent="REVIEWER")
+
+ if task.get("verbose"):
+ print_agent_output(
+ f"Following guidelines {guidelines}...", agent="REVIEWER"
+ )
+
+ review = await self.review_draft(draft_state)
+ else:
+ print_agent_output(f"Ignoring guidelines...", agent="REVIEWER")
+ return {"review": review}
diff --git a/multi_agents/agents/reviser.py b/multi_agents/agents/reviser.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc5b1693714643d875e0053acad240405bb08e44
--- /dev/null
+++ b/multi_agents/agents/reviser.py
@@ -0,0 +1,74 @@
+from .utils.views import print_agent_output
+from .utils.llms import call_model
+import json
+
+sample_revision_notes = """
+{
+ "draft": {
+ draft title: The revised draft that you are submitting for review
+ },
+ "revision_notes": Your message to the reviewer about the changes you made to the draft based on their feedback
+}
+"""
+
+
+class ReviserAgent:
+ def __init__(self, websocket=None, stream_output=None, headers=None):
+ self.websocket = websocket
+ self.stream_output = stream_output
+ self.headers = headers or {}
+
+ async def revise_draft(self, draft_state: dict):
+ """
+ Review a draft article
+ :param draft_state:
+ :return:
+ """
+ review = draft_state.get("review")
+ task = draft_state.get("task")
+ draft_report = draft_state.get("draft")
+ prompt = [
+ {
+ "role": "system",
+ "content": "You are an expert writer. Your goal is to revise drafts based on reviewer notes.",
+ },
+ {
+ "role": "user",
+ "content": f"""Draft:\n{draft_report}" + "Reviewer's notes:\n{review}\n\n
+You have been tasked by your reviewer with revising the following draft, which was written by a non-expert.
+If you decide to follow the reviewer's notes, please write a new draft and make sure to address all of the points they raised.
+Please keep all other aspects of the draft the same.
+You MUST return nothing but a JSON in the following format:
+{sample_revision_notes}
+""",
+ },
+ ]
+
+ response = await call_model(
+ prompt,
+ model=task.get("model"),
+ response_format="json",
+ )
+ return response
+
+ async def run(self, draft_state: dict):
+ print_agent_output(f"Rewriting draft based on feedback...", agent="REVISOR")
+ revision = await self.revise_draft(draft_state)
+
+ if draft_state.get("task").get("verbose"):
+ if self.websocket and self.stream_output:
+ await self.stream_output(
+ "logs",
+ "revision_notes",
+ f"Revision notes: {revision.get('revision_notes')}",
+ self.websocket,
+ )
+ else:
+ print_agent_output(
+ f"Revision notes: {revision.get('revision_notes')}", agent="REVISOR"
+ )
+
+ return {
+ "draft": revision.get("draft"),
+ "revision_notes": revision.get("revision_notes"),
+ }
diff --git a/multi_agents/agents/utils/__init__.py b/multi_agents/agents/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/multi_agents/agents/utils/__pycache__/__init__.cpython-312.pyc b/multi_agents/agents/utils/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34d680db4012c9f7d84efd30ee0c2781b962da22
Binary files /dev/null and b/multi_agents/agents/utils/__pycache__/__init__.cpython-312.pyc differ
diff --git a/multi_agents/agents/utils/__pycache__/file_formats.cpython-312.pyc b/multi_agents/agents/utils/__pycache__/file_formats.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f291fd51dd14fac7acfb7e4fc6fccb1ed0816690
Binary files /dev/null and b/multi_agents/agents/utils/__pycache__/file_formats.cpython-312.pyc differ
diff --git a/multi_agents/agents/utils/__pycache__/llms.cpython-312.pyc b/multi_agents/agents/utils/__pycache__/llms.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63c9deef0a5016222fb4b94d8a2bbd472714fe4b
Binary files /dev/null and b/multi_agents/agents/utils/__pycache__/llms.cpython-312.pyc differ
diff --git a/multi_agents/agents/utils/__pycache__/utils.cpython-312.pyc b/multi_agents/agents/utils/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1732faa9d5e5e9530384042be6200d1efdf156c7
Binary files /dev/null and b/multi_agents/agents/utils/__pycache__/utils.cpython-312.pyc differ
diff --git a/multi_agents/agents/utils/__pycache__/views.cpython-312.pyc b/multi_agents/agents/utils/__pycache__/views.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3a4e4723b8562fbf1898878f3b0ac759b35d4c6
Binary files /dev/null and b/multi_agents/agents/utils/__pycache__/views.cpython-312.pyc differ
diff --git a/multi_agents/agents/utils/file_formats.py b/multi_agents/agents/utils/file_formats.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4326296c5795541cfab74e2d5cde316cedbcacd
--- /dev/null
+++ b/multi_agents/agents/utils/file_formats.py
@@ -0,0 +1,101 @@
+import aiofiles
+import urllib
+import uuid
+import mistune
+import os
+
+async def write_to_file(filename: str, text: str) -> None:
+ """Asynchronously write text to a file in UTF-8 encoding.
+
+ Args:
+ filename (str): The filename to write to.
+ text (str): The text to write.
+ """
+ # Convert text to UTF-8, replacing any problematic characters
+ text_utf8 = text.encode('utf-8', errors='replace').decode('utf-8')
+
+ async with aiofiles.open(filename, "w", encoding='utf-8') as file:
+ await file.write(text_utf8)
+
+
+async def write_text_to_md(text: str, path: str) -> str:
+ """Writes text to a Markdown file and returns the file path.
+
+ Args:
+ text (str): Text to write to the Markdown file.
+
+ Returns:
+ str: The file path of the generated Markdown file.
+ """
+ task = uuid.uuid4().hex
+ file_path = f"{path}/{task}.md"
+ await write_to_file(file_path, text)
+ print(f"Report written to {file_path}")
+ return file_path
+
+
+async def write_md_to_pdf(text: str, path: str) -> str:
+ """Converts Markdown text to a PDF file and returns the file path.
+
+ Args:
+ text (str): Markdown text to convert.
+
+ Returns:
+ str: The encoded file path of the generated PDF.
+ """
+ task = uuid.uuid4().hex
+ file_path = f"{path}/{task}.pdf"
+
+ try:
+ # Get the directory of the current file
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ css_path = os.path.join(current_dir, "pdf_styles.css")
+
+ # Moved imports to inner function to avoid known import errors with gobject-2.0
+ from md2pdf.core import md2pdf
+ md2pdf(file_path,
+ md_content=text,
+ css_file_path=css_path,
+ base_url=None)
+ print(f"Report written to {file_path}")
+ except Exception as e:
+ print(f"Error in converting Markdown to PDF: {e}")
+ return ""
+
+ encoded_file_path = urllib.parse.quote(file_path)
+ return encoded_file_path
+
+
+async def write_md_to_word(text: str, path: str) -> str:
+ """Converts Markdown text to a DOCX file and returns the file path.
+
+ Args:
+ text (str): Markdown text to convert.
+
+ Returns:
+ str: The encoded file path of the generated DOCX.
+ """
+ task = uuid.uuid4().hex
+ file_path = f"{path}/{task}.docx"
+
+ try:
+ from htmldocx import HtmlToDocx
+ from docx import Document
+ # Convert report markdown to HTML
+ html = mistune.html(text)
+ # Create a document object
+ doc = Document()
+ # Convert the html generated from the report to document format
+ HtmlToDocx().add_html_to_document(html, doc)
+
+ # Saving the docx document to file_path
+ doc.save(file_path)
+
+ print(f"Report written to {file_path}")
+
+ encoded_file_path = urllib.parse.quote(f"{file_path}.docx")
+ return encoded_file_path
+
+ except Exception as e:
+ print(f"Error in converting Markdown to DOCX: {e}")
+ return ""
diff --git a/multi_agents/agents/utils/llms.py b/multi_agents/agents/utils/llms.py
new file mode 100644
index 0000000000000000000000000000000000000000..7db44f2de482283e7ac143eee06f135d83122964
--- /dev/null
+++ b/multi_agents/agents/utils/llms.py
@@ -0,0 +1,49 @@
+import json5 as json
+import json_repair
+from langchain_community.adapters.openai import convert_openai_messages
+
+from gpt_researcher.config.config import Config
+from gpt_researcher.utils.llm import create_chat_completion
+
+from loguru import logger
+
+
+async def call_model(
+ prompt: list,
+ model: str,
+ response_format: str = None,
+):
+
+ optional_params = {}
+ if response_format == "json":
+ optional_params = {"response_format": {"type": "json_object"}}
+
+ cfg = Config()
+ lc_messages = convert_openai_messages(prompt)
+
+ try:
+ response = await create_chat_completion(
+ model=model,
+ messages=lc_messages,
+ temperature=0,
+ llm_provider=cfg.smart_llm_provider,
+ llm_kwargs=cfg.llm_kwargs,
+ # cost_callback=cost_callback,
+ )
+
+ if response_format == "json":
+ try:
+ cleaned_json_string = response.strip("```json\n")
+ return json.loads(cleaned_json_string)
+ except Exception as e:
+ print("⚠️ Error in reading JSON, attempting to repair JSON")
+ logger.error(
+ f"Error in reading JSON, attempting to repair reponse: {response}"
+ )
+ return json_repair.loads(response)
+ else:
+ return response
+
+ except Exception as e:
+ print("⚠️ Error in calling model")
+ logger.error(f"Error in calling model: {e}")
diff --git a/multi_agents/agents/utils/pdf_styles.css b/multi_agents/agents/utils/pdf_styles.css
new file mode 100644
index 0000000000000000000000000000000000000000..d2743e7b0a1e5fbdf4c5cdc21d9622a9216eebed
--- /dev/null
+++ b/multi_agents/agents/utils/pdf_styles.css
@@ -0,0 +1,53 @@
+body {
+ font-family: 'Libre Baskerville', serif;
+ font-size: 12pt; /* standard size for academic papers */
+ line-height: 1.6; /* for readability */
+ color: #333; /* softer on the eyes than black */
+ background-color: #fff; /* white background */
+ margin: 0;
+ padding: 0;
+}
+
+h1, h2, h3, h4, h5, h6 {
+ font-family: 'Libre Baskerville', serif;
+ color: #000; /* darker than the body text */
+ margin-top: 1em; /* space above headers */
+}
+
+h1 {
+ font-size: 2em; /* make h1 twice the size of the body text */
+}
+
+h2 {
+ font-size: 1.5em;
+}
+
+/* Add some space between paragraphs */
+p {
+ margin-bottom: 1em;
+}
+
+/* Style for blockquotes, often used in academic papers */
+blockquote {
+ font-style: italic;
+ margin: 1em 0;
+ padding: 1em;
+ background-color: #f9f9f9; /* a light grey background */
+}
+
+/* You might want to style tables, figures, etc. too */
+table {
+ border-collapse: collapse;
+ width: 100%;
+}
+
+table, th, td {
+ border: 1px solid #ddd;
+ text-align: left;
+ padding: 8px;
+}
+
+th {
+ background-color: #f2f2f2;
+ color: black;
+}
\ No newline at end of file
diff --git a/multi_agents/agents/utils/utils.py b/multi_agents/agents/utils/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..084137e040e3f371e9ad7174cbb56e4568deb801
--- /dev/null
+++ b/multi_agents/agents/utils/utils.py
@@ -0,0 +1,26 @@
+import re
+
+def sanitize_filename(filename: str) -> str:
+ """
+ Sanitize a given filename by replacing characters that are invalid
+ in Windows file paths with an underscore ('_').
+
+ This function ensures that the filename is compatible with all
+ operating systems by removing or replacing characters that are
+ not allowed in Windows file paths. Specifically, it replaces
+ the following characters: < > : " / \\ | ? *
+
+ Parameters:
+ filename (str): The original filename to be sanitized.
+
+ Returns:
+ str: The sanitized filename with invalid characters replaced by an underscore.
+
+ Examples:
+ >>> sanitize_filename('invalid:file/name*example?.txt')
+ 'invalid_file_name_example_.txt'
+
+ >>> sanitize_filename('valid_filename.txt')
+ 'valid_filename.txt'
+ """
+ return re.sub(r'[<>:"/\\|?*]', '_', filename)
diff --git a/multi_agents/agents/utils/views.py b/multi_agents/agents/utils/views.py
new file mode 100644
index 0000000000000000000000000000000000000000..0edf0e1877bfcdedafa3e4cd027cea9da51f0c58
--- /dev/null
+++ b/multi_agents/agents/utils/views.py
@@ -0,0 +1,16 @@
+from colorama import Fore, Style
+from enum import Enum
+
+
+class AgentColor(Enum):
+ RESEARCHER = Fore.LIGHTBLUE_EX
+ EDITOR = Fore.YELLOW
+ WRITER = Fore.LIGHTGREEN_EX
+ PUBLISHER = Fore.MAGENTA
+ REVIEWER = Fore.CYAN
+ REVISOR = Fore.LIGHTWHITE_EX
+ MASTER = Fore.LIGHTYELLOW_EX
+
+
+def print_agent_output(output:str, agent: str="RESEARCHER"):
+ print(f"{AgentColor[agent].value}{agent}: {output}{Style.RESET_ALL}")
\ No newline at end of file
diff --git a/multi_agents/agents/writer.py b/multi_agents/agents/writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..53288af38f16b971a6fd9efb36914ba1109c19e3
--- /dev/null
+++ b/multi_agents/agents/writer.py
@@ -0,0 +1,142 @@
+from datetime import datetime
+import json5 as json
+from .utils.views import print_agent_output
+from .utils.llms import call_model
+
+sample_json = """
+{
+ "table_of_contents": A table of contents in markdown syntax (using '-') based on the research headers and subheaders,
+ "introduction": An indepth introduction to the topic in markdown syntax and hyperlink references to relevant sources,
+ "conclusion": A conclusion to the entire research based on all research data in markdown syntax and hyperlink references to relevant sources,
+ "sources": A list with strings of all used source links in the entire research data in markdown syntax and apa citation format. For example: ['- Title, year, Author [source url](source)', ...]
+}
+"""
+
+
+class WriterAgent:
+ def __init__(self, websocket=None, stream_output=None, headers=None):
+ self.websocket = websocket
+ self.stream_output = stream_output
+ self.headers = headers
+
+ def get_headers(self, research_state: dict):
+ return {
+ "title": research_state.get("title"),
+ "date": "Date",
+ "introduction": "Introduction",
+ "table_of_contents": "Table of Contents",
+ "conclusion": "Conclusion",
+ "references": "References",
+ }
+
+ async def write_sections(self, research_state: dict):
+ query = research_state.get("title")
+ data = research_state.get("research_data")
+ task = research_state.get("task")
+ follow_guidelines = task.get("follow_guidelines")
+ guidelines = task.get("guidelines")
+
+ prompt = [
+ {
+ "role": "system",
+ "content": "You are a research writer. Your sole purpose is to write a well-written "
+ "research reports about a "
+ "topic based on research findings and information.\n ",
+ },
+ {
+ "role": "user",
+ "content": f"Today's date is {datetime.now().strftime('%d/%m/%Y')}\n."
+ f"Query or Topic: {query}\n"
+ f"Research data: {str(data)}\n"
+ f"Your task is to write an in depth, well written and detailed "
+ f"introduction and conclusion to the research report based on the provided research data. "
+ f"Do not include headers in the results.\n"
+ f"You MUST include any relevant sources to the introduction and conclusion as markdown hyperlinks -"
+ f"For example: 'This is a sample text. ([url website](url))'\n\n"
+ f"{f'You must follow the guidelines provided: {guidelines}' if follow_guidelines else ''}\n"
+ f"You MUST return nothing but a JSON in the following format (without json markdown):\n"
+ f"{sample_json}\n\n",
+ },
+ ]
+
+ response = await call_model(
+ prompt,
+ task.get("model"),
+ response_format="json",
+ )
+ return response
+
+ async def revise_headers(self, task: dict, headers: dict):
+ prompt = [
+ {
+ "role": "system",
+ "content": """You are a research writer.
+Your sole purpose is to revise the headers data based on the given guidelines.""",
+ },
+ {
+ "role": "user",
+ "content": f"""Your task is to revise the given headers JSON based on the guidelines given.
+You are to follow the guidelines but the values should be in simple strings, ignoring all markdown syntax.
+You must return nothing but a JSON in the same format as given in headers data.
+Guidelines: {task.get("guidelines")}\n
+Headers Data: {headers}\n
+""",
+ },
+ ]
+
+ response = await call_model(
+ prompt,
+ task.get("model"),
+ response_format="json",
+ )
+ return {"headers": response}
+
+ async def run(self, research_state: dict):
+ if self.websocket and self.stream_output:
+ await self.stream_output(
+ "logs",
+ "writing_report",
+ f"Writing final research report based on research data...",
+ self.websocket,
+ )
+ else:
+ print_agent_output(
+ f"Writing final research report based on research data...",
+ agent="WRITER",
+ )
+
+ research_layout_content = await self.write_sections(research_state)
+
+ if research_state.get("task").get("verbose"):
+ if self.websocket and self.stream_output:
+ research_layout_content_str = json.dumps(
+ research_layout_content, indent=2
+ )
+ await self.stream_output(
+ "logs",
+ "research_layout_content",
+ research_layout_content_str,
+ self.websocket,
+ )
+ else:
+ print_agent_output(research_layout_content, agent="WRITER")
+
+ headers = self.get_headers(research_state)
+ if research_state.get("task").get("follow_guidelines"):
+ if self.websocket and self.stream_output:
+ await self.stream_output(
+ "logs",
+ "rewriting_layout",
+ "Rewriting layout based on guidelines...",
+ self.websocket,
+ )
+ else:
+ print_agent_output(
+ "Rewriting layout based on guidelines...", agent="WRITER"
+ )
+ headers = await self.revise_headers(
+ task=research_state.get("task"), headers=headers
+ )
+ headers = headers.get("headers")
+
+ return {**research_layout_content, "headers": headers}
diff --git a/multi_agents/langgraph.json b/multi_agents/langgraph.json
new file mode 100644
index 0000000000000000000000000000000000000000..12ea6e83461af86d5a9acfac788e3ad80e41b448
--- /dev/null
+++ b/multi_agents/langgraph.json
@@ -0,0 +1,10 @@
+{
+ "python_version": "3.11",
+ "dependencies": [
+ "."
+ ],
+ "graphs": {
+ "agent": "./agent.py:graph"
+ },
+ "env": ".env"
+}
\ No newline at end of file
diff --git a/multi_agents/main.py b/multi_agents/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..e347150d230b96954713f64823806f02b3b03cd0
--- /dev/null
+++ b/multi_agents/main.py
@@ -0,0 +1,52 @@
+from dotenv import load_dotenv
+import sys
+import os
+import uuid
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+from multi_agents.agents import ChiefEditorAgent
+import asyncio
+import json
+from gpt_researcher.utils.enum import Tone
+
+# Run with LangSmith if API key is set
+if os.environ.get("LANGCHAIN_API_KEY"):
+ os.environ["LANGCHAIN_TRACING_V2"] = "true"
+load_dotenv()
+
+def open_task():
+ # Get the directory of the current script
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ # Construct the absolute path to task.json
+ task_json_path = os.path.join(current_dir, 'task.json')
+
+ with open(task_json_path, 'r') as f:
+ task = json.load(f)
+
+ if not task:
+ raise Exception("No task found. Please ensure a valid task.json file is present in the multi_agents directory and contains the necessary task information.")
+
+ return task
+
+async def run_research_task(query, websocket=None, stream_output=None, tone=Tone.Objective, headers=None):
+ task = open_task()
+ task["query"] = query
+
+ chief_editor = ChiefEditorAgent(task, websocket, stream_output, tone, headers)
+ research_report = await chief_editor.run_research_task()
+
+ if websocket and stream_output:
+ await stream_output("logs", "research_report", research_report, websocket)
+
+ return research_report
+
+async def main():
+ task = open_task()
+
+ chief_editor = ChiefEditorAgent(task)
+ research_report = await chief_editor.run_research_task(task_id=uuid.uuid4())
+
+ return research_report
+
+if __name__ == "__main__":
+ asyncio.run(main())
\ No newline at end of file
diff --git a/multi_agents/memory/__init__.py b/multi_agents/memory/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4237fa42b521779005aeffda0d395033cd9a6af
--- /dev/null
+++ b/multi_agents/memory/__init__.py
@@ -0,0 +1,7 @@
+from .draft import DraftState
+from .research import ResearchState
+
+__all__ = [
+ "DraftState",
+ "ResearchState"
+]
\ No newline at end of file
diff --git a/multi_agents/memory/__pycache__/__init__.cpython-312.pyc b/multi_agents/memory/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e19c4242baab3e4b8f9347f850faaba99e5437c1
Binary files /dev/null and b/multi_agents/memory/__pycache__/__init__.cpython-312.pyc differ
diff --git a/multi_agents/memory/__pycache__/draft.cpython-312.pyc b/multi_agents/memory/__pycache__/draft.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2581fae3b93000e5b988a74e881d33190a8d114e
Binary files /dev/null and b/multi_agents/memory/__pycache__/draft.cpython-312.pyc differ
diff --git a/multi_agents/memory/__pycache__/research.cpython-312.pyc b/multi_agents/memory/__pycache__/research.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a52172b0f2e74f930c09ea76325b66292a1ab5e
Binary files /dev/null and b/multi_agents/memory/__pycache__/research.cpython-312.pyc differ
diff --git a/multi_agents/memory/draft.py b/multi_agents/memory/draft.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5e6862146d56672857e837a91b31c6b0e23b962
--- /dev/null
+++ b/multi_agents/memory/draft.py
@@ -0,0 +1,10 @@
+from typing import TypedDict, List, Annotated
+import operator
+
+
+class DraftState(TypedDict):
+ task: dict
+ topic: str
+ draft: dict
+ review: str
+ revision_notes: str
\ No newline at end of file
diff --git a/multi_agents/memory/research.py b/multi_agents/memory/research.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfa11e8bdd558db069db0b6d54b43aa099bf5ecc
--- /dev/null
+++ b/multi_agents/memory/research.py
@@ -0,0 +1,21 @@
+from typing import TypedDict, List, Annotated
+import operator
+
+
+class ResearchState(TypedDict):
+ task: dict
+ initial_research: str
+ sections: List[str]
+ research_data: List[dict]
+ human_feedback: str
+ # Report layout
+ title: str
+ headers: dict
+ date: str
+ table_of_contents: str
+ introduction: str
+ conclusion: str
+ sources: List[str]
+ report: str
+
+
diff --git a/multi_agents/package.json b/multi_agents/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..419f7c1d6fceaeadf62845e82ab38bec0c7aebde
--- /dev/null
+++ b/multi_agents/package.json
@@ -0,0 +1,15 @@
+{
+ "name": "simple_js_test",
+ "version": "1.0.0",
+ "description": "",
+ "main": "server.js",
+ "type": "module",
+ "scripts": {
+ "test": "echo \"Error: no test specified\" && exit 1"
+ },
+ "author": "",
+ "license": "ISC",
+ "dependencies": {
+ "@langchain/langgraph-sdk": "^0.0.1-rc.13"
+ }
+}
diff --git a/multi_agents/requirements.txt b/multi_agents/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..971701aac7e0d4d5c523d2ba54903dec01627402
--- /dev/null
+++ b/multi_agents/requirements.txt
@@ -0,0 +1,7 @@
+langgraph
+gpt_researcher
+langgraph-cli
+python-dotenv
+weasyprint
+json5
+loguru
diff --git a/multi_agents/task.json b/multi_agents/task.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb3325942b7b0727ab3d260a765ac166db87c0e7
--- /dev/null
+++ b/multi_agents/task.json
@@ -0,0 +1,18 @@
+{
+ "query": "Is AI in a hype cycle?",
+ "max_sections": 3,
+ "publish_formats": {
+ "markdown": true,
+ "pdf": true,
+ "docx": true
+ },
+ "include_human_feedback": false,
+ "follow_guidelines": false,
+ "model": "gpt-4o",
+ "guidelines": [
+ "The report MUST be written in APA format",
+ "Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section",
+ "The report MUST be written in spanish"
+ ],
+ "verbose": true
+}
\ No newline at end of file
diff --git a/outputs/task_1737317335_How did KKR Real Estate perform during Q3 2024.json b/outputs/task_1737317335_How did KKR Real Estate perform during Q3 2024.json
new file mode 100644
index 0000000000000000000000000000000000000000..c15a150272b87739596a9f94d7f3b368a00a65ba
--- /dev/null
+++ b/outputs/task_1737317335_How did KKR Real Estate perform during Q3 2024.json
@@ -0,0 +1,42 @@
+{
+ "timestamp": "2025-01-20T01:38:55.221463",
+ "events": [
+ {
+ "timestamp": "2025-01-20T01:39:03.823244",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'How did KKR Real Estate perform during Q3 2024'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:39:03.833296",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83d\udcb0 Finance Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:39:03.841133",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: How did KKR Real Estate perform during Q3 2024...",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1737317728_How did KKR Real Estate perform during Q3 20.docx b/outputs/task_1737317728_How did KKR Real Estate perform during Q3 20.docx
new file mode 100644
index 0000000000000000000000000000000000000000..173e7491647fb5b33a9624b15206b62a9c1605e0
Binary files /dev/null and b/outputs/task_1737317728_How did KKR Real Estate perform during Q3 20.docx differ
diff --git a/outputs/task_1737317728_How did KKR Real Estate perform during Q3 20.md b/outputs/task_1737317728_How did KKR Real Estate perform during Q3 20.md
new file mode 100644
index 0000000000000000000000000000000000000000..03963f5a288b14b9b6a7c32b421bc61c284656c2
--- /dev/null
+++ b/outputs/task_1737317728_How did KKR Real Estate perform during Q3 20.md
@@ -0,0 +1,44 @@
+## KKR Real Estate Finance Trust (KREF) Q3 2024 Performance: A Detailed Analysis
+
+KKR Real Estate Finance Trust (KREF) reported its Q3 2024 results on October 21, 2024, revealing a mixed performance marked by both positive surprises and underlying challenges. While the company exceeded expectations on several key metrics, the overall picture suggests a complex operating environment navigating a shifting interest rate landscape and persistent headwinds in the commercial real estate sector. This report provides a detailed analysis of KREF's Q3 2024 performance, drawing on information from multiple trusted financial news sources and official company releases.
+
+**Earnings and Revenue:**
+
+KREF reported a GAAP net loss of $7.4 million, translating to a loss of $0.19 per share ([MarketScreener](https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/)). However, adjusting for non-recurring costs and stock option expense, earnings reached $0.40 per share, significantly surpassing the consensus estimate of $0.34 and marking a "Beat" by $0.03 ([MarketBeat](https://www.marketbeat.com/earnings/reports/2024-10-21-kkr-real-estate-finance-trust-inc-stock/)). This positive surprise was echoed by Public.com, which highlighted the 8.82% beat over analyst expectations of $0.34 EPS ([Public](https://public.com/stocks/kref/earnings)). Furthermore, KREF's Q3 revenue reached $140.2 million, drastically exceeding the anticipated $37.87 million ([Yahoo Finance](https://finance.yahoo.com/news/earnings-watch-kkr-real-estate-130413678.html)). MarketBeat reported an even higher actual revenue figure of $140.15 million, exceeding the expected $39.52 million by a substantial $100.63 million ([MarketBeat](https://www.marketbeat.com/earnings/reports/2024-10-21-kkr-real-estate-finance-trust-inc-stock/)). This substantial revenue beat, coupled with the positive earnings surprise, paints a picture of stronger-than-expected top-line performance.
+
+**Distributable Earnings and Dividends:**
+
+KREF reported distributable earnings (DE) of $25.9 million or $0.37 per share, comfortably covering the Q3 dividend of $0.25 per share ([Yahoo Finance](https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html)). Management expressed confidence in maintaining DE ex-losses above the dividend level heading into 2025, despite anticipated impacts from lower SOFR and the REO portfolio ([Yahoo Finance](https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html)). This suggests a commitment to maintaining dividend payouts, which is a positive sign for investors.
+
+**Loan Portfolio and Repayments:**
+
+KREF experienced significant loan repayments during Q3 2024, totaling $290 million, against fundings of $55 million ([Yahoo Finance](https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html)). This trend of repayments exceeding fundings has been consistent, occurring in five of the last six quarters. Furthermore, a $138 million office loan was sold at par post-quarter end ([Yahoo Finance](https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html)). These repayments have significantly reduced future funding obligations to just 8% of the funded portfolio, indicating a more conservative approach to deploying capital. Year-to-date repayments surpassed the initial full-year expectation of $1 billion, demonstrating strong performance in this area ([Yahoo Finance](https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html)).
+
+**Market Environment and Outlook:**
+
+KREF's management highlighted an improving market environment with growing confidence in an interest rate cut cycle ([Yahoo Finance](https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html)). This optimism is supported by increased transaction volumes within KREF's real estate credit pipeline, which grew by 40% from the beginning of the year to approximately $20 billion per week ([Yahoo Finance](https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html)). Management believes this points to a significant lending opportunity in the future.
+
+**Challenges and Concerns:**
+
+Despite the positive aspects of KREF's Q3 performance, certain challenges remain. The reported GAAP net loss, although offset by adjusted earnings, raises concerns about the sustainability of profitability. Furthermore, the impact of lower SOFR on future earnings needs careful monitoring. While the company benefits from KKR's global platform and resources, the competitive landscape in the private equity real estate market remains intense, as highlighted by DCF.fm's Porter's Five Forces analysis ([DCF.fm](https://dcf.fm/products/kkr-porters-five-forces-analysis)). The analysis emphasizes the bargaining power of large institutional clients and the fierce competition among private equity firms, posing ongoing challenges for KREF.
+
+**Conclusion:**
+
+KREF's Q3 2024 performance presents a mixed picture. The company delivered positive surprises in terms of earnings and revenue, driven by strong loan repayments and an improving market environment. However, the reported GAAP net loss and the potential impact of lower SOFR warrant attention. While management expresses optimism about future lending opportunities, navigating the competitive private equity landscape and potential headwinds in the commercial real estate sector will be crucial for KREF's continued success.
+
+
+**References**
+
+MarketBeat. (2024, October 21). *KKR Real Estate Finance Trust (NYSE:KREF) Q3 2024 Earnings Report on 10/21/2024*. https://www.marketbeat.com/earnings/reports/2024-10-21-kkr-real-estate-finance-trust-inc-stock/
+
+Public. (2024, October 22). *KREF Earnings: Latest Report, Earnings Call & Financials*. https://public.com/stocks/kref/earnings
+
+Yahoo Finance. (2024, October 21). *Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result*. https://finance.yahoo.com/news/earnings-watch-kkr-real-estate-130413678.html
+
+Yahoo Finance. (2024, October 22). *Q3 2024 KKR Real Estate Finance Trust Inc Earnings Call*. https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html
+
+DCF.fm. (2024, November 16). *What are the Porter’s Five Forces of KKR & Co. Inc. (KKR)? – DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates*. https://dcf.fm/products/kkr-porters-five-forces-analysis
+
+MarketScreener. (2024, October 21). *KKR Real Estate: Q3 Earnings Snapshot -October 21, 2024 at 04:37 pm EDT*. https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/
+
+
diff --git a/outputs/task_1737317728_How did KKR Real Estate perform during Q3 20.pdf b/outputs/task_1737317728_How did KKR Real Estate perform during Q3 20.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..acccdaec669c0882caa9c746bd667d9f10c7f498
Binary files /dev/null and b/outputs/task_1737317728_How did KKR Real Estate perform during Q3 20.pdf differ
diff --git a/outputs/task_1737317728_How did KKR Real Estate perform during Q3 2024.json b/outputs/task_1737317728_How did KKR Real Estate perform during Q3 2024.json
new file mode 100644
index 0000000000000000000000000000000000000000..0be97672db5545c0d60ea2cb18f044917f7928a2
--- /dev/null
+++ b/outputs/task_1737317728_How did KKR Real Estate perform during Q3 2024.json
@@ -0,0 +1,705 @@
+{
+ "timestamp": "2025-01-20T01:45:28.869465",
+ "events": [
+ {
+ "timestamp": "2025-01-20T01:45:34.081445",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'How did KKR Real Estate perform during Q3 2024'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:34.091682",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83d\udcb0 Finance Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:34.099207",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: How did KKR Real Estate perform during Q3 2024...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:38.542561",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:42.239849",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['KKR Real Estate Finance Trust (KREF) Q3 2024 earnings report', 'KKR Real Estate Q3 2024 performance overview', \"Analysis of KKR Real Estate's $4.5 billion 2024 US equity investments\", 'KKR Real Estate Finance Trust (KREF) Q3 2024 financial results investor presentation', 'How did KKR Real Estate perform during Q3 2024']...",
+ "metadata": [
+ "KKR Real Estate Finance Trust (KREF) Q3 2024 earnings report",
+ "KKR Real Estate Q3 2024 performance overview",
+ "Analysis of KKR Real Estate's $4.5 billion 2024 US equity investments",
+ "KKR Real Estate Finance Trust (KREF) Q3 2024 financial results investor presentation",
+ "How did KKR Real Estate perform during Q3 2024"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:42.262574",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'KKR Real Estate Finance Trust (KREF) Q3 2024 earnings report'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:42.274456",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'KKR Real Estate Q3 2024 performance overview'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:42.285895",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Analysis of KKR Real Estate's $4.5 billion 2024 US equity investments'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:42.297404",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'KKR Real Estate Finance Trust (KREF) Q3 2024 financial results investor presentation'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:42.308341",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'How did KKR Real Estate perform during Q3 2024'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:44.905977",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.zacks.com/stock/news/2354330/kkr-real-estate-finance-kref-q3-earnings-surpass-estimates\n",
+ "metadata": "https://www.zacks.com/stock/news/2354330/kkr-real-estate-finance-kref-q3-earnings-surpass-estimates"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:44.917733",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://folikoinsights.com/article/KKR/2024/11/04/kkr-reports-strong-q3-2024-performance-with-58-rise-in-adjusted-net-income-and-plans-20-billion-buyo\n",
+ "metadata": "https://folikoinsights.com/article/KKR/2024/11/04/kkr-reports-strong-q3-2024-performance-with-58-rise-in-adjusted-net-income-and-plans-20-billion-buyo"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:44.929657",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://finance.yahoo.com/news/heres-key-metrics-tell-us-223009890.html\n",
+ "metadata": "https://finance.yahoo.com/news/heres-key-metrics-tell-us-223009890.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:44.939566",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\n",
+ "metadata": "https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:44.951247",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.kkrreit.com/for-investors/news-and-events/press-releases/2024/10-21-2024-211546134\n",
+ "metadata": "https://www.kkrreit.com/for-investors/news-and-events/press-releases/2024/10-21-2024-211546134"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:44.963560",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:44.974810",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:47.969500",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:47.981119",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:47.992877",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:48.002902",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: How did KKR Real Estate perform during Q3 2024...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:50.589686",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.marketbeat.com/stocks/NYSE/KREF/earnings/\n",
+ "metadata": "https://www.marketbeat.com/stocks/NYSE/KREF/earnings/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:50.601430",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://public.com/stocks/kref/earnings\n",
+ "metadata": "https://public.com/stocks/kref/earnings"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:50.613400",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://finance.yahoo.com/news/earnings-watch-kkr-real-estate-130413678.html\n",
+ "metadata": "https://finance.yahoo.com/news/earnings-watch-kkr-real-estate-130413678.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:50.624822",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.marketbeat.com/earnings/reports/2024-10-21-kkr-real-estate-finance-trust-inc-stock/\n",
+ "metadata": "https://www.marketbeat.com/earnings/reports/2024-10-21-kkr-real-estate-finance-trust-inc-stock/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:50.636083",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:50.648033",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:51.881492",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:51.893217",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 12 total images",
+ "metadata": [
+ "https://www.marketbeat.com/logos/articles/thumb_20241104115452_options-traders-bet-big-on-these-3-tech-stocks.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20241101152430_how-to-play-new-options-trading-with-bitcoin-etfs.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240718150215_how-to-execute-the-wheel-strategy-to-generate-opti.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240626075418_3-options-strategies-to-play-a-stocks-uptrend-if-b.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:51.903869",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:51.916696",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: KKR Real Estate Finance Trust (KREF) Q3 2024 earnings report...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:52.085234",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://finance.yahoo.com/news/kkr-real-estate-finance-trust-070852763.html\n",
+ "metadata": "https://finance.yahoo.com/news/kkr-real-estate-finance-trust-070852763.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:52.099339",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.streetinsider.com/Business+Wire/KKR+Real+Estate+Finance+Trust+Inc.+Reports+Third+Quarter+2024+Results/23859269.html\n",
+ "metadata": "https://www.streetinsider.com/Business+Wire/KKR+Real+Estate+Finance+Trust+Inc.+Reports+Third+Quarter+2024+Results/23859269.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:52.116760",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.morningstar.com/news/business-wire/20241021632422/kkr-real-estate-finance-trust-inc-reports-third-quarter-2024-results\n",
+ "metadata": "https://www.morningstar.com/news/business-wire/20241021632422/kkr-real-estate-finance-trust-inc-reports-third-quarter-2024-results"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:52.130112",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.kkrreit.com/for-investors/news-and-events/events-and-presentations\n",
+ "metadata": "https://www.kkrreit.com/for-investors/news-and-events/events-and-presentations"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:52.142311",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:52.153521",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:54.114208",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:54.126560",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:54.136931",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:54.149359",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: KKR Real Estate Finance Trust (KREF) Q3 2024 financial results investor presentation...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:54.267867",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.morningstar.com/news/business-wire/20241024827166/kkr-co-inc-reports-third-quarter-2024-results\n",
+ "metadata": "https://www.morningstar.com/news/business-wire/20241024827166/kkr-co-inc-reports-third-quarter-2024-results"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:54.279747",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html\n",
+ "metadata": "https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:54.292197",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:54.303467",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 2 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.415400",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.433347",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.450402",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.466154",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: KKR Real Estate Q3 2024 performance overview...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.628254",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.fitchratings.com/research/non-bank-financial-institutions/fitch-affirms-kkr-at-a-outlook-stable-18-10-2024\n",
+ "metadata": "https://www.fitchratings.com/research/non-bank-financial-institutions/fitch-affirms-kkr-at-a-outlook-stable-18-10-2024"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.640648",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://dcf.fm/products/kkr-porters-five-forces-analysis\n",
+ "metadata": "https://dcf.fm/products/kkr-porters-five-forces-analysis"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.652130",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.kkrreit.com/~/media/Files/K/KKR-V2/reports-and-presentations/kref-2q24-transcript.pdf\n",
+ "metadata": "https://www.kkrreit.com/~/media/Files/K/KKR-V2/reports-and-presentations/kref-2q24-transcript.pdf"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.664630",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.kkr.com/content/dam/kkr/insights/pdf/market-review-real-estate-april-2024.pdf\n",
+ "metadata": "https://www.kkr.com/content/dam/kkr/insights/pdf/market-review-real-estate-april-2024.pdf"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.677557",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:45:56.689791",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:01.477989",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 1 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:01.492953",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://dcfmodeling.com/cdn/shop/files/kkr.png?v=1728124918&width=1100"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:01.508884",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:01.523686",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Analysis of KKR Real Estate's $4.5 billion 2024 US equity investments...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:09.126095",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: wield significant power, exacerbated by intense competition among private equity firms. The competitive rivalry is fierce, with numerous players vying for market share, while the threat of substitutes looms as alternative investment options gain traction. Lastly, despite high capital requirements and regulatory hurdles, the threat of new entrants persists, though KKR\u2019s established brand and network provide substantial defenses. Navigating these forces will be crucial for KKR\u2019s sustained growth and market positioning in 2024. Updated on 16 Nov 2024 Resources: KKR & Co. Inc. (KKR) Financial Statements \u2013 Access the full quarterly financial statements for Q3 2024 to get an in-depth view of KKR & Co. Inc. (KKR)' financial performance, including balance sheets, income statements, and cash flow statements. SEC Filings \u2013 View KKR & Co. Inc. (KKR)' latest filings with the U.S. Securities and Exchange Commission (SEC) for regulatory reports, annual and quarterly filings, and other essential\n\nSource: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: wield significant power, exacerbated by intense competition among private equity firms. The competitive rivalry is fierce, with numerous players vying for market share, while the threat of substitutes looms as alternative investment options gain traction. Lastly, despite high capital requirements and regulatory hurdles, the threat of new entrants persists, though KKR\u2019s established brand and network provide substantial defenses. Navigating these forces will be crucial for KKR\u2019s sustained growth and market positioning in 2024. Updated on 16 Nov 2024 Resources: KKR & Co. Inc. (KKR) Financial Statements \u2013 Access the full quarterly financial statements for Q3 2024 to get an in-depth view of KKR & Co. Inc. (KKR)' financial performance, including balance sheets, income statements, and cash flow statements. SEC Filings \u2013 View KKR & Co. Inc. (KKR)' latest filings with the U.S. Securities and Exchange Commission (SEC) for regulatory reports, annual and quarterly filings, and other essential\n\nSource: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: wield significant power, exacerbated by intense competition among private equity firms. The competitive rivalry is fierce, with numerous players vying for market share, while the threat of substitutes looms as alternative investment options gain traction. Lastly, despite high capital requirements and regulatory hurdles, the threat of new entrants persists, though KKR\u2019s established brand and network provide substantial defenses. Navigating these forces will be crucial for KKR\u2019s sustained growth and market positioning in 2024. Updated on 16 Nov 2024 Resources: KKR & Co. Inc. (KKR) Financial Statements \u2013 Access the full quarterly financial statements for Q3 2024 to get an in-depth view of KKR & Co. Inc. (KKR)' financial performance, including balance sheets, income statements, and cash flow statements. SEC Filings \u2013 View KKR & Co. Inc. (KKR)' latest filings with the U.S. Securities and Exchange Commission (SEC) for regulatory reports, annual and quarterly filings, and other essential\n\nSource: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: wield significant power, exacerbated by intense competition among private equity firms. The competitive rivalry is fierce, with numerous players vying for market share, while the threat of substitutes looms as alternative investment options gain traction. Lastly, despite high capital requirements and regulatory hurdles, the threat of new entrants persists, though KKR\u2019s established brand and network provide substantial defenses. Navigating these forces will be crucial for KKR\u2019s sustained growth and market positioning in 2024. Updated on 16 Nov 2024 Resources: KKR & Co. Inc. (KKR) Financial Statements \u2013 Access the full quarterly financial statements for Q3 2024 to get an in-depth view of KKR & Co. Inc. (KKR)' financial performance, including balance sheets, income statements, and cash flow statements. SEC Filings \u2013 View KKR & Co. Inc. (KKR)' latest filings with the U.S. Securities and Exchange Commission (SEC) for regulatory reports, annual and quarterly filings, and other essential\n\nSource: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: wield significant power, exacerbated by intense competition among private equity firms. The competitive rivalry is fierce, with numerous players vying for market share, while the threat of substitutes looms as alternative investment options gain traction. Lastly, despite high capital requirements and regulatory hurdles, the threat of new entrants persists, though KKR\u2019s established brand and network provide substantial defenses. Navigating these forces will be crucial for KKR\u2019s sustained growth and market positioning in 2024. Updated on 16 Nov 2024 Resources: KKR & Co. Inc. (KKR) Financial Statements \u2013 Access the full quarterly financial statements for Q3 2024 to get an in-depth view of KKR & Co. Inc. (KKR)' financial performance, including balance sheets, income statements, and cash flow statements. SEC Filings \u2013 View KKR & Co. Inc. (KKR)' latest filings with the U.S. Securities and Exchange Commission (SEC) for regulatory reports, annual and quarterly filings, and other essential\n\nSource: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: large institutional clients, wield significant power, exacerbated by intense competition among private equity firms. The competitive rivalry is fierce, with numerous players vying for market share, while the threat of substitutes looms as alternative investment options gain traction. Lastly, despite high capital requirements and regulatory hurdles, the threat of new entrants persists, though KKR\u2019s established brand and network provide substantial defenses. Navigating these forces will be crucial for KKR\u2019s sustained growth and market positioning in 2024. Updated on 16 Nov 2024 Resources: KKR & Co. Inc. (KKR) Financial Statements \u2013 Access the full quarterly financial statements for Q3 2024 to get an in-depth view of KKR & Co. Inc. (KKR)' financial performance, including balance sheets, income statements, and cash flow statements. SEC Filings \u2013 View KKR & Co. Inc. (KKR)' latest filings with the U.S. Securities and Exchange Commission (SEC) for regulatory reports, annual and quarterly\n\nSource: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: High exit barriers due to long-term investment commitments\nHigh exit barriers exist within private equity, as investments typically require long-term commitments. KKR's funds often have durations of 10 years or more, which limits the ability to quickly liquidate investments. As of September 30, 2024, KKR reported uncalled commitments of approximately $8.2 billion across various investment vehicles. This long-term horizon can deter new entrants but also intensifies competition among existing firms vying for investor capital and market share.\nOngoing innovation in investment strategies heightens competition\n\nSource: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: shaped by Michael Porter\u2019s Five Forces Framework. The bargaining power of suppliers remains moderate, influenced by industry consolidation and the demand for specialized financial services. On the other hand, customers, particularly large institutional clients, wield significant power, exacerbated by intense competition among private equity firms. The competitive rivalry is fierce, with numerous players vying for market share, while the threat of substitutes looms as alternative investment options gain traction. Lastly, despite high capital requirements and regulatory hurdles, the threat of new entrants persists, though KKR\u2019s established brand and network provide substantial defenses. Navigating these forces will be crucial for KKR\u2019s sustained growth and market positioning in 2024. Updated on 16 Nov 2024 Resources: KKR & Co. Inc. (KKR) Financial Statements \u2013 Access the full quarterly financial statements for Q3 2024 to get an in-depth view of KKR & Co. Inc. (KKR)' financial\n\nSource: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: investors and deals KKR benefits from strong network effects, where its existing relationships with investors and portfolio companies enhance its ability to attract further investments. This network is reinforced by KKR's $104.30 billion in investments as of September 30, 2024. New entrants face the challenge of building similar networks from scratch, which can take years and substantial effort. Key Metrics Value Total GAAP Assets $360.66 billion Total GAAP Revenues (2024) $18.62 billion Capital Allocation-Based Income (2024) $(3.16 billion) Total Investments $104.30 billion In summary, KKR & Co. Inc. faces a complex landscape shaped by Michael Porter\u2019s Five Forces Framework. The bargaining power of suppliers remains moderate, influenced by industry consolidation and the demand for specialized financial services. On the other hand, customers, particularly large institutional clients, wield significant power, exacerbated by intense competition among private equity firms. The\n\nSource: https://dcf.fm/products/kkr-porters-five-forces-analysis\nTitle: What are the Porter\u2019s Five Forces of KKR & Co. Inc. (KKR)? \u2013 DCF, SWOT, CANVAS, PESTEL, BCG Editable Templates\nContent: investors and deals KKR benefits from strong network effects, where its existing relationships with investors and portfolio companies enhance its ability to attract further investments. This network is reinforced by KKR's $104.30 billion in investments as of September 30, 2024. New entrants face the challenge of building similar networks from scratch, which can take years and substantial effort. Key Metrics Value Total GAAP Assets $360.66 billion Total GAAP Revenues (2024) $18.62 billion Capital Allocation-Based Income (2024) $(3.16 billion) Total Investments $104.30 billion In summary, KKR & Co. Inc. faces a complex landscape shaped by Michael Porter\u2019s Five Forces Framework. The bargaining power of suppliers remains moderate, influenced by industry consolidation and the demand for specialized financial services. On the other hand, customers, particularly large institutional clients, wield significant power, exacerbated by intense competition among private equity firms. The\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:11.131647",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\nTitle: KKR Real Estate: Q3 Earnings Snapshot -October 21, 2024 at 04:37 pm EDT | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies KKR Real Estate: Q3 Earnings Snapshot October 21, 2024 at 04:37 pm EDT Share NEW YORK (AP) \u2014 NEW YORK (AP) \u2014 KKR Real Estate Finance Trust Inc. (KREF) on Monday reported a loss of $7.4 million in its third quarter. On a per-share basis, the New York-based company said it had a loss of 19 cents. Earnings, adjusted for non-recurring costs and stock option expense, came to 40 cents per share. The real estate finance company posted revenue of $140.2 million in the period. Its adjusted revenue was $37\n\nSource: https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\nTitle: KKR Real Estate: Q3 Earnings Snapshot -October 21, 2024 at 04:37 pm EDT | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies KKR Real Estate: Q3 Earnings Snapshot October 21, 2024 at 04:37 pm EDT Share NEW YORK (AP) \u2014 NEW YORK (AP) \u2014 KKR Real Estate Finance Trust Inc. (KREF) on Monday reported a loss of $7.4 million in its third quarter. On a per-share basis, the New York-based company said it had a loss of 19 cents. Earnings, adjusted for non-recurring costs and stock option expense, came to 40 cents per share. The real estate finance company posted revenue of $140.2 million in the period. Its adjusted revenue was $37\n\nSource: https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\nTitle: KKR Real Estate: Q3 Earnings Snapshot -October 21, 2024 at 04:37 pm EDT | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies KKR Real Estate: Q3 Earnings Snapshot October 21, 2024 at 04:37 pm EDT Share NEW YORK (AP) \u2014 NEW YORK (AP) \u2014 KKR Real Estate Finance Trust Inc. (KREF) on Monday reported a loss of $7.4 million in its third quarter. On a per-share basis, the New York-based company said it had a loss of 19 cents. Earnings, adjusted for non-recurring costs and stock option expense, came to 40 cents per share. The real estate finance company posted revenue of $140.2 million in the period. Its adjusted revenue was $37\n\nSource: https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\nTitle: KKR Real Estate: Q3 Earnings Snapshot -October 21, 2024 at 04:37 pm EDT | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies KKR Real Estate: Q3 Earnings Snapshot October 21, 2024 at 04:37 pm EDT Share NEW YORK (AP) \u2014 NEW YORK (AP) \u2014 KKR Real Estate Finance Trust Inc. (KREF) on Monday reported a loss of $7.4 million in its third quarter. On a per-share basis, the New York-based company said it had a loss of 19 cents. Earnings, adjusted for non-recurring costs and stock option expense, came to 40 cents per share. The real estate finance company posted revenue of $140.2 million in the period. Its adjusted revenue was $37\n\nSource: https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\nTitle: KKR Real Estate: Q3 Earnings Snapshot -October 21, 2024 at 04:37 pm EDT | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies KKR Real Estate: Q3 Earnings Snapshot October 21, 2024 at 04:37 pm EDT Share NEW YORK (AP) \u2014 NEW YORK (AP) \u2014 KKR Real Estate Finance Trust Inc. (KREF) on Monday reported a loss of $7.4 million in its third quarter. On a per-share basis, the New York-based company said it had a loss of 19 cents. Earnings, adjusted for non-recurring costs and stock option expense, came to 40 cents per share. The real estate finance company posted revenue of $140.2 million in the period. Its adjusted revenue was $37\n\nSource: https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\nTitle: KKR Real Estate: Q3 Earnings Snapshot -October 21, 2024 at 04:37 pm EDT | MarketScreener\nContent: Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies KKR Real Estate: Q3 Earnings Snapshot October 21, 2024 at 04:37 pm EDT Share NEW YORK (AP) \u2014 NEW YORK (AP) \u2014 KKR Real Estate Finance Trust Inc. (KREF) on Monday reported a loss of $7.4 million in its third quarter. On a per-share basis, the New York-based company said it had a loss of 19 cents. Earnings, adjusted for non-recurring costs and stock option expense, came to 40 cents per share. The real estate finance company posted revenue of $140.2 million in the period. Its adjusted revenue was $37 million. _____ This story was generated by Automated Insights (http://automatedinsights.com/ap) using data from Zacks Investment Research. Access a Zacks stock report on KREF at\n\nSource: https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\nTitle: KKR Real Estate: Q3 Earnings Snapshot -October 21, 2024 at 04:37 pm EDT | MarketScreener\nContent: Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies KKR Real Estate: Q3 Earnings Snapshot October 21, 2024 at 04:37 pm EDT Share NEW YORK (AP) \u2014 NEW YORK (AP) \u2014 KKR Real Estate Finance Trust Inc. (KREF) on Monday reported a loss of $7.4 million in its third quarter. On a per-share basis, the New York-based company said it had a loss of 19 cents. Earnings, adjusted for non-recurring costs and stock option expense, came to 40 cents per share. The real estate finance company posted revenue of $140.2 million in the period. Its adjusted revenue was $37 million. _____ This story was generated by Automated Insights (http://automatedinsights.com/ap) using data from Zacks Investment Research. Access a Zacks stock report on KREF at\n\nSource: https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\nTitle: KKR Real Estate: Q3 Earnings Snapshot -October 21, 2024 at 04:37 pm EDT | MarketScreener\nContent: KKR Real Estate: Q3 Earnings Snapshot October 21, 2024 at 04:37 pm EDT Share NEW YORK (AP) \u2014 NEW YORK (AP) \u2014 KKR Real Estate Finance Trust Inc. (KREF) on Monday reported a loss of $7.4 million in its third quarter. On a per-share basis, the New York-based company said it had a loss of 19 cents. Earnings, adjusted for non-recurring costs and stock option expense, came to 40 cents per share. The real estate finance company posted revenue of $140.2 million in the period. Its adjusted revenue was $37 million. _____ This story was generated by Automated Insights (http://automatedinsights.com/ap) using data from Zacks Investment Research. Access a Zacks stock report on KREF at https://www.zacks.com/ap/KREFFor copyright information, check with the distributor of this item, STATS Perform dba Automated Insights. , source Associated Press News Share \u00a9 Acquiremedia - 2024\nKKR Real Estate: Q3 Earnings Snapshot\nOctober 21, 2024 at 04:37 pm EDT Share\nOctober 21, 2024 at 04:37 pm EDT\n\nSource: https://www.kkrreit.com/for-investors/news-and-events/press-releases/2024/10-21-2024-211546134\nTitle: \r\n\tKKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results \u2013 KKR Real Estate Finance Trust\r\n\nContent: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results\n\nSource: https://www.kkrreit.com/for-investors/news-and-events/press-releases/2024/10-21-2024-211546134\nTitle: \r\n\tKKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results \u2013 KKR Real Estate Finance Trust\r\n\nContent: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results\nOct 21, 2024\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:13.645946",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.kkrreit.com/for-investors/news-and-events/events-and-presentations\nTitle: \r\n\tEvents & Presentations \u2013 KKR Real Estate Finance Trust\r\n\nContent: KREF Investor Presentation \u2013 October 2024 Oct 29, 2024 04:30 PM Presentation\nKREF Investor Presentation \u2013 October 2024 Oct 29, 2024 04:30 PM\nKREF Investor Presentation \u2013 October 2024\nOct 29, 2024 04:30 PM\nKKR Real Estate Finance Trust Inc. Third Quarter 2024 Financial Results Conference Call Oct 22, 2024 10:00 AM Webcast KKR Real Estate Finance Trust Inc. Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended September 30th, 2024 View Transcript\nKKR Real Estate Finance Trust Inc. Third Quarter 2024 Financial Results Conference Call Oct 22, 2024 10:00 AM\nKKR Real Estate Finance Trust Inc. Third Quarter 2024 Financial Results Conference Call\nOct 22, 2024 10:00 AM\nWebcast KKR Real Estate Finance Trust Inc. Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended September 30th, 2024 View Transcript\nKKR Real Estate Finance Trust Inc. Third Quarter 2024 Results\n\nSource: https://www.kkrreit.com/for-investors/news-and-events/events-and-presentations\nTitle: \r\n\tEvents & Presentations \u2013 KKR Real Estate Finance Trust\r\n\nContent: Oct 24, 2023 10:00 AM\nWebcast KKR Real Estate Finance Trust Inc. Third Quarter 2023 Results KKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended Sept 30th, 2023 View Transcript\nKKR Real Estate Finance Trust Inc. Third Quarter 2023 Results\nKKR Real Estate Finance Trust Inc. Third Quarter 2023 Results\nKKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended Sept 30th, 2023\nKKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended Sept 30th, 2023\nKREF Investor Presentation \u2013 July 2023 Jul 28, 2023 06:06 PM Presentation\nKREF Investor Presentation \u2013 July 2023 Jul 28, 2023 06:06 PM\nKREF Investor Presentation \u2013 July 2023\nJul 28, 2023 06:06 PM\n\nSource: https://www.kkrreit.com/for-investors/news-and-events/events-and-presentations\nTitle: \r\n\tEvents & Presentations \u2013 KKR Real Estate Finance Trust\r\n\nContent: KREF Investor Presentation \u2013 July 2023\nJul 28, 2023 06:06 PM\nKKR Real Estate Finance Trust Inc. Second Quarter 2023 Financial Results Conference Call Jul 25, 2023 10:00 AM KKR Real Estate Finance Trust Inc. Second Quarter 2023 Results KKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended June 30th, 2023 View Transcript\nKKR Real Estate Finance Trust Inc. Second Quarter 2023 Financial Results Conference Call Jul 25, 2023 10:00 AM\nKKR Real Estate Finance Trust Inc. Second Quarter 2023 Financial Results Conference Call\nJul 25, 2023 10:00 AM\nKKR Real Estate Finance Trust Inc. Second Quarter 2023 Results KKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended June 30th, 2023 View Transcript\nKKR Real Estate Finance Trust Inc. Second Quarter 2023 Results\nKKR Real Estate Finance Trust Inc. Second Quarter 2023 Results\nKKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended June 30th, 2023\n\nSource: https://www.kkrreit.com/for-investors/news-and-events/events-and-presentations\nTitle: \r\n\tEvents & Presentations \u2013 KKR Real Estate Finance Trust\r\n\nContent: KKR Real Estate Finance Trust Inc. Third Quarter 2024 Results\nKKR Real Estate Finance Trust Inc. Third Quarter 2024 Results\nKKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended September 30th, 2024\nKKR Real Estate Finance Trust Inc. Supplemental Information for the Quarter Ended September 30th, 2024\nDeutsche Bank's 32nd Annual Leveraged Finance Conference Sep 25, 2024 12:20 PM Access to the conference webcast is available here\nDeutsche Bank's 32nd Annual Leveraged Finance Conference Sep 25, 2024 12:20 PM\nDeutsche Bank's 32nd Annual Leveraged Finance Conference\nSep 25, 2024 12:20 PM\nAccess to the conference webcast is available here\nAccess to the conference webcast is available here\nAccess to the conference webcast is available here\nKREF Investor Presentation \u2013 July 2024 Jul 29, 2024 04:30 PM Presentation\nKREF Investor Presentation \u2013 July 2024 Jul 29, 2024 04:30 PM\nKREF Investor Presentation \u2013 July 2024\nJul 29, 2024 04:30 PM\n\nSource: https://www.morningstar.com/news/business-wire/20241021632422/kkr-real-estate-finance-trust-inc-reports-third-quarter-2024-results\nTitle: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results\nKKR Real Estate Finance Trust Inc. (\u201cKREF\u201d) (NYSE: KREF) today reported its third quarter 2024 results, which have been posted to the Investor Relations section of KREF\u2019s website at http://www.kkrreit.com/investor-relations/events-and-presentations.\nThis press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241021632422/en/\nA conference call to discuss KREF\u2019s financial results will be held on Tuesday, October 22, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (844) 784-1730 (U.S. callers) or +1 (412) 380-7410 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may be accessed at http://www.kkrreit.com/investor-relations/events-and-presentations.\n\nSource: https://www.morningstar.com/news/business-wire/20241021632422/kkr-real-estate-finance-trust-inc-reports-third-quarter-2024-results\nTitle: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. (\u201cKREF\u201d) (NYSE: KREF) today reported its third quarter 2024 results, which have been posted to the Investor Relations section of KREF\u2019s website at http://www.kkrreit.com/investor-relations/events-and-presentations. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241021632422/en/ A conference call to discuss KREF\u2019s financial results will be held on Tuesday, October 22, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (844) 784-1730 (U.S. callers) or +1 (412) 380-7410 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may be accessed at http://www.kkrreit.com/investor-relations/events-and-presentations. A replay of the live broadcast will be available on KREF\u2019s website or by dialing (877) 344-7529 (U.S. callers) or +1 (412)\n\nSource: https://www.morningstar.com/news/business-wire/20241021632422/kkr-real-estate-finance-trust-inc-reports-third-quarter-2024-results\nTitle: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. (\u201cKREF\u201d) (NYSE: KREF) today reported its third quarter 2024 results, which have been posted to the Investor Relations section of KREF\u2019s website at http://www.kkrreit.com/investor-relations/events-and-presentations. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241021632422/en/ A conference call to discuss KREF\u2019s financial results will be held on Tuesday, October 22, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (844) 784-1730 (U.S. callers) or +1 (412) 380-7410 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may be accessed at http://www.kkrreit.com/investor-relations/events-and-presentations. A replay of the live broadcast will be available on KREF\u2019s website or by dialing (877) 344-7529 (U.S. callers) or +1 (412)\n\nSource: https://www.morningstar.com/news/business-wire/20241021632422/kkr-real-estate-finance-trust-inc-reports-third-quarter-2024-results\nTitle: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: Home News Business Wire KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results Provided by Business Wire Oct 21, 2024 8:15pm KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. (\u201cKREF\u201d) (NYSE: KREF) today reported its third quarter 2024 results, which have been posted to the Investor Relations section of KREF\u2019s website at http://www.kkrreit.com/investor-relations/events-and-presentations. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241021632422/en/ A conference call to discuss KREF\u2019s financial results will be held on Tuesday, October 22, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (844) 784-1730 (U.S. callers) or +1 (412) 380-7410 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may\n\nSource: https://www.morningstar.com/news/business-wire/20241021632422/kkr-real-estate-finance-trust-inc-reports-third-quarter-2024-results\nTitle: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: Home News Business Wire KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results Provided by Business Wire Oct 21, 2024 8:15pm KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. (\u201cKREF\u201d) (NYSE: KREF) today reported its third quarter 2024 results, which have been posted to the Investor Relations section of KREF\u2019s website at http://www.kkrreit.com/investor-relations/events-and-presentations. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241021632422/en/ A conference call to discuss KREF\u2019s financial results will be held on Tuesday, October 22, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (844) 784-1730 (U.S. callers) or +1 (412) 380-7410 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may\n\nSource: https://www.morningstar.com/news/business-wire/20241021632422/kkr-real-estate-finance-trust-inc-reports-third-quarter-2024-results\nTitle: KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: Home News Business Wire KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results Provided by Business Wire Oct 21, 2024 8:15pm KKR Real Estate Finance Trust Inc. Reports Third Quarter 2024 Results KKR Real Estate Finance Trust Inc. (\u201cKREF\u201d) (NYSE: KREF) today reported its third quarter 2024 results, which have been posted to the Investor Relations section of KREF\u2019s website at http://www.kkrreit.com/investor-relations/events-and-presentations. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241021632422/en/ A conference call to discuss KREF\u2019s financial results will be held on Tuesday, October 22, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (844) 784-1730 (U.S. callers) or +1 (412) 380-7410 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:18.708898",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html\nTitle: Q3 2024 KKR Real Estate Finance Trust Inc Earnings Call\nContent: value per share decreased 2.6% quarter over quarter to $14.84 per share as of September 30, 2024. Distributable earnings this quarter were $25.9 million or $0.37 per share relative to our Q3 $0.25 per share dividend. With that, I'd now like to turn the call over to Matt. Matthew Salem Thank you, Jack. Good morning, everyone, and thank you for joining our call today. Before going into third quarter results, I'd like to spend some time on a market update. As we enter an interest rate cut cycle, there's increased confidence in growing consensus that lower interest rates will provide tailwinds for commercial real estate property values. We are seeing improved transaction volumes within our own real estate credit pipeline, which currently averages approximately $20 billion a week, up 40% from the beginning of the year. And we have strong conviction that there is a significant lending opportunity ahead of us. From a KKR Real Estate equity perspective, 2024 has been our most active year\n\nSource: https://www.morningstar.com/news/business-wire/20241024827166/kkr-co-inc-reports-third-quarter-2024-results\nTitle: KKR & Co. Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: KKR & Co. Inc. Reports Third Quarter 2024 Results\nKKR & Co. Inc. Reports Third Quarter 2024 Results\nKKR & Co. Inc. Reports Third Quarter 2024 Results\nKKR & Co. Inc. Reports Third Quarter 2024 Results\nProvided by Business Wire Oct 24, 2024 10:50am\nProvided by Business Wire Oct 24, 2024 10:50am\nProvided by Business Wire\nOct 24, 2024 10:50am\n\nSource: https://www.morningstar.com/news/business-wire/20241024827166/kkr-co-inc-reports-third-quarter-2024-results\nTitle: KKR & Co. Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: KKR & Co. Inc. Reports Third Quarter 2024 Results KKR & Co. Inc. (NYSE: KKR) today reported its third quarter 2024 results, which have been posted to the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241024827166/en/ A conference call to discuss KKR\u2019s financial results will be held today, Thursday, October 24, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (877) 407-0312 (U.S. callers) or +1 (201) 389-0899 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may be accessed through the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. A replay of the live broadcast will be available on KKR\u2019s website beginning approximately one hour after the broadcast. ABOUT KKR KKR is a leading global investment firm that\n\nSource: https://www.morningstar.com/news/business-wire/20241024827166/kkr-co-inc-reports-third-quarter-2024-results\nTitle: KKR & Co. Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: KKR & Co. Inc. Reports Third Quarter 2024 Results KKR & Co. Inc. (NYSE: KKR) today reported its third quarter 2024 results, which have been posted to the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241024827166/en/ A conference call to discuss KKR\u2019s financial results will be held today, Thursday, October 24, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (877) 407-0312 (U.S. callers) or +1 (201) 389-0899 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may be accessed through the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. A replay of the live broadcast will be available on KKR\u2019s website beginning approximately one hour after the broadcast. ABOUT KKR KKR is a leading global investment firm that\n\nSource: https://www.morningstar.com/news/business-wire/20241024827166/kkr-co-inc-reports-third-quarter-2024-results\nTitle: KKR & Co. Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: Home News Business Wire KKR & Co. Inc. Reports Third Quarter 2024 Results KKR & Co. Inc. Reports Third Quarter 2024 Results Provided by Business Wire Oct 24, 2024 10:50am KKR & Co. Inc. Reports Third Quarter 2024 Results KKR & Co. Inc. (NYSE: KKR) today reported its third quarter 2024 results, which have been posted to the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241024827166/en/ A conference call to discuss KKR\u2019s financial results will be held today, Thursday, October 24, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (877) 407-0312 (U.S. callers) or +1 (201) 389-0899 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may be accessed through the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. A\n\nSource: https://www.morningstar.com/news/business-wire/20241024827166/kkr-co-inc-reports-third-quarter-2024-results\nTitle: KKR & Co. Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: Home News Business Wire KKR & Co. Inc. Reports Third Quarter 2024 Results KKR & Co. Inc. Reports Third Quarter 2024 Results Provided by Business Wire Oct 24, 2024 10:50am KKR & Co. Inc. Reports Third Quarter 2024 Results KKR & Co. Inc. (NYSE: KKR) today reported its third quarter 2024 results, which have been posted to the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241024827166/en/ A conference call to discuss KKR\u2019s financial results will be held today, Thursday, October 24, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (877) 407-0312 (U.S. callers) or +1 (201) 389-0899 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may be accessed through the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. A\n\nSource: https://www.morningstar.com/news/business-wire/20241024827166/kkr-co-inc-reports-third-quarter-2024-results\nTitle: KKR & Co. Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: Home News Business Wire KKR & Co. Inc. Reports Third Quarter 2024 Results KKR & Co. Inc. Reports Third Quarter 2024 Results Provided by Business Wire Oct 24, 2024 10:50am KKR & Co. Inc. Reports Third Quarter 2024 Results KKR & Co. Inc. (NYSE: KKR) today reported its third quarter 2024 results, which have been posted to the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. This press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241024827166/en/ A conference call to discuss KKR\u2019s financial results will be held today, Thursday, October 24, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (877) 407-0312 (U.S. callers) or +1 (201) 389-0899 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may be accessed through the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. A\n\nSource: https://www.morningstar.com/news/business-wire/20241024827166/kkr-co-inc-reports-third-quarter-2024-results\nTitle: KKR & Co. Inc. Reports Third Quarter 2024 Results | Morningstar\nContent: KKR & Co. Inc. Reports Third Quarter 2024 Results\nKKR & Co. Inc. (NYSE: KKR) today reported its third quarter 2024 results, which have been posted to the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/.\nThis press release features multimedia. View the full release here: https://www.businesswire.com/news/home/20241024827166/en/\nA conference call to discuss KKR\u2019s financial results will be held today, Thursday, October 24, 2024 at 10:00 a.m. ET. The conference call may be accessed by dialing (877) 407-0312 (U.S. callers) or +1 (201) 389-0899 (non-U.S. callers); a pass code is not required. Additionally, the conference call will be broadcast live over the Internet and may be accessed through the Investor Center section of KKR\u2019s website at https://ir.kkr.com/events-presentations/. A replay of the live broadcast will be available on KKR\u2019s website beginning approximately one hour after the broadcast.\n\nSource: https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html\nTitle: Q3 2024 KKR Real Estate Finance Trust Inc Earnings Call\nContent: Switala Great. Thanks, operator, and welcome to the KKR Real Estate Finance Trust earnings call for the third quarter of 2024. As the operator mentioned, this is Jack Switala. This morning, I'm joined on the call by our CEO, Matt Salem; our President and COO, Patrick Mattson; and our CFO, Kendra Decious. I'd like to remind everyone that we will refer to certain non-GAAP financial measures on the call, which are reconciled to GAAP figures in our earnings release and in the supplementary presentation, both of which are available on the Investor Relations portion of our website. This call will also contain certain forward-looking statements, which do not guarantee future events or performance. Please refer to our most recently filed 10-Q for cautionary factors related to these statements. Before I turn the call over to Matt, I'll provide a brief recap of our results. For the third quarter of 2024, we reported GAAP net loss of negative $13 million or negative $0.19 per share, driven by a\n\nSource: https://finance.yahoo.com/news/q3-2024-kkr-real-estate-102413582.html\nTitle: Q3 2024 KKR Real Estate Finance Trust Inc Earnings Call\nContent: $0.40, covering our $0.25 dividend. While lower SOFR and our REO portfolio will impact earnings, we expect that DE ex-losses will continue to be higher than our dividend as we head into 2025. In the third quarter, we received $290 million in loan repayments compared to $55 million in fundings with full repayments across four loans, including multifamily, single-family rental, and an office loan secured by a property located in Oakland, California. In addition to this, post quarter end, we sold a $138 million office loan at par. Repayments have now exceeded fundings in five of the last six quarters. Additionally, future funding obligations are now reduced to 8% of the funded portfolio. Year-to-date, we have received over $1 billion in repayments compared to our original expectation of $1 billion for the full year. KREF as an externally managed vehicle benefits from access to resources and relationships from KKR's global platform. We are fully integrated into KKR's broader real estate\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:21.120509",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://finance.yahoo.com/news/earnings-watch-kkr-real-estate-130413678.html\nTitle: Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result\nContent: Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result\nEarnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result\nEarnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result\nGuruFocus News Mon, Oct 21, 2024, 6:34 PM 2 min read\nGuruFocus News Mon, Oct 21, 2024, 6:34 PM 2 min read\nGuruFocus News Mon, Oct 21, 2024, 6:34 PM 2 min read\nMon, Oct 21, 2024, 6:34 PM 2 min read\n2 min read\n\nSource: https://www.marketbeat.com/earnings/reports/2024-10-21-kkr-real-estate-finance-trust-inc-stock/\nTitle: \r\n\tKKR Real Estate Finance Trust (NYSE:KREF) Q3 2024 Earnings Report on 10/21/2024\r\n\nContent: KKR Real Estate Finance Trust Q3 2024 Earnings Report $10.60 +0.12 (+1.18%) As of 01/17/2025 03:59 PM Eastern This is a fair market value price provided by Polygon.io. Learn more. Earnings HistoryForecast KKR Real Estate Finance Trust EPS ResultsActual EPS$0.37Consensus EPS $0.34Beat/MissBeat by +$0.03One Year Ago EPS$0.47KKR Real Estate Finance Trust Revenue ResultsActual Revenue$140.15 millionExpected Revenue$39.52 millionBeat/MissBeat by +$100.63 millionYoY Revenue GrowthN/AKKR Real Estate Finance Trust Announcement DetailsQuarterQ3 2024Date10/21/2024TimeAfter Market ClosesKREF Upcoming EarningsKKR Real Estate Finance Trust will be holding an earnings conference call on Tuesday, February 4 at 9:00 AM Eastern. Interested parties can register for or listen to the call or dial in at 412-317-0088 using passcode \"4697062\". Conference Call ResourcesConference CallConference Call TranscriptSlide DeckPress ReleaseSEC FilingKREF Earnings HistorySlide DeckFull Screen Slide DeckPowered by KKR\n\nSource: https://public.com/stocks/kref/earnings\nTitle: KREF Earnings: Latest Report, Earnings Call & Financials\nContent: Signup for full access\nBrowse freeEarningsIncome statementBalance sheetCash flow\nEarningsIncome statementBalance sheetCash flow\nEarningsIncome statementBalance sheetCash flow\nKREF Latest EarningsThe value each KREF share was expected to gain vs. the value each share gained.KREF reported its most recent earnings on Oct 22, 2024 for Q3 2024, posting earnings per share (EPS) of $0.37. This exceeded analysts' expectations of $0.34 by 8.82%, marking a Beat.For comparison, KKR Real Estate Finance Trust reported EPS of $0.25 in the same quarter last year.The company is expected to announce its next earnings report on 02/03/2025, with analysts projecting an EPS of $0.33.\nKREF Latest Earnings\nThe value each KREF share was expected to gain vs. the value each share gained.\n\nSource: https://finance.yahoo.com/news/earnings-watch-kkr-real-estate-130413678.html\nTitle: Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result\nContent: Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result GuruFocus News Mon, Oct 21, 2024, 6:34 PM 2 min read In This Article: KREF +1.24% KKR Real Estate Finance Trust Inc (NYSE:KREF) is set to release its Q3 2024 earnings on Oct 22, 2024. The consensus estimate for Q3 2024 revenue is $37.87 million, and the earnings are expected to come in at $0.21 per share. The full year 2024's revenue is expected to be $155.12 million and the earnings are expected to be $0.51 per share. More detailed estimate data can be found on the Forecast page. KKR Real Estate Finance Trust Inc (NYSE:KREF) Estimates Trends Warning! GuruFocus has detected 8 Warning Signs with KREF. Over the past 90 days, full-year revenue estimates for KKR Real Estate Finance Trust Inc (NYSE:KREF) have been revised upward from $149.9 million to $155.12 million for 2024, and from $148.58 million to $153.34 million for 2025. Similarly, earnings estimates have increased from $0 per share to $0.51 per\n\nSource: https://finance.yahoo.com/news/earnings-watch-kkr-real-estate-130413678.html\nTitle: Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result\nContent: Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result GuruFocus News Mon, Oct 21, 2024, 6:34 PM 2 min read In This Article: KREF +1.24% KKR Real Estate Finance Trust Inc (NYSE:KREF) is set to release its Q3 2024 earnings on Oct 22, 2024. The consensus estimate for Q3 2024 revenue is $37.87 million, and the earnings are expected to come in at $0.21 per share. The full year 2024's revenue is expected to be $155.12 million and the earnings are expected to be $0.51 per share. More detailed estimate data can be found on the Forecast page. KKR Real Estate Finance Trust Inc (NYSE:KREF) Estimates Trends Warning! GuruFocus has detected 8 Warning Signs with KREF. Over the past 90 days, full-year revenue estimates for KKR Real Estate Finance Trust Inc (NYSE:KREF) have been revised upward from $149.9 million to $155.12 million for 2024, and from $148.58 million to $153.34 million for 2025. Similarly, earnings estimates have increased from $0 per share to $0.51 per\n\nSource: https://finance.yahoo.com/news/earnings-watch-kkr-real-estate-130413678.html\nTitle: Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result\nContent: Volatility Sectors Basic Materials Communication Services Consumer Cyclical Consumer Defensive Energy Financial Services Healthcare Industrials Real Estate Technology Utilities Research Screeners Calendar Stock Comparison Advanced Chart Currency Converter Personal Finance Credit Cards Banking Best HYSA Student Loans Personal Loans Insurance Mortgages Mortgage Calculator Taxes Videos Latest Editor's Picks Investing Insights Trending Stocks All Shows Morning Brief Opening Bid Wealth Invest ETF Report YF Awards 2024 Streaming Now \u2026 Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result GuruFocus News Mon, Oct 21, 2024, 6:34 PM 2 min read In This Article: KREF +1.24% KKR Real Estate Finance Trust Inc (NYSE:KREF) is set to release its Q3 2024 earnings on Oct 22, 2024. The consensus estimate for Q3 2024 revenue is $37.87 million, and the earnings are expected to come in at $0.21 per share. The full year 2024's revenue is expected to be $155.12 million and the\n\nSource: https://www.marketbeat.com/stocks/NYSE/KREF/earnings/\nTitle: \r\n\tKKR Real Estate Finance Trust (KREF) Earnings Date and Reports 2025\r\n\nContent: KKR Real Estate Finance Trust (KREF) Earnings Date, Estimates & Call Transcripts $10.60 +0.12 (+1.18%) As of 01/17/2025 03:59 PM Eastern This is a fair market value price provided by Polygon.io. Learn more. Add Compare Share Share Earnings Stock AnalysisAnalyst ForecastsChartCompetitorsDividendEarningsFinancialsHeadlinesInsider TradesOptions ChainOwnershipSEC FilingsShort InterestSustainabilityTrends KKR Real Estate Finance Trust Latest Earnings SummaryUpcoming Q4 Earnings DateFeb. 4Before Market OpensEstimatedActual EPS (Oct. 21) $0.37 Beat By $0.03 Consensus EPS (Oct. 21) $0.34 KKR Real Estate Finance Trust released Q3 2024 earnings on October 21, 2024, reporting an EPS of $0.37, which topped analysts' consensus estimates of $0.34 by $0.03. Quarterly revenue was reported to be $140.15 million, above the consensus estimate of $39.52 million. With a trailing EPS of -$0.30, KKR Real Estate Finance Trust's earnings are expected to decrease -15.00% next year, from $1.40 to $1.19 per\n\nSource: https://www.marketbeat.com/stocks/NYSE/KREF/earnings/\nTitle: \r\n\tKKR Real Estate Finance Trust (KREF) Earnings Date and Reports 2025\r\n\nContent: KKR Real Estate Finance Trust (KREF) Earnings Date, Estimates & Call Transcripts $10.60 +0.12 (+1.18%) As of 01/17/2025 03:59 PM Eastern This is a fair market value price provided by Polygon.io. Learn more. Add Compare Share Share Earnings Stock AnalysisAnalyst ForecastsChartCompetitorsDividendEarningsFinancialsHeadlinesInsider TradesOptions ChainOwnershipSEC FilingsShort InterestSustainabilityTrends KKR Real Estate Finance Trust Latest Earnings SummaryUpcoming Q4 Earnings DateFeb. 4Before Market OpensEstimatedActual EPS (Oct. 21) $0.37 Beat By $0.03 Consensus EPS (Oct. 21) $0.34 KKR Real Estate Finance Trust released Q3 2024 earnings on October 21, 2024, reporting an EPS of $0.37, which topped analysts' consensus estimates of $0.34 by $0.03. Quarterly revenue was reported to be $140.15 million, above the consensus estimate of $39.52 million. With a trailing EPS of -$0.30, KKR Real Estate Finance Trust's earnings are expected to decrease -15.00% next year, from $1.40 to $1.19 per\n\nSource: https://finance.yahoo.com/news/earnings-watch-kkr-real-estate-130413678.html\nTitle: Earnings To Watch: KKR Real Estate Finance Trust Inc (KREF) Reports Q3 2024 Result\nContent: In This Article: KREF +1.24% KKR Real Estate Finance Trust Inc (NYSE:KREF) is set to release its Q3 2024 earnings on Oct 22, 2024. The consensus estimate for Q3 2024 revenue is $37.87 million, and the earnings are expected to come in at $0.21 per share. The full year 2024's revenue is expected to be $155.12 million and the earnings are expected to be $0.51 per share. More detailed estimate data can be found on the Forecast page. KKR Real Estate Finance Trust Inc (NYSE:KREF) Estimates Trends Warning! GuruFocus has detected 8 Warning Signs with KREF. Over the past 90 days, full-year revenue estimates for KKR Real Estate Finance Trust Inc (NYSE:KREF) have been revised upward from $149.9 million to $155.12 million for 2024, and from $148.58 million to $153.34 million for 2025. Similarly, earnings estimates have increased from $0 per share to $0.51 per share for 2024, and from $0.96 per share to $1.09 per share for 2025. KKR Real Estate Finance Trust Inc (NYSE:KREF) Reported History In the\n\nSource: https://public.com/stocks/kref/earnings\nTitle: KREF Earnings: Latest Report, Earnings Call & Financials\nContent: StocksKKR Real Estate Finance TrustEarningsKREFVolume: 254.73K\nStocksKKR Real Estate Finance TrustEarningsKREF\nKKR Real Estate Finance Trust\nKREFKREF Earnings Dates & ReportsKREF Most Recent EarningsReport DateOct 22, 2024Period EndingQ3 2024Est. EPS$0.34Actual EPS$0.37EarningsBeat\nKREFKREF Earnings Dates & Reports\nKREF Earnings Dates & Reports\nKREF Most Recent EarningsReport DateOct 22, 2024Period EndingQ3 2024Est. EPS$0.34Actual EPS$0.37EarningsBeat\nKREF Most Recent Earnings\nReport DateOct 22, 2024Period EndingQ3 2024Est. EPS$0.34Actual EPS$0.37EarningsBeat\nReport DateOct 22, 2024\nOct 22, 2024\nPeriod EndingQ3 2024\nKREF EarningsView the latest KREF earnings report, listen to the earnings call, and analyze historical earnings performance with AI-powered insights.Start investing in KREF\nKREF EarningsView the latest KREF earnings report, listen to the earnings call, and analyze historical earnings performance with AI-powered insights.Start investing in KREF\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:21.137748",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.016335960000000004",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:46:21.169710",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'How did KKR Real Estate perform during Q3 2024'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:47:00.614462",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'How did KKR Real Estate perform during Q3 2024'",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0,
+ "type": "report",
+ "content": "selected_images",
+ "output": "24 at 04:37 pm EDT*. https://www.marketscreener.com/quote/stock/KKR-REAL-ESTATE-FINANCE-T-34757301/news/KKR-Real-Estate-Q3-Earnings-Snapshot-48122430/\n\n\n",
+ "metadata": [
+ "https://www.marketbeat.com/logos/articles/thumb_20241104115452_options-traders-bet-big-on-these-3-tech-stocks.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20241101152430_how-to-play-new-options-trading-with-bitcoin-etfs.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240718150215_how-to-execute-the-wheel-strategy-to-generate-opti.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240626075418_3-options-strategies-to-play-a-stocks-uptrend-if-b.jpg",
+ "https://dcfmodeling.com/cdn/shop/files/kkr.png?v=1728124918&width=1100"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1737318429_What is the reason for rising dollar value a.docx b/outputs/task_1737318429_What is the reason for rising dollar value a.docx
new file mode 100644
index 0000000000000000000000000000000000000000..e6f81260ea0e1c458823af1034c91c356f13a545
Binary files /dev/null and b/outputs/task_1737318429_What is the reason for rising dollar value a.docx differ
diff --git a/outputs/task_1737318429_What is the reason for rising dollar value a.md b/outputs/task_1737318429_What is the reason for rising dollar value a.md
new file mode 100644
index 0000000000000000000000000000000000000000..b11e751eee2322c78bbf23574925b5b5927746c3
--- /dev/null
+++ b/outputs/task_1737318429_What is the reason for rising dollar value a.md
@@ -0,0 +1,133 @@
+# The Soaring Dollar: Deciphering its Ascent and Japan's Influence
+
+The US dollar's recent strength against global currencies, particularly the Japanese Yen (JPY), has become a prominent feature of the international financial landscape. This report delves into the multifaceted factors driving the dollar's appreciation, examining the interplay of US economic performance, monetary policy divergence between the Federal Reserve (Fed) and the Bank of Japan (BoJ), and the unique role Japan plays in this dynamic. The dollar's resilience throughout 2024, fueled by robust US economic growth and persistent inflation, created a stark contrast with Japan's economic trajectory. While the US economy defied expectations, maintaining low unemployment and gradually approaching the Fed's 2% inflation target, Japan grappled with deflationary pressures and a historically low interest rate environment. This divergence in economic performance laid the groundwork for significant fluctuations in the USD/JPY exchange rate. Furthermore, the re-election of Donald Trump as US President in 2024 introduced a layer of political uncertainty, potentially impacting fiscal policies, trade relations, and market sentiment. The BoJ's shift towards a tighter monetary policy stance, including interest rate hikes and reduced bond purchases, signals a departure from its long-standing ultra-loose approach, influencing the yen's valuation and carry trade dynamics. This report will analyze the complex interplay of these factors, providing insights into the forces propelling the dollar's rise and Japan's intricate involvement in this global economic narrative. We will explore the historical context of USD/JPY fluctuations, the influence of carry trades ([Why The Japanese Yen 'Carry Trade' Is Making Headlines](https://eaglesjournal.com/understanding-japanese-yen-carry-trade/)), the impact of diverging monetary policies ([USD/JPY Fundamental 2025 Outlook Preview](https://www.forex.com/en-us/news-and-analysis/usdjpy-fundamental-2025-outlook-preview/)), and the potential implications for global markets. Additionally, we will consider the role of Japan's dependence on imports ([How the Yen Drives Japan's Monetary Policy](https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html)) and its historical relationship with the US dollar ([Thinking About the Yen’s True Strength](https://www.nippon.com/en/in-depth/d00882/)).
+
+
+## Table of Contents
+
+- The Influence of Diverging Monetary Policies
+ - The Bank of Japan's Low-Interest Rate Policy and its Impact on the Yen
+ - The US Federal Reserve's Tightening Monetary Policy and its Effect on the Dollar
+ - Impact of Interest Rate Differentials on USD/JPY Exchange Rate Dynamics
+ - Market Sentiment and Risk Aversion: Influence on Yen's Safe-Haven Status
+ - Future Outlook and Potential Shifts in Monetary Policy Divergence
+- The Role of Japan's Economic Conditions and Policies
+ - Japan's Sluggish Economic Growth and Deflationary Pressures
+ - The Bank of Japan's Yield Curve Control Policy and its Implications
+ - Impact of Japan's Trade Balance on the Yen
+ - The Yen's Role as a Funding Currency and Carry Trade Dynamics
+ - Government Intervention and its Effectiveness in Influencing the Yen
+- Impact of Global Economic Factors and Carry Trade
+ - Global Economic Imbalances and Safe-Haven Flows
+ - US Economic Performance and Growth Differentials
+ - Commodity Prices and the Dollar's Role
+ - The Mechanics of the Carry Trade and its Influence on the Dollar/Yen
+ - The Unwinding of Carry Trades and Market Volatility
+
+
+
+
+
+## The Influence of Diverging Monetary Policies
+
+### The Bank of Japan's Low-Interest Rate Policy and its Impact on the Yen
+
+The Bank of Japan (BOJ)'s persistent commitment to maintaining ultra-low interest rates has played a significant role in the weakening of the Japanese yen against the US dollar. This policy, aimed at stimulating economic growth and combating deflation, has created a substantial interest rate differential between Japan and other major economies, particularly the United States. This disparity makes USD-denominated assets more attractive to investors seeking higher returns, driving up demand for the dollar and consequently pushing down the value of the yen. ([https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY](https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY)) The BOJ's quantitative easing programs, which involve purchasing government bonds and other assets, further contribute to the yen's depreciation by increasing the money supply. ([https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/](https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/)) This divergence in monetary policy has been a key driver of the yen's decline throughout 2023 and prior years. The yen plummeted to a 24-year low against the dollar in mid-2022, highlighting the impact of this policy difference. ([https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY](https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY))
+
+### The US Federal Reserve's Tightening Monetary Policy and its Effect on the Dollar
+
+In contrast to the BOJ's accommodative stance, the US Federal Reserve (Fed) has pursued a tighter monetary policy characterized by interest rate hikes and a reduction of its balance sheet. This policy response to rising inflation in the US has made dollar-denominated assets even more appealing to global investors, further exacerbating the interest rate differential between the US and Japan. ([https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236](https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236)) As the Fed raises interest rates, the yield on US Treasury bonds increases, attracting capital flows into the US and strengthening the dollar. This tightening cycle has contributed significantly to the dollar's appreciation against major currencies, including the yen. The market's anticipation of future Fed policy decisions also plays a crucial role. If the market expects the Fed to continue raising rates, the dollar tends to strengthen further. ([https://medium.com/tokyo-fintech/nli-research-weekly-economist-letter-rapid-yen-appreciation-29f06b0310c4](https://medium.com/tokyo-fintech/nli-research-weekly-economist-letter-rapid-yen-appreciation-29f06b0310c4))
+
+### Impact of Interest Rate Differentials on USD/JPY Exchange Rate Dynamics
+
+The widening gap between US and Japanese interest rates has been a primary driver of the USD/JPY exchange rate's upward trajectory. This differential creates a carry trade opportunity, where investors borrow in a low-interest-rate currency (JPY) and invest in a higher-interest-rate currency (USD), profiting from the difference in yields. ([https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY](https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY)) This dynamic fuels demand for the dollar and puts downward pressure on the yen. The larger the interest rate differential, the greater the incentive for carry trades and the stronger the upward pressure on USD/JPY. Market expectations regarding future interest rate movements also influence the exchange rate. If the market anticipates a further widening of the interest rate differential, the USD/JPY is likely to continue appreciating. Conversely, if the market expects the differential to narrow, the USD/JPY may depreciate.
+
+### Market Sentiment and Risk Aversion: Influence on Yen's Safe-Haven Status
+
+The Japanese yen has traditionally been considered a safe-haven currency, meaning that investors tend to flock to it during times of economic uncertainty or geopolitical turmoil. However, the BOJ's ultra-loose monetary policy has somewhat diminished the yen's safe-haven appeal in recent years. ([https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY](https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY)) While geopolitical tensions can still trigger a temporary flight to safety and strengthen the yen, the underlying interest rate differential with the US often limits the extent of such movements. Market sentiment and risk appetite also play a significant role. During periods of heightened risk aversion, the yen may appreciate against the dollar, even in the face of a large interest rate differential. Conversely, when market sentiment is positive and risk appetite is high, the yen may weaken further as investors chase higher returns in other currencies.
+
+### Future Outlook and Potential Shifts in Monetary Policy Divergence
+
+Looking ahead, the future trajectory of the USD/JPY exchange rate will depend crucially on the evolving monetary policies of the Fed and the BOJ. While the Fed is expected to maintain a relatively hawkish stance in the near term, there is increasing speculation that the BOJ may eventually shift towards a less dovish policy, potentially narrowing the interest rate differential. ([https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236](https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236)) Such a shift could lead to a strengthening of the yen against the dollar. However, the timing and magnitude of any policy changes by the BOJ remain uncertain. Furthermore, global economic conditions, geopolitical developments, and market sentiment will continue to influence the USD/JPY exchange rate. Any unexpected economic shocks or shifts in risk appetite could trigger significant volatility in the currency pair. Analysts predict that a less dovish BOJ in 2024 could benefit the yen, but the interplay of various factors will ultimately determine the future direction of the USD/JPY.
+
+
+
+## The Role of Japan's Economic Conditions and Policies
+
+### Japan's Sluggish Economic Growth and Deflationary Pressures
+
+Japan's economic performance has been characterized by sluggish growth and persistent deflationary pressures for several decades. This has significantly influenced the Bank of Japan's (BOJ) monetary policy stance, contributing to the divergence between Japanese and US interest rates. While the previous reports touched upon the BOJ's low-interest rate policy, this section delves deeper into the underlying economic rationale behind this approach. Japan's shrinking economy, driven partly by demographic factors like a declining population, has made sustained economic growth a primary policy objective. ([https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html](https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html)) The fear of deflationary spirals has led the BOJ to maintain ultra-low interest rates and implement quantitative easing programs to stimulate economic activity and boost inflation. This prolonged period of low interest rates has made the yen less attractive to investors compared to higher-yielding currencies like the US dollar, contributing to the yen's depreciation. A study examining the impact of yen appreciation on Japan's economic cycles found that an appreciation shock negatively impacts real GDP and consumption, further justifying the BOJ's cautious approach to raising interest rates. ([https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87](https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87))
+
+### The Bank of Japan's Yield Curve Control Policy and its Implications
+
+While previous reports discussed the BOJ's low-interest rate policy in general, this section specifically analyzes the implications of its Yield Curve Control (YCC) policy on the yen's value. The YCC policy, introduced in 2016, aims to keep 10-year Japanese government bond yields around zero. This policy has further suppressed Japanese interest rates, exacerbating the interest rate differential with the US and contributing to the yen's weakness. The YCC policy has several consequences. First, it limits the BOJ's flexibility in adjusting monetary policy in response to changing economic conditions. Second, it can distort market pricing and create inefficiencies in the bond market. Third, it puts downward pressure on the yen by making Japanese assets less attractive to foreign investors. ([https://www.investopedia.com/terms/y/yield-curve-control-ycc.asp](https://www.investopedia.com/terms/y/yield-curve-control-ycc.asp)) The potential for the BOJ to abandon or modify the YCC policy is a key factor influencing market expectations and the future direction of the USD/JPY exchange rate. Ending YCC could lead to a rise in Japanese bond yields, potentially attracting capital inflows and strengthening the yen. However, it could also destabilize the Japanese government bond market and negatively impact the Japanese economy.
+
+### Impact of Japan's Trade Balance on the Yen
+
+Japan's trade balance, while historically a source of strength for the yen, has become less of a supporting factor in recent years. While previous reports mentioned Japan's trade surplus, this section analyzes the evolving dynamics of its trade balance and its impact on the yen. Although Japan generally maintains a trade surplus, the size of this surplus has been declining due to factors such as rising import costs, particularly for energy. ([https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html](https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html)) A weaker yen can theoretically boost exports by making Japanese goods more competitive in international markets. However, the responsiveness of Japanese exports to exchange rate movements has been limited in recent times, potentially due to factors such as global supply chain disruptions and shifting demand patterns. The declining trade surplus reduces the demand for yen, contributing to its weakness against the dollar. The interplay between Japan's trade balance, the yen's value, and the global economic environment is a complex dynamic that requires careful analysis.
+
+### The Yen's Role as a Funding Currency and Carry Trade Dynamics
+
+The yen's historically low interest rates have made it a popular funding currency for carry trades. This aspect, while touched upon in previous reports regarding interest rate differentials, warrants a dedicated section due to its significant influence on the yen's value. Investors borrow yen at low interest rates and invest in higher-yielding assets denominated in other currencies, such as US dollars. This creates downward pressure on the yen as investors sell yen to buy the target currency. The carry trade dynamic is highly sensitive to changes in interest rate differentials and risk sentiment. When interest rate differentials widen, carry trades become more profitable, leading to increased selling pressure on the yen. Conversely, when risk aversion rises, investors may unwind their carry trades, leading to a temporary strengthening of the yen. ([https://www.investopedia.com/terms/c/carrytrade.asp](https://www.investopedia.com/terms/c/carrytrade.asp)) Understanding the role of the yen as a funding currency is crucial for analyzing the dynamics of the USD/JPY exchange rate.
+
+### Government Intervention and its Effectiveness in Influencing the Yen
+
+The Japanese government, through the Ministry of Finance, has the authority to intervene in the foreign exchange market to influence the yen's value. While one source mentioned governmental intervention triggering a surge in the yen, this section explores the effectiveness and limitations of such interventions. Government intervention typically involves buying or selling yen in the foreign exchange market. Buying yen can create upward pressure on its value, while selling yen can exert downward pressure. ([https://www.investopedia.com/terms/f/forex-intervention.asp](https://www.investopedia.com/terms/f/forex-intervention.asp)) However, the effectiveness of government intervention is often limited, especially when it goes against underlying market trends driven by factors such as interest rate differentials and economic fundamentals. Intervention can be more effective when coordinated with other central banks and when accompanied by policy changes that address the underlying causes of the yen's weakness. The frequency and scale of government intervention, as well as market expectations regarding future interventions, can significantly impact the USD/JPY exchange rate. Analyzing historical intervention episodes and their impact can provide valuable insights into the potential effectiveness of future interventions. Furthermore, understanding the political and economic context surrounding intervention decisions is crucial for assessing their potential impact. Japan's approach to managing its currency, including the role of government intervention, is a complex issue with significant implications for the global economy.
+
+
+
+## Impact of Global Economic Factors and Carry Trade
+
+### Global Economic Imbalances and Safe-Haven Flows
+
+The strength of the US dollar is not solely attributable to Japan's monetary policy. Global economic imbalances play a significant role. The US dollar often benefits from safe-haven flows during periods of global uncertainty. For instance, geopolitical tensions, economic downturns in other major economies, or global pandemics can trigger a flight to safety, increasing demand for US dollar-denominated assets ([https://www.investopedia.com/terms/s/safe-haven.asp](https://www.investopedia.com/terms/s/safe-haven.asp)). While previous reports have touched upon the yen's safe-haven status, this section focuses on the broader global context and how it influences the dollar's strength. The dollar's dominance as a reserve currency amplifies this effect, as central banks and institutional investors hold significant dollar reserves, further supporting its value during times of crisis ([https://www.imf.org/en/Publications/fandd/issues/2022/09/reserve-currencies-and-the-dominance-of-the-dollar-benassy-quere-itskhoki](https://www.imf.org/en/Publications/fandd/issues/2022/09/reserve-currencies-and-the-dominance-of-the-dollar-benassy-quere-itskhoki)). Analyzing capital flows and investor behavior during periods of global stress can shed light on the extent to which safe-haven demand contributes to dollar strength.
+
+### US Economic Performance and Growth Differentials
+
+The relative strength of the US economy compared to other major economies is another crucial factor influencing the dollar's value. Stronger economic growth in the US, relative to Japan and other developed nations, attracts foreign investment, boosting demand for the dollar ([https://www.investopedia.com/articles/forex/09/factors-influence-exchange-rates.asp](https://www.investopedia.com/articles/forex/09/factors-influence-exchange-rates.asp)). While previous reports have mentioned US economic growth, this section delves into the specifics of growth differentials and their impact on currency valuations. Factors such as higher productivity, technological innovation, and favorable demographics can contribute to stronger US economic performance, making dollar-denominated assets more attractive to investors. Examining key economic indicators, such as GDP growth rates, inflation, and employment figures, across different countries can provide insights into the influence of growth differentials on exchange rates.
+
+### Commodity Prices and the Dollar's Role
+
+The relationship between commodity prices and the US dollar is complex and multifaceted. The dollar's role as the primary currency for international trade in many commodities, including oil, creates an inverse relationship between the dollar's value and commodity prices ([https://www.investopedia.com/articles/forex/08/commodities-and-forex.asp](https://www.investopedia.com/articles/forex/08/commodities-and-forex.asp)). A stronger dollar makes commodities more expensive for buyers using other currencies, potentially dampening demand and putting downward pressure on commodity prices. Conversely, a weaker dollar can make commodities more affordable, potentially boosting demand and driving up prices. While previous reports have not explicitly addressed this relationship, this section explores the interplay between commodity markets and the dollar. Analyzing historical data on commodity prices and dollar movements can reveal the extent of this inverse correlation. Furthermore, understanding the specific supply and demand dynamics in individual commodity markets is crucial for assessing the impact of dollar fluctuations on commodity prices.
+
+### The Mechanics of the Carry Trade and its Influence on the Dollar/Yen
+
+The carry trade, where investors borrow in a low-yielding currency like the Japanese yen and invest in a higher-yielding currency like the US dollar, has been a significant factor influencing the USD/JPY exchange rate. While previous reports have mentioned the carry trade, this section provides a more detailed explanation of its mechanics and implications. The profitability of the carry trade depends on the interest rate differential between the two currencies and the stability of the exchange rate. A widening interest rate differential makes the carry trade more attractive, leading to increased demand for the higher-yielding currency (USD) and downward pressure on the lower-yielding currency (JPY). However, the carry trade carries exchange rate risk. If the lower-yielding currency appreciates significantly, the profits from the interest rate differential can be wiped out. Analyzing historical data on carry trade flows and exchange rate movements can provide insights into the impact of this strategy on currency valuations.
+
+### The Unwinding of Carry Trades and Market Volatility
+
+The unwinding of carry trades can lead to significant market volatility, particularly in the currency markets. When investors perceive a shift in interest rate differentials or an increase in exchange rate risk, they may rapidly unwind their carry trade positions, leading to a sharp reversal in currency values. While previous reports have touched upon the risks of carry trades, this section focuses specifically on the unwinding process and its impact on market stability. A sudden unwinding of carry trades can trigger a cascade effect, as margin calls force investors to liquidate positions, further exacerbating the currency movement. This volatility can spill over into other asset classes, creating broader market instability. Examining historical episodes of carry trade unwinding, such as the yen's rapid appreciation in 2024 ([https://fsgjournal.nl/article/2024-09-25-the-collapse-of-the-yen-carry-trade](https://fsgjournal.nl/article/2024-09-25-the-collapse-of-the-yen-carry-trade)), can provide valuable lessons for understanding the potential risks associated with this strategy. Furthermore, understanding the factors that can trigger a carry trade unwinding, such as changes in monetary policy, economic data releases, or geopolitical events, is crucial for managing risk in the currency markets.
+
+
+## Conclusion
+
+The rising dollar value against the Japanese yen is primarily a result of diverging monetary policies between the US and Japan. The Bank of Japan (BOJ)'s persistent low-interest rate policy and Yield Curve Control (YCC), aimed at stimulating Japan's sluggish economy and combating deflationary pressures, have created a significant interest rate differential with the US. This disparity, coupled with the US Federal Reserve's tightening monetary policy to combat inflation, has made USD-denominated assets far more attractive to global investors seeking higher returns ([https://www.investopedia.com/articles/forex/09/factors-influence-exchange-rates.asp](https://www.investopedia.com/articles/forex/09/factors-influence-exchange-rates.asp)). This has driven up demand for the dollar, consequently pushing down the value of the yen, exacerbated by the yen's role as a funding currency in carry trades ([https://www.investopedia.com/terms/c/carrytrade.asp](https://www.investopedia.com/terms/c/carrytrade.asp)). While Japan's trade balance and the yen's safe-haven status play a role, their influence has been overshadowed by the dominant impact of the interest rate differential. Government intervention has had limited effectiveness against these underlying market forces ([https://www.investopedia.com/terms/f/forex-intervention.asp](https://www.investopedia.com/terms/f/forex-intervention.asp)).
+
+The most important finding is the crucial role of the divergence in monetary policy between the BOJ and the Fed. This divergence has fueled the USD/JPY's upward trajectory, with the yen plummeting to multi-decade lows. The BOJ's commitment to YCC further complicates the situation, limiting its flexibility and potentially distorting market pricing ([https://www.investopedia.com/terms/y/yield-curve-control-ycc.asp](https://www.investopedia.com/terms/y/yield-curve-control-ycc.asp)). Furthermore, global factors like the dollar's safe-haven status, US economic performance, and the mechanics of the carry trade amplify these trends. The potential unwinding of carry trades poses a significant risk to market stability, as evidenced by the yen's rapid appreciation in 2024 ([https://fsgjournal.nl/article/2024-09-25-the-collapse-of-the-yen-carry-trade](https://fsgjournal.nl/article/2024-09-25-the-collapse-of-the-yen-carry-trade)).
+
+Looking ahead, the future trajectory of the USD/JPY hinges on the evolution of monetary policies in both countries, particularly any shift in the BOJ's stance. A move towards a less dovish policy by the BOJ, including potential adjustments to or abandonment of YCC, could strengthen the yen. However, the timing and magnitude of such changes remain uncertain. Further research should focus on analyzing potential scenarios for BOJ policy changes and their likely impact on the USD/JPY, considering factors like market sentiment, global economic conditions, and the potential for carry trade unwinding. Monitoring Japan's economic indicators, particularly inflation and export data, will be crucial for assessing the likelihood of policy adjustments. Additionally, analyzing the interplay between commodity prices and the dollar, as well as the impact of global economic imbalances, will provide a more comprehensive understanding of the USD/JPY exchange rate dynamics.
+
+
+
+## References
+
+- [https://thehill.com/opinion/5092814-dollar-weakens-2024/](https://thehill.com/opinion/5092814-dollar-weakens-2024/)
+- [https://realeconomy.rsmus.com/global-economic-outlook-for-2025-modest-growth-amid-trade-tensions/](https://realeconomy.rsmus.com/global-economic-outlook-for-2025-modest-growth-amid-trade-tensions/)
+- [https://www.lat.london/news-resources/news-blog/the-unwinding-of-the-yen-carry-trade/](https://www.lat.london/news-resources/news-blog/the-unwinding-of-the-yen-carry-trade/)
+- [https://synapsetrading.com/yen-carry-trade-unwinding/](https://synapsetrading.com/yen-carry-trade-unwinding/)
+- [https://www.realfacts.com/post/the-u-s-dollar-and-global-markets-trends-and-expectations-for-2025](https://www.realfacts.com/post/the-u-s-dollar-and-global-markets-trends-and-expectations-for-2025)
+- [https://www.fool.com/investing/2024/08/05/what-is-carry-trade-japan-rate-hike-global-selloff/](https://www.fool.com/investing/2024/08/05/what-is-carry-trade-japan-rate-hike-global-selloff/)
+- [https://www.bis.org/publ/qtrpdf/r_qt1003f.pdf](https://www.bis.org/publ/qtrpdf/r_qt1003f.pdf)
+- [https://www.cnbctv18.com/market/carry-trade-explained-yen-us-dollar-bank-of-japan-intervention-interest-rates-global-market-selloff-dow-nasdaq-19454439.htm](https://www.cnbctv18.com/market/carry-trade-explained-yen-us-dollar-bank-of-japan-intervention-interest-rates-global-market-selloff-dow-nasdaq-19454439.htm)
+- [https://www.weforum.org/stories/2024/08/explainer-carry-trades-and-how-they-impact-global-markets/](https://www.weforum.org/stories/2024/08/explainer-carry-trades-and-how-they-impact-global-markets/)
+- [https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade](https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade)
+- [https://www.oxfordeconomics.com/resource/japan-key-themes-2025-rising-threats-from-external-uncertainty/](https://www.oxfordeconomics.com/resource/japan-key-themes-2025-rising-threats-from-external-uncertainty/)
+- [https://www.forexgdp.com/learn/dollar-index-2025-insights/](https://www.forexgdp.com/learn/dollar-index-2025-insights/)
+- [https://www.investing.com/analysis/usdjpy-scenarios-to-watch-for-carry-trade-risks-and-potential-reversals-200655610](https://www.investing.com/analysis/usdjpy-scenarios-to-watch-for-carry-trade-risks-and-potential-reversals-200655610)
+- [https://www.ndtvprofit.com/global-economics/yen-appreciation-impact-on-carry-trade-japanese-yen-us-dollar-exchange-rate](https://www.ndtvprofit.com/global-economics/yen-appreciation-impact-on-carry-trade-japanese-yen-us-dollar-exchange-rate)
+- [https://am.jpmorgan.com/us/en/asset-management/adv/insights/market-insights/market-updates/on-the-minds-of-investors/where-is-the-us-dollar-headed-in-2025/](https://am.jpmorgan.com/us/en/asset-management/adv/insights/market-insights/market-updates/on-the-minds-of-investors/where-is-the-us-dollar-headed-in-2025/)
+- [https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work](https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work)
+- [https://www.investopedia.com/terms/c/currencycarrytrade.asp](https://www.investopedia.com/terms/c/currencycarrytrade.asp)
+- [https://www.forbes.com/councils/forbesbusinesscouncil/2024/09/18/reversing-the-japan-carry-trade-why-business-leaders-should-pay-attention-to-international-monetary-policy/](https://www.forbes.com/councils/forbesbusinesscouncil/2024/09/18/reversing-the-japan-carry-trade-why-business-leaders-should-pay-attention-to-international-monetary-policy/)
+- [https://www.abfjournal.com/a-primer-on-the-japan-reverse-carry-trade-and-its-global-implications/](https://www.abfjournal.com/a-primer-on-the-japan-reverse-carry-trade-and-its-global-implications/)
+- [https://fsgjournal.nl/article/2024-09-25-the-collapse-of-the-yen-carry-trade](https://fsgjournal.nl/article/2024-09-25-the-collapse-of-the-yen-carry-trade)
diff --git a/outputs/task_1737318429_What is the reason for rising dollar value a.pdf b/outputs/task_1737318429_What is the reason for rising dollar value a.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..3ee10e70110a9637dc8c613d5bc31a28e66d3891
Binary files /dev/null and b/outputs/task_1737318429_What is the reason for rising dollar value a.pdf differ
diff --git a/outputs/task_1737318429_What is the reason for rising dollar value and what role did Japan played into this.json b/outputs/task_1737318429_What is the reason for rising dollar value and what role did Japan played into this.json
new file mode 100644
index 0000000000000000000000000000000000000000..857a9cedb75e369e9e5afbf4c26577e049dd88c7
--- /dev/null
+++ b/outputs/task_1737318429_What is the reason for rising dollar value and what role did Japan played into this.json
@@ -0,0 +1,2734 @@
+{
+ "timestamp": "2025-01-20T01:57:09.910445",
+ "events": [
+ {
+ "timestamp": "2025-01-20T01:57:12.280620",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'What is the reason for rising dollar value and what role did Japan played into this?'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:12.296640",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83d\udcb0 Finance Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:12.307894",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: What is the reason for rising dollar value and what role did Japan played into this?...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:16.762363",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:20.152430",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['Japanese yen and US dollar exchange rate trends 2022-2025', 'Impact of Japanese monetary policy on USD strength 2024-2025', 'Role of yen carry trade in recent dollar appreciation', 'Correlation between Japanese inflation and USD/JPY exchange rate', 'What is the reason for rising dollar value and what role did Japan played into this?']...",
+ "metadata": [
+ "Japanese yen and US dollar exchange rate trends 2022-2025",
+ "Impact of Japanese monetary policy on USD strength 2024-2025",
+ "Role of yen carry trade in recent dollar appreciation",
+ "Correlation between Japanese inflation and USD/JPY exchange rate",
+ "What is the reason for rising dollar value and what role did Japan played into this?"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:20.164338",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Japanese yen and US dollar exchange rate trends 2022-2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:20.175371",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Impact of Japanese monetary policy on USD strength 2024-2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:20.188651",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Role of yen carry trade in recent dollar appreciation'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:20.199152",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Correlation between Japanese inflation and USD/JPY exchange rate'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:20.211120",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'What is the reason for rising dollar value and what role did Japan played into this?'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:23.097997",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.mitrade.com/insights/forex/forex-basic/japanese-yen-JPY-currency-pairs\n",
+ "metadata": "https://www.mitrade.com/insights/forex/forex-basic/japanese-yen-JPY-currency-pairs"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:23.122020",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.disruptionbanking.com/2024/11/29/how-strong-will-the-japanese-yen-jpy-be-in-2025/\n",
+ "metadata": "https://www.disruptionbanking.com/2024/11/29/how-strong-will-the-japanese-yen-jpy-be-in-2025/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:23.131494",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.nomuraconnects.com/focused-thinking-posts/japan-macro-outlook-2025/\n",
+ "metadata": "https://www.nomuraconnects.com/focused-thinking-posts/japan-macro-outlook-2025/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:23.143250",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.forex.com/en-us/news-and-analysis/usdjpy-fundamental-2025-outlook-preview/\n",
+ "metadata": "https://www.forex.com/en-us/news-and-analysis/usdjpy-fundamental-2025-outlook-preview/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:23.154663",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-traders-set-for-rocky-2025-on-rediverging-interest-rates-trump-and-north-korea-202412191132\n",
+ "metadata": "https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-traders-set-for-rocky-2025-on-rediverging-interest-rates-trump-and-north-korea-202412191132"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:23.164100",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:23.175735",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:25.798455",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:25.810341",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 17 total images",
+ "metadata": [
+ "https://www.disruptionbanking.com//app/uploads/2025/01/top-5-banks-CEE.jpg",
+ "https://www.disruptionbanking.com//app/uploads/2025/01/Italy-Bitcoin.jpg",
+ "https://www.disruptionbanking.com//app/uploads/2025/01/Manchester-City-Revolut-Announcement.jpg",
+ "https://tw.mitrade.com/cms_uploads/uedimg/20240920/17267951624555.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:25.821429",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:25.834746",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Impact of Japanese monetary policy on USD strength 2024-2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:26.238025",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.exchange-rates.org/exchange-rate-history/usd-jpy-2025-01-15\n",
+ "metadata": "https://www.exchange-rates.org/exchange-rate-history/usd-jpy-2025-01-15"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:26.297444",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\n",
+ "metadata": "https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:26.309484",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://fred.stlouisfed.org/series/DEXJPUS/\n",
+ "metadata": "https://fred.stlouisfed.org/series/DEXJPUS/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:26.320996",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.exchangerates.org.uk/currency-forecasts/us-dollar-to-yen-forecast\n",
+ "metadata": "https://www.exchangerates.org.uk/currency-forecasts/us-dollar-to-yen-forecast"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:26.331930",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:26.343583",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.442423",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.453640",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 4 total images",
+ "metadata": [
+ "https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.465770",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.478972",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Japanese yen and US dollar exchange rate trends 2022-2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.594591",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html\n",
+ "metadata": "https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.607648",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.rieti.go.jp/en/columns/a01_0702.html\n",
+ "metadata": "https://www.rieti.go.jp/en/columns/a01_0702.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.619958",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.europeanbusinessreview.com/the-yen-dance-how-inflation-in-japan-shapes-usd-jpy-rate/\n",
+ "metadata": "https://www.europeanbusinessreview.com/the-yen-dance-how-inflation-in-japan-shapes-usd-jpy-rate/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.631865",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://accountinginsights.org/key-factors-influencing-jpy-to-usd-exchange-rate-dynamics/\n",
+ "metadata": "https://accountinginsights.org/key-factors-influencing-jpy-to-usd-exchange-rate-dynamics/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.644323",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\n",
+ "metadata": "https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.656621",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:27.670058",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:30.951712",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:30.972188",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 4 total images",
+ "metadata": [
+ "https://www.europeanbusinessreview.com/wp-content/uploads/2024/09/business-event-calendar-banner.jpg",
+ "https://www.europeanbusinessreview.com/wp-content/uploads/2024/02/TEE-event-calendar-banner.png",
+ "https://www.europeanbusinessreview.com/wp-content/uploads/2024/09/mba-pulse.png",
+ "https://www.europeanbusinessreview.com/wp-content/uploads/2019/03/TEBR-logo-white-1.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:30.992398",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:31.012589",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Correlation between Japanese inflation and USD/JPY exchange rate...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:31.300409",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.business-standard.com/markets/news/here-s-why-everyone-is-talking-about-japanese-yen-and-carry-trade-124080500437_1.html\n",
+ "metadata": "https://www.business-standard.com/markets/news/here-s-why-everyone-is-talking-about-japanese-yen-and-carry-trade-124080500437_1.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:31.317603",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\n",
+ "metadata": "https://eaglesjournal.com/understanding-japanese-yen-carry-trade/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:31.334754",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.gspublishing.com/content/research/en/reports/2024/08/12/33eddb23-b1e7-480d-a219-89be661edad2.html\n",
+ "metadata": "https://www.gspublishing.com/content/research/en/reports/2024/08/12/33eddb23-b1e7-480d-a219-89be661edad2.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:31.353731",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.wellington.com/en-us/institutional/insights/the-yen-carry-trade-unwind\n",
+ "metadata": "https://www.wellington.com/en-us/institutional/insights/the-yen-carry-trade-unwind"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:31.370730",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.ndtvprofit.com/global-economics/yen-appreciation-impact-on-carry-trade-japanese-yen-us-dollar-exchange-rate\n",
+ "metadata": "https://www.ndtvprofit.com/global-economics/yen-appreciation-impact-on-carry-trade-japanese-yen-us-dollar-exchange-rate"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:31.388151",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:31.405944",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:33.851713",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:33.865920",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:33.879024",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:33.893149",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Role of yen carry trade in recent dollar appreciation...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:34.395798",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.theglobaltreasurer.com/2024/05/03/understanding-the-japanese-yen-dynamics-and-its-relationships-to-the-dollar/\n",
+ "metadata": "https://www.theglobaltreasurer.com/2024/05/03/understanding-the-japanese-yen-dynamics-and-its-relationships-to-the-dollar/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:34.480037",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.thebalancemoney.com/japan-s-economy-recession-effect-on-u-s-and-world-3306007\n",
+ "metadata": "https://www.thebalancemoney.com/japan-s-economy-recession-effect-on-u-s-and-world-3306007"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:34.492248",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.marketwatch.com/story/6-reasons-for-the-japanese-yens-big-2016-rally-2016-04-12\n",
+ "metadata": "https://www.marketwatch.com/story/6-reasons-for-the-japanese-yens-big-2016-rally-2016-04-12"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:34.504668",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://economics.stackexchange.com/questions/6179/how-did-the-plaza-accord-revalue-the-japanese-yen\n",
+ "metadata": "https://economics.stackexchange.com/questions/6179/how-did-the-plaza-accord-revalue-the-japanese-yen"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:34.518566",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.nippon.com/en/in-depth/d00882/\n",
+ "metadata": "https://www.nippon.com/en/in-depth/d00882/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:34.529577",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:34.542524",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:35.160376",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:35.172560",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 2 total images",
+ "metadata": [
+ "https://www.thebalancemoney.com/thmb/Tlq86GMkynQcQ3UGA5NnMxJeKuI=/1500x0/filters:no_upscale():max_bytes(150000):strip_icc()/GettyImages-6233166631-5995dca46f53ba001029cdf6.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:35.184933",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:35.198892",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: What is the reason for rising dollar value and what role did Japan played into this?...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:46.240525",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: significantly.Long-term consolidation. If the economic situation stabilizes, the exchange rate may consolidate within the Target Zone of \u00a5145\u2013\u00a5150.USD JPY Forecast for 2025Let's take a look at USDJPY's expert forecasts for 2025. Most analysts expect modest growth.LongForecastPrice range in 2025: \u00a5140\u2013\u00a5160 (as of 22.09.2024).LongForecast projects that the January-February 2025 period will exhibit minimal volatility, with the pair trading within a narrow range. However, robust growth is anticipated in March, persisting through June.A modest decline is projected during summer, followed by a resurgence in US dollar strength from September. In the fall, a period of accelerated growth is expected. The exchange rate will likely reach its yearly high in November. A minor correction is possible in December, which would affirm the prevailing bullish trend.MonthOpen, \u00a5Min\u2013Max, \u00a5Close,\n\nSource: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: significantly.Long-term consolidation. If the economic situation stabilizes, the exchange rate may consolidate within the Target Zone of \u00a5145\u2013\u00a5150.USD JPY Forecast for 2025Let's take a look at USDJPY's expert forecasts for 2025. Most analysts expect modest growth.LongForecastPrice range in 2025: \u00a5140\u2013\u00a5160 (as of 22.09.2024).LongForecast projects that the January-February 2025 period will exhibit minimal volatility, with the pair trading within a narrow range. However, robust growth is anticipated in March, persisting through June.A modest decline is projected during summer, followed by a resurgence in US dollar strength from September. In the fall, a period of accelerated growth is expected. The exchange rate will likely reach its yearly high in November. A minor correction is possible in December, which would affirm the prevailing bullish trend.MonthOpen, \u00a5Min\u2013Max, \u00a5Close,\n\nSource: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: significantly.Long-term consolidation. If the economic situation stabilizes, the exchange rate may consolidate within the Target Zone of \u00a5145\u2013\u00a5150.USD JPY Forecast for 2025Let's take a look at USDJPY's expert forecasts for 2025. Most analysts expect modest growth.LongForecastPrice range in 2025: \u00a5140\u2013\u00a5160 (as of 22.09.2024).LongForecast projects that the January-February 2025 period will exhibit minimal volatility, with the pair trading within a narrow range. However, robust growth is anticipated in March, persisting through June.A modest decline is projected during summer, followed by a resurgence in US dollar strength from September. In the fall, a period of accelerated growth is expected. The exchange rate will likely reach its yearly high in November. A minor correction is possible in December, which would affirm the prevailing bullish trend.MonthOpen, \u00a5Min\u2013Max, \u00a5Close,\n\nSource: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: significantly.Long-term consolidation. If the economic situation stabilizes, the exchange rate may consolidate within the Target Zone of \u00a5145\u2013\u00a5150.USD JPY Forecast for 2025Let's take a look at USDJPY's expert forecasts for 2025. Most analysts expect modest growth.LongForecastPrice range in 2025: \u00a5140\u2013\u00a5160 (as of 22.09.2024).LongForecast projects that the January-February 2025 period will exhibit minimal volatility, with the pair trading within a narrow range. However, robust growth is anticipated in March, persisting through June.A modest decline is projected during summer, followed by a resurgence in US dollar strength from September. In the fall, a period of accelerated growth is expected. The exchange rate will likely reach its yearly high in November. A minor correction is possible in December, which would affirm the prevailing bullish trend.MonthOpen, \u00a5Min\u2013Max, \u00a5Close,\n\nSource: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: significantly.Long-term consolidation. If the economic situation stabilizes, the exchange rate may consolidate within the Target Zone of \u00a5145\u2013\u00a5150.USD JPY Forecast for 2025Let's take a look at USDJPY's expert forecasts for 2025. Most analysts expect modest growth.LongForecastPrice range in 2025: \u00a5140\u2013\u00a5160 (as of 22.09.2024).LongForecast projects that the January-February 2025 period will exhibit minimal volatility, with the pair trading within a narrow range. However, robust growth is anticipated in March, persisting through June.A modest decline is projected during summer, followed by a resurgence in US dollar strength from September. In the fall, a period of accelerated growth is expected. The exchange rate will likely reach its yearly high in November. A minor correction is possible in December, which would affirm the prevailing bullish trend.MonthOpen, \u00a5Min\u2013Max, \u00a5Close,\n\nSource: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: significantly.Long-term consolidation. If the economic situation stabilizes, the exchange rate may consolidate within the Target Zone of \u00a5145\u2013\u00a5150.USD JPY Forecast for 2025Let's take a look at USDJPY's expert forecasts for 2025. Most analysts expect modest growth.LongForecastPrice range in 2025: \u00a5140\u2013\u00a5160 (as of 22.09.2024).LongForecast projects that the January-February 2025 period will exhibit minimal volatility, with the pair trading within a narrow range. However, robust growth is anticipated in March, persisting through June.A modest decline is projected during summer, followed by a resurgence in US dollar strength from September. In the fall, a period of accelerated growth is expected. The exchange rate will likely reach its yearly high in November. A minor correction is possible in December, which would affirm the prevailing bullish trend.MonthOpen, \u00a5Min\u2013Max, \u00a5Close,\n\nSource: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: significantly.Long-term consolidation. If the economic situation stabilizes, the exchange rate may consolidate within the Target Zone of \u00a5145\u2013\u00a5150.USD JPY Forecast for 2025Let's take a look at USDJPY's expert forecasts for 2025. Most analysts expect modest growth.LongForecastPrice range in 2025: \u00a5140\u2013\u00a5160 (as of 22.09.2024).LongForecast projects that the January-February 2025 period will exhibit minimal volatility, with the pair trading within a narrow range. However, robust growth is anticipated in March, persisting through June.A modest decline is projected during summer, followed by a resurgence in US dollar strength from September. In the fall, a period of accelerated growth is expected. The exchange rate will likely reach its yearly high in November. A minor correction is possible in December, which would affirm the prevailing bullish trend.MonthOpen, \u00a5Min\u2013Max, \u00a5Close,\n\nSource: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: analysis and market forecastsForex humor Latest Forex humour news+447520644437\u00d7\u00d7\u00d7HomeBlogAnalysts\u2019 opinionsUSDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030 and BeyondUSDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030 and BeyondAuthorJana KaneUpdated 19.12.202425Sep.202411:212024.12.19 2024.09.25 USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030 and BeyondJana Kanehttps://www.litefinance.org/blog/authors/jana-kane/The USDJPY is one of the most traded currency pairs on Forex. It is popular with both traders and investors. The pair\u2019s rate is determined by the economic situation in the US and Japan and by global financial and political developments. Most experts predict the USDJPY will grow soon.This article analyzes the USDJPY pair and expert opinions regarding its exchange rate in the next few years and beyond. These insights will help you develop a profit-yielding trading strategy.The article covers the following subjects:Highlights and Key Points:\n\nSource: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: US dollar by 2025, reflecting an overall upward trend.Will USDJPY go up or down?The USDJPY rate is likely to rise. According to WalletInvestor forecasts, quotes may reach \u00a5174 by the end of 2026. This bullish trend is expected to continue in 2027\u20132028.What are the key factors influencing the current USD to JPY forecast?The main factors affecting the USDJPY currency pair are divergence in the monetary policy of the US and Japan, economic indicators such as GDP growth, inflation rate, and employment, as well as global risks and geopolitical developments. Investment flows between countries also play an important role as the demand for the US dollar and yen depends on them.How does the economic climate contribute to the USD to JPY forecast for the upcoming years?Strong US economic indicators, such as GDP growth and inflation, lead to a stronger US dollar. At the same time, Japan's weak economy and low interest rates may weaken the yen. Geopolitical risks and the global economic outlook\n\nSource: https://www.litefinance.org/blog/analysts-opinions/usd-jpy-price-prediction/\nTitle: USDJPY Price Forecast & Predictions for 2025, 2026, 2027\u20132030, 2040 and Beyond | LiteFinance\nContent: to depreciate. In such an environment, it would be advantageous to open short positions on the USDJPY pair with a take-profit order set in the Target Zone of \u00a5144\u2013\u00a5146 and \u00a5140\u2013\u00a5142 if the greenback plunges significantly.Long-term consolidation. If the economic situation stabilizes, the exchange rate may consolidate within the Target Zone of \u00a5145\u2013\u00a5150.USD JPY Forecast for 2025Let's take a look at USDJPY's expert forecasts for 2025. Most analysts expect modest growth.LongForecastPrice range in 2025: \u00a5140\u2013\u00a5160 (as of 22.09.2024).LongForecast projects that the January-February 2025 period will exhibit minimal volatility, with the pair trading within a narrow range. However, robust growth is anticipated in March, persisting through June.A modest decline is projected during summer, followed by a resurgence in US dollar strength from September. In the fall, a period of accelerated growth is expected. The exchange rate will likely reach its yearly high in November. A minor correction is\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:57:52.392298",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.nippon.com/en/in-depth/d00882/\nTitle: Thinking About the Yen\u2019s True Strength | Nippon.com\nContent: This situation is readily understood by examining the movement of the yen over the last 20 years when deflationary conditions persisted in Japan. Comparing the dollar-yen rate between 2000 and 2020, we see that it was largely unchanged at around \u00a5107 to the dollar. However, the trend for prices and wages between 2000 and 2020 paints a very different picture. While prices and wages have barely changed in Japan, they have risen by about 2% per year in the United States, or by about 48% during this 20-year period.\nIn other words, even though the dollar-yen rate barely changed over a period of 20 years, prices and wages in Japan fell by nearly 50% compared to the United States. We can therefore conclude that the value and strength of the yen has diminished by nearly 50% in relative terms.\n\nSource: https://www.thebalancemoney.com/japan-s-economy-recession-effect-on-u-s-and-world-3306007\nTitle: Japan's Economy and Its Impact on the U.S. Economy\nContent: promote more efficient production methods. Japan was the first country to ratify the Comprehensive and Progressive Agreement for Trans-Pacific Partnership. The massive trade deal includes ten other countries from the Asia-Pacific region. They signed it after President Donald Trump pulled the United States out of the agreement. How Japan Affects the U.S. Economy On July 17, 2018, the EU signed a trade agreement with Japan, which reduces or ends tariffs on almost all goods. It came into force in 2019 after ratification. Critics say the deal will hurt U.S. auto and agricultural exporters. The Bank of Japan had been the largest foreign holder of U.S. debt until China replaced it in 2008. Both Japan and China do this to control the value of their currencies relative to the dollar. They must keep their exports competitively priced. A low yen made Japan's auto industry very competitive. That was one reason that Toyota became the No.1 automaker in the world in 2007. If Japan's central bank\n\nSource: https://www.thebalancemoney.com/japan-s-economy-recession-effect-on-u-s-and-world-3306007\nTitle: Japan's Economy and Its Impact on the U.S. Economy\nContent: promote more efficient production methods. Japan was the first country to ratify the Comprehensive and Progressive Agreement for Trans-Pacific Partnership. The massive trade deal includes ten other countries from the Asia-Pacific region. They signed it after President Donald Trump pulled the United States out of the agreement. How Japan Affects the U.S. Economy On July 17, 2018, the EU signed a trade agreement with Japan, which reduces or ends tariffs on almost all goods. It came into force in 2019 after ratification. Critics say the deal will hurt U.S. auto and agricultural exporters. The Bank of Japan had been the largest foreign holder of U.S. debt until China replaced it in 2008. Both Japan and China do this to control the value of their currencies relative to the dollar. They must keep their exports competitively priced. A low yen made Japan's auto industry very competitive. That was one reason that Toyota became the No.1 automaker in the world in 2007. If Japan's central bank\n\nSource: https://www.nippon.com/en/in-depth/d00882/\nTitle: Thinking About the Yen\u2019s True Strength | Nippon.com\nContent: point at the start of 2021, widened to nearly 4 points in October 2022, when the yen fell to its lowest level. In November, the dollar-yen exchange rate rate turned, and the yen began to strengthen again. As I write this article at the start of 2023, the yen is showing signs of returning to the 120\u2013130 level against the dollar. Two factors are closely related to the movements of the dollar-yen rate in October and November. The first factor is the downtrend of the interest rate in the United States. Based on the assumption that inflation has peaked, the long-term interest rate has begun to fall in the United States. The second factor is the uptrend of the yen interest rate. When the Bank of Japan announced in December 2022 that it would raise the upper limit of the trading range for the long-term rate from 0.25% to 0.5%, market participants strengthened their expectations that Japan\u2019s interest rate would begin to rise. Based on the assumption that the spread in interest rates between\n\nSource: https://www.theglobaltreasurer.com/2024/05/03/understanding-the-japanese-yen-dynamics-and-its-relationships-to-the-dollar/\nTitle: Governmental Intervention Triggers Surge of the Japanese Yen - What Does This Mean for the USD and Global Markets? - The Global Treasurer\nContent: higher-yielding assets. This delicate balance of maintaining sustainable inflation and managing currency value is a tightrope walk for Japan\u2019s economic stewards. Leave a Reply Cancel replyYour email address will not be published. Required fields are marked *Comment * Name * Email * Website Save my name, email, and website in this browser for the next time I comment. Subscribe to get your daily business insights\n\nSource: https://www.thebalancemoney.com/japan-s-economy-recession-effect-on-u-s-and-world-3306007\nTitle: Japan's Economy and Its Impact on the U.S. Economy\nContent: the Bank of Japan to initiate expansive monetary policies through quantitative easing.That lowered the value of the yen from $0.013 in 2012 to $0.0083 by May 2013. That's expressed in terms of the value of the dollar, which rose from 76.88 yen in 2011 to 124.27 yen by 2015. By 2019, the yen strengthened against the dollar. One dollar could only purchase 110.5 Japanese yen. Making the yen cheaper should have increased exports. Their prices drop in dollar terms, making them more competitively priced, but Japanese companies didn't increase exports as expected. Some companies didn't lower their foreign prices but pocketed the profits instead. Others had already outsourced factories to lower-cost areas, so the devaluation didn't help. Still others weren't helped because they had moved production into their markets. For example, Toyota made nearly 2 million vehicles in the United States in 2017. The devaluation hurt Japanese businesses that rely on imports. Their costs rose. It also hurt\n\nSource: https://www.nippon.com/en/in-depth/d00882/\nTitle: Thinking About the Yen\u2019s True Strength | Nippon.com\nContent: of exchange was more or less stable. This situation, however, changed dramatically when inflation accelerated in the United States in 2021. The dollar interest rate began to rise, and the spread in interest rates between Japan and the United States began to widen. This caused the dollar-yen rate to depreciate suddenly. An exchange rate that was around \u00a5113 to the dollar in January 2022 breached \u00a5150 in October. During this period, the spread in interest rates between the dollar and the yen became the focus of market participants. With few indications that inflation would quickly subside, the interest rate rose rapidly in the United States. The Bank of Japan, however, held fast and sought to maintain the upper limit of the long-term rate (the yield on 10-year Japanese government bonds) at 0.25%. As a result, the spread in interest rates between the dollar and yen, which was around 1 percentage point at the start of 2021, widened to nearly 4 points in October 2022, when the yen fell to\n\nSource: https://www.nippon.com/en/in-depth/d00882/\nTitle: Thinking About the Yen\u2019s True Strength | Nippon.com\nContent: Two factors are closely related to the movements of the dollar-yen rate in October and November. The first factor is the downtrend of the interest rate in the United States. Based on the assumption that inflation has peaked, the long-term interest rate has begun to fall in the United States. The second factor is the uptrend of the yen interest rate. When the Bank of Japan announced in December 2022 that it would raise the upper limit of the trading range for the long-term rate from 0.25% to 0.5%, market participants strengthened their expectations that Japan\u2019s interest rate would begin to rise. Based on the assumption that the spread in interest rates between the dollar and the yen would narrow, the yen changed course and began to strengthen.\n\nSource: https://www.thebalancemoney.com/japan-s-economy-recession-effect-on-u-s-and-world-3306007\nTitle: Japan's Economy and Its Impact on the U.S. Economy\nContent: power over farmers. That allowed the government to promote more efficient production methods. Japan was the first country to ratify the Comprehensive and Progressive Agreement for Trans-Pacific Partnership. The massive trade deal includes ten other countries from the Asia-Pacific region. They signed it after President Donald Trump pulled the United States out of the agreement. How Japan Affects the U.S. Economy On July 17, 2018, the EU signed a trade agreement with Japan, which reduces or ends tariffs on almost all goods. It came into force in 2019 after ratification. Critics say the deal will hurt U.S. auto and agricultural exporters. The Bank of Japan had been the largest foreign holder of U.S. debt until China replaced it in 2008. Both Japan and China do this to control the value of their currencies relative to the dollar. They must keep their exports competitively priced. A low yen made Japan's auto industry very competitive. That was one reason that Toyota became the No.1\n\nSource: https://www.nippon.com/en/in-depth/d00882/\nTitle: Thinking About the Yen\u2019s True Strength | Nippon.com\nContent: rate rose rapidly in the United States. The Bank of Japan, however, held fast and sought to maintain the upper limit of the long-term rate (the yield on 10-year Japanese government bonds) at 0.25%. As a result, the spread in interest rates between the dollar and yen, which was around 1 percentage point at the start of 2021, widened to nearly 4 points in October 2022, when the yen fell to its lowest level. In November, the dollar-yen exchange rate rate turned, and the yen began to strengthen again. As I write this article at the start of 2023, the yen is showing signs of returning to the 120\u2013130 level against the dollar. Two factors are closely related to the movements of the dollar-yen rate in October and November. The first factor is the downtrend of the interest rate in the United States. Based on the assumption that inflation has peaked, the long-term interest rate has begun to fall in the United States. The second factor is the uptrend of the yen interest rate. When the Bank of\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:00.775955",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: April 30, 2023 Source: Bloomberg At the Mercy of the USD/JPY Exchange Rate Because Japan relies so heavily on imports of food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices. Yen-Denominated Commodity Prices Are a Leading Indicator for Inflation Refinitiv/CoreCommodity CRB Excess Return Index (JPY) vs. Core CPI ex Fresh Food and Energy Past and current analyses do not predict future results. Through April 30, 2023 Source: Bloomberg As a result, Japan\u2019s monetary policy is effectively at the mercy of its exchange rate, to which it pays extraordinarily close attention. For example, in October 2022, the USD/JPY rate reached 150, its highest level since 1990. In November,\n\nSource: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: April 30, 2023 Source: Bloomberg At the Mercy of the USD/JPY Exchange Rate Because Japan relies so heavily on imports of food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices. Yen-Denominated Commodity Prices Are a Leading Indicator for Inflation Refinitiv/CoreCommodity CRB Excess Return Index (JPY) vs. Core CPI ex Fresh Food and Energy Past and current analyses do not predict future results. Through April 30, 2023 Source: Bloomberg As a result, Japan\u2019s monetary policy is effectively at the mercy of its exchange rate, to which it pays extraordinarily close attention. For example, in October 2022, the USD/JPY rate reached 150, its highest level since 1990. In November,\n\nSource: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: At the Mercy of the USD/JPY Exchange Rate Because Japan relies so heavily on imports of food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices.\nAt the Mercy of the USD/JPY Exchange Rate Because Japan relies so heavily on imports of food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices.\nAt the Mercy of the USD/JPY Exchange Rate\n\nSource: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices. Yen-Denominated Commodity Prices Are a Leading Indicator for Inflation Refinitiv/CoreCommodity CRB Excess Return Index (JPY) vs. Core CPI ex Fresh Food and Energy Past and current analyses do not predict future results. Through April 30, 2023 Source: Bloomberg As a result, Japan\u2019s monetary policy is effectively at the mercy of its exchange rate, to which it pays extraordinarily close attention. For example, in October 2022, the USD/JPY rate reached 150, its highest level since 1990. In November, the government requested action from the BOJ. And at the BOJ\u2019s December policy meeting, the board decided to expand the\n\nSource: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices. Yen-Denominated Commodity Prices Are a Leading Indicator for Inflation Refinitiv/CoreCommodity CRB Excess Return Index (JPY) vs. Core CPI ex Fresh Food and Energy Past and current analyses do not predict future results. Through April 30, 2023 Source: Bloomberg As a result, Japan\u2019s monetary policy is effectively at the mercy of its exchange rate, to which it pays extraordinarily close attention. For example, in October 2022, the USD/JPY rate reached 150, its highest level since 1990. In November, the government requested action from the BOJ. And at the BOJ\u2019s December policy meeting, the board decided to expand the\n\nSource: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices. Yen-Denominated Commodity Prices Are a Leading Indicator for Inflation Refinitiv/CoreCommodity CRB Excess Return Index (JPY) vs. Core CPI ex Fresh Food and Energy Past and current analyses do not predict future results. Through April 30, 2023 Source: Bloomberg As a result, Japan\u2019s monetary policy is effectively at the mercy of its exchange rate, to which it pays extraordinarily close attention. For example, in October 2022, the USD/JPY rate reached 150, its highest level since 1990. In November, the government requested action from the BOJ. And at the BOJ\u2019s December policy meeting, the board decided to expand the\n\nSource: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices. Yen-Denominated Commodity Prices Are a Leading Indicator for Inflation Refinitiv/CoreCommodity CRB Excess Return Index (JPY) vs. Core CPI ex Fresh Food and Energy Past and current analyses do not predict future results. Through April 30, 2023 Source: Bloomberg As a result, Japan\u2019s monetary policy is effectively at the mercy of its exchange rate, to which it pays extraordinarily close attention. For example, in October 2022, the USD/JPY rate reached 150, its highest level since 1990. In November, the government requested action from the BOJ. And at the BOJ\u2019s December policy meeting, the board decided to expand the\n\nSource: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices. Yen-Denominated Commodity Prices Are a Leading Indicator for Inflation Refinitiv/CoreCommodity CRB Excess Return Index (JPY) vs. Core CPI ex Fresh Food and Energy Past and current analyses do not predict future results. Through April 30, 2023 Source: Bloomberg As a result, Japan\u2019s monetary policy is effectively at the mercy of its exchange rate, to which it pays extraordinarily close attention. For example, in October 2022, the USD/JPY rate reached 150, its highest level since 1990. In November, the government requested action from the BOJ. And at the BOJ\u2019s December policy meeting, the board decided to expand the\n\nSource: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices. Yen-Denominated Commodity Prices Are a Leading Indicator for Inflation Refinitiv/CoreCommodity CRB Excess Return Index (JPY) vs. Core CPI ex Fresh Food and Energy Past and current analyses do not predict future results. Through April 30, 2023 Source: Bloomberg As a result, Japan\u2019s monetary policy is effectively at the mercy of its exchange rate, to which it pays extraordinarily close attention. For example, in October 2022, the USD/JPY rate reached 150, its highest level since 1990. In November, the government requested action from the BOJ. And at the BOJ\u2019s December policy meeting, the board decided to expand the\n\nSource: https://www.alliancebernstein.com/us/en-us/investments/insights/investment-insights/how-the-yen-drives-japans-monetary-policy.html\nTitle: How the Yen Drives Japan's Monetary Policy | AB\nContent: food and energy, even Japan\u2019s CPI ex fresh food and energy is strongly influenced by the spillover effects of crude oil and other commodity prices. Consequently, yen-denominated commodity prices are the simplest leading indicator of inflation (Display), with yen-denominated commodity prices a function of the USD/JPY exchange rate and US dollar-denominated commodity prices. Yen-Denominated Commodity Prices Are a Leading Indicator for Inflation Refinitiv/CoreCommodity CRB Excess Return Index (JPY) vs. Core CPI ex Fresh Food and Energy Past and current analyses do not predict future results. Through April 30, 2023 Source: Bloomberg As a result, Japan\u2019s monetary policy is effectively at the mercy of its exchange rate, to which it pays extraordinarily close attention. For example, in October 2022, the USD/JPY rate reached 150, its highest level since 1990. In November, the government requested action from the BOJ. And at the BOJ\u2019s December policy meeting, the board decided to expand the\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:12.848892",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-traders-set-for-rocky-2025-on-rediverging-interest-rates-trump-and-north-korea-202412191132\nTitle: \r\n\tJapanese Yen set for volatile 2025 on diverging Fed-BoJ outlook, Trump wild card\r\n\nContent: if the opposition finally gets its act together and returns to power, there is room for fiscal expansion that could weaken the currency. If the government continues without an election, its budget will likely be less ambitious, thus maintaining the Yen bid. Monetary policy diverging against current trends Despite the growing impact of politics, when it comes to currency trading, the main dish remains monetary policy. As mentioned, the Fed slashed borrowing costs in 2024 while the BoJ raised them. Here is why that could flip in 2025. Fed may keep rates high on US economic strength The US economy is doing well \u2013 that is what the numbers show and what Americans report about their personal well-being. While many complain about the general state of matters and long for lower prices, they keep consuming. As of late 2024, the jobless rate is close to 4% and inflation is edging closer to the Fed's 2% target. America remains exceptional in rich-world growth, expanding the gap with other\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-fundamental-2025-outlook-preview/\nTitle: USD/JPY Fundamental 2025 Outlook Preview\nContent: 23, 2024 1:32 PM12/23/2024 6:32:00 PM Share this: By : David Scutt, Market Analyst This is an excerpt from our full 2025 USD/JPY Outlook report, one of nine detailed reports about what to expect in the coming year. US Treasury yields and Fed policy will be key drivers for USD/JPY in 2025 Diverging economic performance between the US and Japan creates backdrop for ongoing volatility. Trump\u2019s fiscal policies, including potential tax cuts and tariffs, could influence inflation expectations and the dollar. BoJ\u2019s moves, including potential rate hikes and intervention, carry sizeable reversal risks Carry trades flows vulnerable to expensive asset valuations, narrowing yield differentials Summary 2024 was a wild year for USD/JPY, with shifting interest rate dynamics and Donald Trump\u2019s re-election as US President driving significant market moves. This report explores the critical factors behind the price action, including relative economic performance, policy responses from the Fed and BoJ,\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-fundamental-2025-outlook-preview/\nTitle: USD/JPY Fundamental 2025 Outlook Preview\nContent: This report explores the critical factors behind the price action, including relative economic performance, policy responses from the Fed and BoJ, along with the importance of continued buoyancy in asset valuations. Key insights include: How correlation between USD/JPY and US Treasury yields shaped market moves, with longer-dated yields taking the lead The role of carry trade flows and Japanese monetary policy in influencing short-term yen fluctuations A technical and fundamental outlook on US Treasury yields and their implications for USD/JPY in 2025 The analysis offers a comprehensive view of USD/JPY\u2019s drivers and scenarios for potential outcomes, providing traders and investors with actionable insights for the year ahead. US exceptionalism on full display 2024 was the year when the US economy simply wouldn\u2019t quit, roaring back to life just as it seemed activity was rolling over, maintaining its streak of exceptional performance relative to other developed nations, including Japan.\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-fundamental-2025-outlook-preview/\nTitle: USD/JPY Fundamental 2025 Outlook Preview\nContent: This report explores the critical factors behind the price action, including relative economic performance, policy responses from the Fed and BoJ, along with the importance of continued buoyancy in asset valuations. Key insights include: How correlation between USD/JPY and US Treasury yields shaped market moves, with longer-dated yields taking the lead The role of carry trade flows and Japanese monetary policy in influencing short-term yen fluctuations A technical and fundamental outlook on US Treasury yields and their implications for USD/JPY in 2025 The analysis offers a comprehensive view of USD/JPY\u2019s drivers and scenarios for potential outcomes, providing traders and investors with actionable insights for the year ahead. US exceptionalism on full display 2024 was the year when the US economy simply wouldn\u2019t quit, roaring back to life just as it seemed activity was rolling over, maintaining its streak of exceptional performance relative to other developed nations, including Japan.\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-fundamental-2025-outlook-preview/\nTitle: USD/JPY Fundamental 2025 Outlook Preview\nContent: This report explores the critical factors behind the price action, including relative economic performance, policy responses from the Fed and BoJ, along with the importance of continued buoyancy in asset valuations. Key insights include: How correlation between USD/JPY and US Treasury yields shaped market moves, with longer-dated yields taking the lead The role of carry trade flows and Japanese monetary policy in influencing short-term yen fluctuations A technical and fundamental outlook on US Treasury yields and their implications for USD/JPY in 2025 The analysis offers a comprehensive view of USD/JPY\u2019s drivers and scenarios for potential outcomes, providing traders and investors with actionable insights for the year ahead. US exceptionalism on full display 2024 was the year when the US economy simply wouldn\u2019t quit, roaring back to life just as it seemed activity was rolling over, maintaining its streak of exceptional performance relative to other developed nations, including Japan.\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-traders-set-for-rocky-2025-on-rediverging-interest-rates-trump-and-north-korea-202412191132\nTitle: \r\n\tJapanese Yen set for volatile 2025 on diverging Fed-BoJ outlook, Trump wild card\r\n\nContent: If the government continues without an election, its budget will likely be less ambitious, thus maintaining the Yen bid. Monetary policy diverging against current trends Despite the growing impact of politics, when it comes to currency trading, the main dish remains monetary policy. As mentioned, the Fed slashed borrowing costs in 2024 while the BoJ raised them. Here is why that could flip in 2025. Fed may keep rates high on US economic strength The US economy is doing well \u2013 that is what the numbers show and what Americans report about their personal well-being. While many complain about the general state of matters and long for lower prices, they keep consuming. As of late 2024, the jobless rate is close to 4% and inflation is edging closer to the Fed's 2% target. America remains exceptional in rich-world growth, expanding the gap with other developed nations. Will this change in 2025? The economy is expected to remain robust, almost regardless of Trump's policies. Advancements in\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-traders-set-for-rocky-2025-on-rediverging-interest-rates-trump-and-north-korea-202412191132\nTitle: \r\n\tJapanese Yen set for volatile 2025 on diverging Fed-BoJ outlook, Trump wild card\r\n\nContent: If the government continues without an election, its budget will likely be less ambitious, thus maintaining the Yen bid. Monetary policy diverging against current trends Despite the growing impact of politics, when it comes to currency trading, the main dish remains monetary policy. As mentioned, the Fed slashed borrowing costs in 2024 while the BoJ raised them. Here is why that could flip in 2025. Fed may keep rates high on US economic strength The US economy is doing well \u2013 that is what the numbers show and what Americans report about their personal well-being. While many complain about the general state of matters and long for lower prices, they keep consuming. As of late 2024, the jobless rate is close to 4% and inflation is edging closer to the Fed's 2% target. America remains exceptional in rich-world growth, expanding the gap with other developed nations. Will this change in 2025? The economy is expected to remain robust, almost regardless of Trump's policies. Advancements in\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-traders-set-for-rocky-2025-on-rediverging-interest-rates-trump-and-north-korea-202412191132\nTitle: \r\n\tJapanese Yen set for volatile 2025 on diverging Fed-BoJ outlook, Trump wild card\r\n\nContent: If the government continues without an election, its budget will likely be less ambitious, thus maintaining the Yen bid. Monetary policy diverging against current trends Despite the growing impact of politics, when it comes to currency trading, the main dish remains monetary policy. As mentioned, the Fed slashed borrowing costs in 2024 while the BoJ raised them. Here is why that could flip in 2025. Fed may keep rates high on US economic strength The US economy is doing well \u2013 that is what the numbers show and what Americans report about their personal well-being. While many complain about the general state of matters and long for lower prices, they keep consuming. As of late 2024, the jobless rate is close to 4% and inflation is edging closer to the Fed's 2% target. America remains exceptional in rich-world growth, expanding the gap with other developed nations. Will this change in 2025? The economy is expected to remain robust, almost regardless of Trump's policies. Advancements in\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-traders-set-for-rocky-2025-on-rediverging-interest-rates-trump-and-north-korea-202412191132\nTitle: \r\n\tJapanese Yen set for volatile 2025 on diverging Fed-BoJ outlook, Trump wild card\r\n\nContent: If the government continues without an election, its budget will likely be less ambitious, thus maintaining the Yen bid. Monetary policy diverging against current trends Despite the growing impact of politics, when it comes to currency trading, the main dish remains monetary policy. As mentioned, the Fed slashed borrowing costs in 2024 while the BoJ raised them. Here is why that could flip in 2025. Fed may keep rates high on US economic strength The US economy is doing well \u2013 that is what the numbers show and what Americans report about their personal well-being. While many complain about the general state of matters and long for lower prices, they keep consuming. As of late 2024, the jobless rate is close to 4% and inflation is edging closer to the Fed's 2% target. America remains exceptional in rich-world growth, expanding the gap with other developed nations. Will this change in 2025? The economy is expected to remain robust, almost regardless of Trump's policies. Advancements in\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-fundamental-2025-outlook-preview/\nTitle: USD/JPY Fundamental 2025 Outlook Preview\nContent: the price action, including relative economic performance, policy responses from the Fed and BoJ, along with the importance of continued buoyancy in asset valuations. Key insights include: How correlation between USD/JPY and US Treasury yields shaped market moves, with longer-dated yields taking the lead The role of carry trade flows and Japanese monetary policy in influencing short-term yen fluctuations A technical and fundamental outlook on US Treasury yields and their implications for USD/JPY in 2025 The analysis offers a comprehensive view of USD/JPY\u2019s drivers and scenarios for potential outcomes, providing traders and investors with actionable insights for the year ahead. US exceptionalism on full display 2024 was the year when the US economy simply wouldn\u2019t quit, roaring back to life just as it seemed activity was rolling over, maintaining its streak of exceptional performance relative to other developed nations, including Japan. You can see the divergence in the first chart,\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:15.365046",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese YenOver the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. This surge has brought the Yen to mid-January highs around 145.28 per dollar. Several factors contribute to this rapid appreciation:Bank of Japan\u2019s Monetary Policy: The Bank of Japan (BoJ) recently raised its benchmark interest rate by 15 basis points to 0.25%, the highest level since 2008. This rate hike, along with plans to halve monthly bond purchases, signals a shift from the ultra-loose monetary policy maintained for the past 17 years.Global Economic Conditions: The ongoing uncertainties in global markets, including inflationary pressures and geopolitical tensions, have driven investors towards safe-haven assets like the Yen. As\n\nSource: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese YenOver the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. This surge has brought the Yen to mid-January highs around 145.28 per dollar. Several factors contribute to this rapid appreciation:Bank of Japan\u2019s Monetary Policy: The Bank of Japan (BoJ) recently raised its benchmark interest rate by 15 basis points to 0.25%, the highest level since 2008. This rate hike, along with plans to halve monthly bond purchases, signals a shift from the ultra-loose monetary policy maintained for the past 17 years.Global Economic Conditions: The ongoing uncertainties in global markets, including inflationary pressures and geopolitical tensions, have driven investors towards safe-haven assets like the Yen. As\n\nSource: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese YenOver the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. This surge has brought the Yen to mid-January highs around 145.28 per dollar. Several factors contribute to this rapid appreciation:Bank of Japan\u2019s Monetary Policy: The Bank of Japan (BoJ) recently raised its benchmark interest rate by 15 basis points to 0.25%, the highest level since 2008. This rate hike, along with plans to halve monthly bond purchases, signals a shift from the ultra-loose monetary policy maintained for the past 17 years.Global Economic Conditions: The ongoing uncertainties in global markets, including inflationary pressures and geopolitical tensions, have driven investors towards safe-haven assets like the Yen. As\n\nSource: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese YenOver the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. This surge has brought the Yen to mid-January highs around 145.28 per dollar. Several factors contribute to this rapid appreciation:Bank of Japan\u2019s Monetary Policy: The Bank of Japan (BoJ) recently raised its benchmark interest rate by 15 basis points to 0.25%, the highest level since 2008. This rate hike, along with plans to halve monthly bond purchases, signals a shift from the ultra-loose monetary policy maintained for the past 17 years.Global Economic Conditions: The ongoing uncertainties in global markets, including inflationary pressures and geopolitical tensions, have driven investors towards safe-haven assets like the Yen. As\n\nSource: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: funds in a currency with low-interest rates and investing them in assets denominated in a currency with higher interest rates. This strategy aims to capture the difference in interest rates, known as the \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese YenOver the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. This surge has brought the Yen to mid-January highs around 145.28 per dollar. Several factors contribute to this rapid appreciation:Bank of Japan\u2019s Monetary Policy: The Bank of Japan (BoJ) recently raised its benchmark interest rate by 15 basis points to 0.25%, the highest level since 2008. This rate hike, along with plans to halve monthly bond purchases, signals a shift from the ultra-loose monetary policy maintained for the past 17\n\nSource: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: The Japanese Yen (JPY), traditionally seen as a safe-haven currency, has recently appreciated significantly, creating ripples in global financial markets. This appreciation has profound implications for the carry trade strategy, which has been a staple for many investors seeking higher returns.What is a Carry Trade?A carry trade involves borrowing funds in a currency with low-interest rates and investing them in assets denominated in a currency with higher interest rates. This strategy aims to capture the difference in interest rates, known as the \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese YenOver the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. This surge has brought the Yen to mid-January highs around 145.28 per dollar. Several\n\nSource: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: The Japanese Yen (JPY), traditionally seen as a safe-haven currency, has recently appreciated significantly, creating ripples in global financial markets. This appreciation has profound implications for the carry trade strategy, which has been a staple for many investors seeking higher returns.What is a Carry Trade?A carry trade involves borrowing funds in a currency with low-interest rates and investing them in assets denominated in a currency with higher interest rates. This strategy aims to capture the difference in interest rates, known as the \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese YenOver the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. This surge has brought the Yen to mid-January highs around 145.28 per dollar. Several\n\nSource: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: The Japanese Yen (JPY), traditionally seen as a safe-haven currency, has recently appreciated significantly, creating ripples in global financial markets. This appreciation has profound implications for the carry trade strategy, which has been a staple for many investors seeking higher returns.What is a Carry Trade?A carry trade involves borrowing funds in a currency with low-interest rates and investing them in assets denominated in a currency with higher interest rates. This strategy aims to capture the difference in interest rates, known as the \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese YenOver the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. This surge has brought the Yen to mid-January highs around 145.28 per dollar. Several\n\nSource: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: Over the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. Analysts have noted that the Yen was significantly oversold, making it ripe for a rebound. The Japanese Yen (JPY), traditionally seen as a safe-haven currency, has recently appreciated significantly, creating ripples in global financial markets. This appreciation has profound implications for the carry trade strategy, which has been a staple for many investors seeking higher returns.What is a Carry Trade?A carry trade involves borrowing funds in a currency with low-interest rates and investing them in assets denominated in a currency with higher interest rates. This strategy aims to capture the difference in interest rates, known as the \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese\n\nSource: https://eaglesjournal.com/understanding-japanese-yen-carry-trade/\nTitle: Why The Japanese Yen 'Carry Trade' Is Making Headlines: A Deep Dive Into Recent Market Trends And Global Markets\nContent: Over the past three weeks, the Japanese Yen has appreciated by approximately 10% against the US Dollar. Analysts have noted that the Yen was significantly oversold, making it ripe for a rebound. The Japanese Yen (JPY), traditionally seen as a safe-haven currency, has recently appreciated significantly, creating ripples in global financial markets. This appreciation has profound implications for the carry trade strategy, which has been a staple for many investors seeking higher returns.What is a Carry Trade?A carry trade involves borrowing funds in a currency with low-interest rates and investing them in assets denominated in a currency with higher interest rates. This strategy aims to capture the difference in interest rates, known as the \u201ccarry,\u201d while also benefiting from exchange rate movements. The Japanese Yen has been a popular funding currency for carry trades due to Japan\u2019s historically low-interest rates and the Yen\u2019s status as a safe-haven currency.The Surge of the Japanese\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:15.383019",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.0204106",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:15.398504",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_subtopics",
+ "output": "\ud83c\udf33 Generating subtopics for 'What is the reason for rising dollar value and what role did Japan played into this?'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:18.341681",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subtopics_generated",
+ "output": "\ud83d\udcca Subtopics generated for 'What is the reason for rising dollar value and what role did Japan played into this?'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:18.355320",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_introduction",
+ "output": "\u270d\ufe0f Writing introduction for 'What is the reason for rising dollar value and what role did Japan played into this?'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:32.027536",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "introduction_written",
+ "output": "\ud83d\udcdd Introduction written for 'What is the reason for rising dollar value and what role did Japan played into this?'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:32.045905",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'The Influence of Diverging Monetary Policies'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:32.062122",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83d\udcb0 Finance Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:32.077458",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: The Influence of Diverging Monetary Policies...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:36.134976",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:39.568694",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['US dollar appreciation and Japanese monetary policy divergence', 'Impact of Bank of Japan policies on USD/JPY exchange rate', 'Diverging monetary policies USA Japan effect on dollar strength 2023 2024 2025', 'Relationship between US dollar strength and Japanese Yen weakness since 2023']...",
+ "metadata": [
+ "US dollar appreciation and Japanese monetary policy divergence",
+ "Impact of Bank of Japan policies on USD/JPY exchange rate",
+ "Diverging monetary policies USA Japan effect on dollar strength 2023 2024 2025",
+ "Relationship between US dollar strength and Japanese Yen weakness since 2023"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:39.582954",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'US dollar appreciation and Japanese monetary policy divergence'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:39.596756",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Impact of Bank of Japan policies on USD/JPY exchange rate'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:39.612519",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Diverging monetary policies USA Japan effect on dollar strength 2023 2024 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:39.637750",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Relationship between US dollar strength and Japanese Yen weakness since 2023'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:42.487408",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\n",
+ "metadata": "https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:42.502199",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.fitchratings.com/research/sovereigns/yen-weakness-against-dollar-to-start-reversing-slowly-in-2023-17-02-2022\n",
+ "metadata": "https://www.fitchratings.com/research/sovereigns/yen-weakness-against-dollar-to-start-reversing-slowly-in-2023-17-02-2022"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:42.517751",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.dir.co.jp/english/research/report/analysis/20221205_023452.pdf\n",
+ "metadata": "https://www.dir.co.jp/english/research/report/analysis/20221205_023452.pdf"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:42.532210",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.nbcnewyork.com/news/business/money-report/why-is-the-japanese-yen-hovering-near-three-month-lows-against-the-dollar/5918115/\n",
+ "metadata": "https://www.nbcnewyork.com/news/business/money-report/why-is-the-japanese-yen-hovering-near-three-month-lows-against-the-dollar/5918115/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:42.549079",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reuters.com/markets/currencies/dollar-firms-yen-weakens-intervention-wary-level-2023-10-26/\n",
+ "metadata": "https://www.reuters.com/markets/currencies/dollar-firms-yen-weakens-intervention-wary-level-2023-10-26/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:42.562916",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:42.578720",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.352913",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.375240",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 5 total images",
+ "metadata": [
+ "https://www.forex.com/en-us/-/media/project/gain-capital/forex/heroes/landing-pages/interest-promotion/five-percent-promo/fx-5pct-interest-rate-nav-graphic.png?h=576&iar=0&w=576&hash=189BBFC93276BE8FE16807E1782C9372",
+ "https://www.forex.com/en-us/-/media/research/global/news-analysis/featured-image/2021/03/0-news-and-analysis-new-header-images-2023/japan/japan_10.jpg?h=300&iar=0&w=1170",
+ "https://www.forex.com/en-us/-/media/research/global/news-analysis/featured-image/2021/03/0-news-and-analysis-new-header-images-2023/japan/japan_02.jpg?h=300&iar=0&w=1170",
+ "https://www.forex.com/en-us/-/media/research/global/news-analysis/featured-image/banners/nanda-banner-us_flag_map_eye.jpg?h=300&iar=0&w=1170"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.389275",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.404302",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Relationship between US dollar strength and Japanese Yen weakness since 2023...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.485082",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://fxopen.com/blog/en/analytical-usd-to-jpy-predictions-in-2024-2025-and-beyond/\n",
+ "metadata": "https://fxopen.com/blog/en/analytical-usd-to-jpy-predictions-in-2024-2025-and-beyond/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.499505",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\n",
+ "metadata": "https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.514216",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.spglobal.com/marketintelligence/en/news-insights/latest-news-headlines/surging-yen-threatens-megabanks-currency-gains-amid-diverging-us-japan-policies-82916539\n",
+ "metadata": "https://www.spglobal.com/marketintelligence/en/news-insights/latest-news-headlines/surging-yen-threatens-megabanks-currency-gains-amid-diverging-us-japan-policies-82916539"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.528844",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-traders-set-for-rocky-2025-on-rediverging-interest-rates-trump-and-north-korea-202412191132\n",
+ "metadata": "https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-traders-set-for-rocky-2025-on-rediverging-interest-rates-trump-and-north-korea-202412191132"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.542872",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://money.usnews.com/investing/news/articles/2024-04-15/analysis-dollars-rally-supercharged-by-diverging-us-rate-outlook\n",
+ "metadata": "https://money.usnews.com/investing/news/articles/2024-04-15/analysis-dollars-rally-supercharged-by-diverging-us-rate-outlook"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.557442",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:44.572800",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:48.894585",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:48.910787",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:48.926071",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:48.942069",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Diverging monetary policies USA Japan effect on dollar strength 2023 2024 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:49.153574",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://yen.jp/en/archives/816\n",
+ "metadata": "https://yen.jp/en/archives/816"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:49.169292",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.wellington.com/en/insights/impact-Japan-monetary-policy-shift\n",
+ "metadata": "https://www.wellington.com/en/insights/impact-Japan-monetary-policy-shift"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:49.189576",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\n",
+ "metadata": "https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:49.205610",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://yen.jp/en/archives/1332\n",
+ "metadata": "https://yen.jp/en/archives/1332"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:49.222187",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://talkmarkets.com/content/global-markets/the-impact-for-the-jpy-on-a-boj-rate-change?post=477981\n",
+ "metadata": "https://talkmarkets.com/content/global-markets/the-impact-for-the-jpy-on-a-boj-rate-change?post=477981"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:49.240092",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:49.256892",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.449599",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.465028",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 2 total images",
+ "metadata": [
+ "https://yen.jp/wp-content/uploads/2024/04/cropped-yen-yoko.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.485773",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.502142",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Impact of Bank of Japan policies on USD/JPY exchange rate...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.694046",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.newyorkfed.org/medialibrary/media/newsevents/news/markets/2021/fxq421.pdf\n",
+ "metadata": "https://www.newyorkfed.org/medialibrary/media/newsevents/news/markets/2021/fxq421.pdf"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.708027",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/tokyo-fintech/nli-research-weekly-economist-letter-rapid-yen-appreciation-29f06b0310c4\n",
+ "metadata": "https://medium.com/tokyo-fintech/nli-research-weekly-economist-letter-rapid-yen-appreciation-29f06b0310c4"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.723292",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united\n",
+ "metadata": "https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.737565",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.traditiondata.com/news/us-dollar-appreciation-continues-against-japanese-yen/\n",
+ "metadata": "https://www.traditiondata.com/news/us-dollar-appreciation-continues-against-japanese-yen/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.753389",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.mydayfinance.com/archives/24385\n",
+ "metadata": "https://www.mydayfinance.com/archives/24385"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.768450",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:52.783564",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:55.200236",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:55.215485",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://www.traditiondata.com/wp-content/uploads/2024/05/Graphic-USDJPY-1024x535.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:55.229765",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:55.245605",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: US dollar appreciation and Japanese monetary policy divergence...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:58:57.818436",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: By : Matt Weller CFA, CMT, Head of Market Research December 5, 2023 3:36 PM12/5/2023 8:36:00 PM Share this: By : Matt Weller CFA, CMT, Head of Market Research USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, that obvious-in-retrospect trade may be poised to reverse heading\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: By : Matt Weller CFA, CMT, Head of Market Research December 5, 2023 3:36 PM12/5/2023 8:36:00 PM Share this: By : Matt Weller CFA, CMT, Head of Market Research USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, that obvious-in-retrospect trade may be poised to reverse heading\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: By : Matt Weller CFA, CMT, Head of Market Research December 5, 2023 3:36 PM12/5/2023 8:36:00 PM Share this: By : Matt Weller CFA, CMT, Head of Market Research USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, that obvious-in-retrospect trade may be poised to reverse heading\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: / News & Analysis / USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here... By : Matt Weller CFA, CMT, Head of Market Research December 5, 2023 3:36 PM12/5/2023 8:36:00 PM Share this: By : Matt Weller CFA, CMT, Head of Market Research USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: / News & Analysis / USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here... By : Matt Weller CFA, CMT, Head of Market Research December 5, 2023 3:36 PM12/5/2023 8:36:00 PM Share this: By : Matt Weller CFA, CMT, Head of Market Research USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: By : Matt Weller CFA, CMT, Head of Market Research USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, that obvious-in-retrospect trade may be poised to reverse heading into 2024. Now, the expectation is that most developed central banks will be cutting interest rates, with\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: By : Matt Weller CFA, CMT, Head of Market Research USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, that obvious-in-retrospect trade may be poised to reverse heading into 2024. Now, the expectation is that most developed central banks will be cutting interest rates, with\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: By : Matt Weller CFA, CMT, Head of Market Research USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, that obvious-in-retrospect trade may be poised to reverse heading into 2024. Now, the expectation is that most developed central banks will be cutting interest rates, with\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, that obvious-in-retrospect trade may be poised to reverse heading into 2024. Now, the expectation is that most developed central banks will be cutting interest rates, with markets pricing in around five 2024 interest rate cuts\n\nSource: https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/\nTitle: USD/JPY Analysis: Moment of Truth for the Yen\u2019s 2023 Trend\nContent: USD/JPY Key Points The Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, Japan\u2019s interest rate spreads may narrow from both sides in 2024, setting the stage for potential downtrends in XXX/JPY pairs. USD/JPY bulls are defending support in the 147.00 area, but the pair is vulnerable to a big breakdown from here. Japanese Yen Fundamental Analysis For forex traders, the single most dominant trend of 2023 has been persistent weakness in the Japanese yen. Left behind by major central banks raising interest rates more aggressively than expected, the Bank of Japan\u2019s steadfast devotion to keeping interest rates at 0% has served as an albatross on the yen\u2019s neck all year long. However, that obvious-in-retrospect trade may be poised to reverse heading into 2024. Now, the expectation is that most developed central banks will be cutting interest rates, with markets pricing in around five 2024 interest rate cuts\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:17.034786",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united\nTitle: Macroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent | South China Morning Post\nContent: AdvertisementMacroscopeBusinessCompaniesNeal KimberleyMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparentLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be overReading Time:3 minutesWhy you can trust SCMPNeal KimberleyPublished: 8:31am, 3 Oct 2017Updated: 10:52pm, 3 Oct 2017Last month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be over. The glaring divergence in monetary policy between Japan and the United States becomes ever more apparent even as both Tokyo and Washington are both heading, if in different ways, towards stances on tax that are more fiscally expansive.AdvertisementIn Japan\u2019s case, the decision of Prime Minister Shinzo Abe to call a press stud Lower House election for October 22 has not only resulted in a reconfiguration of Japan\u2019s political landscape but also resulted in a recalibration of intentions with regard to a hike in the country\u2019s\n\nSource: https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united\nTitle: Macroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent | South China Morning Post\nContent: AdvertisementMacroscopeBusinessCompaniesNeal KimberleyMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparentLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be overReading Time:3 minutesWhy you can trust SCMPNeal KimberleyPublished: 8:31am, 3 Oct 2017Updated: 10:52pm, 3 Oct 2017Last month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be over. The glaring divergence in monetary policy between Japan and the United States becomes ever more apparent even as both Tokyo and Washington are both heading, if in different ways, towards stances on tax that are more fiscally expansive.AdvertisementIn Japan\u2019s case, the decision of Prime Minister Shinzo Abe to call a press stud Lower House election for October 22 has not only resulted in a reconfiguration of Japan\u2019s political landscape but also resulted in a recalibration of intentions with regard to a hike in the country\u2019s\n\nSource: https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united\nTitle: Macroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent | South China Morning Post\nContent: AdvertisementMacroscopeBusinessCompaniesNeal KimberleyMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparentLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be overReading Time:3 minutesWhy you can trust SCMPNeal KimberleyPublished: 8:31am, 3 Oct 2017Updated: 10:52pm, 3 Oct 2017Last month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be over. The glaring divergence in monetary policy between Japan and the United States becomes ever more apparent even as both Tokyo and Washington are both heading, if in different ways, towards stances on tax that are more fiscally expansive.AdvertisementIn Japan\u2019s case, the decision of Prime Minister Shinzo Abe to call a press stud Lower House election for October 22 has not only resulted in a reconfiguration of Japan\u2019s political landscape but also resulted in a recalibration of intentions with regard to a hike in the country\u2019s\n\nSource: https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united\nTitle: Macroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent | South China Morning Post\nContent: AdvertisementMacroscopeBusinessCompaniesNeal KimberleyMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparentLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be overReading Time:3 minutesWhy you can trust SCMPNeal KimberleyPublished: 8:31am, 3 Oct 2017Updated: 10:52pm, 3 Oct 2017Last month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be over. The glaring divergence in monetary policy between Japan and the United States becomes ever more apparent even as both Tokyo and Washington are both heading, if in different ways, towards stances on tax that are more fiscally expansive.AdvertisementIn Japan\u2019s case, the decision of Prime Minister Shinzo Abe to call a press stud Lower House election for October 22 has not only resulted in a reconfiguration of Japan\u2019s political landscape but also resulted in a recalibration of intentions with regard to a hike in the country\u2019s\n\nSource: https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united\nTitle: Macroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent | South China Morning Post\nContent: AdvertisementMacroscopeBusinessCompaniesNeal KimberleyMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparentLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be overReading Time:3 minutesWhy you can trust SCMPNeal KimberleyPublished: 8:31am, 3 Oct 2017Updated: 10:52pm, 3 Oct 2017Last month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be over. The glaring divergence in monetary policy between Japan and the United States becomes ever more apparent even as both Tokyo and Washington are both heading, if in different ways, towards stances on tax that are more fiscally expansive.AdvertisementIn Japan\u2019s case, the decision of Prime Minister Shinzo Abe to call a press stud Lower House election for October 22 has not only resulted in a reconfiguration of Japan\u2019s political landscape but also resulted in a recalibration of intentions with regard to a hike in the country\u2019s\n\nSource: https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united\nTitle: Macroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent | South China Morning Post\nContent: AdvertisementMacroscopeBusinessCompaniesNeal KimberleyMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparentLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be overReading Time:3 minutesWhy you can trust SCMPNeal KimberleyPublished: 8:31am, 3 Oct 2017Updated: 10:52pm, 3 Oct 2017Last month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be over. The glaring divergence in monetary policy between Japan and the United States becomes ever more apparent even as both Tokyo and Washington are both heading, if in different ways, towards stances on tax that are more fiscally expansive.AdvertisementIn Japan\u2019s case, the decision of Prime Minister Shinzo Abe to call a press stud Lower House election for October 22 has not only resulted in a reconfiguration of Japan\u2019s political landscape but also resulted in a recalibration of intentions with regard to a hike in the country\u2019s\n\nSource: https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united\nTitle: Macroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent | South China Morning Post\nContent: AdvertisementMacroscopeBusinessCompaniesNeal KimberleyMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparentLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be overReading Time:3 minutesWhy you can trust SCMPNeal KimberleyPublished: 8:31am, 3 Oct 2017Updated: 10:52pm, 3 Oct 2017Last month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be over. The glaring divergence in monetary policy between Japan and the United States becomes ever more apparent even as both Tokyo and Washington are both heading, if in different ways, towards stances on tax that are more fiscally expansive.AdvertisementIn Japan\u2019s case, the decision of Prime Minister Shinzo Abe to call a press stud Lower House election for October 22 has not only resulted in a reconfiguration of Japan\u2019s political landscape but also resulted in a recalibration of intentions with regard to a hike in the country\u2019s\n\nSource: https://www.scmp.com/business/companies/article/2113745/glaring-divergence-monetary-policy-between-japan-and-united\nTitle: Macroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent | South China Morning Post\nContent: MacroscopeBusinessCompaniesNeal KimberleyMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparentLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be overReading Time:3 minutesWhy you can trust SCMP\nMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent\nMacroscope | Glaring divergence in monetary policy between Japan and the United States becomes ever more apparent\nGlaring divergence in monetary policy between Japan and the United States becomes ever more apparent\nLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be over\nLast month\u2019s drift lower in the value of the Japanese yen versus the US dollar may not be over\nReading Time:3 minutesWhy you can trust SCMP\nReading Time:3 minutes\nWhy you can trust SCMP\nWhy you can trust SCMP\n\nSource: https://medium.com/tokyo-fintech/nli-research-weekly-economist-letter-rapid-yen-appreciation-29f06b0310c4\nTitle: NLI Research \u2014 Weekly Economist Letter: Rapid Yen Appreciation | by Norbert Gehrke | Tokyo FinTech | Medium\nContent: USD/JPY exchange rate, with a particular focus on US economic conditions and the monetary policy divergence between the Fed and the BOJ. While the baseline scenario anticipates a gradual yen appreciation, it emphasizes the potential for significant volatility stemming from unexpected economic developments and political uncertainties in both Japan and the US.\n\nSource: https://medium.com/tokyo-fintech/nli-research-weekly-economist-letter-rapid-yen-appreciation-29f06b0310c4\nTitle: NLI Research \u2014 Weekly Economist Letter: Rapid Yen Appreciation | by Norbert Gehrke | Tokyo FinTech | Medium\nContent: USD/JPY exchange rate, with a particular focus on US economic conditions and the monetary policy divergence between the Fed and the BOJ. While the baseline scenario anticipates a gradual yen appreciation, it emphasizes the potential for significant volatility stemming from unexpected economic developments and political uncertainties in both Japan and the US.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:19.374534",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: Divergent monetary policies to continue playing a role in 2024\nDivergent monetary policy is expected to continue playing a pivotal role in 2024, along with developments in the bond market. According to a Reuters poll conducted in December, economists anticipate the Fed keeping interest rates unchanged until at least July 2024. The majority of experts view the first rate cut as an adjustment to real interest rates rather than the start of a stimulus cycle. This could weigh on the US Dollar, but it should be limited if the US economy continues to outperform and other central banks cut rates before the Fed. However, it is not against the Japanese Yen as the BoJ is not among those central banks expected to cut rates.\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: However, there were instances when US yields dropped despite upbeat markets, which somewhat limited the strength of USD/JPY. On the contrary, when fears of higher interest rates permeated the markets, leading stocks to decline, US yields moved higher, thereby maintaining bullish pressure on USD/JPY.\nDivergent monetary policies to continue playing a role in 2024\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: price increases. However, there were instances when US yields dropped despite upbeat markets, which somewhat limited the strength of USD/JPY. On the contrary, when fears of higher interest rates permeated the markets, leading stocks to decline, US yields moved higher, thereby maintaining bullish pressure on USD/JPY. Divergent monetary policies to continue playing a role in 2024 Divergent monetary policy is expected to continue playing a pivotal role in 2024, along with developments in the bond market. According to a Reuters poll conducted in December, economists anticipate the Fed keeping interest rates unchanged until at least July 2024. The majority of experts view the first rate cut as an adjustment to real interest rates rather than the start of a stimulus cycle. This could weigh on the US Dollar, but it should be limited if the US economy continues to outperform and other central banks cut rates before the Fed. However, it is not against the Japanese Yen as the BoJ is not among\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: price increases. However, there were instances when US yields dropped despite upbeat markets, which somewhat limited the strength of USD/JPY. On the contrary, when fears of higher interest rates permeated the markets, leading stocks to decline, US yields moved higher, thereby maintaining bullish pressure on USD/JPY. Divergent monetary policies to continue playing a role in 2024 Divergent monetary policy is expected to continue playing a pivotal role in 2024, along with developments in the bond market. According to a Reuters poll conducted in December, economists anticipate the Fed keeping interest rates unchanged until at least July 2024. The majority of experts view the first rate cut as an adjustment to real interest rates rather than the start of a stimulus cycle. This could weigh on the US Dollar, but it should be limited if the US economy continues to outperform and other central banks cut rates before the Fed. However, it is not against the Japanese Yen as the BoJ is not among\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: price increases. However, there were instances when US yields dropped despite upbeat markets, which somewhat limited the strength of USD/JPY. On the contrary, when fears of higher interest rates permeated the markets, leading stocks to decline, US yields moved higher, thereby maintaining bullish pressure on USD/JPY. Divergent monetary policies to continue playing a role in 2024 Divergent monetary policy is expected to continue playing a pivotal role in 2024, along with developments in the bond market. According to a Reuters poll conducted in December, economists anticipate the Fed keeping interest rates unchanged until at least July 2024. The majority of experts view the first rate cut as an adjustment to real interest rates rather than the start of a stimulus cycle. This could weigh on the US Dollar, but it should be limited if the US economy continues to outperform and other central banks cut rates before the Fed. However, it is not against the Japanese Yen as the BoJ is not among\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: price increases. However, there were instances when US yields dropped despite upbeat markets, which somewhat limited the strength of USD/JPY. On the contrary, when fears of higher interest rates permeated the markets, leading stocks to decline, US yields moved higher, thereby maintaining bullish pressure on USD/JPY. Divergent monetary policies to continue playing a role in 2024 Divergent monetary policy is expected to continue playing a pivotal role in 2024, along with developments in the bond market. According to a Reuters poll conducted in December, economists anticipate the Fed keeping interest rates unchanged until at least July 2024. The majority of experts view the first rate cut as an adjustment to real interest rates rather than the start of a stimulus cycle. This could weigh on the US Dollar, but it should be limited if the US economy continues to outperform and other central banks cut rates before the Fed. However, it is not against the Japanese Yen as the BoJ is not among\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: price increases. However, there were instances when US yields dropped despite upbeat markets, which somewhat limited the strength of USD/JPY. On the contrary, when fears of higher interest rates permeated the markets, leading stocks to decline, US yields moved higher, thereby maintaining bullish pressure on USD/JPY. Divergent monetary policies to continue playing a role in 2024 Divergent monetary policy is expected to continue playing a pivotal role in 2024, along with developments in the bond market. According to a Reuters poll conducted in December, economists anticipate the Fed keeping interest rates unchanged until at least July 2024. The majority of experts view the first rate cut as an adjustment to real interest rates rather than the start of a stimulus cycle. This could weigh on the US Dollar, but it should be limited if the US economy continues to outperform and other central banks cut rates before the Fed. However, it is not against the Japanese Yen as the BoJ is not among\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: The regular correlation suggests that US yields tend to decrease when market sentiment turns negative, while yields rise alongside stock price increases. However, there were instances when US yields dropped despite upbeat markets, which somewhat limited the strength of USD/JPY. On the contrary, when fears of higher interest rates permeated the markets, leading stocks to decline, US yields moved higher, thereby maintaining bullish pressure on USD/JPY. Divergent monetary policies to continue playing a role in 2024 Divergent monetary policy is expected to continue playing a pivotal role in 2024, along with developments in the bond market. According to a Reuters poll conducted in December, economists anticipate the Fed keeping interest rates unchanged until at least July 2024. The majority of experts view the first rate cut as an adjustment to real interest rates rather than the start of a stimulus cycle. This could weigh on the US Dollar, but it should be limited if the US economy\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: The regular correlation suggests that US yields tend to decrease when market sentiment turns negative, while yields rise alongside stock price increases. However, there were instances when US yields dropped despite upbeat markets, which somewhat limited the strength of USD/JPY. On the contrary, when fears of higher interest rates permeated the markets, leading stocks to decline, US yields moved higher, thereby maintaining bullish pressure on USD/JPY. Divergent monetary policies to continue playing a role in 2024 Divergent monetary policy is expected to continue playing a pivotal role in 2024, along with developments in the bond market. According to a Reuters poll conducted in December, economists anticipate the Fed keeping interest rates unchanged until at least July 2024. The majority of experts view the first rate cut as an adjustment to real interest rates rather than the start of a stimulus cycle. This could weigh on the US Dollar, but it should be limited if the US economy\n\nSource: https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236\nTitle: \r\n\tJapanese Yen set to benefit from less dovish BoJ in 2024\r\n\nContent: The regular correlation suggests that US yields tend to decrease when market sentiment turns negative, while yields rise alongside stock price increases. However, there were instances when US yields dropped despite upbeat markets, which somewhat limited the strength of USD/JPY. On the contrary, when fears of higher interest rates permeated the markets, leading stocks to decline, US yields moved higher, thereby maintaining bullish pressure on USD/JPY. Divergent monetary policies to continue playing a role in 2024 Divergent monetary policy is expected to continue playing a pivotal role in 2024, along with developments in the bond market. According to a Reuters poll conducted in December, economists anticipate the Fed keeping interest rates unchanged until at least July 2024. The majority of experts view the first rate cut as an adjustment to real interest rates rather than the start of a stimulus cycle. This could weigh on the US Dollar, but it should be limited if the US economy\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:20.276765",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: What is the impact of the Bank of Japan's monetary policy on the USD/JPY exchange rate? The Bank of Japan's monetary policy directly influences the USD/JPY pair. For example, if the BoJ were to adopt a policy of low interest rates or engage in quantitative easing, it would typically result in a weakening of the Japanese yen.\nWhat is the impact of the Bank of Japan's monetary policy on the USD/JPY exchange rate?\nThe Bank of Japan's monetary policy directly influences the USD/JPY pair. For example, if the BoJ were to adopt a policy of low interest rates or engage in quantitative easing, it would typically result in a weakening of the Japanese yen.\nHow do geopolitical tensions sway the USD/JPY exchange rate? Geopolitical tensions can trigger fluctuations in the USD/JPY exchange rate. For instance, conflicts or political instability can lead to a surge in demand for safe haven currencies like the yen, causing it to appreciate against the dollar.\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate? The monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate.\nHow do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate? The monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate.\nHow do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate?\nThe monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate.\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate? The monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate. What is the impact of the interest rate differential between the Federal Reserve and the Bank of Japan on the USD/JPY exchange rate? The gap in policy rates between the Federal Reserve and the BoJ plays a pivotal role in determining the USD/JPY exchange rate. A higher interest rate makes a currency more appealing as it allows asset holders to earn a higher yield. What was the fallout on the USD/JPY exchange rate when the BoJ chose not to raise interest rates like other central banks? The yen plummeted to a 24-year low against the dollar in mid-2022 when the BoJ decided not to hike interest rates like other central banks. This was because Japan's central bank\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate? The monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate. What is the impact of the interest rate differential between the Federal Reserve and the Bank of Japan on the USD/JPY exchange rate? The gap in policy rates between the Federal Reserve and the BoJ plays a pivotal role in determining the USD/JPY exchange rate. A higher interest rate makes a currency more appealing as it allows asset holders to earn a higher yield. What was the fallout on the USD/JPY exchange rate when the BoJ chose not to raise interest rates like other central banks? The yen plummeted to a 24-year low against the dollar in mid-2022 when the BoJ decided not to hike interest rates like other central banks. This was because Japan's central bank\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate? The monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate. What is the impact of the interest rate differential between the Federal Reserve and the Bank of Japan on the USD/JPY exchange rate? The gap in policy rates between the Federal Reserve and the BoJ plays a pivotal role in determining the USD/JPY exchange rate. A higher interest rate makes a currency more appealing as it allows asset holders to earn a higher yield. What was the fallout on the USD/JPY exchange rate when the BoJ chose not to raise interest rates like other central banks? The yen plummeted to a 24-year low against the dollar in mid-2022 when the BoJ decided not to hike interest rates like other central banks. This was because Japan's central bank\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate? The monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate. What is the impact of the interest rate differential between the Federal Reserve and the Bank of Japan on the USD/JPY exchange rate? The gap in policy rates between the Federal Reserve and the BoJ plays a pivotal role in determining the USD/JPY exchange rate. A higher interest rate makes a currency more appealing as it allows asset holders to earn a higher yield. What was the fallout on the USD/JPY exchange rate when the BoJ chose not to raise interest rates like other central banks? The yen plummeted to a 24-year low against the dollar in mid-2022 when the BoJ decided not to hike interest rates like other central banks. This was because Japan's central bank\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate? The monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate. What is the impact of the interest rate differential between the Federal Reserve and the Bank of Japan on the USD/JPY exchange rate? The gap in policy rates between the Federal Reserve and the BoJ plays a pivotal role in determining the USD/JPY exchange rate. A higher interest rate makes a currency more appealing as it allows asset holders to earn a higher yield. What was the fallout on the USD/JPY exchange rate when the BoJ chose not to raise interest rates like other central banks? The yen plummeted to a 24-year low against the dollar in mid-2022 when the BoJ decided not to hike interest rates like other central banks. This was because Japan's central bank\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate? The monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate. What is the impact of the interest rate differential between the Federal Reserve and the Bank of Japan on the USD/JPY exchange rate? The gap in policy rates between the Federal Reserve and the BoJ plays a pivotal role in determining the USD/JPY exchange rate. A higher interest rate makes a currency more appealing as it allows asset holders to earn a higher yield. What was the fallout on the USD/JPY exchange rate when the BoJ chose not to raise interest rates like other central banks? The yen plummeted to a 24-year low against the dollar in mid-2022 when the BoJ decided not to hike interest rates like other central banks. This was because Japan's central bank\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How do the Federal Reserve and the Bank of Japan shape the USD/JPY exchange rate? The monetary policies set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) have a direct bearing on the USD/JPY exchange rate. Their decisions on interest rates and other policy measures can trigger substantial shifts in the exchange rate. What is the impact of the interest rate differential between the Federal Reserve and the Bank of Japan on the USD/JPY exchange rate? The gap in policy rates between the Federal Reserve and the BoJ plays a pivotal role in determining the USD/JPY exchange rate. A higher interest rate makes a currency more appealing as it allows asset holders to earn a higher yield. What was the fallout on the USD/JPY exchange rate when the BoJ chose not to raise interest rates like other central banks? The yen plummeted to a 24-year low against the dollar in mid-2022 when the BoJ decided not to hike interest rates like other central banks. This was because Japan's central bank\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:20.294162",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.011411679999999999",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:20.311099",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_draft_sections",
+ "output": "\ud83d\udcd1 Generating draft section titles for 'The Influence of Diverging Monetary Policies'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:24.578690",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "draft_sections_generated",
+ "output": "\ud83d\uddc2\ufe0f Draft section titles generated for 'The Influence of Diverging Monetary Policies'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:24.599197",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_relevant_written_content",
+ "output": "\ud83d\udd0e Getting relevant written content based on query: The Influence of Diverging Monetary Policies...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:24.633555",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'The Influence of Diverging Monetary Policies'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:57.825328",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'The Influence of Diverging Monetary Policies'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:57.852221",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'The Role of Japan's Economic Conditions and Policies'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:57.868345",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83d\udcb0 Finance Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T01:59:57.886048",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: The Role of Japan's Economic Conditions and Policies...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:02.578499",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:04.449988",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: [\"Japan's economic policies contributing to rising dollar value\", 'Impact of Japanese monetary policy on USD/JPY exchange rate', \"Correlation between Japan's economic stagnation and dollar appreciation\", \"Role of Japan's foreign economic policies in strengthening the US dollar\"]...",
+ "metadata": [
+ "Japan's economic policies contributing to rising dollar value",
+ "Impact of Japanese monetary policy on USD/JPY exchange rate",
+ "Correlation between Japan's economic stagnation and dollar appreciation",
+ "Role of Japan's foreign economic policies in strengthening the US dollar"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:04.469507",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Japan's economic policies contributing to rising dollar value'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:04.488028",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Impact of Japanese monetary policy on USD/JPY exchange rate'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:04.504201",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Correlation between Japan's economic stagnation and dollar appreciation'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:04.522082",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Role of Japan's foreign economic policies in strengthening the US dollar'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:07.402783",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://yen.jp/en/archives/816\n",
+ "metadata": "https://yen.jp/en/archives/816"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:07.418209",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://accountinginsights.org/key-factors-influencing-jpy-to-usd-exchange-rate-dynamics/\n",
+ "metadata": "https://accountinginsights.org/key-factors-influencing-jpy-to-usd-exchange-rate-dynamics/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:07.435885",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://talkmarkets.com/content/global-markets/the-impact-for-the-jpy-on-a-boj-rate-change?post=477981\n",
+ "metadata": "https://talkmarkets.com/content/global-markets/the-impact-for-the-jpy-on-a-boj-rate-change?post=477981"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:07.451426",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://yen.jp/en/archives/1204\n",
+ "metadata": "https://yen.jp/en/archives/1204"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:07.467721",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\n",
+ "metadata": "https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:07.484816",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:07.502258",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:08.804303",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:08.824506",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 2 total images",
+ "metadata": [
+ "https://yen.jp/wp-content/uploads/2024/04/cropped-yen-yoko.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:08.841135",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:08.857289",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Impact of Japanese monetary policy on USD/JPY exchange rate...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:08.980033",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://worldfinancialreview.com/revisiting-the-japans-economic-stagnation/\n",
+ "metadata": "https://worldfinancialreview.com/revisiting-the-japans-economic-stagnation/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:09.003117",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\n",
+ "metadata": "https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:09.022783",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://eml.berkeley.edu/~obstfeld/paper_march09.pdf\n",
+ "metadata": "https://eml.berkeley.edu/~obstfeld/paper_march09.pdf"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:09.040906",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://jscholarship.library.jhu.edu/server/api/core/bitstreams/885f2282-b9e8-4662-aebf-25f0b8495a88/content\n",
+ "metadata": "https://jscholarship.library.jhu.edu/server/api/core/bitstreams/885f2282-b9e8-4662-aebf-25f0b8495a88/content"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:09.057800",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.rieti.go.jp/en/papers/contribution/ito-hiroyuki/05.html\n",
+ "metadata": "https://www.rieti.go.jp/en/papers/contribution/ito-hiroyuki/05.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:09.088853",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:09.105854",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:26.727896",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:26.756904",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 14 total images",
+ "metadata": [
+ "https://www.rieti.go.jp/en/papers/contribution/ito-hiroyuki/data/05_figure_1.png",
+ "https://www.rieti.go.jp/en/papers/contribution/ito-hiroyuki/data/05_figure_3.png",
+ "https://www.rieti.go.jp/en/papers/contribution/ito-hiroyuki/data/05_figure_4.png",
+ "https://worldfinancialreview.com/wp-content/uploads/2024/02/figure-1a.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:26.772835",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:26.791784",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Correlation between Japan's economic stagnation and dollar appreciation...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:27.060709",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/\n",
+ "metadata": "https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:27.077943",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.nber.org/system/files/chapters/c5854/c5854.pdf\n",
+ "metadata": "https://www.nber.org/system/files/chapters/c5854/c5854.pdf"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:27.096289",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://academic.oup.com/edited-volume/28215/chapter/213218659\n",
+ "metadata": "https://academic.oup.com/edited-volume/28215/chapter/213218659"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:27.114166",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.sciencedirect.com/science/article/pii/S2667111523000129\n",
+ "metadata": "https://www.sciencedirect.com/science/article/pii/S2667111523000129"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:27.131631",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.ft.com/content/a1d7501b-f470-4700-8b87-7fa7e98f3b5c\n",
+ "metadata": "https://www.ft.com/content/a1d7501b-f470-4700-8b87-7fa7e98f3b5c"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:27.149175",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:27.167712",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:28.417265",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:28.434528",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:28.451757",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:28.473083",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Role of Japan's foreign economic policies in strengthening the US dollar...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:28.543527",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html\n",
+ "metadata": "https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:28.563143",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.stlouisfed.org/on-the-economy/2024/oct/japans-consolidated-balance-sheet-challenges-monetary-policy\n",
+ "metadata": "https://www.stlouisfed.org/on-the-economy/2024/oct/japans-consolidated-balance-sheet-challenges-monetary-policy"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:28.581532",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.theglobaltreasurer.com/2024/05/03/understanding-the-japanese-yen-dynamics-and-its-relationships-to-the-dollar/\n",
+ "metadata": "https://www.theglobaltreasurer.com/2024/05/03/understanding-the-japanese-yen-dynamics-and-its-relationships-to-the-dollar/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:28.599460",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:28.622339",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 3 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:32.868089",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:32.888154",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:32.905411",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:32.923052",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Japan's economic policies contributing to rising dollar value...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:33.054953",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: Monetary Policies Affect USD/JPY\nMonetary Policies Affect USD/JPY\nTechnical Analysis of USD/JPY\nTechnical Analysis of USD/JPY\nFinancial Knowledge Test Center\nFinancial Knowledge Test Center\nCounterfeit Currency In Japan\nCounterfeit Currency In Japan\nLive Exchange Market Status\nLive Exchange Market Status\nKYC & AML\nKYC & AML\nFinancial Insights & News Cultural Insights Currency SpotlightDaily Rate Update Economic InsightsExchange Rate ReportsExchange Trends Expat Stories Historical PerspectivesInvestment Tips Market News Money Saving TipsRegulatory Updates Technology in Finance Travel Tips\nFinancial Insights & News\nDaily Rate Update\nDaily Rate Update\nExchange Rate Reports\nExchange Rate Reports\nMoney Saving Tips\nMoney Saving Tips\nTechnology in Finance\nTechnology in Finance\nRatesCalculator LocationsNews Chart Markets\nRatesCalculator LocationsNews Chart Markets\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\nHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How does the U.S. Federal Reserve's monetary policy shape the USD/JPY exchange rate?\nThe monetary policy set by the U.S. Federal Reserve has a direct bearing on the USD/JPY pair. For instance, if the federal funds rate were to rise from near zero to 2% while the BoJ's policy rate remained near zero, the dollar would likely gain strength against the yen.\nWhat is the impact of the Bank of Japan's monetary policy on the USD/JPY exchange rate? The Bank of Japan's monetary policy directly influences the USD/JPY pair. For example, if the BoJ were to adopt a policy of low interest rates or engage in quantitative easing, it would typically result in a weakening of the Japanese yen.\n\nSource: https://accountinginsights.org/key-factors-influencing-jpy-to-usd-exchange-rate-dynamics/\nTitle: Key Factors Influencing JPY to USD Exchange Rate Dynamics - Accounting Insights\nContent: Another influential factor is interest rate differentials between Japan and the United States. When the US Federal Reserve raises interest rates, it often leads to a stronger USD as higher returns attract foreign capital. On the other hand, if the Bank of Japan maintains lower interest rates, the JPY may weaken as investors seek higher yields elsewhere. This dynamic interplay between interest rates can create significant volatility in the exchange rate.\nMarket speculation also plays a crucial role. Traders and financial institutions often engage in speculative activities based on anticipated movements in the exchange rate. These speculations can be driven by a variety of factors, including economic data releases, political events, and even natural disasters. For instance, an unexpected economic downturn in Japan might lead speculators to sell off yen, causing its value to drop against the dollar.\nImpact of Monetary Policies\n\nSource: https://accountinginsights.org/key-factors-influencing-jpy-to-usd-exchange-rate-dynamics/\nTitle: Key Factors Influencing JPY to USD Exchange Rate Dynamics - Accounting Insights\nContent: rates and quantitative easing. These measures are designed to increase liquidity in the market, encouraging borrowing and spending. However, such policies can also lead to a depreciation of the JPY, as lower interest rates make yen-denominated assets less attractive to investors seeking higher returns. The divergence in monetary policy between the two central banks can create a significant impact on the exchange rate. For example, during periods when the Federal Reserve is tightening its monetary policy while the BoJ is easing, the USD typically strengthens against the JPY. This divergence can be observed in the market through the widening of interest rate differentials, which traders closely monitor to make informed decisions. In addition to interest rate policies, other monetary tools such as open market operations and forward guidance also play a role. The Federal Reserve\u2019s use of open market operations to control the money supply can influence the USD\u2019s value. Similarly, the BoJ\u2019s\n\nSource: https://accountinginsights.org/key-factors-influencing-jpy-to-usd-exchange-rate-dynamics/\nTitle: Key Factors Influencing JPY to USD Exchange Rate Dynamics - Accounting Insights\nContent: rates and quantitative easing. These measures are designed to increase liquidity in the market, encouraging borrowing and spending. However, such policies can also lead to a depreciation of the JPY, as lower interest rates make yen-denominated assets less attractive to investors seeking higher returns. The divergence in monetary policy between the two central banks can create a significant impact on the exchange rate. For example, during periods when the Federal Reserve is tightening its monetary policy while the BoJ is easing, the USD typically strengthens against the JPY. This divergence can be observed in the market through the widening of interest rate differentials, which traders closely monitor to make informed decisions. In addition to interest rate policies, other monetary tools such as open market operations and forward guidance also play a role. The Federal Reserve\u2019s use of open market operations to control the money supply can influence the USD\u2019s value. Similarly, the BoJ\u2019s\n\nSource: https://accountinginsights.org/key-factors-influencing-jpy-to-usd-exchange-rate-dynamics/\nTitle: Key Factors Influencing JPY to USD Exchange Rate Dynamics - Accounting Insights\nContent: Impact of Monetary Policies\nMonetary policies enacted by central banks are among the most influential factors affecting the JPY to USD exchange rate. The Federal Reserve and the Bank of Japan (BoJ) wield significant power through their monetary policy decisions, which can either bolster or weaken their respective currencies. For instance, when the Federal Reserve adopts a hawkish stance, raising interest rates to combat inflation, it often results in a stronger USD. This is because higher interest rates offer better returns on investments denominated in dollars, attracting foreign capital and increasing demand for the currency.\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: What is the impact of the Bank of Japan's monetary policy on the USD/JPY exchange rate? The Bank of Japan's monetary policy directly influences the USD/JPY pair. For example, if the BoJ were to adopt a policy of low interest rates or engage in quantitative easing, it would typically result in a weakening of the Japanese yen.\nWhat is the impact of the Bank of Japan's monetary policy on the USD/JPY exchange rate?\nThe Bank of Japan's monetary policy directly influences the USD/JPY pair. For example, if the BoJ were to adopt a policy of low interest rates or engage in quantitative easing, it would typically result in a weakening of the Japanese yen.\nHow do geopolitical tensions sway the USD/JPY exchange rate? Geopolitical tensions can trigger fluctuations in the USD/JPY exchange rate. For instance, conflicts or political instability can lead to a surge in demand for safe haven currencies like the yen, causing it to appreciate against the dollar.\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: How does the U.S. Federal Reserve's monetary policy shape the USD/JPY exchange rate? The monetary policy set by the U.S. Federal Reserve has a direct bearing on the USD/JPY pair. For instance, if the federal funds rate were to rise from near zero to 2% while the BoJ's policy rate remained near zero, the dollar would likely gain strength against the yen.\nHow does the U.S. Federal Reserve's monetary policy shape the USD/JPY exchange rate? The monetary policy set by the U.S. Federal Reserve has a direct bearing on the USD/JPY pair. For instance, if the federal funds rate were to rise from near zero to 2% while the BoJ's policy rate remained near zero, the dollar would likely gain strength against the yen.\nHow does the U.S. Federal Reserve's monetary policy shape the USD/JPY exchange rate?\n\nSource: https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY\nTitle: \r\n\tHow Monetary Policies Affect the USD/JPY | JapanChangeMoney.com\r\n\nContent: Monetary Policies Wield Considerable Influence on the USD/JPY Exchange Rate. Interest Rate Differential The disparity in interest rates set by the Federal Reserve (Fed) and the Bank of Japan (BoJ) is a pivotal determinant of the USD/JPY exchange rate. A currency becomes more enticing when its interest rates are high as it allows those who own assets in that currency to garner a higher yield. BoJ's Influence In mid-2022, the yen plunged to a 24-year low against the dollar when the BoJ chose not to hike interest rates like other central banks. This was attributed to the Japanese central bank and government's perception of deflation as a more imminent threat than near-term inflation. Safe Haven Currency Amid deflation, Japan's persistently low domestic interest rates have transformed the yen into a safe haven currency. This means that its value tends to surge during periods of market upheaval, causing the yen to appreciate against the dollar. Market Stress During periods of market\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:36.335856",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.ft.com/content/a1d7501b-f470-4700-8b87-7fa7e98f3b5c\nTitle: Subscribe to read\nContent: China and Japan push back against dollar strengthSubscribe to unlock this articleLimited time offerSave 50% on Digital EditionRs1950 for your first 6 months. Then Rs1950 per 3 months. The new FT Digital Edition: today's FT, cover to cover on any device. This subscription does not include access to ft.com or the FT App.Save 50%What\u2019s includedFT Digital EditionGlobal news & analysisExpert opinionSpecial featuresExclusive FT analysis\nChina and Japan push back against dollar strengthSubscribe to unlock this articleLimited time offerSave 50% on Digital EditionRs1950 for your first 6 months. Then Rs1950 per 3 months. The new FT Digital Edition: today's FT, cover to cover on any device. This subscription does not include access to ft.com or the FT App.Save 50%What\u2019s includedFT Digital EditionGlobal news & analysisExpert opinionSpecial featuresExclusive FT analysis\n\nSource: https://www.ft.com/content/a1d7501b-f470-4700-8b87-7fa7e98f3b5c\nTitle: Subscribe to read\nContent: China and Japan push back against dollar strengthSubscribe to unlock this articleLimited time offerSave 50% on Digital EditionRs1950 for your first 6 months. Then Rs1950 per 3 months. The new FT Digital Edition: today's FT, cover to cover on any device. This subscription does not include access to ft.com or the FT App.Save 50%What\u2019s includedFT Digital EditionGlobal news & analysisExpert opinionSpecial featuresExclusive FT analysis\nChina and Japan push back against dollar strength\nChina and Japan push back against dollar strength\nChina and Japan push back against dollar strength\nSubscribe to unlock this article\nSubscribe to unlock this article\nSubscribe to unlock this article\nSubscribe to unlock this article\nSubscribe to unlock this article\nLimited time offerSave 50% on Digital EditionRs1950 for your first 6 months. Then Rs1950 per 3 months. The new FT Digital Edition: today's FT, cover to cover on any device. This subscription does not include access to ft.com or the FT App.\n\nSource: https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/\nTitle: The Japan-US Alliance Embraces the Economic \u2013 The Diplomat\nContent: since World War II, owing to China\u2019s pressure in the East China Sea, tensions in the Taiwan Strait, and increased provocations from North Korea. Therefore, these threats have made increased partnership between Japan and the United States even more important than ever. In a meeting with U.S. President Joe Biden in May, Japan\u2019s Prime Minister Kishida Fumio put forth his commitment to reinforce Japan\u2019s defense capabilities and increase the defense budget to cope with the worsening regional security environment. Japan and the U.S. aim at making their economies more competitive and resilient by countering the threats to economic security and to the rules-based international economic order. The U.S.-Japan Economic Policy Consultative Committee can perhaps prove to be a solution maker for promoting economic growth and also for addressing threats to the global economic order to enhance economic security. Japan-U.S. ties can also focus on engaging multilaterally, such as with Australia or\n\nSource: https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/\nTitle: The Japan-US Alliance Embraces the Economic \u2013 The Diplomat\nContent: since World War II, owing to China\u2019s pressure in the East China Sea, tensions in the Taiwan Strait, and increased provocations from North Korea. Therefore, these threats have made increased partnership between Japan and the United States even more important than ever. In a meeting with U.S. President Joe Biden in May, Japan\u2019s Prime Minister Kishida Fumio put forth his commitment to reinforce Japan\u2019s defense capabilities and increase the defense budget to cope with the worsening regional security environment. Japan and the U.S. aim at making their economies more competitive and resilient by countering the threats to economic security and to the rules-based international economic order. The U.S.-Japan Economic Policy Consultative Committee can perhaps prove to be a solution maker for promoting economic growth and also for addressing threats to the global economic order to enhance economic security. Japan-U.S. ties can also focus on engaging multilaterally, such as with Australia or\n\nSource: https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/\nTitle: The Japan-US Alliance Embraces the Economic \u2013 The Diplomat\nContent: since World War II, owing to China\u2019s pressure in the East China Sea, tensions in the Taiwan Strait, and increased provocations from North Korea. Therefore, these threats have made increased partnership between Japan and the United States even more important than ever. In a meeting with U.S. President Joe Biden in May, Japan\u2019s Prime Minister Kishida Fumio put forth his commitment to reinforce Japan\u2019s defense capabilities and increase the defense budget to cope with the worsening regional security environment. Japan and the U.S. aim at making their economies more competitive and resilient by countering the threats to economic security and to the rules-based international economic order. The U.S.-Japan Economic Policy Consultative Committee can perhaps prove to be a solution maker for promoting economic growth and also for addressing threats to the global economic order to enhance economic security. Japan-U.S. ties can also focus on engaging multilaterally, such as with Australia or\n\nSource: https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/\nTitle: The Japan-US Alliance Embraces the Economic \u2013 The Diplomat\nContent: since World War II, owing to China\u2019s pressure in the East China Sea, tensions in the Taiwan Strait, and increased provocations from North Korea. Therefore, these threats have made increased partnership between Japan and the United States even more important than ever. In a meeting with U.S. President Joe Biden in May, Japan\u2019s Prime Minister Kishida Fumio put forth his commitment to reinforce Japan\u2019s defense capabilities and increase the defense budget to cope with the worsening regional security environment. Japan and the U.S. aim at making their economies more competitive and resilient by countering the threats to economic security and to the rules-based international economic order. The U.S.-Japan Economic Policy Consultative Committee can perhaps prove to be a solution maker for promoting economic growth and also for addressing threats to the global economic order to enhance economic security. Japan-U.S. ties can also focus on engaging multilaterally, such as with Australia or\n\nSource: https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/\nTitle: The Japan-US Alliance Embraces the Economic \u2013 The Diplomat\nContent: Tokyo Report | Economy | East Asia The Japan-US Alliance Embraces the Economic The advent of the new Economic 2+2 shows how closely economic issues are intertwined with security concerns. By Simran Walia August 12, 2022 Secretary of State Antony J. Blinken and Secretary of Commerce Gina Raimondo co-host the inaugural ministerial meeting of the U.S.-Japan Economic Policy Consultative Committee with Japanese Foreign Minister Hayashi Yoshimasa and Japanese Minister of Economy, Trade, and Industry Haguida Koichi at the U.S. Department of State in Washington, D.C., on July 29, 2022. Credit: U.S. State Department Photo by Ron Przysucha Subscribe for ads-free reading The Japan-U.S. alliance is one of the cornerstones of Japan\u2019s foreign policy and the Indo-Pacific region and has evolved its nature with the changing times in the international arena. The alliance, once purely security-oriented and focused on mutual defense, has over time come to include regional security and prosperity. The\n\nSource: https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/\nTitle: The Japan-US Alliance Embraces the Economic \u2013 The Diplomat\nContent: Tokyo Report | Economy | East Asia The Japan-US Alliance Embraces the Economic The advent of the new Economic 2+2 shows how closely economic issues are intertwined with security concerns. By Simran Walia August 12, 2022 Secretary of State Antony J. Blinken and Secretary of Commerce Gina Raimondo co-host the inaugural ministerial meeting of the U.S.-Japan Economic Policy Consultative Committee with Japanese Foreign Minister Hayashi Yoshimasa and Japanese Minister of Economy, Trade, and Industry Haguida Koichi at the U.S. Department of State in Washington, D.C., on July 29, 2022. Credit: U.S. State Department Photo by Ron Przysucha Subscribe for ads-free reading The Japan-U.S. alliance is one of the cornerstones of Japan\u2019s foreign policy and the Indo-Pacific region and has evolved its nature with the changing times in the international arena. The alliance, once purely security-oriented and focused on mutual defense, has over time come to include regional security and prosperity. The\n\nSource: https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/\nTitle: The Japan-US Alliance Embraces the Economic \u2013 The Diplomat\nContent: Credit: U.S. State Department Photo by Ron Przysucha\nThe Japan-U.S. alliance is one of the cornerstones of Japan\u2019s foreign policy and the Indo-Pacific region and has evolved its nature with the changing times in the international arena. The alliance, once purely security-oriented and focused on mutual defense, has over time come to include regional security and prosperity.\nThe U.S.-Japan Economic Policy Consultative Committee (the Economic 2+2), which held its first meeting on July 29, was one such step toward deepening the alliance between the two nations. This meeting was a timely one, as both countries are struggling with the reality that economic security is a vital part of national security.\n\nSource: https://thediplomat.com/2022/08/the-japan-us-alliance-embraces-the-economic/\nTitle: The Japan-US Alliance Embraces the Economic \u2013 The Diplomat\nContent: work together to defend the rules-based economic order, one in which all countries can participate, compete and prosper,\u201d Blinken said. According to Japanese officials, the regional security environment is the worst seen since World War II, owing to China\u2019s pressure in the East China Sea, tensions in the Taiwan Strait, and increased provocations from North Korea. Therefore, these threats have made increased partnership between Japan and the United States even more important than ever. In a meeting with U.S. President Joe Biden in May, Japan\u2019s Prime Minister Kishida Fumio put forth his commitment to reinforce Japan\u2019s defense capabilities and increase the defense budget to cope with the worsening regional security environment. Japan and the U.S. aim at making their economies more competitive and resilient by countering the threats to economic security and to the rules-based international economic order. The U.S.-Japan Economic Policy Consultative Committee can perhaps prove to be a\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:41.873580",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.theglobaltreasurer.com/2024/05/03/understanding-the-japanese-yen-dynamics-and-its-relationships-to-the-dollar/\nTitle: Governmental Intervention Triggers Surge of the Japanese Yen - What Does This Mean for the USD and Global Markets? - The Global Treasurer\nContent: higher-yielding assets. This delicate balance of maintaining sustainable inflation and managing currency value is a tightrope walk for Japan\u2019s economic stewards. Leave a Reply Cancel replyYour email address will not be published. Required fields are marked *Comment * Name * Email * Website Save my name, email, and website in this browser for the next time I comment. Subscribe to get your daily business insights\n\nSource: https://www.theglobaltreasurer.com/2024/05/03/understanding-the-japanese-yen-dynamics-and-its-relationships-to-the-dollar/\nTitle: Governmental Intervention Triggers Surge of the Japanese Yen - What Does This Mean for the USD and Global Markets? - The Global Treasurer\nContent: higher US interest rates, driving capital flows out of yen and into higher-yielding assets. This delicate balance of maintaining sustainable inflation and managing currency value is a tightrope walk for Japan\u2019s economic stewards. Leave a Reply Cancel replyYour email address will not be published. Required fields are marked *Comment * Name * Email * Website Save my name, email, and website in this browser for the next time I comment. Subscribe to get your daily business insights Get the latest analysis and reports delivered to your inbox daily Get the latest analysis and reports delivered to your inbox daily Sign up Whitepapers & Resources Banking 2021 Transaction Banking Services Survey CGI Transaction Banking Survey 2020 Payments TIS Sanction Screening Survey Report Payments Enhancing your strategic position: Digitalization in Treasury Netting: An Immersive Guide to Global Reconciliation\n\nSource: https://www.theglobaltreasurer.com/2024/05/03/understanding-the-japanese-yen-dynamics-and-its-relationships-to-the-dollar/\nTitle: Governmental Intervention Triggers Surge of the Japanese Yen - What Does This Mean for the USD and Global Markets? - The Global Treasurer\nContent: competitive abroad, poses challenges for policymakers. It increases import costs, contributing to inflationary pressures and squeezing household budgets. The Bank of Japan\u2019s (BOJ) monetary policy, particularly its stance on interest rates, plays a crucial role in shaping the yen\u2019s trajectory. The BOJ\u2019s reluctance to raise rates rapidly, due to concerns over destabilizing the heavily indebted government and economy, contrasts with higher US interest rates, driving capital flows out of yen and into higher-yielding assets. This delicate balance of maintaining sustainable inflation and managing currency value is a tightrope walk for Japan\u2019s economic stewards. Leave a Reply Cancel replyYour email address will not be published. Required fields are marked *Comment * Name * Email * Website Save my name, email, and website in this browser for the next time I comment. Subscribe to get your daily business insights Get the latest analysis and reports delivered to your inbox daily Get the latest\n\nSource: https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html\nTitle: Four Factors That Impact Yen-Dollar Exchange Rate - CME Group\nContent: Figure 8: The U.S. has been growing, Europe stagnating, Japan shrinking\nAs such, ending yield curve control might boost JPY for two reasons: It would allow the BoJ to shrink its balance sheet more quickly. A steeper yield curve could boost economic growth by encouraging bank lending and thereby draw more capital into the yen. At some point the Fed\u2019s tightening might produce a slowdown in the U.S. while the weaker JPY might boost Japanese growth. If that happens, the growth gap could move in the opposite direction to the benefit of the JPY relative to USD.\n\nSource: https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html\nTitle: Four Factors That Impact Yen-Dollar Exchange Rate - CME Group\nContent: The idea that yield curve control and negative rates are burdening Japan\u2019s economy is supported to some extent by Japan\u2019s GDP data. While the U.S. economy has been slowly expanding and Europe\u2019s has been stagnating, Japan\u2019s economy has been shrinking (Figure 8). The shrinkage is driven in part by demographics: it\u2019s population growth is negative, whereas population growth in the U.S. remains positive. This has probably weighed on the yen as currency investors tend to prefer to be \u201clong\u201d in currencies with expanding economies.\nFigure 8: The U.S. has been growing, Europe stagnating, Japan shrinking\nFigure 8: The U.S. has been growing, Europe stagnating, Japan shrinking\n\nSource: https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html\nTitle: Four Factors That Impact Yen-Dollar Exchange Rate - CME Group\nContent: draw more capital into the yen. At some point the Fed\u2019s tightening might produce a slowdown in the U.S. while the weaker JPY might boost Japanese growth. If that happens, the growth gap could move in the opposite direction to the benefit of the JPY relative to USD. The trade balance When it comes to trade balances, the U.S. runs consistent capital account surpluses and current account deficits owing largely to the USD\u2019s position as the global reserve currency. As such, because the U.S. runs trade deficits, the rest of the world tends to run trade surpluses and Japan is no exception. The relative size of the U.S. deficits and Japanese surpluses do, however, vary over time. Recently, U.S. deficits have shrunk modestly while the size of Japan\u2019s surplus has come down significantly (Figure 9). The relative size of Japan\u2019s surplus to the U.S. trade deficit has also been a major driver \u2013 and sometimes a leading indicator \u2013 of movements in JPYUSD (Figure 10). Figure 9: Japan\u2019s trade surplus\n\nSource: https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html\nTitle: Four Factors That Impact Yen-Dollar Exchange Rate - CME Group\nContent: draw more capital into the yen. At some point the Fed\u2019s tightening might produce a slowdown in the U.S. while the weaker JPY might boost Japanese growth. If that happens, the growth gap could move in the opposite direction to the benefit of the JPY relative to USD. The trade balance When it comes to trade balances, the U.S. runs consistent capital account surpluses and current account deficits owing largely to the USD\u2019s position as the global reserve currency. As such, because the U.S. runs trade deficits, the rest of the world tends to run trade surpluses and Japan is no exception. The relative size of the U.S. deficits and Japanese surpluses do, however, vary over time. Recently, U.S. deficits have shrunk modestly while the size of Japan\u2019s surplus has come down significantly (Figure 9). The relative size of Japan\u2019s surplus to the U.S. trade deficit has also been a major driver \u2013 and sometimes a leading indicator \u2013 of movements in JPYUSD (Figure 10). Figure 9: Japan\u2019s trade surplus\n\nSource: https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html\nTitle: Four Factors That Impact Yen-Dollar Exchange Rate - CME Group\nContent: draw more capital into the yen. At some point the Fed\u2019s tightening might produce a slowdown in the U.S. while the weaker JPY might boost Japanese growth. If that happens, the growth gap could move in the opposite direction to the benefit of the JPY relative to USD. The trade balance When it comes to trade balances, the U.S. runs consistent capital account surpluses and current account deficits owing largely to the USD\u2019s position as the global reserve currency. As such, because the U.S. runs trade deficits, the rest of the world tends to run trade surpluses and Japan is no exception. The relative size of the U.S. deficits and Japanese surpluses do, however, vary over time. Recently, U.S. deficits have shrunk modestly while the size of Japan\u2019s surplus has come down significantly (Figure 9). The relative size of Japan\u2019s surplus to the U.S. trade deficit has also been a major driver \u2013 and sometimes a leading indicator \u2013 of movements in JPYUSD (Figure 10). Figure 9: Japan\u2019s trade surplus\n\nSource: https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html\nTitle: Four Factors That Impact Yen-Dollar Exchange Rate - CME Group\nContent: draw more capital into the yen. At some point the Fed\u2019s tightening might produce a slowdown in the U.S. while the weaker JPY might boost Japanese growth. If that happens, the growth gap could move in the opposite direction to the benefit of the JPY relative to USD. The trade balance When it comes to trade balances, the U.S. runs consistent capital account surpluses and current account deficits owing largely to the USD\u2019s position as the global reserve currency. As such, because the U.S. runs trade deficits, the rest of the world tends to run trade surpluses and Japan is no exception. The relative size of the U.S. deficits and Japanese surpluses do, however, vary over time. Recently, U.S. deficits have shrunk modestly while the size of Japan\u2019s surplus has come down significantly (Figure 9). The relative size of Japan\u2019s surplus to the U.S. trade deficit has also been a major driver \u2013 and sometimes a leading indicator \u2013 of movements in JPYUSD (Figure 10). Figure 9: Japan\u2019s trade surplus\n\nSource: https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html\nTitle: Four Factors That Impact Yen-Dollar Exchange Rate - CME Group\nContent: draw more capital into the yen. At some point the Fed\u2019s tightening might produce a slowdown in the U.S. while the weaker JPY might boost Japanese growth. If that happens, the growth gap could move in the opposite direction to the benefit of the JPY relative to USD. The trade balance When it comes to trade balances, the U.S. runs consistent capital account surpluses and current account deficits owing largely to the USD\u2019s position as the global reserve currency. As such, because the U.S. runs trade deficits, the rest of the world tends to run trade surpluses and Japan is no exception. The relative size of the U.S. deficits and Japanese surpluses do, however, vary over time. Recently, U.S. deficits have shrunk modestly while the size of Japan\u2019s surplus has come down significantly (Figure 9). The relative size of Japan\u2019s surplus to the U.S. trade deficit has also been a major driver \u2013 and sometimes a leading indicator \u2013 of movements in JPYUSD (Figure 10). Figure 9: Japan\u2019s trade surplus\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:55.323204",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: This study finds that the USDJPY exchange rate does not function perfectly as a shock absorber. An appreciation shock has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an appreciation shock. Share prices demonstrate a persistent and positive response, implying the price rigidity of listed companies. Housing prices also show price stickiness. In the long term, the overvaluation puzzlingly\n\nSource: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: This study finds that the USDJPY exchange rate does not function perfectly as a shock absorber. An appreciation shock has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an appreciation shock. Share prices demonstrate a persistent and positive response, implying the price rigidity of listed companies. Housing prices also show price stickiness. In the long term, the overvaluation puzzlingly\n\nSource: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: their long-run relationships through cointegration tests. The test period is from 1991Q2 to 2007Q2 between Japan\u2019s asset bubble collapse and the Global Financial Crisis, minimizing potential distortion by structural breaks. This study finds that the USDJPY exchange rate does not function perfectly as a shock absorber. An appreciation shock has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an\n\nSource: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: their long-run relationships through cointegration tests. The test period is from 1991Q2 to 2007Q2 between Japan\u2019s asset bubble collapse and the Global Financial Crisis, minimizing potential distortion by structural breaks. This study finds that the USDJPY exchange rate does not function perfectly as a shock absorber. An appreciation shock has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an\n\nSource: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: their long-run relationships through cointegration tests. The test period is from 1991Q2 to 2007Q2 between Japan\u2019s asset bubble collapse and the Global Financial Crisis, minimizing potential distortion by structural breaks. This study finds that the USDJPY exchange rate does not function perfectly as a shock absorber. An appreciation shock has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an\n\nSource: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: their long-run relationships through cointegration tests. The test period is from 1991Q2 to 2007Q2 between Japan\u2019s asset bubble collapse and the Global Financial Crisis, minimizing potential distortion by structural breaks. This study finds that the USDJPY exchange rate does not function perfectly as a shock absorber. An appreciation shock has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an\n\nSource: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an appreciation shock. Share prices demonstrate a persistent and positive response, implying the price rigidity of listed companies. Housing prices also show price stickiness. In the long term, the overvaluation puzzlingly correlates with a rise in housing prices despite the connection between overvaluation and a wage decline.\n\nSource: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an appreciation shock. Share prices demonstrate a persistent and positive response, implying the price rigidity of listed companies. Housing prices also show price stickiness. In the long term, the overvaluation puzzlingly correlates with a rise in housing prices despite the connection between overvaluation and a wage decline.\n\nSource: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: potential distortion by structural breaks. This study finds that the USDJPY exchange rate does not function perfectly as a shock absorber. An appreciation shock has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an appreciation shock. Share prices demonstrate a persistent and positive response, implying the price rigidity of listed companies. Housing prices also show price stickiness. In the\n\nSource: https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87\nTitle: The Dollar-Yen Exchange Rate: Appreciation Impact on Japan's Economic Cycles and Long-run Equilibrium between Its Deviation from Purchasing Power Parity and the Economic Performance\nContent: has negatively and persistently impacted real GDP and consumption. At a minimum, the one-time shock of appreciation by 4 percent reduces real GDP by 0.2 percentage points. Given the yen\u2019s historical swing, the adverse impact of the yen's appreciation could be significant relative to Japan\u2019s sluggish growth (average annual growth after 1991Q2 is 0.7 percent). Unit labor costs are sticky in the short term and negatively cointegrated with the deviations from PPP in the long term. Given Japan\u2019s low productivity over the decades, the result implies the correlation between the yen\u2019s overvaluation and a wage cut. Exports and imports are irresponsive to an appreciation shock. Share prices demonstrate a persistent and positive response, implying the price rigidity of listed companies. Housing prices also show price stickiness. In the long term, the overvaluation puzzlingly correlates with a rise in housing prices despite the connection between overvaluation and a wage\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:55.344901",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.014051240000000003",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:00:55.364052",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_draft_sections",
+ "output": "\ud83d\udcd1 Generating draft section titles for 'The Role of Japan's Economic Conditions and Policies'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:01.579865",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "draft_sections_generated",
+ "output": "\ud83d\uddc2\ufe0f Draft section titles generated for 'The Role of Japan's Economic Conditions and Policies'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:01.601174",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_relevant_written_content",
+ "output": "\ud83d\udd0e Getting relevant written content based on query: The Role of Japan's Economic Conditions and Policies...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:02.604952",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "relevant_contents_context",
+ "output": "\ud83d\udcc3 Title: The Bank of Japan's Low-Interest Rate Policy and its Impact on the Yen\nContent: The Bank of Japan (BOJ)'s persistent commitment to maintaining ultra-low interest rates has played a significant role in the weakening of the Japanese yen against the US dollar. This policy, aimed at stimulating economic growth and combating deflation, has created a substantial interest rate differential between Japan and other major economies, particularly the United States. This disparity makes USD-denominated assets more attractive to investors seeking higher returns, driving up demand for the dollar and consequently pushing down the value of the yen. (https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY) The BOJ's quantitative easing programs, which involve purchasing government bonds and other assets, further contribute to the yen's depreciation by increasing the money supply. (https://www.forex.com/en-us/news-and-analysis/usdjpy-analysis-moment-of-truth-for-the-yens-2023-trend/) This divergence in monetary policy has been a key driver of the yen's\n\nTitle: The US Federal Reserve's Tightening Monetary Policy and its Effect on the Dollar\nContent: If the market expects the Fed to continue raising rates, the dollar tends to strengthen further. (https://medium.com/tokyo-fintech/nli-research-weekly-economist-letter-rapid-yen-appreciation-29f06b0310c4)\n\nTitle: Impact of Interest Rate Differentials on USD/JPY Exchange Rate Dynamics\nContent: The widening gap between US and Japanese interest rates has been a primary driver of the USD/JPY exchange rate's upward trajectory. This differential creates a carry trade opportunity, where investors borrow in a low-interest-rate currency (JPY) and invest in a higher-interest-rate currency (USD), profiting from the difference in yields. (https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY) This dynamic fuels demand for the dollar and puts downward pressure on the yen. The larger the interest rate differential, the greater the incentive for carry trades and the stronger the upward pressure on USD/JPY. Market expectations regarding future interest rate movements also influence the exchange rate. If the market anticipates a further widening of the interest rate differential, the USD/JPY is likely to continue appreciating. Conversely, if the market expects the differential to narrow, the USD/JPY may depreciate.\n\nTitle: Market Sentiment and Risk Aversion: Influence on Yen's Safe-Haven Status\nContent: The Japanese yen has traditionally been considered a safe-haven currency, meaning that investors tend to flock to it during times of economic uncertainty or geopolitical turmoil. However, the BOJ's ultra-loose monetary policy has somewhat diminished the yen's safe-haven appeal in recent years. (https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY) While geopolitical tensions can still trigger a temporary flight to safety and strengthen the yen, the underlying interest rate differential with the US often limits the extent of such movements. Market sentiment and risk appetite also play a significant role. During periods of heightened risk aversion, the yen may appreciate against the dollar, even in the face of a large interest rate differential. Conversely, when market sentiment is positive and risk appetite is high, the yen may weaken further as investors chase higher returns in other currencies.\n\nTitle: The US Federal Reserve's Tightening Monetary Policy and its Effect on the Dollar\nContent: In contrast to the BOJ's accommodative stance, the US Federal Reserve (Fed) has pursued a tighter monetary policy characterized by interest rate hikes and a reduction of its balance sheet. This policy response to rising inflation in the US has made dollar-denominated assets even more appealing to global investors, further exacerbating the interest rate differential between the US and Japan. (https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236) As the Fed raises interest rates, the yield on US Treasury bonds increases, attracting capital flows into the US and strengthening the dollar. This tightening cycle has contributed significantly to the dollar's appreciation against major currencies, including the yen. The market's anticipation of future Fed policy decisions also plays a crucial role. If the market expects the Fed to continue raising rates, the dollar tends to strengthen further.\n\nTitle: Future Outlook and Potential Shifts in Monetary Policy Divergence\nContent: significant volatility in the currency pair. Analysts predict that a less dovish BOJ in 2024 could benefit the yen, but the interplay of various factors will ultimately determine the future direction of the USD/JPY.\n\nTitle: Future Outlook and Potential Shifts in Monetary Policy Divergence\nContent: Looking ahead, the future trajectory of the USD/JPY exchange rate will depend crucially on the evolving monetary policies of the Fed and the BOJ. While the Fed is expected to maintain a relatively hawkish stance in the near term, there is increasing speculation that the BOJ may eventually shift towards a less dovish policy, potentially narrowing the interest rate differential. (https://www.fxstreet.com/analysis/usd-jpy-price-annual-forecast-less-divergence-in-monetary-policy-should-benefit-the-yen-202312191236) Such a shift could lead to a strengthening of the yen against the dollar. However, the timing and magnitude of any policy changes by the BOJ remain uncertain. Furthermore, global economic conditions, geopolitical developments, and market sentiment will continue to influence the USD/JPY exchange rate. Any unexpected economic shocks or shifts in risk appetite could trigger significant volatility in the currency pair. Analysts predict that a less dovish BOJ in 2024 could\n\nTitle: The Bank of Japan's Low-Interest Rate Policy and its Impact on the Yen\nContent: This divergence in monetary policy has been a key driver of the yen's decline throughout 2023 and prior years. The yen plummeted to a 24-year low against the dollar in mid-2022, highlighting the impact of this policy difference. (https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY)\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:02.649833",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'The Role of Japan's Economic Conditions and Policies'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:35.713094",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'The Role of Japan's Economic Conditions and Policies'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:35.741741",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'Impact of Global Economic Factors and Carry Trade'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:35.761381",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83d\udcb0 Finance Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:35.780389",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: Impact of Global Economic Factors and Carry Trade...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:40.588038",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:43.622529",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['rising dollar value 2025 global economic factors', 'Japan reverse carry trade impact on dollar 2025', 'yen carry trade unwinding effect on dollar appreciation', 'impact of global carry trades on USD exchange rates']...",
+ "metadata": [
+ "rising dollar value 2025 global economic factors",
+ "Japan reverse carry trade impact on dollar 2025",
+ "yen carry trade unwinding effect on dollar appreciation",
+ "impact of global carry trades on USD exchange rates"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:43.650866",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'rising dollar value 2025 global economic factors'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:43.670803",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Japan reverse carry trade impact on dollar 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:43.690765",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'yen carry trade unwinding effect on dollar appreciation'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:43.709931",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'impact of global carry trades on USD exchange rates'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:46.528166",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.bis.org/publ/qtrpdf/r_qt1003f.pdf\n",
+ "metadata": "https://www.bis.org/publ/qtrpdf/r_qt1003f.pdf"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:46.548410",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.weforum.org/stories/2024/08/explainer-carry-trades-and-how-they-impact-global-markets/\n",
+ "metadata": "https://www.weforum.org/stories/2024/08/explainer-carry-trades-and-how-they-impact-global-markets/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:46.568225",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.investopedia.com/terms/c/currencycarrytrade.asp\n",
+ "metadata": "https://www.investopedia.com/terms/c/currencycarrytrade.asp"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:46.588060",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.fool.com/investing/2024/08/05/what-is-carry-trade-japan-rate-hike-global-selloff/\n",
+ "metadata": "https://www.fool.com/investing/2024/08/05/what-is-carry-trade-japan-rate-hike-global-selloff/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:46.607422",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\n",
+ "metadata": "https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:46.629299",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:46.649485",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.271250",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.289763",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.310316",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.328963",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: impact of global carry trades on USD exchange rates...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.882871",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://thehill.com/opinion/5092814-dollar-weakens-2024/\n",
+ "metadata": "https://thehill.com/opinion/5092814-dollar-weakens-2024/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.901802",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.forexgdp.com/learn/dollar-index-2025-insights/\n",
+ "metadata": "https://www.forexgdp.com/learn/dollar-index-2025-insights/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.922946",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://realeconomy.rsmus.com/global-economic-outlook-for-2025-modest-growth-amid-trade-tensions/\n",
+ "metadata": "https://realeconomy.rsmus.com/global-economic-outlook-for-2025-modest-growth-amid-trade-tensions/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.941770",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.realfacts.com/post/the-u-s-dollar-and-global-markets-trends-and-expectations-for-2025\n",
+ "metadata": "https://www.realfacts.com/post/the-u-s-dollar-and-global-markets-trends-and-expectations-for-2025"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.962085",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://am.jpmorgan.com/us/en/asset-management/adv/insights/market-insights/market-updates/on-the-minds-of-investors/where-is-the-us-dollar-headed-in-2025/\n",
+ "metadata": "https://am.jpmorgan.com/us/en/asset-management/adv/insights/market-insights/market-updates/on-the-minds-of-investors/where-is-the-us-dollar-headed-in-2025/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:49.986916",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:50.008796",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:52.937772",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:52.961336",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 7 total images",
+ "metadata": [
+ "https://www.forexgdp.com/wp-content/uploads/2024/07/The-outlook-for-Federal-Reserve-policy-is-a-major-catalyst.jpg",
+ "https://www.forexgdp.com/wp-content/uploads/2024/08/Common-Mistakes-in-Counter-Trend.jpg",
+ "https://realeconomy.wpenginepowered.com/wp-content/uploads/2024/12/12_18_2024_global_outlook_2.jpg",
+ "https://www.forexgdp.com/wp-content/uploads/2024/05/USD-INDEX-1.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:53.025357",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:53.043884",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: rising dollar value 2025 global economic factors...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:53.383386",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.oxfordeconomics.com/resource/japan-key-themes-2025-rising-threats-from-external-uncertainty/\n",
+ "metadata": "https://www.oxfordeconomics.com/resource/japan-key-themes-2025-rising-threats-from-external-uncertainty/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:53.403470",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\n",
+ "metadata": "https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:53.427558",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.forbes.com/councils/forbesbusinesscouncil/2024/09/18/reversing-the-japan-carry-trade-why-business-leaders-should-pay-attention-to-international-monetary-policy/\n",
+ "metadata": "https://www.forbes.com/councils/forbesbusinesscouncil/2024/09/18/reversing-the-japan-carry-trade-why-business-leaders-should-pay-attention-to-international-monetary-policy/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:53.453519",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.abfjournal.com/a-primer-on-the-japan-reverse-carry-trade-and-its-global-implications/\n",
+ "metadata": "https://www.abfjournal.com/a-primer-on-the-japan-reverse-carry-trade-and-its-global-implications/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:53.473681",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.investing.com/analysis/usdjpy-scenarios-to-watch-for-carry-trade-risks-and-potential-reversals-200655610\n",
+ "metadata": "https://www.investing.com/analysis/usdjpy-scenarios-to-watch-for-carry-trade-risks-and-potential-reversals-200655610"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:53.496275",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:53.518836",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.222379",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.244524",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 3 new images from 3 total images",
+ "metadata": [
+ "https://assets.iorbex.com/blog/wp-content/uploads/2023/06/27111121/Email-footer-EN-1.png",
+ "https://assets.iorbex.com/blog/wp-content/uploads/2023/11/08093103/Blog-ad_160-x-600-EN-.png",
+ "https://assets.iorbex.com/blog/wp-content/uploads/2023/02/17185711/Join-Live-Webinars-min.webp"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.265039",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.285549",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Japan reverse carry trade impact on dollar 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.451855",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://fsgjournal.nl/article/2024-09-25-the-collapse-of-the-yen-carry-trade\n",
+ "metadata": "https://fsgjournal.nl/article/2024-09-25-the-collapse-of-the-yen-carry-trade"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.471168",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.cnbctv18.com/market/carry-trade-explained-yen-us-dollar-bank-of-japan-intervention-interest-rates-global-market-selloff-dow-nasdaq-19454439.htm\n",
+ "metadata": "https://www.cnbctv18.com/market/carry-trade-explained-yen-us-dollar-bank-of-japan-intervention-interest-rates-global-market-selloff-dow-nasdaq-19454439.htm"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.491998",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://synapsetrading.com/yen-carry-trade-unwinding/\n",
+ "metadata": "https://synapsetrading.com/yen-carry-trade-unwinding/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.511100",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.ndtvprofit.com/global-economics/yen-appreciation-impact-on-carry-trade-japanese-yen-us-dollar-exchange-rate\n",
+ "metadata": "https://www.ndtvprofit.com/global-economics/yen-appreciation-impact-on-carry-trade-japanese-yen-us-dollar-exchange-rate"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.530641",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.lat.london/news-resources/news-blog/the-unwinding-of-the-yen-carry-trade/\n",
+ "metadata": "https://www.lat.london/news-resources/news-blog/the-unwinding-of-the-yen-carry-trade/"
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.558647",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:55.585489",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:59.952786",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:59.975490",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:01:59.994774",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:00.020659",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: yen carry trade unwinding effect on dollar appreciation...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:09.072920",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: Antecedents for the Trend\nThe rise of interest rates in the post-pandemic period to fight inflation brought back the carry trade, a significant feature of the currency markets. That is where traders take advantage of large differences in the interest rates of two economies to borrow in one at a low rate and lend in another at a higher rate. Japan kept its ultra-low rates in place while other countries, most notably the US, raised rates enough to ignite the carry trade. As a result, the value of the yen plummeted to the point that the Japanese government had to step in on several occasions to prevent the slide.\nLooking at the yen, we can see how much of an impact carry trade can have on the relative price of a currency. Minor fluctuations in interest rates generate minor moves in the forex pair. But, if the gap widens enough, then it can precipitate a strong move as investors pile into the currency to take advantage of the momentum and interest rate gap.\nThe Set Up for 2025\n\nSource: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: feature of the currency markets. That is where traders take advantage of large differences in the interest rates of two economies to borrow in one at a low rate and lend in another at a higher rate. Japan kept its ultra-low rates in place while other countries, most notably the US, raised rates enough to ignite the carry trade. As a result, the value of the yen plummeted to the point that the Japanese government had to step in on several occasions to prevent the slide. Looking at the yen, we can see how much of an impact carry trade can have on the relative price of a currency. Minor fluctuations in interest rates generate minor moves in the forex pair. But, if the gap widens enough, then it can precipitate a strong move as investors pile into the currency to take advantage of the momentum and interest rate gap. The Set Up for 2025 The theme of 2024 has been getting inflation under control, and central banks moving towards easing. Just like not all central banks were even in hiking,\n\nSource: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: HomeFundamental AnalysisBig Moves in the Currency Markets for 2025: \u201cReverse\u201d Carry Trade\nBig Moves in the Currency Markets for 2025: \u201cReverse\u201d Carry Trade\nBig Moves in the Currency Markets for 2025: \u201cReverse\u201d Carry Trade\n\nSource: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: for the Trend The rise of interest rates in the post-pandemic period to fight inflation brought back the carry trade, a significant feature of the currency markets. That is where traders take advantage of large differences in the interest rates of two economies to borrow in one at a low rate and lend in another at a higher rate. Japan kept its ultra-low rates in place while other countries, most notably the US, raised rates enough to ignite the carry trade. As a result, the value of the yen plummeted to the point that the Japanese government had to step in on several occasions to prevent the slide. Looking at the yen, we can see how much of an impact carry trade can have on the relative price of a currency. Minor fluctuations in interest rates generate minor moves in the forex pair. But, if the gap widens enough, then it can precipitate a strong move as investors pile into the currency to take advantage of the momentum and interest rate gap. The Set Up for 2025 The theme of 2024 has\n\nSource: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: for the Trend The rise of interest rates in the post-pandemic period to fight inflation brought back the carry trade, a significant feature of the currency markets. That is where traders take advantage of large differences in the interest rates of two economies to borrow in one at a low rate and lend in another at a higher rate. Japan kept its ultra-low rates in place while other countries, most notably the US, raised rates enough to ignite the carry trade. As a result, the value of the yen plummeted to the point that the Japanese government had to step in on several occasions to prevent the slide. Looking at the yen, we can see how much of an impact carry trade can have on the relative price of a currency. Minor fluctuations in interest rates generate minor moves in the forex pair. But, if the gap widens enough, then it can precipitate a strong move as investors pile into the currency to take advantage of the momentum and interest rate gap. The Set Up for 2025 The theme of 2024 has\n\nSource: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: for the Trend The rise of interest rates in the post-pandemic period to fight inflation brought back the carry trade, a significant feature of the currency markets. That is where traders take advantage of large differences in the interest rates of two economies to borrow in one at a low rate and lend in another at a higher rate. Japan kept its ultra-low rates in place while other countries, most notably the US, raised rates enough to ignite the carry trade. As a result, the value of the yen plummeted to the point that the Japanese government had to step in on several occasions to prevent the slide. Looking at the yen, we can see how much of an impact carry trade can have on the relative price of a currency. Minor fluctuations in interest rates generate minor moves in the forex pair. But, if the gap widens enough, then it can precipitate a strong move as investors pile into the currency to take advantage of the momentum and interest rate gap. The Set Up for 2025 The theme of 2024 has\n\nSource: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: for the Trend The rise of interest rates in the post-pandemic period to fight inflation brought back the carry trade, a significant feature of the currency markets. That is where traders take advantage of large differences in the interest rates of two economies to borrow in one at a low rate and lend in another at a higher rate. Japan kept its ultra-low rates in place while other countries, most notably the US, raised rates enough to ignite the carry trade. As a result, the value of the yen plummeted to the point that the Japanese government had to step in on several occasions to prevent the slide. Looking at the yen, we can see how much of an impact carry trade can have on the relative price of a currency. Minor fluctuations in interest rates generate minor moves in the forex pair. But, if the gap widens enough, then it can precipitate a strong move as investors pile into the currency to take advantage of the momentum and interest rate gap. The Set Up for 2025 The theme of 2024 has\n\nSource: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: for the Trend The rise of interest rates in the post-pandemic period to fight inflation brought back the carry trade, a significant feature of the currency markets. That is where traders take advantage of large differences in the interest rates of two economies to borrow in one at a low rate and lend in another at a higher rate. Japan kept its ultra-low rates in place while other countries, most notably the US, raised rates enough to ignite the carry trade. As a result, the value of the yen plummeted to the point that the Japanese government had to step in on several occasions to prevent the slide. Looking at the yen, we can see how much of an impact carry trade can have on the relative price of a currency. Minor fluctuations in interest rates generate minor moves in the forex pair. But, if the gap widens enough, then it can precipitate a strong move as investors pile into the currency to take advantage of the momentum and interest rate gap. The Set Up for 2025 The theme of 2024 has\n\nSource: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: HomeFundamental AnalysisBig Moves in the Currency Markets for 2025: \u201cReverse\u201d Carry Trade Big Moves in the Currency Markets for 2025: \u201cReverse\u201d Carry Trade Fundamental Analysis By Daniel John Grady On Dec 31, 2024 0 45 Share There is an unusual situation brewing in the currency markets for 2025, with the Euro potentially being one of the protagonists. Depending on how the economic data in Britain evolves, the pound could also be involved as well. Other notable currencies include the Canadian and New Zealand dollars. The key issue is how these currencies will relate to the US dollar as each central bank plots its own monetary policy course. While there are individual situations with each of the mentioned currencies, the Euro is the most notable being the largest economy and the most actively traded. So, we\u2019ll focus on that pair, as the situation for the other currencies is similar, since the main component is the Fed\u2019s interest rate policy compared to other countries. Antecedents for\n\nSource: https://www.orbex.com/blog/en/2024/12/big-moves-in-the-currency-markets-for-2025-reverse-carry-trade\nTitle: Big Moves in the Currency Markets for 2025\nContent: Big Moves in the Currency Markets for 2025: \u201cReverse\u201d Carry Trade Fundamental Analysis By Daniel John Grady On Dec 31, 2024 0 45 Share There is an unusual situation brewing in the currency markets for 2025, with the Euro potentially being one of the protagonists. Depending on how the economic data in Britain evolves, the pound could also be involved as well. Other notable currencies include the Canadian and New Zealand dollars. The key issue is how these currencies will relate to the US dollar as each central bank plots its own monetary policy course. While there are individual situations with each of the mentioned currencies, the Euro is the most notable being the largest economy and the most actively traded. So, we\u2019ll focus on that pair, as the situation for the other currencies is similar, since the main component is the Fed\u2019s interest rate policy compared to other countries. Antecedents for the Trend The rise of interest rates in the post-pandemic period to fight inflation brought\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:10.664764",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://fsgjournal.nl/article/2024-09-25-the-collapse-of-the-yen-carry-trade\nTitle: The Collapse of the Yen Carry Trade - Financial Study Association Groningen\nContent: Figure 8: JPY/USD Exchange Rate Over the Last 3 Months (as of September 20, 2024) Source: Yahoo Finance\nIt's important to note that many carry traders use financial leverage, including FX forwards and futures, to amplify returns. When the market turns against them, margin calls force them to close positions at a loss, accelerating the unwinding process. As more short-yen investors are forced to liquidate their positions, the yen continues to appreciate, creating a vicious cycle of forced selling and further currency appreciation.\n\nSource: https://www.cnbctv18.com/market/carry-trade-explained-yen-us-dollar-bank-of-japan-intervention-interest-rates-global-market-selloff-dow-nasdaq-19454439.htm\nTitle: Explained - How unwinding of the popular 'carry trade' has spooked global markets - CNBC TV18\nContent: HomeMarket NewsExplained - How unwinding of the popular 'carry trade' has spooked global marketsExplained - How unwinding of the popular 'carry trade' has spooked global marketsUS markets have seen a sharp sell-off on Friday and the futures are also pointing to a sell-off not ending anytime soon. The Dow futures are down over 300 points, while the Nasdaq futures are also down over 2.5%.By Hormaz Fatakia August 5, 2024, 2:49:47 PM IST (Updated)3 Min ReadThe Japanese Yen has seen a significant appreciation of nearly 10% against the US Dollar over the last three weeks after multiple triggers from the Bank of Japan and the local government.Share MarketView All Nifty GainersView All CompanyValueChange%Change Last week, the Japanese government confirmed a $36.8 billion Yen intervention after the currency had declined to a 38-year low against the US Dollar. This was the second intervention since late-May, which was the first intervention since October 2022. Amidst this, the Bank of Japan\n\nSource: https://www.cnbctv18.com/market/carry-trade-explained-yen-us-dollar-bank-of-japan-intervention-interest-rates-global-market-selloff-dow-nasdaq-19454439.htm\nTitle: Explained - How unwinding of the popular 'carry trade' has spooked global markets - CNBC TV18\nContent: HomeMarket NewsExplained - How unwinding of the popular 'carry trade' has spooked global marketsExplained - How unwinding of the popular 'carry trade' has spooked global marketsUS markets have seen a sharp sell-off on Friday and the futures are also pointing to a sell-off not ending anytime soon. The Dow futures are down over 300 points, while the Nasdaq futures are also down over 2.5%.By Hormaz Fatakia August 5, 2024, 2:49:47 PM IST (Updated)3 Min ReadThe Japanese Yen has seen a significant appreciation of nearly 10% against the US Dollar over the last three weeks after multiple triggers from the Bank of Japan and the local government.Share MarketView All Nifty GainersView All CompanyValueChange%Change Last week, the Japanese government confirmed a $36.8 billion Yen intervention after the currency had declined to a 38-year low against the US Dollar. This was the second intervention since late-May, which was the first intervention since October 2022. Amidst this, the Bank of Japan\n\nSource: https://www.cnbctv18.com/market/carry-trade-explained-yen-us-dollar-bank-of-japan-intervention-interest-rates-global-market-selloff-dow-nasdaq-19454439.htm\nTitle: Explained - How unwinding of the popular 'carry trade' has spooked global markets - CNBC TV18\nContent: HomeMarket NewsExplained - How unwinding of the popular 'carry trade' has spooked global marketsExplained - How unwinding of the popular 'carry trade' has spooked global marketsUS markets have seen a sharp sell-off on Friday and the futures are also pointing to a sell-off not ending anytime soon. The Dow futures are down over 300 points, while the Nasdaq futures are also down over 2.5%.By Hormaz Fatakia August 5, 2024, 2:49:47 PM IST (Updated)3 Min ReadThe Japanese Yen has seen a significant appreciation of nearly 10% against the US Dollar over the last three weeks after multiple triggers from the Bank of Japan and the local government.Share MarketView All Nifty GainersView All CompanyValueChange%Change Last week, the Japanese government confirmed a $36.8 billion Yen intervention after the currency had declined to a 38-year low against the US Dollar. This was the second intervention since late-May, which was the first intervention since October 2022. Amidst this, the Bank of Japan\n\nSource: https://www.cnbctv18.com/market/carry-trade-explained-yen-us-dollar-bank-of-japan-intervention-interest-rates-global-market-selloff-dow-nasdaq-19454439.htm\nTitle: Explained - How unwinding of the popular 'carry trade' has spooked global markets - CNBC TV18\nContent: HomeLatest NewsFeaturedLive TVCNBC-TV18CNBC AwaazCNBC BajarMarket LiveMinisPodcastsCNBC-TV18 SpecialsGlobal Leadership SummitYoung TurksFuture Female Forward11:11 Newsletter NiveshKaSahiKadamPhotosSectionsMarketGlobal MarketsStocksMoneyCompaniesEconomyTechnologyCryptocurrencyTerms and ConditionsDisclaimerTerms of UsePrivacy PolicyHomeMarket NewsExplained - How unwinding of the popular 'carry trade' has spooked global marketsExplained - How unwinding of the popular 'carry trade' has spooked global marketsUS markets have seen a sharp sell-off on Friday and the futures are also pointing to a sell-off not ending anytime soon. The Dow futures are down over 300 points, while the Nasdaq futures are also down over 2.5%.By Hormaz Fatakia August 5, 2024, 2:49:47 PM IST (Updated)3 Min ReadThe Japanese Yen has seen a significant appreciation of nearly 10% against the US Dollar over the last three weeks after multiple triggers from the Bank of Japan and the local government.Share MarketView All\n\nSource: https://www.cnbctv18.com/market/carry-trade-explained-yen-us-dollar-bank-of-japan-intervention-interest-rates-global-market-selloff-dow-nasdaq-19454439.htm\nTitle: Explained - How unwinding of the popular 'carry trade' has spooked global markets - CNBC TV18\nContent: LiveLatest NewsNifty SensexMCXHomeLatest NewsFeaturedLive TVCNBC-TV18CNBC AwaazCNBC BajarMarket LiveMinisPodcastsCNBC-TV18 SpecialsGlobal Leadership SummitYoung TurksFuture Female Forward11:11 Newsletter NiveshKaSahiKadamPhotosSectionsMarketGlobal MarketsStocksMoneyCompaniesEconomyTechnologyCryptocurrencyTerms and ConditionsDisclaimerTerms of UsePrivacy PolicyHomeMarket NewsExplained - How unwinding of the popular 'carry trade' has spooked global marketsExplained - How unwinding of the popular 'carry trade' has spooked global marketsUS markets have seen a sharp sell-off on Friday and the futures are also pointing to a sell-off not ending anytime soon. The Dow futures are down over 300 points, while the Nasdaq futures are also down over 2.5%.By Hormaz Fatakia August 5, 2024, 2:49:47 PM IST (Updated)3 Min ReadThe Japanese Yen has seen a significant appreciation of nearly 10% against the US Dollar over the last three weeks after multiple triggers from the Bank of Japan and the local\n\nSource: https://www.lat.london/news-resources/news-blog/the-unwinding-of-the-yen-carry-trade/\nTitle: The Unwinding of the Yen Carry Trade | LAT Blog\nContent: a carry trade.However, it is important to understand that the carry trade is a relatively high-risk strategy, and for it to continue to work, the interest rate differential needs to stay high. Unwinding the Carry Trade The unwinding of the Yen carry trade refers to the process where investors reverse their initial carry trade strategy. The significant issue here, is that, while the creation of the carry trade is generally a calm and gradual process, the unwinding of the carry trade is often more violent and volatile as investors rush to the exits.Recently, we\u2019ve seen a perfect storm of triggers to reverse the current Yen carry trade: Interest rates in Japan have increased and US bond yields (reflecting potential US interest rates) have collapsed. Since the cost of borrowing Yen is getting more expensive and returns from the dollar are reduced, the interest rate differential has narrowed. This reduces the carry trade profitability and forces investors to exit their carry trades by\n\nSource: https://www.lat.london/news-resources/news-blog/the-unwinding-of-the-yen-carry-trade/\nTitle: The Unwinding of the Yen Carry Trade | LAT Blog\nContent: markets across the world.The unwinding of the carry trade has led to some sudden and significant volatility in currency values and stock markets, which in turn may severely affect economic stability. It will be interesting to see how the Fed reacts to the recent volatility, with many commentators expecting an emergency interest rate cut in the US in the coming days.In summary, the Yen carry trade has been a significant driver of financial markets in recent months and years, and the unwinding of this trade has caused some significant market disruption across stocks and currencies. It is also a good illustration of the interconnectivity between global financial assets, and an understanding of the dynamics of the carry trade is important in helping to appreciate the broader global financial market movements.\n\nSource: https://www.lat.london/news-resources/news-blog/the-unwinding-of-the-yen-carry-trade/\nTitle: The Unwinding of the Yen Carry Trade | LAT Blog\nContent: markets across the world.The unwinding of the carry trade has led to some sudden and significant volatility in currency values and stock markets, which in turn may severely affect economic stability. It will be interesting to see how the Fed reacts to the recent volatility, with many commentators expecting an emergency interest rate cut in the US in the coming days.In summary, the Yen carry trade has been a significant driver of financial markets in recent months and years, and the unwinding of this trade has caused some significant market disruption across stocks and currencies. It is also a good illustration of the interconnectivity between global financial assets, and an understanding of the dynamics of the carry trade is important in helping to appreciate the broader global financial market movements.\n\nSource: https://www.lat.london/news-resources/news-blog/the-unwinding-of-the-yen-carry-trade/\nTitle: The Unwinding of the Yen Carry Trade | LAT Blog\nContent: markets across the world.The unwinding of the carry trade has led to some sudden and significant volatility in currency values and stock markets, which in turn may severely affect economic stability. It will be interesting to see how the Fed reacts to the recent volatility, with many commentators expecting an emergency interest rate cut in the US in the coming days.In summary, the Yen carry trade has been a significant driver of financial markets in recent months and years, and the unwinding of this trade has caused some significant market disruption across stocks and currencies. It is also a good illustration of the interconnectivity between global financial assets, and an understanding of the dynamics of the carry trade is important in helping to appreciate the broader global financial market movements.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:23.040097",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.forexgdp.com/learn/dollar-index-2025-insights/\nTitle: The Dollar Index in 2025: Why It Matters\nContent: Factors Driving the Dollar Index in 2025\nInterest Rates and the Federal Reserve\nInterest rates are like magnets for currency value. When the Federal Reserve raises rates, the dollar usually strengthens. Why? Because higher rates attract foreign investors looking for better returns. In 2025, all eyes are on the Fed\u2019s policies. Will they hike rates to combat inflation, or will they pivot to support growth?\nGlobal Economic Trends\nThe global economy plays a massive role in the Dollar Index. If Europe or Asia faces economic troubles, the dollar often strengthens as investors seek a safe haven. In 2025, geopolitical tensions and post-pandemic recovery trends are key factors influencing the DXY.\nThe Dollar Index and Global Trade\nImpact on Imports and Exports\n\nSource: https://www.forexgdp.com/learn/dollar-index-2025-insights/\nTitle: The Dollar Index in 2025: Why It Matters\nContent: Interest rates are like magnets for currency value. When the Federal Reserve raises rates, the dollar usually strengthens. Why? Because higher rates attract foreign investors looking for better returns. In 2025, all eyes are on the Fed\u2019s policies. Will they hike rates to combat inflation, or will they pivot to support growth? Global Economic Trends The global economy plays a massive role in the Dollar Index. If Europe or Asia faces economic troubles, the dollar often strengthens as investors seek a safe haven. In 2025, geopolitical tensions and post-pandemic recovery trends are key factors influencing the DXY. The Dollar Index and Global Trade Impact on Imports and Exports A strong dollar sounds great, right? Not always. When the dollar is strong, U.S. goods become more expensive abroad, hurting exports. On the flip side, imports get cheaper, which might be good for consumers but bad for domestic manufacturers. Emerging Markets\u2019 Struggles Emerging markets often borrow in dollars. When\n\nSource: https://realeconomy.rsmus.com/global-economic-outlook-for-2025-modest-growth-amid-trade-tensions/\nTitle: Global economic outlook for 2025: Modest growth amid trade tensions\nContent: a quarter-point reduction in each quarter. Should the Reserve Bank of Australia delay the start of its easing cycle, we expect a half-point cut midyear followed by quarter-point cuts in the third and fourth quarters of 2025. Stronger dollar The appreciation of the U.S. dollar figures to be one of the major economic narratives in 2025 as global investors seek to take advantage of the rising returns on investment inside the American economy. The real trade-weighted adjusted dollar index shows the dollar sitting at 117.2 at the end of November\u2014a multidecade high. This increase is already stoking protectionist measures inside the U.S. economy, which implies years of adjustment for major American trading partners and emerging markets that peg their currencies to the greenback or use carefully managed floats. This dynamic will no doubt result in calls for a managed devaluation of the greenback. But unlike the 1980s, when a devaluation was possible, it\u2019s a difficult proposition today. To\n\nSource: https://www.forexgdp.com/learn/dollar-index-2025-insights/\nTitle: The Dollar Index in 2025: Why It Matters\nContent: This uneven weighting reflects trade flows and economic significance. The Formula Without getting too technical, the DXY uses a geometric mean to compare the dollar\u2019s value against the weighted average of the basket currencies. The result is a single number that goes up when the dollar strengthens and down when it weakens. Simple, right? Factors Driving the Dollar Index in 2025 Interest Rates and the Federal Reserve Interest rates are like magnets for currency value. When the Federal Reserve raises rates, the dollar usually strengthens. Why? Because higher rates attract foreign investors looking for better returns. In 2025, all eyes are on the Fed\u2019s policies. Will they hike rates to combat inflation, or will they pivot to support growth? Global Economic Trends The global economy plays a massive role in the Dollar Index. If Europe or Asia faces economic troubles, the dollar often strengthens as investors seek a safe haven. In 2025, geopolitical tensions and post-pandemic recovery\n\nSource: https://am.jpmorgan.com/us/en/asset-management/adv/insights/market-insights/market-updates/on-the-minds-of-investors/where-is-the-us-dollar-headed-in-2025/\nTitle: \n \n \n Where is the U.S. dollar headed in 2025?\n \n \n | J.P. Morgan Asset Management\n \nContent: that challenge the dollar's trade dominance or reserve currency status. Even with the factors supporting the dollar, its ascent is unlikely to continue indefinitely. Currently, the dollar is two standard deviations above its 50-year average, suggesting limited room for further appreciation. Historically, the dollar has alternated between periods of strength and weakness, making a downturn likely at some point, though the timing is uncertain. Additionally, the U.S.'s persistent trade balance deficit, at 4.2% of GDP as of September 2024, poses a long-term constraint, highlighting a structural challenge that could eventually pressure the currency. A strong dollar can hurt international company performance for U.S.-based investors. It can also negatively impact U.S. companies with significant international exposure and U.S. exports by making goods more expensive abroad. While a stronger dollar could bolster the 'U.S. exceptionalism' narrative in 2025, investors should carefully assess its\n\nSource: https://am.jpmorgan.com/us/en/asset-management/adv/insights/market-insights/market-updates/on-the-minds-of-investors/where-is-the-us-dollar-headed-in-2025/\nTitle: \n \n \n Where is the U.S. dollar headed in 2025?\n \n \n | J.P. Morgan Asset Management\n \nContent: Even with the factors supporting the dollar, its ascent is unlikely to continue indefinitely. Currently, the dollar is two standard deviations above its 50-year average, suggesting limited room for further appreciation. Historically, the dollar has alternated between periods of strength and weakness, making a downturn likely at some point, though the timing is uncertain. Additionally, the U.S.'s persistent trade balance deficit, at 4.2% of GDP as of September 2024, poses a long-term constraint, highlighting a structural challenge that could eventually pressure the currency.\nA strong dollar can hurt international company performance for U.S.-based investors. It can also negatively impact U.S. companies with significant international exposure and U.S. exports by making goods more expensive abroad. While a stronger dollar could bolster the 'U.S. exceptionalism' narrative in 2025, investors should carefully assess its potential impact on their portfolios.\n\nSource: https://realeconomy.rsmus.com/global-economic-outlook-for-2025-modest-growth-amid-trade-tensions/\nTitle: Global economic outlook for 2025: Modest growth amid trade tensions\nContent: Amid these trends, the American dollar is likely to surge in value to levels not seen in a quarter century. As of the end of November, the dollar had increased by about 7.5% on a real trade-weighted basis over the past year.\nA stronger dollar amid rising tariffs will create the conditions for a resurgence in inflation across many emerging markets that import oil and settle their trades in more expensive dollars.\nInflation outlook: Slowing toward 4%\nGlobal inflation, which surged as economies emerged from the pandemic, continues to abate. Following the global peak in inflation of 9.4% in the third quarter of 2022, we expect inflation to ease toward 4% in 2025.\nThe synchronized tightening of monetary policy in recent years, which helped stabilize prices, is beginning to unwind.\n\nSource: https://www.forexgdp.com/learn/dollar-index-2025-insights/\nTitle: The Dollar Index in 2025: Why It Matters\nContent: the Swedish krona contributes only 4.2%. This uneven weighting reflects trade flows and economic significance. The Formula Without getting too technical, the DXY uses a geometric mean to compare the dollar\u2019s value against the weighted average of the basket currencies. The result is a single number that goes up when the dollar strengthens and down when it weakens. Simple, right? Factors Driving the Dollar Index in 2025 Interest Rates and the Federal Reserve Interest rates are like magnets for currency value. When the Federal Reserve raises rates, the dollar usually strengthens. Why? Because higher rates attract foreign investors looking for better returns. In 2025, all eyes are on the Fed\u2019s policies. Will they hike rates to combat inflation, or will they pivot to support growth? Global Economic Trends The global economy plays a massive role in the Dollar Index. If Europe or Asia faces economic troubles, the dollar often strengthens as investors seek a safe haven. In 2025, geopolitical\n\nSource: https://www.forexgdp.com/learn/dollar-index-2025-insights/\nTitle: The Dollar Index in 2025: Why It Matters\nContent: the Swedish krona contributes only 4.2%. This uneven weighting reflects trade flows and economic significance. The Formula Without getting too technical, the DXY uses a geometric mean to compare the dollar\u2019s value against the weighted average of the basket currencies. The result is a single number that goes up when the dollar strengthens and down when it weakens. Simple, right? Factors Driving the Dollar Index in 2025 Interest Rates and the Federal Reserve Interest rates are like magnets for currency value. When the Federal Reserve raises rates, the dollar usually strengthens. Why? Because higher rates attract foreign investors looking for better returns. In 2025, all eyes are on the Fed\u2019s policies. Will they hike rates to combat inflation, or will they pivot to support growth? Global Economic Trends The global economy plays a massive role in the Dollar Index. If Europe or Asia faces economic troubles, the dollar often strengthens as investors seek a safe haven. In 2025, geopolitical\n\nSource: https://www.forexgdp.com/learn/dollar-index-2025-insights/\nTitle: The Dollar Index in 2025: Why It Matters\nContent: the Swedish krona contributes only 4.2%. This uneven weighting reflects trade flows and economic significance. The Formula Without getting too technical, the DXY uses a geometric mean to compare the dollar\u2019s value against the weighted average of the basket currencies. The result is a single number that goes up when the dollar strengthens and down when it weakens. Simple, right? Factors Driving the Dollar Index in 2025 Interest Rates and the Federal Reserve Interest rates are like magnets for currency value. When the Federal Reserve raises rates, the dollar usually strengthens. Why? Because higher rates attract foreign investors looking for better returns. In 2025, all eyes are on the Fed\u2019s policies. Will they hike rates to combat inflation, or will they pivot to support growth? Global Economic Trends The global economy plays a massive role in the Dollar Index. If Europe or Asia faces economic troubles, the dollar often strengthens as investors seek a safe haven. In 2025, geopolitical\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:34.185290",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: Borrowing lower-yielding currencies and reinvesting the proceeds in higher-yielding currencies/assets elsewhere, with the aim of profiting from the interest rate differentials, is called a carry trade. Over the first half of this year, traders relied heavily on carry trades given wide interest rate differentials, and low volatility in the FX market. This led to a massive buildup in carry trade positions, one of the largest in decades. A significant portion of these positions was funded with JPY, the only major currency with interest rates near zero at that time\u2014estimates of JPY-funded carry trade positions ranged from USD 2 to 20 trillion at the peak. Some positions also used the Swiss Franc (CHF), offshore Chinese Yuan (CNH), and the Euro (EUR) as funders.\n\nSource: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: Throughout 2024, the dynamics of funding currencies and carry trades have taken center stage in the foreign exchange market. With interest rates elevated across the U.S. dollar and many G10 currencies, investors have increasingly turned to specific lower-yielding currencies as funding sources. This shift has resulted in notable market volatility over the summer, raising critical questions about carry trade strategies and the selection of funding currencies.\nAt this juncture, the global interest rate landscape is undergoing major shifts. The Federal Reserve delivered its first interest rate cut in four years at the September meeting, while several major European counterparts initiated similar actions earlier this year. What implications does this hold for investors? Do carry trades still work?\n\nSource: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: Therefore, currencies with stable exchange rates or even depreciation pressures usually work better as funders. Liquidity: Liquidity is also important as it ensures that large transactions can be executed without significantly affecting market prices, allowing investors to enter and exit positions easily. The cost of trading, i.e., the bid-ask spread, is also lower with better liquidity. Borrowing lower-yielding currencies and reinvesting the proceeds in higher-yielding currencies/assets elsewhere, with the aim of profiting from the interest rate differentials, is called a carry trade. Over the first half of this year, traders relied heavily on carry trades given wide interest rate differentials, and low volatility in the FX market. This led to a massive buildup in carry trade positions, one of the largest in decades. A significant portion of these positions was funded with JPY, the only major currency with interest rates near zero at that time\u2014estimates of JPY-funded carry trade\n\nSource: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: Therefore, currencies with stable exchange rates or even depreciation pressures usually work better as funders. Liquidity: Liquidity is also important as it ensures that large transactions can be executed without significantly affecting market prices, allowing investors to enter and exit positions easily. The cost of trading, i.e., the bid-ask spread, is also lower with better liquidity. Borrowing lower-yielding currencies and reinvesting the proceeds in higher-yielding currencies/assets elsewhere, with the aim of profiting from the interest rate differentials, is called a carry trade. Over the first half of this year, traders relied heavily on carry trades given wide interest rate differentials, and low volatility in the FX market. This led to a massive buildup in carry trade positions, one of the largest in decades. A significant portion of these positions was funded with JPY, the only major currency with interest rates near zero at that time\u2014estimates of JPY-funded carry trade\n\nSource: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: Therefore, currencies with stable exchange rates or even depreciation pressures usually work better as funders. Liquidity: Liquidity is also important as it ensures that large transactions can be executed without significantly affecting market prices, allowing investors to enter and exit positions easily. The cost of trading, i.e., the bid-ask spread, is also lower with better liquidity. Borrowing lower-yielding currencies and reinvesting the proceeds in higher-yielding currencies/assets elsewhere, with the aim of profiting from the interest rate differentials, is called a carry trade. Over the first half of this year, traders relied heavily on carry trades given wide interest rate differentials, and low volatility in the FX market. This led to a massive buildup in carry trade positions, one of the largest in decades. A significant portion of these positions was funded with JPY, the only major currency with interest rates near zero at that time\u2014estimates of JPY-funded carry trade\n\nSource: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: currency. Throughout 2024, the dynamics of funding currencies and carry trades have taken center stage in the foreign exchange market. With interest rates elevated across the U.S. dollar and many G10 currencies, investors have increasingly turned to specific lower-yielding currencies as funding sources. This shift has resulted in notable market volatility over the summer, raising critical questions about carry trade strategies and the selection of funding currencies. At this juncture, the global interest rate landscape is undergoing major shifts. The Federal Reserve delivered its first interest rate cut in four years at the September meeting, while several major European counterparts initiated similar actions earlier this year. What implications does this hold for investors? Do carry trades still work? In this analysis, we explore the concept of funding currencies and carry trades, assess the outlook for popular funding currencies, and discuss the implications for investors.\n\nSource: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: currency. Throughout 2024, the dynamics of funding currencies and carry trades have taken center stage in the foreign exchange market. With interest rates elevated across the U.S. dollar and many G10 currencies, investors have increasingly turned to specific lower-yielding currencies as funding sources. This shift has resulted in notable market volatility over the summer, raising critical questions about carry trade strategies and the selection of funding currencies. At this juncture, the global interest rate landscape is undergoing major shifts. The Federal Reserve delivered its first interest rate cut in four years at the September meeting, while several major European counterparts initiated similar actions earlier this year. What implications does this hold for investors? Do carry trades still work? In this analysis, we explore the concept of funding currencies and carry trades, assess the outlook for popular funding currencies, and discuss the implications for investors.\n\nSource: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: currency. Throughout 2024, the dynamics of funding currencies and carry trades have taken center stage in the foreign exchange market. With interest rates elevated across the U.S. dollar and many G10 currencies, investors have increasingly turned to specific lower-yielding currencies as funding sources. This shift has resulted in notable market volatility over the summer, raising critical questions about carry trade strategies and the selection of funding currencies. At this juncture, the global interest rate landscape is undergoing major shifts. The Federal Reserve delivered its first interest rate cut in four years at the September meeting, while several major European counterparts initiated similar actions earlier this year. What implications does this hold for investors? Do carry trades still work? In this analysis, we explore the concept of funding currencies and carry trades, assess the outlook for popular funding currencies, and discuss the implications for investors.\n\nSource: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: currency. Throughout 2024, the dynamics of funding currencies and carry trades have taken center stage in the foreign exchange market. With interest rates elevated across the U.S. dollar and many G10 currencies, investors have increasingly turned to specific lower-yielding currencies as funding sources. This shift has resulted in notable market volatility over the summer, raising critical questions about carry trade strategies and the selection of funding currencies. At this juncture, the global interest rate landscape is undergoing major shifts. The Federal Reserve delivered its first interest rate cut in four years at the September meeting, while several major European counterparts initiated similar actions earlier this year. What implications does this hold for investors? Do carry trades still work? In this analysis, we explore the concept of funding currencies and carry trades, assess the outlook for popular funding currencies, and discuss the implications for investors.\n\nSource: https://privatebank.jpmorgan.com/eur/en/insights/markets-and-investing/amid-rate-cuts-do-carry-trades-still-work\nTitle: \n Amid rate cuts, do carry trades still work | J.P. Morgan Private Bank EMEA\nContent: currency. Throughout 2024, the dynamics of funding currencies and carry trades have taken center stage in the foreign exchange market. With interest rates elevated across the U.S. dollar and many G10 currencies, investors have increasingly turned to specific lower-yielding currencies as funding sources. This shift has resulted in notable market volatility over the summer, raising critical questions about carry trade strategies and the selection of funding currencies. At this juncture, the global interest rate landscape is undergoing major shifts. The Federal Reserve delivered its first interest rate cut in four years at the September meeting, while several major European counterparts initiated similar actions earlier this year. What implications does this hold for investors? Do carry trades still work? In this analysis, we explore the concept of funding currencies and carry trades, assess the outlook for popular funding currencies, and discuss the implications for investors.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:34.207289",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.01143036",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:34.227293",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_draft_sections",
+ "output": "\ud83d\udcd1 Generating draft section titles for 'Impact of Global Economic Factors and Carry Trade'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:40.384040",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "draft_sections_generated",
+ "output": "\ud83d\uddc2\ufe0f Draft section titles generated for 'Impact of Global Economic Factors and Carry Trade'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:40.410845",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_relevant_written_content",
+ "output": "\ud83d\udd0e Getting relevant written content based on query: Impact of Global Economic Factors and Carry Trade...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:41.575003",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "relevant_contents_context",
+ "output": "\ud83d\udcc3 Title: Japan's Sluggish Economic Growth and Deflationary Pressures\nContent: This prolonged period of low interest rates has made the yen less attractive to investors compared to higher-yielding currencies like the US dollar, contributing to the yen's depreciation. A study examining the impact of yen appreciation on Japan's economic cycles found that an appreciation shock negatively impacts real GDP and consumption, further justifying the BOJ's cautious approach to raising interest rates. (https://jscholarship.library.jhu.edu/items/ceed1135-99f4-4051-860b-8bfed858ed87)\n\nTitle: The Yen's Role as a Funding Currency and Carry Trade Dynamics\nContent: Understanding the role of the yen as a funding currency is crucial for analyzing the dynamics of the USD/JPY exchange rate.\n\nTitle: The US Federal Reserve's Tightening Monetary Policy and its Effect on the Dollar\nContent: If the market expects the Fed to continue raising rates, the dollar tends to strengthen further. (https://medium.com/tokyo-fintech/nli-research-weekly-economist-letter-rapid-yen-appreciation-29f06b0310c4)\n\nTitle: Impact of Interest Rate Differentials on USD/JPY Exchange Rate Dynamics\nContent: The widening gap between US and Japanese interest rates has been a primary driver of the USD/JPY exchange rate's upward trajectory. This differential creates a carry trade opportunity, where investors borrow in a low-interest-rate currency (JPY) and invest in a higher-interest-rate currency (USD), profiting from the difference in yields. (https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY) This dynamic fuels demand for the dollar and puts downward pressure on the yen. The larger the interest rate differential, the greater the incentive for carry trades and the stronger the upward pressure on USD/JPY. Market expectations regarding future interest rate movements also influence the exchange rate. If the market anticipates a further widening of the interest rate differential, the USD/JPY is likely to continue appreciating. Conversely, if the market expects the differential to narrow, the USD/JPY may depreciate.\n\nTitle: Market Sentiment and Risk Aversion: Influence on Yen's Safe-Haven Status\nContent: The Japanese yen has traditionally been considered a safe-haven currency, meaning that investors tend to flock to it during times of economic uncertainty or geopolitical turmoil. However, the BOJ's ultra-loose monetary policy has somewhat diminished the yen's safe-haven appeal in recent years. (https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY) While geopolitical tensions can still trigger a temporary flight to safety and strengthen the yen, the underlying interest rate differential with the US often limits the extent of such movements. Market sentiment and risk appetite also play a significant role. During periods of heightened risk aversion, the yen may appreciate against the dollar, even in the face of a large interest rate differential. Conversely, when market sentiment is positive and risk appetite is high, the yen may weaken further as investors chase higher returns in other currencies.\n\nTitle: The Yen's Role as a Funding Currency and Carry Trade Dynamics\nContent: The yen's historically low interest rates have made it a popular funding currency for carry trades. This aspect, while touched upon in previous reports regarding interest rate differentials, warrants a dedicated section due to its significant influence on the yen's value. Investors borrow yen at low interest rates and invest in higher-yielding assets denominated in other currencies, such as US dollars. This creates downward pressure on the yen as investors sell yen to buy the target currency. The carry trade dynamic is highly sensitive to changes in interest rate differentials and risk sentiment. When interest rate differentials widen, carry trades become more profitable, leading to increased selling pressure on the yen. Conversely, when risk aversion rises, investors may unwind their carry trades, leading to a temporary strengthening of the yen. (https://www.investopedia.com/terms/c/carrytrade.asp) Understanding the role of the yen as a funding currency is crucial for analyzing\n\nTitle: Government Intervention and its Effectiveness in Influencing the Yen\nContent: that address the underlying causes of the yen's weakness. The frequency and scale of government intervention, as well as market expectations regarding future interventions, can significantly impact the USD/JPY exchange rate. Analyzing historical intervention episodes and their impact can provide valuable insights into the potential effectiveness of future interventions. Furthermore, understanding the political and economic context surrounding intervention decisions is crucial for assessing their potential impact. Japan's approach to managing its currency, including the role of government intervention, is a complex issue with significant implications for the global economy.\n\nTitle: Impact of Japan's Trade Balance on the Yen\nContent: Japan's trade balance, while historically a source of strength for the yen, has become less of a supporting factor in recent years. While previous reports mentioned Japan's trade surplus, this section analyzes the evolving dynamics of its trade balance and its impact on the yen. Although Japan generally maintains a trade surplus, the size of this surplus has been declining due to factors such as rising import costs, particularly for energy. (https://www.cmegroup.com/insights/economic-research/2023/four-factors-that-impact-yen-dollar-exchange-rate.html) A weaker yen can theoretically boost exports by making Japanese goods more competitive in international markets. However, the responsiveness of Japanese exports to exchange rate movements has been limited in recent times, potentially due to factors such as global supply chain disruptions and shifting demand patterns. The declining trade surplus reduces the demand for yen, contributing to its weakness against the dollar. The interplay\n\nTitle: Impact of Japan's Trade Balance on the Yen\nContent: surplus reduces the demand for yen, contributing to its weakness against the dollar. The interplay between Japan's trade balance, the yen's value, and the global economic environment is a complex dynamic that requires careful analysis.\n\nTitle: The Bank of Japan's Low-Interest Rate Policy and its Impact on the Yen\nContent: This divergence in monetary policy has been a key driver of the yen's decline throughout 2023 and prior years. The yen plummeted to a 24-year low against the dollar in mid-2022, highlighting the impact of this policy difference. (https://japanchangemoney.com/Learning-Resources/Monetary-Policies-Affect-USD-JPY)\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:02:41.621010",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'Impact of Global Economic Factors and Carry Trade'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:03:09.783453",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'Impact of Global Economic Factors and Carry Trade'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:03:09.819007",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_conclusion",
+ "output": "\u270d\ufe0f Writing conclusion for 'What is the reason for rising dollar value and what role did Japan played into this?'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-20T02:03:27.306396",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "conclusion_written",
+ "output": "\ud83d\udcdd Conclusion written for 'What is the reason for rising dollar value and what role did Japan played into this?'",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0,
+ "type": "report",
+ "output": " strengthen the yen. However, the timing and magnitude of such changes remain uncertain. Further research should focus on analyzing potential scenarios for BOJ policy changes and their likely impact on the USD/JPY, considering factors like market sentiment, global economic conditions, and the potential for carry trade unwinding. Monitoring Japan's economic indicators, particularly inflation and export data, will be crucial for assessing the likelihood of policy adjustments. Additionally, analyzing the interplay between commodity prices and the dollar, as well as the impact of global economic imbalances, will provide a more comprehensive understanding of the USD/JPY exchange rate dynamics.\n",
+ "content": "selected_images",
+ "metadata": [
+ "https://www.forexgdp.com/wp-content/uploads/2024/07/The-outlook-for-Federal-Reserve-policy-is-a-major-catalyst.jpg",
+ "https://www.forexgdp.com/wp-content/uploads/2024/08/Common-Mistakes-in-Counter-Trend.jpg",
+ "https://realeconomy.wpenginepowered.com/wp-content/uploads/2024/12/12_18_2024_global_outlook_2.jpg",
+ "https://www.forexgdp.com/wp-content/uploads/2024/05/USD-INDEX-1.jpg",
+ "https://assets.iorbex.com/blog/wp-content/uploads/2023/06/27111121/Email-footer-EN-1.png",
+ "https://assets.iorbex.com/blog/wp-content/uploads/2023/11/08093103/Blog-ad_160-x-600-EN-.png",
+ "https://assets.iorbex.com/blog/wp-content/uploads/2023/02/17185711/Join-Live-Webinars-min.webp"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1737989590_What is USA.json b/outputs/task_1737989590_What is USA.json
new file mode 100644
index 0000000000000000000000000000000000000000..230c48fd9cbc4f6afb9eb13958aa3f474ac42a08
--- /dev/null
+++ b/outputs/task_1737989590_What is USA.json
@@ -0,0 +1,11 @@
+{
+ "timestamp": "2025-01-27T20:23:10.040620",
+ "events": [],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1738061631_What is DeepSeek R1.docx b/outputs/task_1738061631_What is DeepSeek R1.docx
new file mode 100644
index 0000000000000000000000000000000000000000..31101ab42a42611671a8627a800d466fcca7b6b4
Binary files /dev/null and b/outputs/task_1738061631_What is DeepSeek R1.docx differ
diff --git a/outputs/task_1738061631_What is DeepSeek R1.json b/outputs/task_1738061631_What is DeepSeek R1.json
new file mode 100644
index 0000000000000000000000000000000000000000..28d531df4e49c656d85923d83ac1d6689b632f15
--- /dev/null
+++ b/outputs/task_1738061631_What is DeepSeek R1.json
@@ -0,0 +1,773 @@
+{
+ "timestamp": "2025-01-28T16:23:51.699727",
+ "events": [
+ {
+ "timestamp": "2025-01-28T16:24:02.605542",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'What is DeepSeek R1'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:02.615122",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83e\udd16 Tech Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:02.633209",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: What is DeepSeek R1...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:06.780296",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:09.885841",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['DeepSeek R1 capabilities comparison OpenAI o1', 'DeepSeek R1 reinforcement learning training process XAI', 'DeepSeek R1 open-source license cost advantages', 'DeepSeek R1 performance benchmarks reasoning tasks datasets', 'What is DeepSeek R1']...",
+ "metadata": [
+ "DeepSeek R1 capabilities comparison OpenAI o1",
+ "DeepSeek R1 reinforcement learning training process XAI",
+ "DeepSeek R1 open-source license cost advantages",
+ "DeepSeek R1 performance benchmarks reasoning tasks datasets",
+ "What is DeepSeek R1"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:09.904677",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'DeepSeek R1 capabilities comparison OpenAI o1'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:09.916304",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'DeepSeek R1 reinforcement learning training process XAI'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:09.922684",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'DeepSeek R1 open-source license cost advantages'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:09.934404",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'DeepSeek R1 performance benchmarks reasoning tasks datasets'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:09.949469",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'What is DeepSeek R1'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:12.553747",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://fireworks.ai/blog/deepseek-r1-deepdive\n",
+ "metadata": "https://fireworks.ai/blog/deepseek-r1-deepdive"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:12.573197",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.tomsguide.com/ai/deepseek-r1-is-the-chinese-ai-model-disrupting-openai-and-anthropic-what-you-need-to-know\n",
+ "metadata": "https://www.tomsguide.com/ai/deepseek-r1-is-the-chinese-ai-model-disrupting-openai-and-anthropic-what-you-need-to-know"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:12.583207",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\n",
+ "metadata": "https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:12.596040",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.business-standard.com/world-news/deepseek-r1-chinese-ai-research-breakthrough-challenging-openai-explained-125012700327_1.html\n",
+ "metadata": "https://www.business-standard.com/world-news/deepseek-r1-chinese-ai-research-breakthrough-challenging-openai-explained-125012700327_1.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:12.602667",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.cnn.com/2025/01/27/tech/deepseek-ai-explainer/index.html\n",
+ "metadata": "https://www.cnn.com/2025/01/27/tech/deepseek-ai-explainer/index.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:12.616303",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:12.623705",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:13.832133",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:13.843224",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 7 total images",
+ "metadata": [
+ "https://media.cnn.com/api/v1/images/stellar/prod/gettyimages-2196223475.jpg?c=16x9&q=w_1280,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/prod/jon-stewart-01-27.jpg?c=16x9&q=h_144,w_256,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/videothumbnails/32276036-68347919-generated-thumbnail.jpg?c=16x9&q=h_144,w_256,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/prod/still-21319316-26194-119-still.jpg?c=16x9&q=h_144,w_256,c_fill"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:13.848362",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:13.866742",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: What is DeepSeek R1...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:16.619032",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://bottr.me/blog/deepseek\n",
+ "metadata": "https://bottr.me/blog/deepseek"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:16.663501",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://blog.promptlayer.com/openai-vs-deepseek-an-analysis-of-r1-and-o1-models/\n",
+ "metadata": "https://blog.promptlayer.com/openai-vs-deepseek-an-analysis-of-r1-and-o1-models/"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:16.674804",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://docsbot.ai/models/compare/o1/deepseek-r1\n",
+ "metadata": "https://docsbot.ai/models/compare/o1/deepseek-r1"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:16.681995",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\n",
+ "metadata": "https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:16.698932",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.geeky-gadgets.com/deepseek-r1-vs-openai-o1/\n",
+ "metadata": "https://www.geeky-gadgets.com/deepseek-r1-vs-openai-o1/"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:16.702861",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:16.717686",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:17.945502",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:17.951621",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://www.geeky-gadgets.com/wp-content/uploads/2024/11/deepseek-r1-vs-openai-o1-comparison.webp"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:18.024255",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:18.035654",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: DeepSeek R1 capabilities comparison OpenAI o1...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:18.211132",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://aipapersacademy.com/deepseek-r1/\n",
+ "metadata": "https://aipapersacademy.com/deepseek-r1/"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:18.221778",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/@namnguyenthe/deepseek-r1-architecture-and-training-explain-83319903a684\n",
+ "metadata": "https://medium.com/@namnguyenthe/deepseek-r1-architecture-and-training-explain-83319903a684"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:18.238625",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://unfoldai.com/deepseek-r1/\n",
+ "metadata": "https://unfoldai.com/deepseek-r1/"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:18.252234",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it\n",
+ "metadata": "https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:18.264721",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arxiv.org/abs/2501.12948\n",
+ "metadata": "https://arxiv.org/abs/2501.12948"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:18.269085",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:18.285878",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.708141",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.721635",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 14 total images",
+ "metadata": [
+ "https://unfoldai.com/storage/2025/01/lm-studio-deepseek-r1.jpg",
+ "https://unfoldai.com/storage/2025/01/DeepSeek-R1-performance.jpg",
+ "https://unfoldai.com/storage/2025/01/distill-models-deepseek-r1-performance.jpg",
+ "https://aipapersacademy.com/wp-content/uploads/2025/01/image-6.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.732623",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.745468",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: DeepSeek R1 reinforcement learning training process XAI...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.874537",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\n",
+ "metadata": "https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.882371",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arxiv.org/html/2501.12948v1\n",
+ "metadata": "https://arxiv.org/html/2501.12948v1"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.898152",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://techcrunch.com/2025/01/27/deepseek-claims-its-reasoning-model-beats-openais-o1-on-certain-benchmarks/\n",
+ "metadata": "https://techcrunch.com/2025/01/27/deepseek-claims-its-reasoning-model-beats-openais-o1-on-certain-benchmarks/"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.909597",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://pub.towardsai.net/inside-deepseek-r1-the-amazing-model-that-matches-gpt-o1-on-reasoning-at-a-fraction-of-the-cost-e314561ca12c\n",
+ "metadata": "https://pub.towardsai.net/inside-deepseek-r1-the-amazing-model-that-matches-gpt-o1-on-reasoning-at-a-fraction-of-the-cost-e314561ca12c"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.917756",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:19.931140",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:21.903361",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:21.914524",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 2 new images from 2 total images",
+ "metadata": [
+ "https://techcrunch.com/wp-content/uploads/2024/04/GettyImages-1652364481.jpg?w=1024",
+ "https://techcrunch.com/wp-content/uploads/2025/01/Screenshot-2025-01-20-at-11.31.39AM.png?w=680"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:21.920194",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:21.935227",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: DeepSeek R1 performance benchmarks reasoning tasks datasets...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:22.052042",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://dataguy.in/deepseek-r1-open-source-ai/\n",
+ "metadata": "https://dataguy.in/deepseek-r1-open-source-ai/"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:22.065616",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\n",
+ "metadata": "https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:22.072972",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://c3.unu.edu/blog/deepseek-r1-pioneering-open-source-thinking-model-and-its-impact-on-the-llm-landscape\n",
+ "metadata": "https://c3.unu.edu/blog/deepseek-r1-pioneering-open-source-thinking-model-and-its-impact-on-the-llm-landscape"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:22.087330",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://apidog.com/blog/deepseek-r1-review-api/\n",
+ "metadata": "https://apidog.com/blog/deepseek-r1-review-api/"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:22.107351",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://decrypt.co/302161/chinese-open-source-ai-deepseek-r1-openai-o1\n",
+ "metadata": "https://decrypt.co/302161/chinese-open-source-ai-deepseek-r1-openai-o1"
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:22.124203",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:22.137356",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:23.483499",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:23.498196",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 5 total images",
+ "metadata": [
+ "https://assets.apidog.com/blog-next/2025/01/image-51.png",
+ "https://assets.apidog.com/blog-next/2025/01/image-53.png",
+ "https://assets.apidog.com/blog-next/2025/01/image-50.png",
+ "https://assets.apidog.com/blog-next/2025/01/image-52.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:23.511877",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:23.525810",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: DeepSeek R1 open-source license cost advantages...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:34.540057",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: lead, outperforming o1 in terms of processing speed and result quality. These benchmarks highlight DeepSeek\u2019s focus on real-world applications where hybrid data types dominate. OpenAI o1 Comparison: A Legacy of Excellence OpenAI o1 isn\u2019t without its merits. With years of refinement and optimization, o1 excels in tasks requiring extensive context processing, such as summarizing lengthy documents or handling intricate conversational flows. Moreover, OpenAI o1 is backed by a robust ecosystem, making it an attractive choice for developers. From APIs to documentation, OpenAI\u2019s infrastructure remains a key differentiator in the DeepSeek vs OpenAI debate. R1 vs o1 Showdown: Performance Metrics Language Processing In language processing benchmarks, R1 scored higher in nuanced tasks like sarcasm detection and idiomatic expressions, highlighting its improved contextual understanding. Meanwhile, o1 maintained its edge in handling large-scale datasets more efficiently, underscoring its\n\nSource: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: lead, outperforming o1 in terms of processing speed and result quality. These benchmarks highlight DeepSeek\u2019s focus on real-world applications where hybrid data types dominate. OpenAI o1 Comparison: A Legacy of Excellence OpenAI o1 isn\u2019t without its merits. With years of refinement and optimization, o1 excels in tasks requiring extensive context processing, such as summarizing lengthy documents or handling intricate conversational flows. Moreover, OpenAI o1 is backed by a robust ecosystem, making it an attractive choice for developers. From APIs to documentation, OpenAI\u2019s infrastructure remains a key differentiator in the DeepSeek vs OpenAI debate. R1 vs o1 Showdown: Performance Metrics Language Processing In language processing benchmarks, R1 scored higher in nuanced tasks like sarcasm detection and idiomatic expressions, highlighting its improved contextual understanding. Meanwhile, o1 maintained its edge in handling large-scale datasets more efficiently, underscoring its\n\nSource: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: lead, outperforming o1 in terms of processing speed and result quality. These benchmarks highlight DeepSeek\u2019s focus on real-world applications where hybrid data types dominate. OpenAI o1 Comparison: A Legacy of Excellence OpenAI o1 isn\u2019t without its merits. With years of refinement and optimization, o1 excels in tasks requiring extensive context processing, such as summarizing lengthy documents or handling intricate conversational flows. Moreover, OpenAI o1 is backed by a robust ecosystem, making it an attractive choice for developers. From APIs to documentation, OpenAI\u2019s infrastructure remains a key differentiator in the DeepSeek vs OpenAI debate. R1 vs o1 Showdown: Performance Metrics Language Processing In language processing benchmarks, R1 scored higher in nuanced tasks like sarcasm detection and idiomatic expressions, highlighting its improved contextual understanding. Meanwhile, o1 maintained its edge in handling large-scale datasets more efficiently, underscoring its\n\nSource: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: lead, outperforming o1 in terms of processing speed and result quality. These benchmarks highlight DeepSeek\u2019s focus on real-world applications where hybrid data types dominate. OpenAI o1 Comparison: A Legacy of Excellence OpenAI o1 isn\u2019t without its merits. With years of refinement and optimization, o1 excels in tasks requiring extensive context processing, such as summarizing lengthy documents or handling intricate conversational flows. Moreover, OpenAI o1 is backed by a robust ecosystem, making it an attractive choice for developers. From APIs to documentation, OpenAI\u2019s infrastructure remains a key differentiator in the DeepSeek vs OpenAI debate. R1 vs o1 Showdown: Performance Metrics Language Processing In language processing benchmarks, R1 scored higher in nuanced tasks like sarcasm detection and idiomatic expressions, highlighting its improved contextual understanding. Meanwhile, o1 maintained its edge in handling large-scale datasets more efficiently, underscoring its\n\nSource: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: lead, outperforming o1 in terms of processing speed and result quality. These benchmarks highlight DeepSeek\u2019s focus on real-world applications where hybrid data types dominate. OpenAI o1 Comparison: A Legacy of Excellence OpenAI o1 isn\u2019t without its merits. With years of refinement and optimization, o1 excels in tasks requiring extensive context processing, such as summarizing lengthy documents or handling intricate conversational flows. Moreover, OpenAI o1 is backed by a robust ecosystem, making it an attractive choice for developers. From APIs to documentation, OpenAI\u2019s infrastructure remains a key differentiator in the DeepSeek vs OpenAI debate. R1 vs o1 Showdown: Performance Metrics Language Processing In language processing benchmarks, R1 scored higher in nuanced tasks like sarcasm detection and idiomatic expressions, highlighting its improved contextual understanding. Meanwhile, o1 maintained its edge in handling large-scale datasets more efficiently, underscoring its\n\nSource: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: lead, outperforming o1 in terms of processing speed and result quality. These benchmarks highlight DeepSeek\u2019s focus on real-world applications where hybrid data types dominate. OpenAI o1 Comparison: A Legacy of Excellence OpenAI o1 isn\u2019t without its merits. With years of refinement and optimization, o1 excels in tasks requiring extensive context processing, such as summarizing lengthy documents or handling intricate conversational flows. Moreover, OpenAI o1 is backed by a robust ecosystem, making it an attractive choice for developers. From APIs to documentation, OpenAI\u2019s infrastructure remains a key differentiator in the DeepSeek vs OpenAI debate. R1 vs o1 Showdown: Performance Metrics Language Processing In language processing benchmarks, R1 scored higher in nuanced tasks like sarcasm detection and idiomatic expressions, highlighting its improved contextual understanding. Meanwhile, o1 maintained its edge in handling large-scale datasets more efficiently, underscoring its\n\nSource: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: lead, outperforming o1 in terms of processing speed and result quality. These benchmarks highlight DeepSeek\u2019s focus on real-world applications where hybrid data types dominate. OpenAI o1 Comparison: A Legacy of Excellence OpenAI o1 isn\u2019t without its merits. With years of refinement and optimization, o1 excels in tasks requiring extensive context processing, such as summarizing lengthy documents or handling intricate conversational flows. Moreover, OpenAI o1 is backed by a robust ecosystem, making it an attractive choice for developers. From APIs to documentation, OpenAI\u2019s infrastructure remains a key differentiator in the DeepSeek vs OpenAI debate. R1 vs o1 Showdown: Performance Metrics Language Processing In language processing benchmarks, R1 scored higher in nuanced tasks like sarcasm detection and idiomatic expressions, highlighting its improved contextual understanding. Meanwhile, o1 maintained its edge in handling large-scale datasets more efficiently, underscoring its\n\nSource: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: lead, outperforming o1 in terms of processing speed and result quality. These benchmarks highlight DeepSeek\u2019s focus on real-world applications where hybrid data types dominate. OpenAI o1 Comparison: A Legacy of Excellence OpenAI o1 isn\u2019t without its merits. With years of refinement and optimization, o1 excels in tasks requiring extensive context processing, such as summarizing lengthy documents or handling intricate conversational flows. Moreover, OpenAI o1 is backed by a robust ecosystem, making it an attractive choice for developers. From APIs to documentation, OpenAI\u2019s infrastructure remains a key differentiator in the DeepSeek vs OpenAI debate. R1 vs o1 Showdown: Performance Metrics Language Processing In language processing benchmarks, R1 scored higher in nuanced tasks like sarcasm detection and idiomatic expressions, highlighting its improved contextual understanding. Meanwhile, o1 maintained its edge in handling large-scale datasets more efficiently, underscoring its\n\nSource: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: lead, outperforming o1 in terms of processing speed and result quality. These benchmarks highlight DeepSeek\u2019s focus on real-world applications where hybrid data types dominate. OpenAI o1 Comparison: A Legacy of Excellence OpenAI o1 isn\u2019t without its merits. With years of refinement and optimization, o1 excels in tasks requiring extensive context processing, such as summarizing lengthy documents or handling intricate conversational flows. Moreover, OpenAI o1 is backed by a robust ecosystem, making it an attractive choice for developers. From APIs to documentation, OpenAI\u2019s infrastructure remains a key differentiator in the DeepSeek vs OpenAI debate. R1 vs o1 Showdown: Performance Metrics Language Processing In language processing benchmarks, R1 scored higher in nuanced tasks like sarcasm detection and idiomatic expressions, highlighting its improved contextual understanding. Meanwhile, o1 maintained its edge in handling large-scale datasets more efficiently, underscoring its\n\nSource: https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown\nTitle: DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison\nContent: Conclusion: R1 Dethrones o1?\nSo, can DeepSeek R1 dethrone OpenAI o1? The answer isn\u2019t straightforward. While R1 outperforms o1 in certain benchmarks, o1\u2019s robustness and ecosystem remain compelling. The R1 vs o1 showdown is far from over, but one thing is clear\u2014DeepSeek R1 has firmly established itself as a worthy competitor.\nWhat are the key differences between DeepSeek R1 and OpenAI o1?DeepSeek R1 focuses on efficiency and innovation, while OpenAI o1 offers scalability and a robust ecosystem.\nWhat are the key differences between DeepSeek R1 and OpenAI o1?DeepSeek R1 focuses on efficiency and innovation, while OpenAI o1 offers scalability and a robust ecosystem.\nHow do DeepSeek R1 benchmarks compare to OpenAI o1?R1 outperforms o1 in language comprehension and multi-modal tasks, while o1 excels in large-scale data processing.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:37.685996",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: The public reception of DeepSeek R1 has been overwhelmingly positive, fueled by its open-source nature and the stark cost advantages it presents. Users across the globe, particularly in smaller enterprises and academic circles, have praised its accessibility and the democratization of AI capabilities it represents. However, as with any emerging technology, concerns have surfaced regarding potential biases inherent in its training data and the security implications of an open-source model.\n\nSource: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: The accessibility of DeepSeek R1 is further highlighted by its MIT license, allowing developers to freely use and modify the model without restrictions. This open-source approach not only democratizes access to advanced AI tools but also encourages innovation by enabling developers to build upon the existing framework. Such accessibility not only fosters a collaborative environment but also accelerates the adoption of AI technology across various sectors, from small businesses to academic institutions.\nMoreover, the development community is abuzz with enthusiasm, as the availability of R1 on platforms like Github and Hugging Face lowers the barrier for entry. This, coupled with its low-cost structure, makes sophisticated AI capabilities more attainable for developers worldwide, fostering an environment where experimentation and customization thrive.\n\nSource: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: The model's substantial cost reduction has not only drawn commendation but also stimulated a broader discussion on the sustainability and accessibility of AI technologies. Stakeholders, ranging from developers to academic researchers, have expressed enthusiasm for the open-source nature and economic viability of R1, which could spur further innovations in AI applications and broaden its accessibility to a wider audience.Amidst these advantages, the implications for the AI industry are profound, signifying a shift toward more collaborative, open-source developmental methodologies. As open-source AI models like DeepSeek R1 continue to gain traction, they are likely to spur a wave of innovation, enhance collaborative research, and give rise to new market opportunities focused on AI optimization and customization services. These dynamics indicate a vibrant future where cost efficiency becomes central to AI advancement and adoption.Accessibility and Developer EngagementAs the digital\n\nSource: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: The model's substantial cost reduction has not only drawn commendation but also stimulated a broader discussion on the sustainability and accessibility of AI technologies. Stakeholders, ranging from developers to academic researchers, have expressed enthusiasm for the open-source nature and economic viability of R1, which could spur further innovations in AI applications and broaden its accessibility to a wider audience.Amidst these advantages, the implications for the AI industry are profound, signifying a shift toward more collaborative, open-source developmental methodologies. As open-source AI models like DeepSeek R1 continue to gain traction, they are likely to spur a wave of innovation, enhance collaborative research, and give rise to new market opportunities focused on AI optimization and customization services. These dynamics indicate a vibrant future where cost efficiency becomes central to AI advancement and adoption.Accessibility and Developer EngagementAs the digital\n\nSource: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: Learn to use AI like a Pro\nGet the latest AI workflows to boost your productivity and business performance, delivered weekly by expert consultants. Enjoy step-by-step guides, weekly Q&A sessions, and full access to our AI workflow archive.\nLearn More (And Unlock 50% off!)\nFurthermore, the public reaction to DeepSeek R1 has underscored its economic impact. The model's substantial cost reduction has not only drawn commendation but also stimulated a broader discussion on the sustainability and accessibility of AI technologies. Stakeholders, ranging from developers to academic researchers, have expressed enthusiasm for the open-source nature and economic viability of R1, which could spur further innovations in AI applications and broaden its accessibility to a wider audience.\n\nSource: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: Furthermore, the public reaction to DeepSeek R1 has underscored its economic impact. The model's substantial cost reduction has not only drawn commendation but also stimulated a broader discussion on the sustainability and accessibility of AI technologies. Stakeholders, ranging from developers to academic researchers, have expressed enthusiasm for the open-source nature and economic viability of R1, which could spur further innovations in AI applications and broaden its accessibility to a wider audience.\n\nSource: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: Furthermore, the public reaction to DeepSeek R1 has underscored its economic impact. The model's substantial cost reduction has not only drawn commendation but also stimulated a broader discussion on the sustainability and accessibility of AI technologies. Stakeholders, ranging from developers to academic researchers, have expressed enthusiasm for the open-source nature and economic viability of R1, which could spur further innovations in AI applications and broaden its accessibility to a wider audience.\n\nSource: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: The public reception of DeepSeek R1 has been overwhelmingly positive, fueled by its open-source nature and the stark cost advantages it presents. Users across the globe, particularly in smaller enterprises and academic circles, have praised its accessibility and the democratization of AI capabilities it represents. However, as with any emerging technology, concerns have surfaced regarding potential biases inherent in its training data and the security implications of an open-source model.\nThe public reception of DeepSeek R1 has been overwhelmingly positive, fueled by its open-source nature and the stark cost advantages it presents. Users across the globe, particularly in smaller enterprises and academic circles, have praised its accessibility and the democratization of AI capabilities it represents. However, as with any emerging technology, concerns have surfaced regarding potential biases inherent in its training data and the security implications of an open-source model.\n\nSource: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: Amidst these advantages, the implications for the AI industry are profound, signifying a shift toward more collaborative, open-source developmental methodologies. As open-source AI models like DeepSeek R1 continue to gain traction, they are likely to spur a wave of innovation, enhance collaborative research, and give rise to new market opportunities focused on AI optimization and customization services. These dynamics indicate a vibrant future where cost efficiency becomes central to AI advancement and adoption.\n\nSource: https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money\nTitle: DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money | AI News\nContent: Amidst these advantages, the implications for the AI industry are profound, signifying a shift toward more collaborative, open-source developmental methodologies. As open-source AI models like DeepSeek R1 continue to gain traction, they are likely to spur a wave of innovation, enhance collaborative research, and give rise to new market opportunities focused on AI optimization and customization services. These dynamics indicate a vibrant future where cost efficiency becomes central to AI advancement and adoption.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:37.947209",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://unfoldai.com/deepseek-r1/\nTitle: DeepSeek-R1 \u2014 Training Language Models to reason through Reinforcement Learning | UnfoldAI\nContent: handle sequences up to 128K tokens in length. The architecture\u2019s efficiency becomes apparent in the model\u2019s ability to generate thousands of reasoning tokens per response while maintaining coherence and accuracy throughout extended chains of thought. Implementation overview The core innovation in DeepSeek-R1 lies in its training approach. Instead of relying on supervised fine-tuning, the initial model (DeepSeek-R1-Zero) uses pure reinforcement learning to develop reasoning capabilities. This approach begins with the base model and employs Group Relative Policy Optimization (GRPO), eliminating the need for a separate critic model. The GRPO implementation uses a reward function that combines accuracy and format adherence: def compute_reward(response, ground_truth): accuracy_reward = evaluate_correctness(response, ground_truth) format_reward = check_formatting(response) return accuracy_reward + format_reward * format_weight Training pipeline The training process consists of four distinct\n\nSource: https://unfoldai.com/deepseek-r1/\nTitle: DeepSeek-R1 \u2014 Training Language Models to reason through Reinforcement Learning | UnfoldAI\nContent: handle sequences up to 128K tokens in length. The architecture\u2019s efficiency becomes apparent in the model\u2019s ability to generate thousands of reasoning tokens per response while maintaining coherence and accuracy throughout extended chains of thought. Implementation overview The core innovation in DeepSeek-R1 lies in its training approach. Instead of relying on supervised fine-tuning, the initial model (DeepSeek-R1-Zero) uses pure reinforcement learning to develop reasoning capabilities. This approach begins with the base model and employs Group Relative Policy Optimization (GRPO), eliminating the need for a separate critic model. The GRPO implementation uses a reward function that combines accuracy and format adherence: def compute_reward(response, ground_truth): accuracy_reward = evaluate_correctness(response, ground_truth) format_reward = check_formatting(response) return accuracy_reward + format_reward * format_weight Training pipeline The training process consists of four distinct\n\nSource: https://unfoldai.com/deepseek-r1/\nTitle: DeepSeek-R1 \u2014 Training Language Models to reason through Reinforcement Learning | UnfoldAI\nContent: handle sequences up to 128K tokens in length. The architecture\u2019s efficiency becomes apparent in the model\u2019s ability to generate thousands of reasoning tokens per response while maintaining coherence and accuracy throughout extended chains of thought. Implementation overview The core innovation in DeepSeek-R1 lies in its training approach. Instead of relying on supervised fine-tuning, the initial model (DeepSeek-R1-Zero) uses pure reinforcement learning to develop reasoning capabilities. This approach begins with the base model and employs Group Relative Policy Optimization (GRPO), eliminating the need for a separate critic model. The GRPO implementation uses a reward function that combines accuracy and format adherence: def compute_reward(response, ground_truth): accuracy_reward = evaluate_correctness(response, ground_truth) format_reward = check_formatting(response) return accuracy_reward + format_reward * format_weight Training pipeline The training process consists of four distinct\n\nSource: https://unfoldai.com/deepseek-r1/\nTitle: DeepSeek-R1 \u2014 Training Language Models to reason through Reinforcement Learning | UnfoldAI\nContent: of reasoning tokens per response while maintaining coherence and accuracy throughout extended chains of thought. Implementation overview The core innovation in DeepSeek-R1 lies in its training approach. Instead of relying on supervised fine-tuning, the initial model (DeepSeek-R1-Zero) uses pure reinforcement learning to develop reasoning capabilities. This approach begins with the base model and employs Group Relative Policy Optimization (GRPO), eliminating the need for a separate critic model. The GRPO implementation uses a reward function that combines accuracy and format adherence: def compute_reward(response, ground_truth): accuracy_reward = evaluate_correctness(response, ground_truth) format_reward = check_formatting(response) return accuracy_reward + format_reward * format_weight Training pipeline The training process consists of four distinct phases. The initial phase applies RL directly to the base model, generating DeepSeek-R1-Zero. This model achieves a 71.0% accuracy on\n\nSource: https://medium.com/@namnguyenthe/deepseek-r1-architecture-and-training-explain-83319903a684\nTitle: DeepSeek-R1: Architecture and training explain | by The Nam | Jan, 2025 | Medium\nContent: DeepSeek-R1 aims to improve from the Zero by incorporating a multi-stage post-training process.\nIn contrast to R1-Zero, R1 began with Supervised Fine-Tuning (SFT) to overcome the cold-start phase of RL. The labels were first generated by directly prompting R1-Zero for answers using a few-shot demonstration. These labels were then refined through post-processing by human annotators. Thousands of cold-start samples were collected for fine-tuning during this step.\nAfter fine-tuning DeepSeek-V3-Base on the cold-start data, the authors applied the same large-scale reinforcement learning training process used in R1-Zero. This phase focused on enhancing the model\u2019s reasoning capabilities. To address the language mixing issue, they introduced a language consistency reward during RL training, which is calculated as the proportion of target language words in the Chain-of-Thought (CoT).\n\nSource: https://aipapersacademy.com/deepseek-r1/\nTitle: DeepSeek-R1 Paper Explained - A New RL LLMs Era in AI?\nContent: To address these issues, DeepSeek-R1 is trained in a four phases pipeline:\nCold Start (Phase 1): Starting with the pre-trained model DeepSeek-V3-Base, the model undergoes supervised fine-tuning on a small dataset of results collected from DeepSeek-R1-Zero. These results were validated as high-quality and readable. This dataset contains thousands of samples, making it relatively small. Incorporating a supervised fine-tuning phase on this small, high-quality dataset helps DeepSeek-R1 mitigate the readability issues observed in the initial model.\nReasoning Reinforcement Learning (Phase 2): This phase applies the same large-scale reinforcement learning we\u2019ve reviewed for the previous model to enhance the model\u2019s reasoning capabilities. Specifically, in tasks such as coding, math, science and logic reasoning, where clear solutions can define rewarding rules for the reinforcement learning process.\n\nSource: https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it\nTitle: How DeepSeek-R1 Was Built; For dummies\nContent: these challenges. In the case of training the DeepSeek-R1 model, a lot of training methods were used:Here\u00e2\u0080\u0099s a quick explanation of each training stage and what it was done:Step 1: They fine-tuned a base model (DeepSeek-V3-Base) with thousands of cold-start data points to lay a solid foundation. FYI, thousands of cold-start data points is a tiny fraction compared to the millions or even billions of labeled data points typically required for supervised learning at scale.Step 2: Applied pure RL (similar to R1-Zero) to enhance reasoning skills.Step 3: Near RL convergence, they used rejection sampling where the model created it\u00e2\u0080\u0099s own labeled data (synthetic data) by selecting the best examples from the last successful RL run. Those rumors you've heard about OpenAI using smaller model to generate synthetic data for the O1 model? This is basically it.Step 4: The new synthetic data was merged with supervised data from DeepSeek-V3-Base in domains like writing, factual QA, and\n\nSource: https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it\nTitle: How DeepSeek-R1 Was Built; For dummies\nContent: these challenges. In the case of training the DeepSeek-R1 model, a lot of training methods were used:Here\u00e2\u0080\u0099s a quick explanation of each training stage and what it was done:Step 1: They fine-tuned a base model (DeepSeek-V3-Base) with thousands of cold-start data points to lay a solid foundation. FYI, thousands of cold-start data points is a tiny fraction compared to the millions or even billions of labeled data points typically required for supervised learning at scale.Step 2: Applied pure RL (similar to R1-Zero) to enhance reasoning skills.Step 3: Near RL convergence, they used rejection sampling where the model created it\u00e2\u0080\u0099s own labeled data (synthetic data) by selecting the best examples from the last successful RL run. Those rumors you've heard about OpenAI using smaller model to generate synthetic data for the O1 model? This is basically it.Step 4: The new synthetic data was merged with supervised data from DeepSeek-V3-Base in domains like writing, factual QA, and\n\nSource: https://aipapersacademy.com/deepseek-r1/\nTitle: DeepSeek-R1 Paper Explained - A New RL LLMs Era in AI?\nContent: presents a state-of-the-art, open-source reasoning model and a detailed recipe for training such models using large-scale reinforcement learning techniques. DeepSeek-R1 paper title (Source) Recap: LLMs Training Process LLMs Training Process Before we dive into the paper itself, let\u2019s briefly recap the training process for LLMs. Typically, LLMs undergo three main stages of training: Pre-training: In this stage, LLMs are pre-trained on vast amounts of text and code to learn general-purpose knowledge. This step helps the model become proficient at predicting the next token in a sequence. For example, given an input like \u201cwrite a bedtime _,\u201d the model can complete it with a reasonable word, such as \u201cstory.\u201d However, after pre-training, the model still struggles to follow human instructions. The next stage addresses this. Supervised Fine-tuning: In this stage, the model is fine-tuned on an instruction dataset. Each sample from the dataset consists of an instruction-response pair, where the\n\nSource: https://unfoldai.com/deepseek-r1/\nTitle: DeepSeek-R1 \u2014 Training Language Models to reason through Reinforcement Learning | UnfoldAI\nContent: The training pipeline combines pure RL (DeepSeek-R1-Zero) with cold-start data and iterative fine-tuning, enabling deployment on consumer hardware through distilled versions as small as 1.5B parameters. Important links: https://huggingface.co/deepseek-ai/DeepSeek-R1 (original model card) https://www.deepseek.com/ (Official website) https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf (technical paper) https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B (Distilled model, based on Qwen \u2013 1.5B) https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B (Distilled model, based on Qwen \u2013 7B) https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B (Distilled model, based on Qwen \u2013 32B) https://ollama.com/library/deepseek-r1 (Ollama DeepSeek R1) https://unsloth.ai/blog/deepseek-r1 (DeepSeek R1 in Unsloth) Model architecture DeepSeek-R1 builds upon the Mixture of Experts (MoE) architecture from its base model DeepSeek-V3, employing a sparse activation\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:39.492109",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: Performance Benchmarks DeepSeek\u00e2\u0080\u0099s R1 model performs on par with OpenAI\u00e2\u0080\u0099s A1 models across many reasoning benchmarks: Reasoning and Math Tasks: R1 rivals or outperforms A1 models in accuracy and depth of reasoning. Coding Tasks: A1 models generally perform better in LiveCode Bench and CodeForces tasks. Simple QA: R1 often outpaces A1 in structured QA tasks (e.g., 47% accuracy vs. 30%). One notable finding is that longer reasoning chains generally improve performance. This aligns with insights from Microsoft\u00e2\u0080\u0099s Med-Prompt framework and OpenAI\u00e2\u0080\u0099s observations on test-time compute and reasoning depth. Challenges and Observations Despite its strengths, R1 has some limitations: Mixing English and Chinese responses due to a lack of supervised fine-tuning. Less polished responses compared to chat models like OpenAI\u00e2\u0080\u0099s GPT. These issues were addressed during R1\u00e2\u0080\u0099s refinement process, including supervised fine-tuning and human feedback. Prompt Engineering Insights A fascinating takeaway\n\nSource: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: Performance Benchmarks DeepSeek\u00e2\u0080\u0099s R1 model performs on par with OpenAI\u00e2\u0080\u0099s A1 models across many reasoning benchmarks: Reasoning and Math Tasks: R1 rivals or outperforms A1 models in accuracy and depth of reasoning. Coding Tasks: A1 models generally perform better in LiveCode Bench and CodeForces tasks. Simple QA: R1 often outpaces A1 in structured QA tasks (e.g., 47% accuracy vs. 30%). One notable finding is that longer reasoning chains generally improve performance. This aligns with insights from Microsoft\u00e2\u0080\u0099s Med-Prompt framework and OpenAI\u00e2\u0080\u0099s observations on test-time compute and reasoning depth. Challenges and Observations Despite its strengths, R1 has some limitations: Mixing English and Chinese responses due to a lack of supervised fine-tuning. Less polished responses compared to chat models like OpenAI\u00e2\u0080\u0099s GPT. These issues were addressed during R1\u00e2\u0080\u0099s refinement process, including supervised fine-tuning and human feedback. Prompt Engineering Insights A fascinating takeaway\n\nSource: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: Benchmarks DeepSeek\u00e2\u0080\u0099s R1 model performs on par with OpenAI\u00e2\u0080\u0099s A1 models across many reasoning benchmarks: Reasoning and Math Tasks: R1 rivals or outperforms A1 models in accuracy and depth of reasoning. Coding Tasks: A1 models generally perform better in LiveCode Bench and CodeForces tasks. Simple QA: R1 often outpaces A1 in structured QA tasks (e.g., 47% accuracy vs. 30%). One notable finding is that longer reasoning chains generally improve performance. This aligns with insights from Microsoft\u00e2\u0080\u0099s Med-Prompt framework and OpenAI\u00e2\u0080\u0099s observations on test-time compute and reasoning depth. Challenges and Observations Despite its strengths, R1 has some limitations: Mixing English and Chinese responses due to a lack of supervised fine-tuning. Less polished responses compared to chat models like OpenAI\u00e2\u0080\u0099s GPT. These issues were addressed during R1\u00e2\u0080\u0099s refinement process, including supervised fine-tuning and human feedback. Prompt Engineering Insights A fascinating takeaway from\n\nSource: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: Distillation to Smaller Models:\nDeepSeek-R1\u00e2\u0080\u0099s reasoning capabilities were distilled into smaller, efficient models like Qwen and Llama-3.1-8B, and Llama-3.3-70B-Instruct\nDeepSeek R-1 performance\nThe researchers tested DeepSeek R-1 across a variety of benchmarks and against top models: o1, GPT-4o, and Claude 3.5 Sonnet, o1-mini.\nThe benchmarks were broken down into several categories, shown below in the table: English, Code, Math, and Chinese.\nThe following parameters were applied across all models:\nMaximum generation length: 32,768 tokens.\nSampling configuration:Temperature: 0.6.Top-p value: 0.95.\nTop-p value: 0.95.\nPass@1 estimation: Generated 64 responses per query.\nDeepSeek R1 outperformed o1, Claude 3.5 Sonnet and other models in the majority of reasoning benchmarks\no1 was the best-performing model in four out of the five coding-related benchmarks\nDeepSeek performed well on creative and long-context task task, like AlpacaEval 2.0 and ArenaHard, outperforming all other models\n\nSource: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: Distillation into smaller models (LLaMA 3.1 and 3.3 at various sizes).\nDeepSeek\u00e2\u0080\u0099s R1 model performs on par with OpenAI\u00e2\u0080\u0099s A1 models across many reasoning benchmarks:\nReasoning and Math Tasks: R1 rivals or outperforms A1 models in accuracy and depth of reasoning.\nCoding Tasks: A1 models generally perform better in LiveCode Bench and CodeForces tasks.\nSimple QA: R1 often outpaces A1 in structured QA tasks (e.g., 47% accuracy vs. 30%).\nOne notable finding is that longer reasoning chains generally improve performance. This aligns with insights from Microsoft\u00e2\u0080\u0099s Med-Prompt framework and OpenAI\u00e2\u0080\u0099s observations on test-time compute and reasoning depth.\nChallenges and Observations\nDespite its strengths, R1 has some limitations:\nMixing English and Chinese responses due to a lack of supervised fine-tuning.\nLess polished responses compared to chat models like OpenAI\u00e2\u0080\u0099s GPT.\nThese issues were addressed during R1\u00e2\u0080\u0099s refinement process, including supervised fine-tuning and human feedback.\n\nSource: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: on R10, R1 added several enhancements: Curated datasets with long Chain of Thought examples. Incorporation of R10-generated reasoning chains. Human preference alignment for polished responses. Distillation into smaller models (LLaMA 3.1 and 3.3 at various sizes). Performance Benchmarks DeepSeek\u00e2\u0080\u0099s R1 model performs on par with OpenAI\u00e2\u0080\u0099s A1 models across many reasoning benchmarks: Reasoning and Math Tasks: R1 rivals or outperforms A1 models in accuracy and depth of reasoning. Coding Tasks: A1 models generally perform better in LiveCode Bench and CodeForces tasks. Simple QA: R1 often outpaces A1 in structured QA tasks (e.g., 47% accuracy vs. 30%). One notable finding is that longer reasoning chains generally improve performance. This aligns with insights from Microsoft\u00e2\u0080\u0099s Med-Prompt framework and OpenAI\u00e2\u0080\u0099s observations on test-time compute and reasoning depth. Challenges and Observations Despite its strengths, R1 has some limitations: Mixing English and Chinese responses due to a\n\nSource: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: refine its reasoning capabilities furtherHuman Preference Alignment:A secondary RL stage improved the model\u00e2\u0080\u0099s helpfulness and harmlessness, ensuring better alignment with user needsDistillation to Smaller Models:DeepSeek-R1\u00e2\u0080\u0099s reasoning capabilities were distilled into smaller, efficient models like Qwen and Llama-3.1-8B, and Llama-3.3-70B-Instruct\u00e2\u0080\u008dDeepSeek R-1 performanceThe researchers tested DeepSeek R-1 across a variety of benchmarks and against top models: o1, GPT-4o, and Claude 3.5 Sonnet, o1-mini.The benchmarks were broken down into several categories, shown below in the table: English, Code, Math, and Chinese.SetupThe following parameters were applied across all models:Maximum generation length: 32,768 tokens.Sampling configuration:Temperature: 0.6.Top-p value: 0.95.Pass@1 estimation: Generated 64 responses per query.\u00e2\u0080\u008d\u00e2\u0080\u008dDeepSeek R1 outperformed o1, Claude 3.5 Sonnet and other models in the majority of reasoning benchmarkso1 was the best-performing model in four out of\n\nSource: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: refine its reasoning capabilities furtherHuman Preference Alignment:A secondary RL stage improved the model\u00e2\u0080\u0099s helpfulness and harmlessness, ensuring better alignment with user needsDistillation to Smaller Models:DeepSeek-R1\u00e2\u0080\u0099s reasoning capabilities were distilled into smaller, efficient models like Qwen and Llama-3.1-8B, and Llama-3.3-70B-Instruct\u00e2\u0080\u008dDeepSeek R-1 performanceThe researchers tested DeepSeek R-1 across a variety of benchmarks and against top models: o1, GPT-4o, and Claude 3.5 Sonnet, o1-mini.The benchmarks were broken down into several categories, shown below in the table: English, Code, Math, and Chinese.SetupThe following parameters were applied across all models:Maximum generation length: 32,768 tokens.Sampling configuration:Temperature: 0.6.Top-p value: 0.95.Pass@1 estimation: Generated 64 responses per query.\u00e2\u0080\u008d\u00e2\u0080\u008dDeepSeek R1 outperformed o1, Claude 3.5 Sonnet and other models in the majority of reasoning benchmarkso1 was the best-performing model in four out of\n\nSource: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: refine its reasoning capabilities furtherHuman Preference Alignment:A secondary RL stage improved the model\u00e2\u0080\u0099s helpfulness and harmlessness, ensuring better alignment with user needsDistillation to Smaller Models:DeepSeek-R1\u00e2\u0080\u0099s reasoning capabilities were distilled into smaller, efficient models like Qwen and Llama-3.1-8B, and Llama-3.3-70B-Instruct\u00e2\u0080\u008dDeepSeek R-1 performanceThe researchers tested DeepSeek R-1 across a variety of benchmarks and against top models: o1, GPT-4o, and Claude 3.5 Sonnet, o1-mini.The benchmarks were broken down into several categories, shown below in the table: English, Code, Math, and Chinese.SetupThe following parameters were applied across all models:Maximum generation length: 32,768 tokens.Sampling configuration:Temperature: 0.6.Top-p value: 0.95.Pass@1 estimation: Generated 64 responses per query.\u00e2\u0080\u008d\u00e2\u0080\u008dDeepSeek R1 outperformed o1, Claude 3.5 Sonnet and other models in the majority of reasoning benchmarkso1 was the best-performing model in four out of\n\nSource: https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1\nTitle: DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1\nContent: refine its reasoning capabilities furtherHuman Preference Alignment:A secondary RL stage improved the model\u00e2\u0080\u0099s helpfulness and harmlessness, ensuring better alignment with user needsDistillation to Smaller Models:DeepSeek-R1\u00e2\u0080\u0099s reasoning capabilities were distilled into smaller, efficient models like Qwen and Llama-3.1-8B, and Llama-3.3-70B-Instruct\u00e2\u0080\u008dDeepSeek R-1 performanceThe researchers tested DeepSeek R-1 across a variety of benchmarks and against top models: o1, GPT-4o, and Claude 3.5 Sonnet, o1-mini.The benchmarks were broken down into several categories, shown below in the table: English, Code, Math, and Chinese.SetupThe following parameters were applied across all models:Maximum generation length: 32,768 tokens.Sampling configuration:Temperature: 0.6.Top-p value: 0.95.Pass@1 estimation: Generated 64 responses per query.\u00e2\u0080\u008d\u00e2\u0080\u008dDeepSeek R1 outperformed o1, Claude 3.5 Sonnet and other models in the majority of reasoning benchmarkso1 was the best-performing model in four out of\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:42.898542",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: In this article we have gathered all the insights like what\u2019s new in DeepSeek-R1, how to use it, and a comparison with its top competitors in the industry.\nIn this article we have gathered all the insights like what\u2019s new in DeepSeek-R1, how to use it, and a comparison with its top competitors in the industry.\nWhat is DeepSeek R1?\nDeepSeek-R1 is a groundbreaking family of reinforcement learning (RL)-driven AI models developed by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.\nDeepSeek-R1 is a groundbreaking family of\n\nSource: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: is DeepSeek R1?DeepSeek-R1 is a groundbreaking family of reinforcement learning (RL)-driven AI models developed by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.The model is designed to excel in dynamic, complex environments where traditional AI systems often struggle. Its ability to learn and adapt in real-time makes it ideal for applications such as autonomous driving, personalized healthcare, and even strategic decision-making in business.Types of DeepSeek-R1 ModelsThe R1 series includes three primary variants:DeepSeek-R1-Zero: The foundational model trained exclusively via RL (no human-annotated data), excelling in raw reasoning but limited by\n\nSource: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: is DeepSeek R1?DeepSeek-R1 is a groundbreaking family of reinforcement learning (RL)-driven AI models developed by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.The model is designed to excel in dynamic, complex environments where traditional AI systems often struggle. Its ability to learn and adapt in real-time makes it ideal for applications such as autonomous driving, personalized healthcare, and even strategic decision-making in business.Types of DeepSeek-R1 ModelsThe R1 series includes three primary variants:DeepSeek-R1-Zero: The foundational model trained exclusively via RL (no human-annotated data), excelling in raw reasoning but limited by\n\nSource: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: is DeepSeek R1?DeepSeek-R1 is a groundbreaking family of reinforcement learning (RL)-driven AI models developed by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.The model is designed to excel in dynamic, complex environments where traditional AI systems often struggle. Its ability to learn and adapt in real-time makes it ideal for applications such as autonomous driving, personalized healthcare, and even strategic decision-making in business.Types of DeepSeek-R1 ModelsThe R1 series includes three primary variants:DeepSeek-R1-Zero: The foundational model trained exclusively via RL (no human-annotated data), excelling in raw reasoning but limited by\n\nSource: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: DeepSeek-R1 is a groundbreaking family of\ndeveloped by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.\nThe model is designed to excel in dynamic, complex environments where traditional AI systems often struggle. Its ability to learn and adapt in real-time makes it ideal for applications such as autonomous driving, personalized healthcare, and even strategic decision-making in business.\n\nSource: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: all the insights like what\u2019s new in DeepSeek-R1, how to use it, and a comparison with its top competitors in the industry.What is DeepSeek R1What is DeepSeek R1?DeepSeek-R1 is a groundbreaking family of reinforcement learning (RL)-driven AI models developed by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.The model is designed to excel in dynamic, complex environments where traditional AI systems often struggle. Its ability to learn and adapt in real-time makes it ideal for applications such as autonomous driving, personalized healthcare, and even strategic decision-making in business.Types of DeepSeek-R1 ModelsThe R1 series includes three primary\n\nSource: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: all the insights like what\u2019s new in DeepSeek-R1, how to use it, and a comparison with its top competitors in the industry.What is DeepSeek R1What is DeepSeek R1?DeepSeek-R1 is a groundbreaking family of reinforcement learning (RL)-driven AI models developed by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.The model is designed to excel in dynamic, complex environments where traditional AI systems often struggle. Its ability to learn and adapt in real-time makes it ideal for applications such as autonomous driving, personalized healthcare, and even strategic decision-making in business.Types of DeepSeek-R1 ModelsThe R1 series includes three primary\n\nSource: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: this article we have gathered all the insights like what\u2019s new in DeepSeek-R1, how to use it, and a comparison with its top competitors in the industry.What is DeepSeek R1What is DeepSeek R1?DeepSeek-R1 is a groundbreaking family of reinforcement learning (RL)-driven AI models developed by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.The model is designed to excel in dynamic, complex environments where traditional AI systems often struggle. Its ability to learn and adapt in real-time makes it ideal for applications such as autonomous driving, personalized healthcare, and even strategic decision-making in business.Types of DeepSeek-R1 ModelsThe R1\n\nSource: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: the boundaries of artificial intelligence. Developerd as a solution for complex decision-making and optimization problems, DeepSeek-R1 is already earning attention for its advanced features and potential applications.In this article we have gathered all the insights like what\u2019s new in DeepSeek-R1, how to use it, and a comparison with its top competitors in the industry.What is DeepSeek R1What is DeepSeek R1?DeepSeek-R1 is a groundbreaking family of reinforcement learning (RL)-driven AI models developed by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.The model is designed to excel in dynamic, complex environments where traditional AI systems often\n\nSource: https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/\nTitle: DeepSeek Unveils DeepSeek-R1 RL Models: What\u2019s New and How It is better than OpenAI and Google - GeeksforGeeks\nContent: the boundaries of artificial intelligence. Developerd as a solution for complex decision-making and optimization problems, DeepSeek-R1 is already earning attention for its advanced features and potential applications.In this article we have gathered all the insights like what\u2019s new in DeepSeek-R1, how to use it, and a comparison with its top competitors in the industry.What is DeepSeek R1What is DeepSeek R1?DeepSeek-R1 is a groundbreaking family of reinforcement learning (RL)-driven AI models developed by Chinese AI firm DeepSeek. Designed to rival industry leaders like OpenAI and Google, it combines advanced reasoning capabilities with open-source accessibility. Unlike traditional models that rely on supervised fine-tuning (SFT), DeepSeek-R1 leverages pure RL training and hybrid methodologies to achieve state-of-the-art performance in STEM tasks, coding, and complex problem-solving.The model is designed to excel in dynamic, complex environments where traditional AI systems often\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:42.916169",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.01807272",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:24:42.948978",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'What is DeepSeek R1'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-28T16:25:31.263950",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'What is DeepSeek R1'",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0,
+ "type": "report",
+ "content": "selected_images",
+ "output": " January 28, 2025, from https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it",
+ "metadata": [
+ "https://media.cnn.com/api/v1/images/stellar/prod/gettyimages-2196223475.jpg?c=16x9&q=w_1280,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/prod/jon-stewart-01-27.jpg?c=16x9&q=h_144,w_256,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/videothumbnails/32276036-68347919-generated-thumbnail.jpg?c=16x9&q=h_144,w_256,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/prod/still-21319316-26194-119-still.jpg?c=16x9&q=h_144,w_256,c_fill",
+ "https://www.geeky-gadgets.com/wp-content/uploads/2024/11/deepseek-r1-vs-openai-o1-comparison.webp",
+ "https://unfoldai.com/storage/2025/01/lm-studio-deepseek-r1.jpg",
+ "https://unfoldai.com/storage/2025/01/DeepSeek-R1-performance.jpg",
+ "https://unfoldai.com/storage/2025/01/distill-models-deepseek-r1-performance.jpg",
+ "https://aipapersacademy.com/wp-content/uploads/2025/01/image-6.png",
+ "https://techcrunch.com/wp-content/uploads/2024/04/GettyImages-1652364481.jpg?w=1024"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1738061631_What is DeepSeek R1.md b/outputs/task_1738061631_What is DeepSeek R1.md
new file mode 100644
index 0000000000000000000000000000000000000000..f14ffcb803b2e2377c7a88d39a3c1eb17e86e47a
--- /dev/null
+++ b/outputs/task_1738061631_What is DeepSeek R1.md
@@ -0,0 +1,54 @@
+## DeepSeek R1: An In-Depth Report
+
+DeepSeek R1 represents a significant advancement in the field of reinforcement learning (RL) driven AI models. Developed by the Chinese AI firm DeepSeek, this family of models challenges established players like OpenAI and Google, offering a compelling combination of advanced reasoning capabilities and open-source accessibility ([GeeksforGeeks, 2025](https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/)). This report delves into the architecture, training process, performance benchmarks, and overall impact of DeepSeek R1, providing a comprehensive overview of its capabilities and limitations.
+
+### Architecture and Training
+
+DeepSeek R1 builds upon the Mixture of Experts (MoE) architecture of its base model, DeepSeek-V3, utilizing a sparse activation approach that allows it to handle sequences up to 128,000 tokens ([UnfoldAI, n.d.](https://unfoldai.com/deepseek-r1/)). This efficiency is evident in the model's ability to generate thousands of reasoning tokens per response while maintaining coherence and accuracy.
+
+The core innovation of DeepSeek R1 lies in its unique training approach. Unlike many large language models (LLMs) that rely heavily on supervised fine-tuning (SFT), the initial model, DeepSeek-R1-Zero, employs pure reinforcement learning to develop reasoning capabilities ([UnfoldAI, n.d.](https://unfoldai.com/deepseek-r1/)). This process begins with the base model and utilizes Group Relative Policy Optimization (GRPO), eliminating the need for a separate critic model. The GRPO implementation uses a reward function that balances accuracy and format adherence ([UnfoldAI, n.d.](https://unfoldai.com/deepseek-r1/)).
+
+DeepSeek R1's training pipeline consists of four distinct phases ([AI Papers Academy, n.d.](https://aipapersacademy.com/deepseek-r1/); [Vellum AI, n.d.](https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it)):
+
+1. **Cold Start:** The DeepSeek-V3-Base model is fine-tuned on a small, high-quality dataset of results generated by DeepSeek-R1-Zero. This addresses initial readability issues and provides a solid foundation for subsequent training. This dataset, while containing thousands of samples, is considered relatively small in the context of LLM training ([AI Papers Academy, n.d.](https://aipapersacademy.com/deepseek-r1/)).
+
+2. **Reasoning Reinforcement Learning:** This phase mirrors the training of R1-Zero, applying large-scale reinforcement learning to enhance reasoning skills, particularly in STEM fields, coding, and logic-based tasks ([AI Papers Academy, n.d.](https://aipapersacademy.com/deepseek-r1/); [Vellum AI, n.d.](https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it)).
+
+3. **Rejection Sampling:** As RL approaches convergence, the model generates its own synthetic labeled data through rejection sampling. This involves selecting the best examples from the previous RL run, a technique reminiscent of strategies reportedly used by OpenAI ([Vellum AI, n.d.](https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it)).
+
+4. **Merged Supervised Fine-tuning:** The synthetic data generated in the previous step is combined with supervised data from DeepSeek-V3-Base in domains like writing and factual question answering. This final fine-tuning stage refines the model's performance across a broader range of tasks ([Vellum AI, n.d.](https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it)).
+
+This multi-stage training process, incorporating both pure RL and SFT, distinguishes DeepSeek R1 from models relying solely on one approach. It aims to combine the raw reasoning power of RL with the refinement and polish achieved through supervised learning and human feedback ([Nguyen, 2025](https://medium.com/@namnguyenthe/deepseek-r1-architecture-and-training-explain-83319903a684)).
+
+### Performance and Benchmarks
+
+DeepSeek R1 has been evaluated against leading models like OpenAI's GPT series and Claude, demonstrating competitive performance across various benchmarks ([PromptHub, n.d.](https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1)). In reasoning and math tasks, R1 rivals or surpasses other models in accuracy and depth of reasoning. While GPT models generally perform better in coding benchmarks, R1 often outperforms in structured question-answering tasks. Notably, R1 excels in creative and long-context tasks, outperforming other models in benchmarks like AlpacaEval 2.0 and ArenaHard ([PromptHub, n.d.](https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1)). One key observation is that longer reasoning chains generally improve performance, aligning with findings from other research ([PromptHub, n.d.](https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1)).
+
+### Strengths and Weaknesses
+
+DeepSeek R1's strengths lie in its efficient architecture, innovative training methodology, and strong performance in reasoning tasks. Its open-source nature promotes accessibility and fosters community-driven development ([OpenTools AI, n.d.](https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money)). The model's cost-effectiveness is also a significant advantage, making advanced AI capabilities more accessible to a wider audience ([OpenTools AI, n.d.](https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money)).
+
+However, R1 is not without limitations. Early versions exhibited issues with language mixing and less polished responses compared to chat-optimized models ([PromptHub, n.d.](https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1)). While these issues have been addressed through subsequent refinements, including supervised fine-tuning and human feedback, they highlight the ongoing development and iterative nature of LLM training. Concerns regarding potential biases in training data and the security implications of open-source models also warrant attention ([OpenTools AI, n.d.](https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money)).
+
+### Conclusion
+
+DeepSeek R1 represents a compelling alternative in the rapidly evolving landscape of large language models. Its focus on reinforcement learning, coupled with a hybrid training approach, yields impressive reasoning capabilities. While challenges remain, the model's open-source nature, cost-effectiveness, and active development trajectory position it as a key player in the democratization of AI. Further research and community involvement will be crucial in realizing the full potential of DeepSeek R1 and addressing the broader implications of open-source AI models.
+
+
+### References
+
+AI Papers Academy. (n.d.). *DeepSeek-R1 Paper Explained - A New RL LLMs Era in AI?*. Retrieved January 28, 2025, from https://aipapersacademy.com/deepseek-r1/
+
+GeeksforGeeks. (2025). *DeepSeek Unveils DeepSeek-R1 RL Models: What’s New and How It is better than OpenAI and Google*. Retrieved January 28, 2025, from https://www.geeksforgeeks.org/deepseek-r1-rl-models-whats-new/
+
+Nguyen, T. N. (2025, January). *DeepSeek-R1: Architecture and training explain*. Medium. Retrieved January 28, 2025, from https://medium.com/@namnguyenthe/deepseek-r1-architecture-and-training-explain-83319903a684
+
+OpenTools AI. (n.d.). *DeepSeek R1: The Open-Source AI Champion Giving OpenAI a Run for Its Money*. Retrieved January 28, 2025, from https://opentools.ai/news/deepseek-r1-the-open-source-ai-champion-giving-openai-a-run-for-its-money
+
+PromptHub. (n.d.). *DeepSeek R-1 Model Overview and How it Ranks Against OpenAI's o1*. Retrieved January 28, 2025, from https://www.prompthub.us/blog/deepseek-r-1-model-overview-and-how-it-ranks-against-openais-o1
+
+Tysoolen. (n.d.). *DeepSeek R1 vs OpenAI o1: The Ultimate Benchmark Comparison*. Retrieved January 28, 2025, from https://www.tysoolen.com/story/deepseek-r1-openai-o1-ultimate-benchmark-showdown
+
+UnfoldAI. (n.d.). *DeepSeek-R1 — Training Language Models to reason through Reinforcement Learning*. Retrieved January 28, 2025, from https://unfoldai.com/deepseek-r1/
+
+Vellum AI. (n.d.). *How DeepSeek-R1 Was Built; For dummies*. Retrieved January 28, 2025, from https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it
\ No newline at end of file
diff --git a/outputs/task_1738061631_What is DeepSeek R1.pdf b/outputs/task_1738061631_What is DeepSeek R1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..4c9e7eb65a5368013b50990e7dd23d68c578824d
Binary files /dev/null and b/outputs/task_1738061631_What is DeepSeek R1.pdf differ
diff --git a/outputs/task_1738166363_Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.json b/outputs/task_1738166363_Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.json
new file mode 100644
index 0000000000000000000000000000000000000000..9f8fcf89b518986900d3fba80256a4b4adf3d389
--- /dev/null
+++ b/outputs/task_1738166363_Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.json
@@ -0,0 +1,1365 @@
+{
+ "timestamp": "2025-01-29T21:29:23.249087",
+ "events": [
+ {
+ "timestamp": "2025-01-29T21:29:26.624644",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:26.628740",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83e\udd16 AI Research Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:26.645849",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. ...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:31.653913",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:35.511464",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['\"Deepseek v3 R1\" model architecture and performance benchmarks', 'Impact of \"Deepseek v3 R1\" on AI industry competition and market share', '\"Deepseek v3 R1\" open-source code and community contributions analysis', '\"Deepseek v3 R1\" training costs and resource efficiency compared to competitors', 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. ']...",
+ "metadata": [
+ "\"Deepseek v3 R1\" model architecture and performance benchmarks",
+ "Impact of \"Deepseek v3 R1\" on AI industry competition and market share",
+ "\"Deepseek v3 R1\" open-source code and community contributions analysis",
+ "\"Deepseek v3 R1\" training costs and resource efficiency compared to competitors",
+ "Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. "
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:35.557976",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" model architecture and performance benchmarks'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:35.600410",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Impact of \"Deepseek v3 R1\" on AI industry competition and market share'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:35.617500",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" open-source code and community contributions analysis'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:35.625880",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" training costs and resource efficiency compared to competitors'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:35.656574",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:39.196567",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://pub.towardsai.net/the-deepseek-revolution-why-this-ai-model-is-outperforming-tech-giants-in-85-of-enterprise-tasks-8fa3fd1284a2\n",
+ "metadata": "https://pub.towardsai.net/the-deepseek-revolution-why-this-ai-model-is-outperforming-tech-giants-in-85-of-enterprise-tasks-8fa3fd1284a2"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:39.210874",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\n",
+ "metadata": "https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:39.223058",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://bdtechtalks.com/2025/01/29/deepseek-r1-winners-losers/\n",
+ "metadata": "https://bdtechtalks.com/2025/01/29/deepseek-r1-winners-losers/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:39.234880",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.nytimes.com/2025/01/28/technology/china-deepseek-ai-silicon-valley.html\n",
+ "metadata": "https://www.nytimes.com/2025/01/28/technology/china-deepseek-ai-silicon-valley.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:39.250086",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reuters.com/technology/artificial-intelligence/what-is-deepseek-why-is-it-disrupting-ai-sector-2025-01-27/\n",
+ "metadata": "https://www.reuters.com/technology/artificial-intelligence/what-is-deepseek-why-is-it-disrupting-ai-sector-2025-01-27/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:39.265417",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:39.281159",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.533380",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.547726",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 3 new images from 3 total images",
+ "metadata": [
+ "https://felloai.com/wp-content/uploads/2025/01/All-About-DeepSeek-Company-and-their-revolutionary-R1-and-V3-models-that-are-disruption-AI-Industry.jpg",
+ "https://felloai.com/wp-content/uploads/2025/01/deepseek-officially-tops-the-appstore-v0-eb8nxvvptdfe1.jpeg-831x1024.webp",
+ "https://felloai.com/wp-content/uploads/2025/01/Screenshot-2025-01-27-at-11.28.00-1-1024x387.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.569126",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.586432",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. ...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.892145",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\n",
+ "metadata": "https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.909097",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\n",
+ "metadata": "https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.919917",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.youtube.com/watch?v=sRxQBmHNbnU\n",
+ "metadata": "https://www.youtube.com/watch?v=sRxQBmHNbnU"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.927091",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\n",
+ "metadata": "https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.946773",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\n",
+ "metadata": "https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.957398",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:40.966575",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:45.925916",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:45.939560",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://www.innobu.com/wp-content/uploads/2025/01/n13ls_Open-Source_AI_-ar_169_-profile_i18tzau_-stylize_0_-_e68ad48f-3546-4573-9128-5d5367342248_0.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:45.949045",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:45.963743",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" model architecture and performance benchmarks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:46.132198",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.techmeme.com/250127/p29\n",
+ "metadata": "https://www.techmeme.com/250127/p29"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:46.146490",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://x.com/patrickmoorhead?lang=en\n",
+ "metadata": "https://x.com/patrickmoorhead?lang=en"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:46.161974",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reddit.com/r/LLMDevs/top/\n",
+ "metadata": "https://www.reddit.com/r/LLMDevs/top/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:46.183865",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\n",
+ "metadata": "https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:46.202194",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.techmeme.com/250127/p32\n",
+ "metadata": "https://www.techmeme.com/250127/p32"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:46.225817",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:46.240934",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:52.998526",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:53.024047",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 2 new images from 2 total images",
+ "metadata": [
+ "https://arbisoft.com/_next/image?url=%2F_next%2Fstatic%2Fmedia%2Fcontact.c5602fd6.png&w=1440&q=75",
+ "https://arbisoft.com/_next/image?url=https%3A%2F%2Fd1foa0aaimjyw4.cloudfront.net%2FBlog_Image_1_b56afb0c54.png&w=1920&q=75"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:53.047557",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:53.060080",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" open-source code and community contributions analysis...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:53.854500",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\n",
+ "metadata": "https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:54.156370",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reddit.com/r/swingtrading/comments/1ib4deg/let_me_cut_through_the_deepseek_fud_and_give_you/\n",
+ "metadata": "https://www.reddit.com/r/swingtrading/comments/1ib4deg/let_me_cut_through_the_deepseek_fud_and_give_you/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:54.391109",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:54.412873",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 2 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:55.991379",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:56.009580",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:56.021647",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:56.034146",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" training costs and resource efficiency compared to competitors...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:56.326926",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.barchart.com/story/news/30627491/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today\n",
+ "metadata": "https://www.barchart.com/story/news/30627491/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:56.427207",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\n",
+ "metadata": "https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:57.107052",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1\n",
+ "metadata": "https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:57.124851",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:29:57.140154",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 3 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:30:00.692164",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:30:00.701689",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://helios-i.mashable.com/imagery/articles/01ywQklBcfNJQHo7KRl3DJe/hero-image.fill.size_1248x702.v1738094497.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:30:00.716319",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:30:00.729140",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Impact of \"Deepseek v3 R1\" on AI industry competition and market share...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:30:03.548192",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: Multi-Head Latent Attention (MLA): Enhanced the model\u2019s ability to process nuanced relationships and manage multiple inputs simultaneously, making it highly effective for tasks requiring contextual depth.\nWhile overshadowed by high-profile releases from OpenAI and Meta, DeepSeek V3 quietly gained respect in research circles for its combination of scale, cost efficiency, and architectural innovation. It also laid the technical foundation for DeepSeek\u2019s most significant achievement to date: DeepSeek R1..\nDeepSeek took its boldest step yet with DeepSeek R1, launched on January 21, 2025. This open-source AI model has become the startup\u2019s most serious challenge to American tech giants, owing to its formidable reasoning power, lower operating costs, and developer-friendly features.\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: What\u2019s Next for DeepSeek\nDeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time.\nDeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets.\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: about AI development. Models like DeepSeek V3 and the groundbreaking DeepSeek R1 prove that success in AI doesn\u2019t always require billion-dollar budgets. Instead, efficiency, adaptability, and strategic partnerships can deliver results that rival even the most expensive models. What makes DeepSeek\u2019s journey even more extraordinary is the sheer shock it has generated within the AI community. Industry experts and researchers have been vocal about their amazement at how a smaller player has managed to compete with\u2014and even outperform\u2014some of the most advanced models developed by vastly better-funded organizations. DeepSeek is showing no signs of slowing down. Its recent launch of DeepThink + Web Search, which enables real-time online lookups, places it ahead of even OpenAI in some capabilities. Looking forward, the company is likely to focus on: Refining reinforcement learning pipelines to further enhance reasoning capabilities. Developing industry-specific models tailored for fields like\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: multiple inputs simultaneously, making it highly effective for tasks requiring contextual depth. While overshadowed by high-profile releases from OpenAI and Meta, DeepSeek V3 quietly gained respect in research circles for its combination of scale, cost efficiency, and architectural innovation. It also laid the technical foundation for DeepSeek\u2019s most significant achievement to date: DeepSeek R1.. DeepSeek R1 DeepSeek took its boldest step yet with DeepSeek R1, launched on January 21, 2025. This open-source AI model has become the startup\u2019s most serious challenge to American tech giants, owing to its formidable reasoning power, lower operating costs, and developer-friendly features. \ud83d\ude80 DeepSeek-R1 is here!\u26a1 Performance on par with OpenAI-o1\ud83d\udcd6 Fully open-source model & technical report\ud83c\udfc6 MIT licensed: Distill & commercialize freely!\ud83c\udf10 Website & API are live now! Try DeepThink at https://t.co/v1TFy7LHNy today!\ud83d\udc0b 1/n pic.twitter.com/7BlpWAPu6y\u2014 DeepSeek (@deepseek_ai) January 20, 2025 Key\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: Beyond MoE, Multi-Head Latent Attention (MLA) boosts the models\u2019 ability to process multiple data streams at once. By distributing focus across several \u201cattention heads,\u201d they can better identify contextual relationships and handle nuanced inputs\u2014even when processing tens of thousands of tokens in a single request.\nDeepSeek\u2019s innovations also extend to model distillation, where knowledge from its larger models is transferred to smaller, more efficient versions, such as DeepSeek-R1-Distill. These compact models retain much of the reasoning power of their larger counterparts but require significantly fewer computational resources, making advanced AI more accessible.\nReactions from the AI Community\nSeveral prominent figures in AI have weighed in on the disruptive potential of DeepSeek R1:\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:30:03.948493",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: AI enthusiasts\u2014the tools are open. The barriers are mental.\nEntrepreneurship is about taking action, not waiting for permission. DeepSeek just proved what\u2019s possible. What\u2019s stopping you from being next?\nBreaking the Myth: AI Development Without Billion-Dollar Budgets\nThe belief that only tech giants can build top-tier AI models has now been completely shattered. DeepSeek has successfully demonstrated that strategic efficiency can outperform unlimited resources.\nKey takeaways from DeepSeek\u2019s success:\nCost-Efficiency: They trained their model for less than $6 million, while competitors spend over $100 million.\nTechnical Performance: Independent benchmarks show DeepSeek outperforming GPT-4 in mathematics, programming, and reasoning tasks.\nHardware Limitations Are an Excuse: They developed high-performing AI without access to the most advanced GPUs.\n\nSource: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: spent years in the system. Why not challenge it?AI enthusiasts\u2014the tools are open. The barriers are mental.Entrepreneurship is about taking action, not waiting for permission. DeepSeek just proved what\u2019s possible. What\u2019s stopping you from being next?Breaking the Myth: AI Development Without Billion-Dollar BudgetsThe belief that only tech giants can build top-tier AI models has now been completely shattered. DeepSeek has successfully demonstrated that strategic efficiency can outperform unlimited resources.Key takeaways from DeepSeek\u2019s success:Cost-Efficiency: They trained their model for less than $6 million, while competitors spend over $100 million.Technical Performance: Independent benchmarks show DeepSeek outperforming GPT-4 in mathematics, programming, and reasoning tasks.Hardware Limitations Are an Excuse: They developed high-performing AI without access to the most advanced GPUs.For those still waiting for the \u201cright conditions\u201d to innovate, DeepSeek just proved you don\u2019t need\n\nSource: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: spent years in the system. Why not challenge it?AI enthusiasts\u2014the tools are open. The barriers are mental.Entrepreneurship is about taking action, not waiting for permission. DeepSeek just proved what\u2019s possible. What\u2019s stopping you from being next?Breaking the Myth: AI Development Without Billion-Dollar BudgetsThe belief that only tech giants can build top-tier AI models has now been completely shattered. DeepSeek has successfully demonstrated that strategic efficiency can outperform unlimited resources.Key takeaways from DeepSeek\u2019s success:Cost-Efficiency: They trained their model for less than $6 million, while competitors spend over $100 million.Technical Performance: Independent benchmarks show DeepSeek outperforming GPT-4 in mathematics, programming, and reasoning tasks.Hardware Limitations Are an Excuse: They developed high-performing AI without access to the most advanced GPUs.For those still waiting for the \u201cright conditions\u201d to innovate, DeepSeek just proved you don\u2019t need\n\nSource: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: spent years in the system. Why not challenge it?AI enthusiasts\u2014the tools are open. The barriers are mental.Entrepreneurship is about taking action, not waiting for permission. DeepSeek just proved what\u2019s possible. What\u2019s stopping you from being next?Breaking the Myth: AI Development Without Billion-Dollar BudgetsThe belief that only tech giants can build top-tier AI models has now been completely shattered. DeepSeek has successfully demonstrated that strategic efficiency can outperform unlimited resources.Key takeaways from DeepSeek\u2019s success:Cost-Efficiency: They trained their model for less than $6 million, while competitors spend over $100 million.Technical Performance: Independent benchmarks show DeepSeek outperforming GPT-4 in mathematics, programming, and reasoning tasks.Hardware Limitations Are an Excuse: They developed high-performing AI without access to the most advanced GPUs.For those still waiting for the \u201cright conditions\u201d to innovate, DeepSeek just proved you don\u2019t need\n\nSource: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: spent years in the system. Why not challenge it?AI enthusiasts\u2014the tools are open. The barriers are mental.Entrepreneurship is about taking action, not waiting for permission. DeepSeek just proved what\u2019s possible. What\u2019s stopping you from being next?Breaking the Myth: AI Development Without Billion-Dollar BudgetsThe belief that only tech giants can build top-tier AI models has now been completely shattered. DeepSeek has successfully demonstrated that strategic efficiency can outperform unlimited resources.Key takeaways from DeepSeek\u2019s success:Cost-Efficiency: They trained their model for less than $6 million, while competitors spend over $100 million.Technical Performance: Independent benchmarks show DeepSeek outperforming GPT-4 in mathematics, programming, and reasoning tasks.Hardware Limitations Are an Excuse: They developed high-performing AI without access to the most advanced GPUs.For those still waiting for the \u201cright conditions\u201d to innovate, DeepSeek just proved you don\u2019t need\n\nSource: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: spent years in the system. Why not challenge it?AI enthusiasts\u2014the tools are open. The barriers are mental.Entrepreneurship is about taking action, not waiting for permission. DeepSeek just proved what\u2019s possible. What\u2019s stopping you from being next?Breaking the Myth: AI Development Without Billion-Dollar BudgetsThe belief that only tech giants can build top-tier AI models has now been completely shattered. DeepSeek has successfully demonstrated that strategic efficiency can outperform unlimited resources.Key takeaways from DeepSeek\u2019s success:Cost-Efficiency: They trained their model for less than $6 million, while competitors spend over $100 million.Technical Performance: Independent benchmarks show DeepSeek outperforming GPT-4 in mathematics, programming, and reasoning tasks.Hardware Limitations Are an Excuse: They developed high-performing AI without access to the most advanced GPUs.For those still waiting for the \u201cright conditions\u201d to innovate, DeepSeek just proved you don\u2019t need\n\nSource: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: spent years in the system. Why not challenge it?AI enthusiasts\u2014the tools are open. The barriers are mental.Entrepreneurship is about taking action, not waiting for permission. DeepSeek just proved what\u2019s possible. What\u2019s stopping you from being next?Breaking the Myth: AI Development Without Billion-Dollar BudgetsThe belief that only tech giants can build top-tier AI models has now been completely shattered. DeepSeek has successfully demonstrated that strategic efficiency can outperform unlimited resources.Key takeaways from DeepSeek\u2019s success:Cost-Efficiency: They trained their model for less than $6 million, while competitors spend over $100 million.Technical Performance: Independent benchmarks show DeepSeek outperforming GPT-4 in mathematics, programming, and reasoning tasks.Hardware Limitations Are an Excuse: They developed high-performing AI without access to the most advanced GPUs.For those still waiting for the \u201cright conditions\u201d to innovate, DeepSeek just proved you don\u2019t need\n\nSource: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: spent years in the system. Why not challenge it?AI enthusiasts\u2014the tools are open. The barriers are mental.Entrepreneurship is about taking action, not waiting for permission. DeepSeek just proved what\u2019s possible. What\u2019s stopping you from being next?Breaking the Myth: AI Development Without Billion-Dollar BudgetsThe belief that only tech giants can build top-tier AI models has now been completely shattered. DeepSeek has successfully demonstrated that strategic efficiency can outperform unlimited resources.Key takeaways from DeepSeek\u2019s success:Cost-Efficiency: They trained their model for less than $6 million, while competitors spend over $100 million.Technical Performance: Independent benchmarks show DeepSeek outperforming GPT-4 in mathematics, programming, and reasoning tasks.Hardware Limitations Are an Excuse: They developed high-performing AI without access to the most advanced GPUs.For those still waiting for the \u201cright conditions\u201d to innovate, DeepSeek just proved you don\u2019t need\n\nSource: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: spent years in the system. Why not challenge it?AI enthusiasts\u2014the tools are open. The barriers are mental.Entrepreneurship is about taking action, not waiting for permission. DeepSeek just proved what\u2019s possible. What\u2019s stopping you from being next?Breaking the Myth: AI Development Without Billion-Dollar BudgetsThe belief that only tech giants can build top-tier AI models has now been completely shattered. DeepSeek has successfully demonstrated that strategic efficiency can outperform unlimited resources.Key takeaways from DeepSeek\u2019s success:Cost-Efficiency: They trained their model for less than $6 million, while competitors spend over $100 million.Technical Performance: Independent benchmarks show DeepSeek outperforming GPT-4 in mathematics, programming, and reasoning tasks.Hardware Limitations Are an Excuse: They developed high-performing AI without access to the most advanced GPUs.For those still waiting for the \u201cright conditions\u201d to innovate, DeepSeek just proved you don\u2019t need\n\nSource: https://medium.com/@dharmatej14june2001/deepseek-just-proved-a-point-whats-stopping-you-ac9db40d0bb7\nTitle: DeepSeek Just Proved a Point\u2014What\u2019s Stopping You? | by Karri Dharma Teja | Jan, 2025 | Medium\nContent: Open in appSign upSign inWriteSign upSign inDeepSeek Just Proved a Point\u2014What\u2019s Stopping You?AI Disruption & Open-Source InnovationKarri Dharma Teja\u00b7Follow3 min read\u00b71 hour ago--ListenShareIn the world of artificial intelligence, we\u2019ve been conditioned to believe that only the biggest tech giants with billion-dollar budgets can lead innovation. Companies like OpenAI, Google, and Meta invest hundreds of millions in training AI models, pushing the boundaries of deep learning. But DeepSeek has just rewritten the rules with an approach that proves money isn\u2019t the only key to success.DeepSeek Just Proved a Point \u2014 What\u2019s Stopping You?A Chinese AI startup, DeepSeek, built an open-source large language model (LLM) that not only competes with GPT-4 but does so at a fraction of the cost. While OpenAI and Google require tens of millions of dollars to train their models, DeepSeek achieved comparable performance for under $6 million by optimizing existing open-source frameworks and using efficient\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:30:06.885979",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\nTitle: DeepSeek-R1: Open-Source Reasoning\nContent: Reinforcement Learning: Two RL stages to refine performance and reasoning. Supervised Fine-Tuning: Final SFT stages to align the model with human preferences and optimize outputs. Architectural Advancements DeepSeek-R1 boasts an efficient design with 671 billion parameters, but only 37 billion are activated per token, thanks to its Mixture-of-Experts (MoE) architecture. Key innovations include: Multi-head Latent Attention (MLA): Reduces memory usage by compressing key-value cache size, achieving a 6.3x reduction. DeepSeekMoE Architecture: Activates only a fraction of parameters, optimizing compute efficiency. Multi-token Prediction (MTP): Improves data efficiency and inference speed by predicting multiple tokens simultaneously. These innovations allow DeepSeek-R1 to achieve performance levels comparable to OpenAI\u2019s o1 while requiring significantly less compute power. Performance Benchmarks DeepSeek-R1 excels in reasoning, mathematics, and coding tasks. Here\u2019s how it compares to\n\nSource: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\nTitle: DeepSeek-R1: Open-Source Reasoning\nContent: Reinforcement Learning: Two RL stages to refine performance and reasoning. Supervised Fine-Tuning: Final SFT stages to align the model with human preferences and optimize outputs. Architectural Advancements DeepSeek-R1 boasts an efficient design with 671 billion parameters, but only 37 billion are activated per token, thanks to its Mixture-of-Experts (MoE) architecture. Key innovations include: Multi-head Latent Attention (MLA): Reduces memory usage by compressing key-value cache size, achieving a 6.3x reduction. DeepSeekMoE Architecture: Activates only a fraction of parameters, optimizing compute efficiency. Multi-token Prediction (MTP): Improves data efficiency and inference speed by predicting multiple tokens simultaneously. These innovations allow DeepSeek-R1 to achieve performance levels comparable to OpenAI\u2019s o1 while requiring significantly less compute power. Performance Benchmarks DeepSeek-R1 excels in reasoning, mathematics, and coding tasks. Here\u2019s how it compares to\n\nSource: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\nTitle: DeepSeek-R1: Open-Source Reasoning\nContent: Reinforcement Learning: Two RL stages to refine performance and reasoning. Supervised Fine-Tuning: Final SFT stages to align the model with human preferences and optimize outputs. Architectural Advancements DeepSeek-R1 boasts an efficient design with 671 billion parameters, but only 37 billion are activated per token, thanks to its Mixture-of-Experts (MoE) architecture. Key innovations include: Multi-head Latent Attention (MLA): Reduces memory usage by compressing key-value cache size, achieving a 6.3x reduction. DeepSeekMoE Architecture: Activates only a fraction of parameters, optimizing compute efficiency. Multi-token Prediction (MTP): Improves data efficiency and inference speed by predicting multiple tokens simultaneously. These innovations allow DeepSeek-R1 to achieve performance levels comparable to OpenAI\u2019s o1 while requiring significantly less compute power. Performance Benchmarks DeepSeek-R1 excels in reasoning, mathematics, and coding tasks. Here\u2019s how it compares to\n\nSource: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\nTitle: DeepSeek-R1: Open-Source Reasoning\nContent: Reinforcement Learning: Two RL stages to refine performance and reasoning. Supervised Fine-Tuning: Final SFT stages to align the model with human preferences and optimize outputs. Architectural Advancements DeepSeek-R1 boasts an efficient design with 671 billion parameters, but only 37 billion are activated per token, thanks to its Mixture-of-Experts (MoE) architecture. Key innovations include: Multi-head Latent Attention (MLA): Reduces memory usage by compressing key-value cache size, achieving a 6.3x reduction. DeepSeekMoE Architecture: Activates only a fraction of parameters, optimizing compute efficiency. Multi-token Prediction (MTP): Improves data efficiency and inference speed by predicting multiple tokens simultaneously. These innovations allow DeepSeek-R1 to achieve performance levels comparable to OpenAI\u2019s o1 while requiring significantly less compute power. Performance Benchmarks DeepSeek-R1 excels in reasoning, mathematics, and coding tasks. Here\u2019s how it compares to\n\nSource: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\nTitle: DeepSeek-R1: Open-Source Reasoning\nContent: reasoning skills. Reinforcement Learning: Two RL stages to refine performance and reasoning. Supervised Fine-Tuning: Final SFT stages to align the model with human preferences and optimize outputs. Architectural Advancements DeepSeek-R1 boasts an efficient design with 671 billion parameters, but only 37 billion are activated per token, thanks to its Mixture-of-Experts (MoE) architecture. Key innovations include: Multi-head Latent Attention (MLA): Reduces memory usage by compressing key-value cache size, achieving a 6.3x reduction. DeepSeekMoE Architecture: Activates only a fraction of parameters, optimizing compute efficiency. Multi-token Prediction (MTP): Improves data efficiency and inference speed by predicting multiple tokens simultaneously. These innovations allow DeepSeek-R1 to achieve performance levels comparable to OpenAI\u2019s o1 while requiring significantly less compute power. Performance Benchmarks DeepSeek-R1 excels in reasoning, mathematics, and coding tasks. Here\u2019s how it\n\nSource: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\nTitle: DeepSeek-R1: Open-Source Reasoning\nContent: reasoning skills. Reinforcement Learning: Two RL stages to refine performance and reasoning. Supervised Fine-Tuning: Final SFT stages to align the model with human preferences and optimize outputs. Architectural Advancements DeepSeek-R1 boasts an efficient design with 671 billion parameters, but only 37 billion are activated per token, thanks to its Mixture-of-Experts (MoE) architecture. Key innovations include: Multi-head Latent Attention (MLA): Reduces memory usage by compressing key-value cache size, achieving a 6.3x reduction. DeepSeekMoE Architecture: Activates only a fraction of parameters, optimizing compute efficiency. Multi-token Prediction (MTP): Improves data efficiency and inference speed by predicting multiple tokens simultaneously. These innovations allow DeepSeek-R1 to achieve performance levels comparable to OpenAI\u2019s o1 while requiring significantly less compute power. Performance Benchmarks DeepSeek-R1 excels in reasoning, mathematics, and coding tasks. Here\u2019s how it\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: Head of AI Security & Strategy @ Aon 1d Edited Report this post The DeepSeek AI situation in 60 seconds: 1/ \ud835\udc0c\ud835\udc28\ud835\udc1d\ud835\udc1e\ud835\udc25 \ud835\udc11\ud835\udc1e\ud835\udc25\ud835\udc1e\ud835\udc1a\ud835\udc2c\ud835\udc1e\ud835\udc2c: DeepSeek, AI startup based in China, released two major models: DeepSeek-V3, a 671-billion-parameter Mixture of Experts (MoE) base model, and DeepSeek-R1, a reasoning-focused model built upon V3. Additionally, the company has released smaller, distilled versions of these models, ranging from 1.5 billion to 70 billion parameters, to enhance accessibility across various applications. The company reports development costs of approximately $6 million, though this figure is debated given their parent company's broader AI investments. 2/ \ud835\udc0f\ud835\udc1e\ud835\udc2b\ud835\udc1f\ud835\udc28\ud835\udc2b\ud835\udc26\ud835\udc1a\ud835\udc27\ud835\udc1c\ud835\udc1e \ud835\udc1a\ud835\udc27\ud835\udc1d \ud835\udc01\ud835\udc1e\ud835\udc27\ud835\udc1c\ud835\udc21\ud835\udc26\ud835\udc1a\ud835\udc2b\ud835\udc24\ud835\udc2c: R1 demonstrates impressive performance, achieving a 79.8% Pass@1 on AIME 2024, 97.3% on MATH-500, and a 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants. That said, it\u2019s worth noting that benchmarks like these can be gamed or skewed by including test data in training. Always take\n\nSource: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\nTitle: DeepSeek-R1: Open-Source Reasoning\nContent: Fine-Tuning: Final SFT stages to align the model with human preferences and optimize outputs. Architectural Advancements DeepSeek-R1 boasts an efficient design with 671 billion parameters, but only 37 billion are activated per token, thanks to its Mixture-of-Experts (MoE) architecture. Key innovations include: Multi-head Latent Attention (MLA): Reduces memory usage by compressing key-value cache size, achieving a 6.3x reduction. DeepSeekMoE Architecture: Activates only a fraction of parameters, optimizing compute efficiency. Multi-token Prediction (MTP): Improves data efficiency and inference speed by predicting multiple tokens simultaneously. These innovations allow DeepSeek-R1 to achieve performance levels comparable to OpenAI\u2019s o1 while requiring significantly less compute power. Performance Benchmarks DeepSeek-R1 excels in reasoning, mathematics, and coding tasks. Here\u2019s how it compares to OpenAI\u2019s o1-1217: CategoryBenchmarkDeepSeek-R1OpenAI o1-1217ReasoningAIME 2024\n\nSource: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\nTitle: DeepSeek-R1: Open-Source Reasoning\nContent: Fine-Tuning: Final SFT stages to align the model with human preferences and optimize outputs. Architectural Advancements DeepSeek-R1 boasts an efficient design with 671 billion parameters, but only 37 billion are activated per token, thanks to its Mixture-of-Experts (MoE) architecture. Key innovations include: Multi-head Latent Attention (MLA): Reduces memory usage by compressing key-value cache size, achieving a 6.3x reduction. DeepSeekMoE Architecture: Activates only a fraction of parameters, optimizing compute efficiency. Multi-token Prediction (MTP): Improves data efficiency and inference speed by predicting multiple tokens simultaneously. These innovations allow DeepSeek-R1 to achieve performance levels comparable to OpenAI\u2019s o1 while requiring significantly less compute power. Performance Benchmarks DeepSeek-R1 excels in reasoning, mathematics, and coding tasks. Here\u2019s how it compares to OpenAI\u2019s o1-1217: CategoryBenchmarkDeepSeek-R1OpenAI o1-1217ReasoningAIME 2024\n\nSource: https://www.innobu.com/deepseek-r1-open-source-reasoning-model-explained-ai-guide/\nTitle: DeepSeek-R1: Open-Source Reasoning\nContent: Fine-Tuning: Final SFT stages to align the model with human preferences and optimize outputs. Architectural Advancements DeepSeek-R1 boasts an efficient design with 671 billion parameters, but only 37 billion are activated per token, thanks to its Mixture-of-Experts (MoE) architecture. Key innovations include: Multi-head Latent Attention (MLA): Reduces memory usage by compressing key-value cache size, achieving a 6.3x reduction. DeepSeekMoE Architecture: Activates only a fraction of parameters, optimizing compute efficiency. Multi-token Prediction (MTP): Improves data efficiency and inference speed by predicting multiple tokens simultaneously. These innovations allow DeepSeek-R1 to achieve performance levels comparable to OpenAI\u2019s o1 while requiring significantly less compute power. Performance Benchmarks DeepSeek-R1 excels in reasoning, mathematics, and coding tasks. Here\u2019s how it compares to OpenAI\u2019s o1-1217: CategoryBenchmarkDeepSeek-R1OpenAI o1-1217ReasoningAIME 2024\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:30:13.811575",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: Our analyst team just revealed what they believe are the 10 best stocks to buy right now. See the 10 stocks \u00bbThat led most AI hardware names, many of which were trading at high valuations, to sell off hard. But how much of a risk does this new model really pose?DeepSeek rocks the AI worldDeepSeek is a Chinese AI lab that has been in operation since 2023. It was spun out of a quantitative hedge fund called High-Flyer, founded in 2015 by entrepreneur Liang Wenfeng. DeepSeek released the large language model V3 in December, but on January 20, it released the \"reasoning\" version of the model, called R1. By several metrics, R1 met or exceeded the latest OpenAI reasoning model, called o1.And DeepSeek was able to do it at just a fraction of the cost of o1, with computing power limited by 2022 and 2023 U.S. trade restrictions on leading AI GPUs. We are talking about one-tenth to one-thirtieth of the cost of models built by OpenAI and other \"Magnificent Seven\" names. DeepSeek's public paper on\n\nSource: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: Our analyst team just revealed what they believe are the 10 best stocks to buy right now. See the 10 stocks \u00bbThat led most AI hardware names, many of which were trading at high valuations, to sell off hard. But how much of a risk does this new model really pose?DeepSeek rocks the AI worldDeepSeek is a Chinese AI lab that has been in operation since 2023. It was spun out of a quantitative hedge fund called High-Flyer, founded in 2015 by entrepreneur Liang Wenfeng. DeepSeek released the large language model V3 in December, but on January 20, it released the \"reasoning\" version of the model, called R1. By several metrics, R1 met or exceeded the latest OpenAI reasoning model, called o1.And DeepSeek was able to do it at just a fraction of the cost of o1, with computing power limited by 2022 and 2023 U.S. trade restrictions on leading AI GPUs. We are talking about one-tenth to one-thirtieth of the cost of models built by OpenAI and other \"Magnificent Seven\" names. DeepSeek's public paper on\n\nSource: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: which were trading at high valuations, to sell off hard. But how much of a risk does this new model really pose?DeepSeek rocks the AI worldDeepSeek is a Chinese AI lab that has been in operation since 2023. It was spun out of a quantitative hedge fund called High-Flyer, founded in 2015 by entrepreneur Liang Wenfeng. DeepSeek released the large language model V3 in December, but on January 20, it released the \"reasoning\" version of the model, called R1. By several metrics, R1 met or exceeded the latest OpenAI reasoning model, called o1.And DeepSeek was able to do it at just a fraction of the cost of o1, with computing power limited by 2022 and 2023 U.S. trade restrictions on leading AI GPUs. We are talking about one-tenth to one-thirtieth of the cost of models built by OpenAI and other \"Magnificent Seven\" names. DeepSeek's public paper on R1 explains how the company cleverly used a variety of hacks to derive tremendous results from limited hardware. Over the weekend, its solutions\n\nSource: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: which were trading at high valuations, to sell off hard. But how much of a risk does this new model really pose?DeepSeek rocks the AI worldDeepSeek is a Chinese AI lab that has been in operation since 2023. It was spun out of a quantitative hedge fund called High-Flyer, founded in 2015 by entrepreneur Liang Wenfeng. DeepSeek released the large language model V3 in December, but on January 20, it released the \"reasoning\" version of the model, called R1. By several metrics, R1 met or exceeded the latest OpenAI reasoning model, called o1.And DeepSeek was able to do it at just a fraction of the cost of o1, with computing power limited by 2022 and 2023 U.S. trade restrictions on leading AI GPUs. We are talking about one-tenth to one-thirtieth of the cost of models built by OpenAI and other \"Magnificent Seven\" names. DeepSeek's public paper on R1 explains how the company cleverly used a variety of hacks to derive tremendous results from limited hardware. Over the weekend, its solutions\n\nSource: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: which were trading at high valuations, to sell off hard. But how much of a risk does this new model really pose?DeepSeek rocks the AI worldDeepSeek is a Chinese AI lab that has been in operation since 2023. It was spun out of a quantitative hedge fund called High-Flyer, founded in 2015 by entrepreneur Liang Wenfeng. DeepSeek released the large language model V3 in December, but on January 20, it released the \"reasoning\" version of the model, called R1. By several metrics, R1 met or exceeded the latest OpenAI reasoning model, called o1.And DeepSeek was able to do it at just a fraction of the cost of o1, with computing power limited by 2022 and 2023 U.S. trade restrictions on leading AI GPUs. We are talking about one-tenth to one-thirtieth of the cost of models built by OpenAI and other \"Magnificent Seven\" names. DeepSeek's public paper on R1 explains how the company cleverly used a variety of hacks to derive tremendous results from limited hardware. Over the weekend, its solutions\n\nSource: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: which were trading at high valuations, to sell off hard. But how much of a risk does this new model really pose?DeepSeek rocks the AI worldDeepSeek is a Chinese AI lab that has been in operation since 2023. It was spun out of a quantitative hedge fund called High-Flyer, founded in 2015 by entrepreneur Liang Wenfeng. DeepSeek released the large language model V3 in December, but on January 20, it released the \"reasoning\" version of the model, called R1. By several metrics, R1 met or exceeded the latest OpenAI reasoning model, called o1.And DeepSeek was able to do it at just a fraction of the cost of o1, with computing power limited by 2022 and 2023 U.S. trade restrictions on leading AI GPUs. We are talking about one-tenth to one-thirtieth of the cost of models built by OpenAI and other \"Magnificent Seven\" names. DeepSeek's public paper on R1 explains how the company cleverly used a variety of hacks to derive tremendous results from limited hardware. Over the weekend, its solutions\n\nSource: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: EDT.The obvious reason for the sell-off is last week's release of the AI reasoning model DeepSeek V3 R1. Over the weekend, it became apparent that DeepSeek, a little-known Chinese AI lab, had built a leading frontier AI model with significantly less computing power than had been thought necessary.Where to invest $1,000 right now? Our analyst team just revealed what they believe are the 10 best stocks to buy right now. See the 10 stocks \u00bbThat led most AI hardware names, many of which were trading at high valuations, to sell off hard. But how much of a risk does this new model really pose?DeepSeek rocks the AI worldDeepSeek is a Chinese AI lab that has been in operation since 2023. It was spun out of a quantitative hedge fund called High-Flyer, founded in 2015 by entrepreneur Liang Wenfeng. DeepSeek released the large language model V3 in December, but on January 20, it released the \"reasoning\" version of the model, called R1. By several metrics, R1 met or exceeded the latest OpenAI\n\nSource: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: has been in operation since 2023. It was spun out of a quantitative hedge fund called High-Flyer, founded in 2015 by entrepreneur Liang Wenfeng. DeepSeek released the large language model V3 in December, but on January 20, it released the \"reasoning\" version of the model, called R1. By several metrics, R1 met or exceeded the latest OpenAI reasoning model, called o1.And DeepSeek was able to do it at just a fraction of the cost of o1, with computing power limited by 2022 and 2023 U.S. trade restrictions on leading AI GPUs. We are talking about one-tenth to one-thirtieth of the cost of models built by OpenAI and other \"Magnificent Seven\" names. DeepSeek's public paper on R1 explains how the company cleverly used a variety of hacks to derive tremendous results from limited hardware. Over the weekend, its solutions withstood scrutiny by experts.In response, major AI hardware stocks are selling off. The thinking is that companies will now be able to spend less on AI infrastructure to build\n\nSource: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: Shares of AI-related hardware companies plunged on Monday, including Taiwan SemiconductorManufacturing(NYSE: TSM), Marvell Technology(NASDAQ: MRVL), and Arista Networks(NYSE: ANET), which fell 14.4%, 18%, and 21.9%, respectively, as of 1:11 PM EDT.The obvious reason for the sell-off is last week's release of the AI reasoning model DeepSeek V3 R1. Over the weekend, it became apparent that DeepSeek, a little-known Chinese AI lab, had built a leading frontier AI model with significantly less computing power than had been thought necessary.Where to invest $1,000 right now? Our analyst team just revealed what they believe are the 10 best stocks to buy right now. See the 10 stocks \u00bbThat led most AI hardware names, many of which were trading at high valuations, to sell off hard. But how much of a risk does this new model really pose?DeepSeek rocks the AI worldDeepSeek is a Chinese AI lab that has been in operation since 2023. It was spun out of a quantitative hedge fund called High-Flyer,\n\nSource: https://www.theglobeandmail.com/investing/markets/stocks/ANET-N/pressreleases/30627593/why-artificial-intelligence-ai-hardware-winners-taiwan-semiconductor-marvell-and-arista-networks-plunged-by-double-digits-today/\nTitle: Why Artificial Intelligence (AI) Hardware Winners Taiwan Semiconductor, Marvell, and Arista Networks Plunged by Double Digits Today - The Globe and Mail\nContent: version of the model, called R1. By several metrics, R1 met or exceeded the latest OpenAI reasoning model, called o1.And DeepSeek was able to do it at just a fraction of the cost of o1, with computing power limited by 2022 and 2023 U.S. trade restrictions on leading AI GPUs. We are talking about one-tenth to one-thirtieth of the cost of models built by OpenAI and other \"Magnificent Seven\" names. DeepSeek's public paper on R1 explains how the company cleverly used a variety of hacks to derive tremendous results from limited hardware. Over the weekend, its solutions withstood scrutiny by experts.In response, major AI hardware stocks are selling off. The thinking is that companies will now be able to spend less on AI infrastructure to build leading-edge models. Given the high valuations at which some AI hardware stocks were trading on the expectation of future growth, it's no surprise these stocks are selling off today.This could be bad news for Taiwan Semiconductor, which has a virtual\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:31:56.216008",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.techmeme.com/250127/p29\nTitle: Techmeme: Hands-on with DeepSeek's free chatbot: the R1 model is powerful, but suffers from rampant hallucinations and lacks some ChatGPT tools like the memory feature (Reece Rogers/Wired)\nContent: had remained as open as it was initially intended, the industry might not have rushed to compete by purchasing billions of GPUs. Open-source AI could have evolved more collaboratively and steadily, potentially leading to even better advancements by now.Thomas / @distributionat: why has deepseek conquered my timeline for several days? not sure, but some hypotheses - * the model is very good * anybody can talk to the model and tell that it's very good. unlike sonnet 3.5, which requires you to already know how to talk to models or talk to it a lot. * it'sRobert Sterling / @robertmsterling: Might be a dumb question, but can't OpenAI, Anthropic, and other AI companies just incorporate the best parts of DeepSeek's source code into their code, then use the massive GPU clusters at their disposal to train models even more powerful than DeepSeek? Am I missing something?@techbrospod: Today is the perfect day to call a VC, ask for their take on DeepSeek, wait until they finish talking, then sigh,\n\nSource: https://www.techmeme.com/250127/p29\nTitle: Techmeme: Hands-on with DeepSeek's free chatbot: the R1 model is powerful, but suffers from rampant hallucinations and lacks some ChatGPT tools like the memory feature (Reece Rogers/Wired)\nContent: had remained as open as it was initially intended, the industry might not have rushed to compete by purchasing billions of GPUs. Open-source AI could have evolved more collaboratively and steadily, potentially leading to even better advancements by now.Thomas / @distributionat: why has deepseek conquered my timeline for several days? not sure, but some hypotheses - * the model is very good * anybody can talk to the model and tell that it's very good. unlike sonnet 3.5, which requires you to already know how to talk to models or talk to it a lot. * it'sRobert Sterling / @robertmsterling: Might be a dumb question, but can't OpenAI, Anthropic, and other AI companies just incorporate the best parts of DeepSeek's source code into their code, then use the massive GPU clusters at their disposal to train models even more powerful than DeepSeek? Am I missing something?@techbrospod: Today is the perfect day to call a VC, ask for their take on DeepSeek, wait until they finish talking, then sigh,\n\nSource: https://www.techmeme.com/250127/p29\nTitle: Techmeme: Hands-on with DeepSeek's free chatbot: the R1 model is powerful, but suffers from rampant hallucinations and lacks some ChatGPT tools like the memory feature (Reece Rogers/Wired)\nContent: had remained as open as it was initially intended, the industry might not have rushed to compete by purchasing billions of GPUs. Open-source AI could have evolved more collaboratively and steadily, potentially leading to even better advancements by now.Thomas / @distributionat: why has deepseek conquered my timeline for several days? not sure, but some hypotheses - * the model is very good * anybody can talk to the model and tell that it's very good. unlike sonnet 3.5, which requires you to already know how to talk to models or talk to it a lot. * it'sRobert Sterling / @robertmsterling: Might be a dumb question, but can't OpenAI, Anthropic, and other AI companies just incorporate the best parts of DeepSeek's source code into their code, then use the massive GPU clusters at their disposal to train models even more powerful than DeepSeek? Am I missing something?@techbrospod: Today is the perfect day to call a VC, ask for their take on DeepSeek, wait until they finish talking, then sigh,\n\nSource: https://www.techmeme.com/250127/p29\nTitle: Techmeme: Hands-on with DeepSeek's free chatbot: the R1 model is powerful, but suffers from rampant hallucinations and lacks some ChatGPT tools like the memory feature (Reece Rogers/Wired)\nContent: had remained as open as it was initially intended, the industry might not have rushed to compete by purchasing billions of GPUs. Open-source AI could have evolved more collaboratively and steadily, potentially leading to even better advancements by now.Thomas / @distributionat: why has deepseek conquered my timeline for several days? not sure, but some hypotheses - * the model is very good * anybody can talk to the model and tell that it's very good. unlike sonnet 3.5, which requires you to already know how to talk to models or talk to it a lot. * it'sRobert Sterling / @robertmsterling: Might be a dumb question, but can't OpenAI, Anthropic, and other AI companies just incorporate the best parts of DeepSeek's source code into their code, then use the massive GPU clusters at their disposal to train models even more powerful than DeepSeek? Am I missing something?@techbrospod: Today is the perfect day to call a VC, ask for their take on DeepSeek, wait until they finish talking, then sigh,\n\nSource: https://www.techmeme.com/250127/p29\nTitle: Techmeme: Hands-on with DeepSeek's free chatbot: the R1 model is powerful, but suffers from rampant hallucinations and lacks some ChatGPT tools like the memory feature (Reece Rogers/Wired)\nContent: had remained as open as it was initially intended, the industry might not have rushed to compete by purchasing billions of GPUs. Open-source AI could have evolved more collaboratively and steadily, potentially leading to even better advancements by now.Thomas / @distributionat: why has deepseek conquered my timeline for several days? not sure, but some hypotheses - * the model is very good * anybody can talk to the model and tell that it's very good. unlike sonnet 3.5, which requires you to already know how to talk to models or talk to it a lot. * it'sRobert Sterling / @robertmsterling: Might be a dumb question, but can't OpenAI, Anthropic, and other AI companies just incorporate the best parts of DeepSeek's source code into their code, then use the massive GPU clusters at their disposal to train models even more powerful than DeepSeek? Am I missing something?@techbrospod: Today is the perfect day to call a VC, ask for their take on DeepSeek, wait until they finish talking, then sigh,\n\nSource: https://www.techmeme.com/250127/p29\nTitle: Techmeme: Hands-on with DeepSeek's free chatbot: the R1 model is powerful, but suffers from rampant hallucinations and lacks some ChatGPT tools like the memory feature (Reece Rogers/Wired)\nContent: had remained as open as it was initially intended, the industry might not have rushed to compete by purchasing billions of GPUs. Open-source AI could have evolved more collaboratively and steadily, potentially leading to even better advancements by now.Thomas / @distributionat: why has deepseek conquered my timeline for several days? not sure, but some hypotheses - * the model is very good * anybody can talk to the model and tell that it's very good. unlike sonnet 3.5, which requires you to already know how to talk to models or talk to it a lot. * it'sRobert Sterling / @robertmsterling: Might be a dumb question, but can't OpenAI, Anthropic, and other AI companies just incorporate the best parts of DeepSeek's source code into their code, then use the massive GPU clusters at their disposal to train models even more powerful than DeepSeek? Am I missing something?@techbrospod: Today is the perfect day to call a VC, ask for their take on DeepSeek, wait until they finish talking, then sigh,\n\nSource: https://www.techmeme.com/250127/p29\nTitle: Techmeme: Hands-on with DeepSeek's free chatbot: the R1 model is powerful, but suffers from rampant hallucinations and lacks some ChatGPT tools like the memory feature (Reece Rogers/Wired)\nContent: More: Will Knight / Wired: DeepSeek's New AI Model Sparks Shock, Awe, and Questions From US CompetitorsEliebak / Hugging Face: Open-R1: a fully open reproduction of DeepSeek-R1Chas Danner / New York Magazine: 7 Ways to Think About the DeepSeek AI FreakoutAllie Garfinkle / Fortune: The lessons and laments of DeepSeek, according to VCs\nWill Knight / Wired: DeepSeek's New AI Model Sparks Shock, Awe, and Questions From US Competitors\nEliebak / Hugging Face: Open-R1: a fully open reproduction of DeepSeek-R1\nChas Danner / New York Magazine: 7 Ways to Think About the DeepSeek AI Freakout\nAllie Garfinkle / Fortune: The lessons and laments of DeepSeek, according to VCs\n\nSource: https://www.techmeme.com/250127/p29\nTitle: Techmeme: Hands-on with DeepSeek's free chatbot: the R1 model is powerful, but suffers from rampant hallucinations and lacks some ChatGPT tools like the memory feature (Reece Rogers/Wired)\nContent: Robert Mao / @mave99a: If OpenAI had remained as open as it was initially intended, the industry might not have rushed to compete by purchasing billions of GPUs. Open-source AI could have evolved more collaboratively and steadily, potentially leading to even better advancements by now.\nThomas / @distributionat: why has deepseek conquered my timeline for several days? not sure, but some hypotheses - * the model is very good * anybody can talk to the model and tell that it's very good. unlike sonnet 3.5, which requires you to already know how to talk to models or talk to it a lot. * it's\nRobert Sterling / @robertmsterling: Might be a dumb question, but can't OpenAI, Anthropic, and other AI companies just incorporate the best parts of DeepSeek's source code into their code, then use the massive GPU clusters at their disposal to train models even more powerful than DeepSeek? Am I missing something?\n\nSource: https://www.techmeme.com/250127/p29\nTitle: Techmeme: Hands-on with DeepSeek's free chatbot: the R1 model is powerful, but suffers from rampant hallucinations and lacks some ChatGPT tools like the memory feature (Reece Rogers/Wired)\nContent: the industry might not have rushed to compete by purchasing billions of GPUs. Open-source AI could have evolved more collaboratively and steadily, potentially leading to even better advancements by now.Thomas / @distributionat: why has deepseek conquered my timeline for several days? not sure, but some hypotheses - * the model is very good * anybody can talk to the model and tell that it's very good. unlike sonnet 3.5, which requires you to already know how to talk to models or talk to it a lot. * it'sRobert Sterling / @robertmsterling: Might be a dumb question, but can't OpenAI, Anthropic, and other AI companies just incorporate the best parts of DeepSeek's source code into their code, then use the massive GPU clusters at their disposal to train models even more powerful than DeepSeek? Am I missing something?@techbrospod: Today is the perfect day to call a VC, ask for their take on DeepSeek, wait until they finish talking, then sigh, say \u201cyou are truly lost\u201d and hang up.Nate /\n\nSource: https://www.techmeme.com/250127/p32\nTitle: Techmeme: Sam Altman says DeepSeek's R1 is an \u201cimpressive model, particularly around what they're able to deliver for the price\u201d and OpenAI \u201cwill pull up some releases\u201d (Vlad Savov/Bloomberg)\nContent: @perplexity_ai: DeepSeek R1 is now available on Perplexity to support deep web research. This is hosted exclusively in US & EU data centers - your data never leaves Western servers. Toggle on Pro Search to try it out.\n@groqinc: \ud83d\udc40 Powered by DeepSeek on Groq!\nHaseeb / @hosseeb: There are ~200 co-authors on the DeepSeek R1 paper. The U.S. should offer each of them immediate citizenship and $20M in cash to move here.\nMiles Brundage / @miles_brundage: Wonder what Moo Deng thinks about DeepSeek\nThomas / @distributionat: why has deepseek conquered my timeline for several days? not sure, but some hypotheses - * the model is very good * anybody can talk to the model and tell that it's very good. unlike sonnet 3.5, which requires you to already know how to talk to models or talk to it a lot. * it's\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:31:56.239856",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.027217059999999998",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:31:56.254294",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_subtopics",
+ "output": "\ud83c\udf33 Generating subtopics for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:31:59.345822",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subtopics_generated",
+ "output": "\ud83d\udcca Subtopics generated for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:31:59.371390",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_introduction",
+ "output": "\u270d\ufe0f Writing introduction for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:13.492671",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "introduction_written",
+ "output": "\ud83d\udcdd Introduction written for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:13.505330",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'DeepSeek V3 R1: Architecture and Innovations'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:13.525463",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83e\udd16 AI Research Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:13.545153",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: DeepSeek V3 R1: Architecture and Innovations...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:17.759483",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:20.082530",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['\"Deepseek v3 R1\" architecture innovations', '\"Deepseek v3 R1\" impact AI industry \"Multi-Head Latent Attention\"', '\"Deepseek v3 R1\" performance benchmarks comparison \"open-source\"', '\"Deepseek v3 R1\" training efficiency cost analysis \"Mixture-of-Experts\"']...",
+ "metadata": [
+ "\"Deepseek v3 R1\" architecture innovations",
+ "\"Deepseek v3 R1\" impact AI industry \"Multi-Head Latent Attention\"",
+ "\"Deepseek v3 R1\" performance benchmarks comparison \"open-source\"",
+ "\"Deepseek v3 R1\" training efficiency cost analysis \"Mixture-of-Experts\""
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:20.097148",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" architecture innovations'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:20.113307",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" impact AI industry \"Multi-Head Latent Attention\"'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:20.131185",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" performance benchmarks comparison \"open-source\"'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:20.142348",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" training efficiency cost analysis \"Mixture-of-Experts\"'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:23.905633",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\n",
+ "metadata": "https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:23.922704",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: http://www.google.com/search?hl=en&q=\"Deepseek+v3+R1\"+architecture+innovations\n",
+ "metadata": "http://www.google.com/search?hl=en&q=\"Deepseek+v3+R1\"+architecture+innovations"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:23.963250",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:23.980289",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 2 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:24.674884",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:24.687172",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 5 total images",
+ "metadata": [
+ "https://naixianzhang.com/wp-content/uploads/2025/01/image-57.png?w=1024",
+ "https://naixianzhang.com/wp-content/uploads/2025/01/image-58.png?w=1024",
+ "https://naixianzhang.com/wp-content/uploads/2025/01/image-59.png?w=1024",
+ "https://naixianzhang.com/wp-content/uploads/2025/01/image-60.png?w=1024"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:24.716437",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:24.734887",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" architecture innovations...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:25.018376",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\n",
+ "metadata": "https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:25.038585",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://x.com/JohnKuhles1966/status/1883940370964439349\n",
+ "metadata": "https://x.com/JohnKuhles1966/status/1883940370964439349"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:25.055150",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reddit.com/r/NVDA_Stock/rising/\n",
+ "metadata": "https://www.reddit.com/r/NVDA_Stock/rising/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:25.073208",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://news.ycombinator.com/item?id=42847825\n",
+ "metadata": "https://news.ycombinator.com/item?id=42847825"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:25.090920",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.techmeme.com/250127/p14\n",
+ "metadata": "https://www.techmeme.com/250127/p14"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:25.104341",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:25.127088",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:27.550344",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:27.566274",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:27.579895",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:27.593404",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" training efficiency cost analysis \"Mixture-of-Experts\"...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:28.061613",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.facebook.com/groups/AIUGM/\n",
+ "metadata": "https://www.facebook.com/groups/AIUGM/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:28.404056",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\n",
+ "metadata": "https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:28.420710",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\n",
+ "metadata": "https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:28.439939",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:28.457880",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 3 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.283155",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.299992",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.320931",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.337826",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" impact AI industry \"Multi-Head Latent Attention\"...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.435839",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://forum.cursor.com/t/cursor-deepseek/43261\n",
+ "metadata": "https://forum.cursor.com/t/cursor-deepseek/43261"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.454594",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\n",
+ "metadata": "https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.470771",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reddit.com/r/singularity/comments/1ibngb8/good_way_of_comparing_robustness_between_r1_and/\n",
+ "metadata": "https://www.reddit.com/r/singularity/comments/1ibngb8/good_way_of_comparing_robustness_between_r1_and/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.487266",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://play.ht/blog/deepseek-v3-vs-r1-vs-coder/\n",
+ "metadata": "https://play.ht/blog/deepseek-v3-vs-r1-vs-coder/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.506342",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:29.519385",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:31.488435",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:31.506663",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 3 new images from 3 total images",
+ "metadata": [
+ "https://arbisoft.com/_next/image?url=%2F_next%2Fstatic%2Fmedia%2Fcontact.c5602fd6.png&w=1440&q=75",
+ "https://us1.discourse-cdn.com/flex020/uploads/cursor1/optimized/3X/0/d/0df9e1f23791d1b8e362cb2bcf434e2bac1e7a09_2_419x500.png",
+ "https://arbisoft.com/_next/image?url=https%3A%2F%2Fd1foa0aaimjyw4.cloudfront.net%2FBlog_Image_1_b56afb0c54.png&w=1920&q=75"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:31.525780",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:31.543245",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" performance benchmarks comparison \"open-source\"...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:31.670116",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: capable of self-improvement without human supervision, further establishing its position in the AI research community. The key innovations by deepseek? DeepSeek V3 R1 likely innovates by integrating: Weighted relevance scoring 2. Contextual Query refinement 3. Hierarchical Search Space Optimization 4. Attention driven ranking Another version from Claude 3.5 Sonnet: Searched web: deepseek r1 github code implementation reinforcement learning distillation GitHub \u2013 deepseek-ai/DeepSeek-R1 DeepSeek-R1 \u2014 Training Language Models to reason through Reinforcement \u2026 Aakash Nain \u2013 DeepSeek-R1 unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF \u2013 Hugging Face DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via DeepSeek-R1 \u2026 MoE Architecture Implementation # Pseudo-code representation of MoE layer class MoELayer: def __init__(self, num_experts=16, hidden_size=4096): self.experts = [Expert() for _ in range(num_experts)] self.router = Router(hidden_size, num_experts) def forward(self, x): # Router\n\nSource: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: its position in the AI research community. The key innovations by deepseek? DeepSeek V3 R1 likely innovates by integrating: Weighted relevance scoring 2. Contextual Query refinement 3. Hierarchical Search Space Optimization 4. Attention driven ranking Another version from Claude 3.5 Sonnet: Searched web: deepseek r1 github code implementation reinforcement learning distillation GitHub \u2013 deepseek-ai/DeepSeek-R1 DeepSeek-R1 \u2014 Training Language Models to reason through Reinforcement \u2026 Aakash Nain \u2013 DeepSeek-R1 unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF \u2013 Hugging Face DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via DeepSeek-R1 \u2026 MoE Architecture Implementation # Pseudo-code representation of MoE layer class MoELayer: def __init__(self, num_experts=16, hidden_size=4096): self.experts = [Expert() for _ in range(num_experts)] self.router = Router(hidden_size, num_experts) def forward(self, x): # Router selects top-k experts for each token expert_weights = self.router(x) # Shape:\n\nSource: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: its position in the AI research community. The key innovations by deepseek? DeepSeek V3 R1 likely innovates by integrating: Weighted relevance scoring 2. Contextual Query refinement 3. Hierarchical Search Space Optimization 4. Attention driven ranking Another version from Claude 3.5 Sonnet: Searched web: deepseek r1 github code implementation reinforcement learning distillation GitHub \u2013 deepseek-ai/DeepSeek-R1 DeepSeek-R1 \u2014 Training Language Models to reason through Reinforcement \u2026 Aakash Nain \u2013 DeepSeek-R1 unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF \u2013 Hugging Face DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via DeepSeek-R1 \u2026 MoE Architecture Implementation # Pseudo-code representation of MoE layer class MoELayer: def __init__(self, num_experts=16, hidden_size=4096): self.experts = [Expert() for _ in range(num_experts)] self.router = Router(hidden_size, num_experts) def forward(self, x): # Router selects top-k experts for each token expert_weights = self.router(x) # Shape:\n\nSource: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model\nJanuary 26, 2025January 26, 2025 / Naixian Zhang\nJanuary 26, 2025January 26, 2025\n/ Naixian Zhang\n\nSource: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: DeepSeek originated as a research initiative within High-Flyer, a Chinese quantitative hedge fund known for its AI-driven trading strategies. In April 2023, High-Flyer established DeepSeek as an independent entity dedicated to advancing artificial general intelligence (AGI), explicitly separating its research from the firm\u2019s financial operations. Wikipedia Since its inception, DeepSeek has developed several notable AI models. In May 2024, the company released DeepSeek-V2, which gained attention for its strong performance and cost-effectiveness, prompting competitive responses from major tech companies in China. Wikipedia More recently, in January 2025, DeepSeek introduced the R1 model, capable of self-improvement without human supervision, further establishing its position in the AI research community. The key innovations by deepseek? DeepSeek V3 R1 likely innovates by integrating: Weighted relevance scoring 2. Contextual Query refinement 3. Hierarchical Search Space Optimization 4.\n\nSource: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: DeepSeek originated as a research initiative within High-Flyer, a Chinese quantitative hedge fund known for its AI-driven trading strategies. In April 2023, High-Flyer established DeepSeek as an independent entity dedicated to advancing artificial general intelligence (AGI), explicitly separating its research from the firm\u2019s financial operations. Wikipedia\nSince its inception, DeepSeek has developed several notable AI models. In May 2024, the company released DeepSeek-V2, which gained attention for its strong performance and cost-effectiveness, prompting competitive responses from major tech companies in China. Wikipedia\nMore recently, in January 2025, DeepSeek introduced the R1 model, capable of self-improvement without human supervision, further establishing its position in the AI research community.\nThe key innovations by deepseek? DeepSeek V3 R1 likely innovates by integrating:\nWeighted relevance scoring\n2. Contextual Query refinement\n3. Hierarchical Search Space Optimization\n\nSource: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model January 26, 2025January 26, 2025 / Naixian Zhang DeepSeek originated as a research initiative within High-Flyer, a Chinese quantitative hedge fund known for its AI-driven trading strategies. In April 2023, High-Flyer established DeepSeek as an independent entity dedicated to advancing artificial general intelligence (AGI), explicitly separating its research from the firm\u2019s financial operations. Wikipedia Since its inception, DeepSeek has developed several notable AI models. In May 2024, the company released DeepSeek-V2, which gained attention for its strong performance and cost-effectiveness, prompting competitive responses from major tech companies in China. Wikipedia More recently, in January 2025, DeepSeek introduced the R1 model, capable of self-improvement without human supervision, further establishing its position in the AI research community. The key innovations by deepseek? DeepSeek V3 R1\n\nSource: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model January 26, 2025January 26, 2025 / Naixian Zhang DeepSeek originated as a research initiative within High-Flyer, a Chinese quantitative hedge fund known for its AI-driven trading strategies. In April 2023, High-Flyer established DeepSeek as an independent entity dedicated to advancing artificial general intelligence (AGI), explicitly separating its research from the firm\u2019s financial operations. Wikipedia Since its inception, DeepSeek has developed several notable AI models. In May 2024, the company released DeepSeek-V2, which gained attention for its strong performance and cost-effectiveness, prompting competitive responses from major tech companies in China. Wikipedia More recently, in January 2025, DeepSeek introduced the R1 model, capable of self-improvement without human supervision, further establishing its position in the AI research community. The key innovations by deepseek? DeepSeek V3 R1\n\nSource: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: Naixian Zhang Menu Skip to content Home About Contact Search Search for: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model January 26, 2025January 26, 2025 / Naixian Zhang DeepSeek originated as a research initiative within High-Flyer, a Chinese quantitative hedge fund known for its AI-driven trading strategies. In April 2023, High-Flyer established DeepSeek as an independent entity dedicated to advancing artificial general intelligence (AGI), explicitly separating its research from the firm\u2019s financial operations. Wikipedia Since its inception, DeepSeek has developed several notable AI models. In May 2024, the company released DeepSeek-V2, which gained attention for its strong performance and cost-effectiveness, prompting competitive responses from major tech companies in China. Wikipedia More recently, in January 2025, DeepSeek introduced the R1 model, capable of self-improvement without human supervision, further establishing its position in the\n\nSource: https://naixianzhang.com/2025/01/26/who-is-the-inventor-of-deepseek-and-what-are-the-key-innovation-of-deepseek-v3-r1-model/\nTitle: Who is the Inventor of Deepseek and What are the Key Innovation of Deepseek v3 R1 Model | Naixian Zhang\nContent: relevance scoring 2. Contextual Query refinement 3. Hierarchical Search Space Optimization 4. Attention driven ranking Another version from Claude 3.5 Sonnet: Searched web: deepseek r1 github code implementation reinforcement learning distillation GitHub \u2013 deepseek-ai/DeepSeek-R1 DeepSeek-R1 \u2014 Training Language Models to reason through Reinforcement \u2026 Aakash Nain \u2013 DeepSeek-R1 unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF \u2013 Hugging Face DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via DeepSeek-R1 \u2026 MoE Architecture Implementation # Pseudo-code representation of MoE layer class MoELayer: def __init__(self, num_experts=16, hidden_size=4096): self.experts = [Expert() for _ in range(num_experts)] self.router = Router(hidden_size, num_experts) def forward(self, x): # Router selects top-k experts for each token expert_weights = self.router(x) # Shape: [batch_size, seq_len, num_experts] top_k_weights, top_k_indices = select_top_k(expert_weights, k=2) # Only activate selected experts\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:44.378661",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: January 20th that rivals OpenAI's ChatGPT and Meta's Llama 3.1. DeepSeek utilizes \"inference-time computing,\" activating only necessary parts of its model for each query, which is more cost and energy efficient. This has garnered praise from tech figures like Marc Andreessen, who called it a \"profound gift to the world.\" DeepSeek, a Chinese AI startup specializing in open-source large language models (LLMs), has released two notable models: DeepSeek-V3 and DeepSeek-R1. DeepSeek-V3 LLM utilizes a Mixture of Experts (MoE) architecture, combining several smaller models with a total of 671 billion parameters, but activating only 37 billion parameters for each token during inference. This approach significantly enhances efficiency, estimated to be 10x better than some peers and 3-7x better considering other innovations. V3 incorporates further advancements like multi-head latent attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: January 20th that rivals OpenAI's ChatGPT and Meta's Llama 3.1. DeepSeek utilizes \"inference-time computing,\" activating only necessary parts of its model for each query, which is more cost and energy efficient. This has garnered praise from tech figures like Marc Andreessen, who called it a \"profound gift to the world.\" DeepSeek, a Chinese AI startup specializing in open-source large language models (LLMs), has released two notable models: DeepSeek-V3 and DeepSeek-R1. DeepSeek-V3 LLM utilizes a Mixture of Experts (MoE) architecture, combining several smaller models with a total of 671 billion parameters, but activating only 37 billion parameters for each token during inference. This approach significantly enhances efficiency, estimated to be 10x better than some peers and 3-7x better considering other innovations. V3 incorporates further advancements like multi-head latent attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives express doubt about DeepSeek's adoption by major U.S. businesses, citing concerns about using a Chinese startup for critical AI infrastructure and Nvidia's\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives express doubt about DeepSeek's adoption by major U.S. businesses, citing concerns about using a Chinese startup for critical AI infrastructure and Nvidia's\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives express doubt about DeepSeek's adoption by major U.S. businesses, citing concerns about using a Chinese startup for critical AI infrastructure and Nvidia's\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives express doubt about DeepSeek's adoption by major U.S. businesses, citing concerns about using a Chinese startup for critical AI infrastructure and Nvidia's\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: some peers and 3-7x better considering other innovations. V3 incorporates further advancements like multi-head latent attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S.\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: some peers and 3-7x better considering other innovations. V3 incorporates further advancements like multi-head latent attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:32:49.668163",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: comparison breakdown:1. PerformanceDeepSeek-R1 outperforms OpenAI-o1 on key benchmarks like AIME, MATH-500, and SWE-bench.Demonstrates higher accuracy and faster response times for complex problem-solving tasks.Excels in logical reasoning and analytical capabilities, making it ideal for reasoning-focused applications. 2. Development Time and CostDeepSeek-R1 was developed significantly faster due to optimized training techniques and efficient computation.Its development cost is 90-95% lower than OpenAI-o1, reducing dependency on expensive GPU clusters.OpenAI-o1 required years of high-cost iterative training with massive datasets and computational resources. 3. Cost of UseDue to its high cost, OpenAI-o1 is only available to large businesses with sizable budgets.DeepSeek-R1 offers comparable performance at a fraction of the cost, making it affordable for startups and developers.R1 is open-source and available under an MIT license on Hugging Face, democratizing advanced AI. 4. Technical\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: 4. Technical Efficiency\nDeepSeek-R1 uses an optimized architecture to handle complex reasoning with fewer GPUs and lower energy consumption.\nIts computation-focused design allows for \u201clonger thinking\u201d without resource-intensive processes.\nOpenAI-o1 relies on vast computational power for fine-tuning, which means higher costs and less scalability.\nDeepSeek-R1 Benchmark Brilliance\nDeepSeek-R1 has delivered impressive results across multiple benchmarks, demonstrating its competitive edge in various domains:\nMathematics - Achieved 79.8% (Pass@1) on AIME 2024 and an outstanding 93% on MATH-500.\nCoding - Ranked in the 96.3rd percentile on Codeforces.\nGeneral Knowledge - Scored 90.8% on MMLU and 71.5% on GPQA Diamond.\nWriting - Secured 87.6% on AlpacaEval 2.0 for question answering.\nThese numbers place DeepSeek-R1 in line with industry leaders, like OpenAI and Meta. In some areas, it even surpasses them, proving that open-source models can punch above their weight.\n\nSource: https://forum.cursor.com/t/cursor-deepseek/43261\nTitle: Cursor & DeepSeek - Feature Requests - Cursor - Community Forum\nContent: Deepseek R1 is performing much better than sonnet 3.5 so far, I have been playing with it and it\u2019s great. I switch back to sonnet and it feel dumb compared to it. I think deepseek R1 is the new king for coding.\ntemp-sunsetmango January 25, 2025, 1:47am 8 if they make any money off the 40% requests they need to seriously reconsider that I just blew through $20 of fast o1 and got almsot nothing done but deepseek is doing it better for free or am i just idk singularity\ntemp-sunsetmango January 25, 2025, 1:47am 8\nJanuary 25, 2025, 1:47am 8\nif they make any money off the 40% requests they need to seriously reconsider that I just blew through $20 of fast o1 and got almsot nothing done but deepseek is doing it better for free or am i just idk singularity\nif they make any money off the 40% requests they need to seriously reconsider that\nI just blew through $20 of fast o1 and got almsot nothing done\nbut deepseek is doing it better for free\nor am i just idk singularity\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: Demonstrates higher accuracy and faster response times for complex problem-solving tasks.\nExcels in logical reasoning and analytical capabilities, making it ideal for reasoning-focused applications.\n2. Development Time and Cost\nDeepSeek-R1 was developed significantly faster due to optimized training techniques and efficient computation.\nIts development cost is 90-95% lower than OpenAI-o1, reducing dependency on expensive GPU clusters.\nOpenAI-o1 required years of high-cost iterative training with massive datasets and computational resources.\n3. Cost of Use\nDue to its high cost, OpenAI-o1 is only available to large businesses with sizable budgets.\nDeepSeek-R1 offers comparable performance at a fraction of the cost, making it affordable for startups and developers.\nR1 is open-source and available under an MIT license on Hugging Face, democratizing advanced AI.\n4. Technical Efficiency\n\nSource: https://play.ht/blog/deepseek-v3-vs-r1-vs-coder/\nTitle: DeepSeek-V3, R1, Coder: What's The Differences?\nContent: DeepSeek-V3, R1, Coder: What\u2019s the Differences?\nDeepSeek-V3, R1, Coder: What\u2019s the Differences? With a lot of buzz around DeepSeek-V3, R1, Coder, just what are the Differences between each? in API January 27, 2025 4 min read Share this post\nDeepSeek-V3, R1, Coder: What\u2019s the Differences? With a lot of buzz around DeepSeek-V3, R1, Coder, just what are the Differences between each? in API January 27, 2025 4 min read Share this post\nDeepSeek-V3, R1, Coder: What\u2019s the Differences? With a lot of buzz around DeepSeek-V3, R1, Coder, just what are the Differences between each? in API January 27, 2025 4 min read\nDeepSeek-V3, R1, Coder: What\u2019s the Differences? With a lot of buzz around DeepSeek-V3, R1, Coder, just what are the Differences between each?\nWith a lot of buzz around DeepSeek-V3, R1, Coder, just what are the Differences between each?\nin API January 27, 2025 4 min read\nin API January 27, 2025 4 min read\nJanuary 27, 2025 4 min read\nShare this post\nShare this post\nShare this post\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: and available under an MIT license on Hugging Face, democratizing advanced AI. 4. Technical EfficiencyDeepSeek-R1 uses an optimized architecture to handle complex reasoning with fewer GPUs and lower energy consumption.Its computation-focused design allows for \u201clonger thinking\u201d without resource-intensive processes.OpenAI-o1 relies on vast computational power for fine-tuning, which means higher costs and less scalability. DeepSeek-R1 Benchmark BrillianceDeepSeek-R1 has delivered impressive results across multiple benchmarks, demonstrating its competitive edge in various domains: Mathematics - Achieved 79.8% (Pass@1) on AIME 2024 and an outstanding 93% on MATH-500.Coding - Ranked in the 96.3rd percentile on Codeforces.General Knowledge - Scored 90.8% on MMLU and 71.5% on GPQA Diamond.Writing - Secured 87.6% on AlpacaEval 2.0 for question answering. These numbers place DeepSeek-R1 in line with industry leaders, like OpenAI and Meta. In some areas, it even surpasses them, proving that\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: it even more disruptive. DeepSeek isn\u2019t just competing\u2014it\u2019s rewriting the AI playbook. The Training ApproachUnlike traditional models, DeepSeek-R1 skips the usual supervised fine-tuning (SFT) and dives straight into reinforcement learning. This means it learns to reason and solve problems independently\u2014no hand-holding required. The result is a model that is capable of self-verification, reflection, and creating detailed chain-of-thought (CoT) responses. This isn\u2019t just theory. DeepSeek-R1 is the first open-source model to prove that advanced reasoning in large language models can be achieved purely with RL. It\u2019s a game-changer for researchers and developers looking to push the boundaries of what AI can do. DeepSeek-R1 Vs OpenAI-O1 Let\u2019s take a closer look at how DeepSeek-R1 stacks up against OpenAI-o1 in terms of various benchmarks in this graph: Source: HuggingFace Here is a further comparison breakdown:1. PerformanceDeepSeek-R1 outperforms OpenAI-o1 on key benchmarks like AIME,\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: capabilities, making it ideal for reasoning-focused applications. 2. Development Time and CostDeepSeek-R1 was developed significantly faster due to optimized training techniques and efficient computation.Its development cost is 90-95% lower than OpenAI-o1, reducing dependency on expensive GPU clusters.OpenAI-o1 required years of high-cost iterative training with massive datasets and computational resources. 3. Cost of UseDue to its high cost, OpenAI-o1 is only available to large businesses with sizable budgets.DeepSeek-R1 offers comparable performance at a fraction of the cost, making it affordable for startups and developers.R1 is open-source and available under an MIT license on Hugging Face, democratizing advanced AI. 4. Technical EfficiencyDeepSeek-R1 uses an optimized architecture to handle complex reasoning with fewer GPUs and lower energy consumption.Its computation-focused design allows for \u201clonger thinking\u201d without resource-intensive processes.OpenAI-o1 relies on vast\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: design allows for \u201clonger thinking\u201d without resource-intensive processes.OpenAI-o1 relies on vast computational power for fine-tuning, which means higher costs and less scalability. DeepSeek-R1 Benchmark BrillianceDeepSeek-R1 has delivered impressive results across multiple benchmarks, demonstrating its competitive edge in various domains: Mathematics - Achieved 79.8% (Pass@1) on AIME 2024 and an outstanding 93% on MATH-500.Coding - Ranked in the 96.3rd percentile on Codeforces.General Knowledge - Scored 90.8% on MMLU and 71.5% on GPQA Diamond.Writing - Secured 87.6% on AlpacaEval 2.0 for question answering. These numbers place DeepSeek-R1 in line with industry leaders, like OpenAI and Meta. In some areas, it even surpasses them, proving that open-source models can punch above their weight. DeepSeek\u2019s founder, Liang Wenfeng is said to be one of the few who puts right and wrong before profits and losses. He explains, \u201cIf the goal is to make applications, using the Llama structure for\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: means it learns to reason and solve problems independently\u2014no hand-holding required. The result is a model that is capable of self-verification, reflection, and creating detailed chain-of-thought (CoT) responses. This isn\u2019t just theory. DeepSeek-R1 is the first open-source model to prove that advanced reasoning in large language models can be achieved purely with RL. It\u2019s a game-changer for researchers and developers looking to push the boundaries of what AI can do. DeepSeek-R1 Vs OpenAI-O1 Let\u2019s take a closer look at how DeepSeek-R1 stacks up against OpenAI-o1 in terms of various benchmarks in this graph: Source: HuggingFace Here is a further comparison breakdown:1. PerformanceDeepSeek-R1 outperforms OpenAI-o1 on key benchmarks like AIME, MATH-500, and SWE-bench.Demonstrates higher accuracy and faster response times for complex problem-solving tasks.Excels in logical reasoning and analytical capabilities, making it ideal for reasoning-focused applications. 2. Development Time and\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:39.167438",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\nTitle: DeepSeek-R1: The Next Leap in AI Reasoning and Logical Inference\nContent: Like its predecessor DeepSeek-V3, R1 utilizes a Mixture-of-Experts (MoE) architecture, which allows it to activate only a portion of its network per query. The benefits include:\nLike its predecessor DeepSeek-V3, R1 utilizes a\narchitecture, which allows it to\nactivate only a portion of its network per query\n. The benefits include:\nLower computational costs during inference. Higher efficiency in processing reasoning tasks. The ability to scale effectively without requiring expensive hardware upgrades.\nLower computational costs during inference. Higher efficiency in processing reasoning tasks. The ability to scale effectively without requiring expensive hardware upgrades.\nLower computational costs during inference. Higher efficiency in processing reasoning tasks. The ability to scale effectively without requiring expensive hardware upgrades.\nLower computational costs during inference.\nLower computational costs\nHigher efficiency in processing reasoning tasks.\n\nSource: https://www.techmeme.com/250127/p14\nTitle: Techmeme: DeepSeek could be an extinction-level event for venture capital firms that went all-in on foundational model companies; investors say they are not panicking (Dan Primack/Axios)\nContent: Simpler way to understand DeepSeek weren't lying about 50k H100s or training costs for V3/R1 We have the model, its a 35b active, 640b Mixture of Experts We know that spec is 2-3m hours to train Models get worse with more compute after a certain point! https://www.harmdevries.com/ ... [image]Jaana Dogan / @rakyll: DeepSeek codebases are clean and well authored. I learned a lot by reading their work just over the weekend. You cannot deny that they are raising the bar, and wish we focus on quality instead of short sighted incremental work.@wordgrammer: Okay. Thanks for the nerd snipe guys. I spent the day learning exactly how DeepSeek trained at 1/30 the price, instead of working on my pitch deck. The tl;dr to everything, according to their papers:Karma / @0xkarmatic: The visible chains of thought in DeepSeek r1 makes it so easy to prompt it as you can clearly tell when your instructions were ambiguous. Missed opportunity from OpenAI to make their COTs visible. Now that the genie is out\n\nSource: https://www.techmeme.com/250127/p14\nTitle: Techmeme: DeepSeek could be an extinction-level event for venture capital firms that went all-in on foundational model companies; investors say they are not panicking (Dan Primack/Axios)\nContent: Simpler way to understand DeepSeek weren't lying about 50k H100s or training costs for V3/R1 We have the model, its a 35b active, 640b Mixture of Experts We know that spec is 2-3m hours to train Models get worse with more compute after a certain point! https://www.harmdevries.com/ ... [image]Jaana Dogan / @rakyll: DeepSeek codebases are clean and well authored. I learned a lot by reading their work just over the weekend. You cannot deny that they are raising the bar, and wish we focus on quality instead of short sighted incremental work.@wordgrammer: Okay. Thanks for the nerd snipe guys. I spent the day learning exactly how DeepSeek trained at 1/30 the price, instead of working on my pitch deck. The tl;dr to everything, according to their papers:Karma / @0xkarmatic: The visible chains of thought in DeepSeek r1 makes it so easy to prompt it as you can clearly tell when your instructions were ambiguous. Missed opportunity from OpenAI to make their COTs visible. Now that the genie is out\n\nSource: https://www.techmeme.com/250127/p14\nTitle: Techmeme: DeepSeek could be an extinction-level event for venture capital firms that went all-in on foundational model companies; investors say they are not panicking (Dan Primack/Axios)\nContent: Simpler way to understand DeepSeek weren't lying about 50k H100s or training costs for V3/R1 We have the model, its a 35b active, 640b Mixture of Experts We know that spec is 2-3m hours to train Models get worse with more compute after a certain point! https://www.harmdevries.com/ ... [image]Jaana Dogan / @rakyll: DeepSeek codebases are clean and well authored. I learned a lot by reading their work just over the weekend. You cannot deny that they are raising the bar, and wish we focus on quality instead of short sighted incremental work.@wordgrammer: Okay. Thanks for the nerd snipe guys. I spent the day learning exactly how DeepSeek trained at 1/30 the price, instead of working on my pitch deck. The tl;dr to everything, according to their papers:Karma / @0xkarmatic: The visible chains of thought in DeepSeek r1 makes it so easy to prompt it as you can clearly tell when your instructions were ambiguous. Missed opportunity from OpenAI to make their COTs visible. Now that the genie is out\n\nSource: https://www.techmeme.com/250127/p14\nTitle: Techmeme: DeepSeek could be an extinction-level event for venture capital firms that went all-in on foundational model companies; investors say they are not panicking (Dan Primack/Axios)\nContent: Simpler way to understand DeepSeek weren't lying about 50k H100s or training costs for V3/R1 We have the model, its a 35b active, 640b Mixture of Experts We know that spec is 2-3m hours to train Models get worse with more compute after a certain point! https://www.harmdevries.com/ ... [image]Jaana Dogan / @rakyll: DeepSeek codebases are clean and well authored. I learned a lot by reading their work just over the weekend. You cannot deny that they are raising the bar, and wish we focus on quality instead of short sighted incremental work.@wordgrammer: Okay. Thanks for the nerd snipe guys. I spent the day learning exactly how DeepSeek trained at 1/30 the price, instead of working on my pitch deck. The tl;dr to everything, according to their papers:Karma / @0xkarmatic: The visible chains of thought in DeepSeek r1 makes it so easy to prompt it as you can clearly tell when your instructions were ambiguous. Missed opportunity from OpenAI to make their COTs visible. Now that the genie is out\n\nSource: https://www.techmeme.com/250127/p14\nTitle: Techmeme: DeepSeek could be an extinction-level event for venture capital firms that went all-in on foundational model companies; investors say they are not panicking (Dan Primack/Axios)\nContent: @teknium1: Its crazy deepseek direct api has seemingly no rate limits of any kind\nEmad / @emostaque: Simpler way to understand DeepSeek weren't lying about 50k H100s or training costs for V3/R1 We have the model, its a 35b active, 640b Mixture of Experts We know that spec is 2-3m hours to train Models get worse with more compute after a certain point! https://www.harmdevries.com/ ... [image]\nJaana Dogan / @rakyll: DeepSeek codebases are clean and well authored. I learned a lot by reading their work just over the weekend. You cannot deny that they are raising the bar, and wish we focus on quality instead of short sighted incremental work.\n@wordgrammer: Okay. Thanks for the nerd snipe guys. I spent the day learning exactly how DeepSeek trained at 1/30 the price, instead of working on my pitch deck. The tl;dr to everything, according to their papers:\n\nSource: https://www.techmeme.com/250127/p14\nTitle: Techmeme: DeepSeek could be an extinction-level event for venture capital firms that went all-in on foundational model companies; investors say they are not panicking (Dan Primack/Axios)\nContent: the unreleased OpenAI o3 at the same cost at coding on Codeforces and ARC-AGI! [image]Nat Friedman / @natfriedman: The deepseek team is obviously really good. China is full of talented engineers. Every other take is cope. Sorry.Morgan Brown / @morganb: 7/ The results are mind-blowing: - Training cost: $100M \u2192 $5M - GPUs needed: 100,000 \u2192 2,000 - API costs: 95% cheaper - Can run on gaming GPUs instead of data center hardwareOle Lehmann / @itsolelehmann: DeepSeek is a 100x more based name than ChatGpt or ClaudeEthan Mollick / @emollick: I think the market will adjust to any per token cost decrease brought on by DeepSeek quite quickly. Costs for GPT-4 level intelligence dropped by 1000x in the last 18 months. A 95% price drop in reasoning models seems not to be something that will break the labs.@teknium1: Its crazy deepseek direct api has seemingly no rate limits of any kindEmad / @emostaque: Simpler way to understand DeepSeek weren't lying about 50k H100s or training costs for V3/R1 We\n\nSource: https://www.techmeme.com/250127/p14\nTitle: Techmeme: DeepSeek could be an extinction-level event for venture capital firms that went all-in on foundational model companies; investors say they are not panicking (Dan Primack/Axios)\nContent: the unreleased OpenAI o3 at the same cost at coding on Codeforces and ARC-AGI! [image]Nat Friedman / @natfriedman: The deepseek team is obviously really good. China is full of talented engineers. Every other take is cope. Sorry.Morgan Brown / @morganb: 7/ The results are mind-blowing: - Training cost: $100M \u2192 $5M - GPUs needed: 100,000 \u2192 2,000 - API costs: 95% cheaper - Can run on gaming GPUs instead of data center hardwareOle Lehmann / @itsolelehmann: DeepSeek is a 100x more based name than ChatGpt or ClaudeEthan Mollick / @emollick: I think the market will adjust to any per token cost decrease brought on by DeepSeek quite quickly. Costs for GPT-4 level intelligence dropped by 1000x in the last 18 months. A 95% price drop in reasoning models seems not to be something that will break the labs.@teknium1: Its crazy deepseek direct api has seemingly no rate limits of any kindEmad / @emostaque: Simpler way to understand DeepSeek weren't lying about 50k H100s or training costs for V3/R1 We\n\nSource: https://www.techmeme.com/250127/p14\nTitle: Techmeme: DeepSeek could be an extinction-level event for venture capital firms that went all-in on foundational model companies; investors say they are not panicking (Dan Primack/Axios)\nContent: the unreleased OpenAI o3 at the same cost at coding on Codeforces and ARC-AGI! [image]Nat Friedman / @natfriedman: The deepseek team is obviously really good. China is full of talented engineers. Every other take is cope. Sorry.Morgan Brown / @morganb: 7/ The results are mind-blowing: - Training cost: $100M \u2192 $5M - GPUs needed: 100,000 \u2192 2,000 - API costs: 95% cheaper - Can run on gaming GPUs instead of data center hardwareOle Lehmann / @itsolelehmann: DeepSeek is a 100x more based name than ChatGpt or ClaudeEthan Mollick / @emollick: I think the market will adjust to any per token cost decrease brought on by DeepSeek quite quickly. Costs for GPT-4 level intelligence dropped by 1000x in the last 18 months. A 95% price drop in reasoning models seems not to be something that will break the labs.@teknium1: Its crazy deepseek direct api has seemingly no rate limits of any kindEmad / @emostaque: Simpler way to understand DeepSeek weren't lying about 50k H100s or training costs for V3/R1 We\n\nSource: https://www.techmeme.com/250127/p14\nTitle: Techmeme: DeepSeek could be an extinction-level event for venture capital firms that went all-in on foundational model companies; investors say they are not panicking (Dan Primack/Axios)\nContent: the unreleased OpenAI o3 at the same cost at coding on Codeforces and ARC-AGI! [image]Nat Friedman / @natfriedman: The deepseek team is obviously really good. China is full of talented engineers. Every other take is cope. Sorry.Morgan Brown / @morganb: 7/ The results are mind-blowing: - Training cost: $100M \u2192 $5M - GPUs needed: 100,000 \u2192 2,000 - API costs: 95% cheaper - Can run on gaming GPUs instead of data center hardwareOle Lehmann / @itsolelehmann: DeepSeek is a 100x more based name than ChatGpt or ClaudeEthan Mollick / @emollick: I think the market will adjust to any per token cost decrease brought on by DeepSeek quite quickly. Costs for GPT-4 level intelligence dropped by 1000x in the last 18 months. A 95% price drop in reasoning models seems not to be something that will break the labs.@teknium1: Its crazy deepseek direct api has seemingly no rate limits of any kindEmad / @emostaque: Simpler way to understand DeepSeek weren't lying about 50k H100s or training costs for V3/R1 We\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:39.189887",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.0208465",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:39.208249",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_draft_sections",
+ "output": "\ud83d\udcd1 Generating draft section titles for 'DeepSeek V3 R1: Architecture and Innovations'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:47.647492",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "draft_sections_generated",
+ "output": "\ud83d\uddc2\ufe0f Draft section titles generated for 'DeepSeek V3 R1: Architecture and Innovations'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:47.668990",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_relevant_written_content",
+ "output": "\ud83d\udd0e Getting relevant written content based on query: DeepSeek V3 R1: Architecture and Innovations...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:47.704690",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'DeepSeek V3 R1: Architecture and Innovations'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:57.587073",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'DeepSeek V3 R1: Architecture and Innovations'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:57.635839",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'Performance Benchmarks and Impact on the AI Industry'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:57.652395",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83e\udd16 AI Research Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:33:57.662651",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: Performance Benchmarks and Impact on the AI Industry...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T21:34:05.668766",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0,
+ "type": "images",
+ "output": "[\"https://naixianzhang.com/wp-content/uploads/2025/01/image-57.png?w=1024\", \"https://naixianzhang.com/wp-content/uploads/2025/01/image-58.png?w=1024\", \"https://naixianzhang.com/wp-content/uploads/2025/01/image-59.png?w=1024\", \"https://naixianzhang.com/wp-content/uploads/2025/01/image-60.png?w=1024\", \"https://arbisoft.com/_next/image?url=%2F_next%2Fstatic%2Fmedia%2Fcontact.c5602fd6.png&w=1440&q=75\", \"https://us1.discourse-cdn.com/flex020/uploads/cursor1/optimized/3X/0/d/0df9e1f23791d1b8e362cb2bcf434e2bac1e7a09_2_419x500.png\", \"https://arbisoft.com/_next/image?url=https%3A%2F%2Fd1foa0aaimjyw4.cloudfront.net%2FBlog_Image_1_b56afb0c54.png&w=1920&q=75\"]",
+ "content": "selected_images",
+ "metadata": [
+ "https://naixianzhang.com/wp-content/uploads/2025/01/image-57.png?w=1024",
+ "https://naixianzhang.com/wp-content/uploads/2025/01/image-58.png?w=1024",
+ "https://naixianzhang.com/wp-content/uploads/2025/01/image-59.png?w=1024",
+ "https://naixianzhang.com/wp-content/uploads/2025/01/image-60.png?w=1024",
+ "https://arbisoft.com/_next/image?url=%2F_next%2Fstatic%2Fmedia%2Fcontact.c5602fd6.png&w=1440&q=75",
+ "https://us1.discourse-cdn.com/flex020/uploads/cursor1/optimized/3X/0/d/0df9e1f23791d1b8e362cb2bcf434e2bac1e7a09_2_419x500.png",
+ "https://arbisoft.com/_next/image?url=https%3A%2F%2Fd1foa0aaimjyw4.cloudfront.net%2FBlog_Image_1_b56afb0c54.png&w=1920&q=75"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1738168903_Give me a detailed research report about Dee.docx b/outputs/task_1738168903_Give me a detailed research report about Dee.docx
new file mode 100644
index 0000000000000000000000000000000000000000..30be6d0080e47a4f06f91a412acd787931ae8218
Binary files /dev/null and b/outputs/task_1738168903_Give me a detailed research report about Dee.docx differ
diff --git a/outputs/task_1738168903_Give me a detailed research report about Dee.md b/outputs/task_1738168903_Give me a detailed research report about Dee.md
new file mode 100644
index 0000000000000000000000000000000000000000..998d196a62fa66e611ca0ca07563c903cde7f547
--- /dev/null
+++ b/outputs/task_1738168903_Give me a detailed research report about Dee.md
@@ -0,0 +1,89 @@
+# DeepSeek's Disruption: The Rise of R1 and V3, Reshaping the AI Landscape
+
+The artificial intelligence field is undergoing a rapid transformation, driven by the relentless pursuit of more powerful and efficient models. At the forefront of this disruption is DeepSeek, a Chinese AI company that has captured significant attention with its DeepSeek-R1 and DeepSeek-V3 models. Released in January 2025, DeepSeek-R1, an open-source 671B parameter Mixture-of-Experts (MoE) model ([https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model](https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model)), has sent ripples through the industry by achieving near-parity performance with OpenAI's O1 model at a fraction of the training cost (reportedly $6 million). This cost-effectiveness, coupled with its open-source nature under the MIT license ([https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/](https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/)), has democratized access to advanced AI capabilities, empowering smaller developers and startups. DeepSeek-V3, another 671B parameter MoE model ([https://www.deeplearning.ai/the-batch/deepseek-v3-redefines-llm-performance-and-cost-efficiency/](https://www.deeplearning.ai/the-batch/deepseek-v3-redefines-llm-performance-and-cost-efficiency/)), builds upon this foundation, boasting state-of-the-art performance across various benchmarks while maintaining efficient training costs and incorporating innovations like multi-token prediction and a 128K context window. This report delves into the technical details of DeepSeek-R1 and V3, analyzes their performance benchmarks against competitors like GPT-4o ([https://docsbot.ai/models/compare/deepseek-v3/gpt-4o](https://docsbot.ai/models/compare/deepseek-v3/gpt-4o)), and examines their profound impact on the AI industry, including the potential for a price war, the rise of open-source AI, and the challenges DeepSeek faces in navigating the evolving competitive landscape.
+
+## Table of Contents
+
+- DeepSeek V3: Model Architecture and Performance
+ - Mixture of Experts (MoE) Architecture and its Advantages
+ - Multihead Latent Attention (MLA) for Memory OptimizationDeepSeek-V3 incorporates Multihead Latent Attention (MLA) to further reduce memory and computational demands. Traditional multihead attention involves projecting the key, query, and value matrices into multiple heads, allowing the model to attend to different parts of the input sequence. MLA optimizes this process by projecting these matrices into a lower-dimensional latent space before applying multihead attention. This dimensionality reduction significantly decreases the memory footprint and computational cost of the attention mechanism, especially beneficial for long sequences. This innovation allows DeepSeek-V3 to handle complex tasks with greater efficiency compared to models relying on standard multihead attention. (https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA )
+ - Multi-Token Prediction (MTP) and FP8 Quantization: Enhancing Throughput and Memory Efficiency
+ - Performance Benchmarks and Comparisons
+ - Training Methodology and Efficiency
+- Impact on the AI Industry: Cost Efficiency and Democratization
+ - Redefining Cost-Performance Ratios in Large Language Models
+ - Open-Source Paradigm Shift and Collaborative Development
+ - Challenging the Hardware Dependency and Promoting AccessibilityDeepSeek-R1's efficient resource utilization challenges the prevailing dependence on extensive GPU clusters for training large language models. While not entirely eliminating the need for powerful hardware, DeepSeek demonstrates that significant advancements can be achieved with a more judicious allocation of resources. This has implications for the accessibility of AI research, potentially enabling organizations with limited computational resources to participate in the development of cutting-edge AI models. This reduced reliance on high-end hardware also has environmental benefits, lowering the energy consumption associated with AI training and promoting more sustainable practices within the industry.
+ - Fostering Innovation in Resource-Constrained Environments
+ - Potential Geopolitical Implications and Market Dynamics
+
+
+
+
+
+## DeepSeek V3: Model Architecture and Performance
+
+### Mixture of Experts (MoE) Architecture and its Advantages
+
+DeepSeek-V3 employs a Mixture of Experts (MoE) architecture, a crucial element contributing to its efficiency and performance. Unlike traditional monolithic models, MoE divides the model into a collection of "expert" networks, each specializing in different aspects of the data. For each input token, a "gating network" decides which experts are most relevant and activates only those, leaving the rest dormant. This selective activation drastically reduces computational costs during inference, as only a fraction of the model's parameters are engaged for each token. ([https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/](https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/)) DeepSeek claims this approach makes V3 10x more efficient than some peers and 3-7x better considering other innovations. ([https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en](https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en)) This efficiency gain is particularly significant for large language models, which often contain hundreds of billions or even trillions of parameters. DeepSeek implemented a specialized load balancing loss function to ensure even utilization of experts across distributed hardware, further optimizing performance and preventing bottlenecks. ([https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA](https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA))
+
+### Multihead Latent Attention (MLA) for Memory OptimizationDeepSeek-V3 incorporates Multihead Latent Attention (MLA) to further reduce memory and computational demands. Traditional multihead attention involves projecting the key, query, and value matrices into multiple heads, allowing the model to attend to different parts of the input sequence. MLA optimizes this process by projecting these matrices into a lower-dimensional latent space before applying multihead attention. This dimensionality reduction significantly decreases the memory footprint and computational cost of the attention mechanism, especially beneficial for long sequences. This innovation allows DeepSeek-V3 to handle complex tasks with greater efficiency compared to models relying on standard multihead attention. ([https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA](https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA))
+
+### Multi-Token Prediction (MTP) and FP8 Quantization: Enhancing Throughput and Memory Efficiency
+
+DeepSeek-V3 employs Multi-Token Prediction (MTP), enabling the model to generate multiple tokens in parallel rather than sequentially. This parallel processing significantly improves throughput, increasing the speed of text generation by a factor of 2-3x. This enhancement is particularly valuable for applications requiring real-time or near real-time text generation, such as conversational AI or live translation. ([https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA](https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA)) Furthermore, DeepSeek-V3 utilizes FP8 quantization, a technique that reduces the precision of the model's parameters from 32-bit floating point (FP32) to 8-bit floating point (FP8). This reduction in precision leads to a substantial decrease in memory usage, up to 75% compared to FP32, without significantly compromising model accuracy. DeepSeek achieves this by employing adaptive bit-width scaling and loss-aware quantization techniques, ensuring stability and minimizing performance degradation. ([https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA](https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA))
+
+### Performance Benchmarks and Comparisons
+
+DeepSeek-V3 boasts impressive performance across various benchmarks. In the English Massive Multitask Language Understanding (MMLU) benchmark, it achieves an accuracy of 88.5%, surpassing several other leading large language models. ([https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/](https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/)) On the HumanEval-Mul coding benchmark, it achieves a pass rate of 82.6%, demonstrating its strong coding capabilities. These results indicate that DeepSeek-V3's architectural innovations, combined with its efficient training methodology, translate into tangible performance gains. It’s important to note that while these benchmarks provide valuable insights, they should be interpreted with caution, as factors like data selection and evaluation metrics can influence the results. Furthermore, comparisons across different models should consider variations in training data, model size, and evaluation protocols.
+
+### Training Methodology and Efficiency
+
+DeepSeek-V3's training process is remarkably efficient, both in terms of time and cost. The company reports a development cost of approximately $6 million, significantly lower than the development costs of many comparable large language models. ([https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/](https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/)) This cost-effectiveness is attributed to the model's efficient architecture and training methodology. DeepSeek utilizes a multi-stage training approach combining Supervised Fine-tuning (SFT) and Reinforcement Learning (RL). Specifically, they employ Group Relative Policy Optimization (GRPO), a more efficient alternative to Proximal Policy Optimization (PPO) and Detached Policy Optimization (DPO) for reinforcement learning. ([https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA](https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA)) This innovative training approach allows DeepSeek to achieve high performance with fewer computational resources, contributing to the model's overall efficiency. DeepSeek-V3's training data comprises 14.8 trillion tokens, a substantial dataset that contributes to its broad knowledge base and strong performance across various tasks. The combination of a large training dataset, efficient architecture, and innovative training methodology positions DeepSeek-V3 as a highly competitive model in the large language model landscape.
+
+
+## Impact on the AI Industry: Cost Efficiency and Democratization
+
+### Redefining Cost-Performance Ratios in Large Language Models
+
+DeepSeek-R1's development cost of approximately $6 million ([https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/](https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/)) significantly challenges the prevailing notion that cutting-edge AI requires exorbitant expenditure. This contrasts sharply with the estimated $100 million development cost of OpenAI's GPT-4 ([https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1](https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1)), highlighting DeepSeek's disruptive approach to cost efficiency. This achievement is attributed not only to architectural innovations like the Mixture of Experts (MoE) and Multihead Latent Attention (MLA) but also to the strategic application of reinforcement learning with Group Relative Policy Optimization (GRPO) ([https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA](https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA)). This combination allows DeepSeek to achieve comparable or superior performance to its competitors while drastically reducing the financial barrier to entry for advanced AI development. This shift in the cost-performance landscape has significant implications for the future of AI research and development, potentially leading to a greater emphasis on resource optimization and innovative training methodologies.
+
+### Open-Source Paradigm Shift and Collaborative Development
+
+DeepSeek-R1's open-source nature under the MIT license ([https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost](https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost)) represents a significant departure from the closed-source models prevalent in the industry. This open approach fosters community involvement, allowing researchers and developers to scrutinize, modify, and build upon the model's architecture and training methods. This transparency promotes rapid iteration and collaborative innovation, potentially accelerating the overall pace of AI development. While previous open-source LLMs have existed, DeepSeek-R1's competitive performance combined with its open availability distinguishes it as a potential catalyst for a broader shift towards community-driven AI development. This open-source strategy also democratizes access to advanced AI capabilities, empowering smaller companies and individual researchers who may lack the resources to develop such models independently.
+
+### Challenging the Hardware Dependency and Promoting AccessibilityDeepSeek-R1's efficient resource utilization challenges the prevailing dependence on extensive GPU clusters for training large language models. While not entirely eliminating the need for powerful hardware, DeepSeek demonstrates that significant advancements can be achieved with a more judicious allocation of resources. This has implications for the accessibility of AI research, potentially enabling organizations with limited computational resources to participate in the development of cutting-edge AI models. This reduced reliance on high-end hardware also has environmental benefits, lowering the energy consumption associated with AI training and promoting more sustainable practices within the industry.
+
+### Fostering Innovation in Resource-Constrained Environments
+
+DeepSeek-R1's efficiency opens up new possibilities for AI deployment in resource-constrained environments, such as edge devices and mobile platforms. Its optimized architecture and reduced computational demands make it suitable for applications where processing power and memory are limited. This expands the potential reach of AI beyond traditional data centers, enabling innovative applications in areas like IoT, mobile computing, and on-device personalized AI experiences. This focus on efficiency could drive the development of specialized hardware and software solutions tailored for resource-constrained deployments, further accelerating the adoption of AI in diverse contexts.
+
+### Potential Geopolitical Implications and Market Dynamics
+
+DeepSeek-R1's emergence as a strong contender in the AI landscape has geopolitical implications, particularly concerning the balance of power in AI development. Its origin in China challenges the dominance of U.S.-based companies like OpenAI and Google, potentially leading to a more multipolar AI landscape. This shift could influence international collaborations, data sharing agreements, and the development of AI regulations. Furthermore, DeepSeek's cost-effective approach could pressure established players to re-evaluate their pricing strategies and invest in more efficient training methodologies. This increased competition could ultimately benefit consumers and businesses by driving down the cost of AI services and accelerating the development of more accessible and powerful AI solutions. However, concerns about data security, intellectual property, and potential biases in models trained on specific datasets remain important considerations as the global AI landscape evolves.
+
+
+
+
+## References
+
+- [https://pub.towardsai.net/the-deepseek-revolution-why-this-ai-model-is-outperforming-tech-giants-in-85-of-enterprise-tasks-8fa3fd1284a2](https://pub.towardsai.net/the-deepseek-revolution-why-this-ai-model-is-outperforming-tech-giants-in-85-of-enterprise-tasks-8fa3fd1284a2)
+- [https://docsbot.ai/models/deepseek-v3](https://docsbot.ai/models/deepseek-v3)
+- [https://medium.com/@mike.lydick/comparative-analysis-of-reasoning-approaches-openai-vs-deepseek-44e384b67b31](https://medium.com/@mike.lydick/comparative-analysis-of-reasoning-approaches-openai-vs-deepseek-44e384b67b31)
+- [https://venturebeat.com/ai/calm-down-deepseek-r1-is-great-but-chatgpts-product-advantage-is-far-from-over/](https://venturebeat.com/ai/calm-down-deepseek-r1-is-great-but-chatgpts-product-advantage-is-far-from-over/)
+- [https://www.forwardfuture.ai/p/deepseek-s-open-source-ai-model-emerges-as-a-top-challenger](https://www.forwardfuture.ai/p/deepseek-s-open-source-ai-model-emerges-as-a-top-challenger)
+- [https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/](https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/)
+- [https://www.prompthackers.co/compare/deepseek-v3/deepseek-r1](https://www.prompthackers.co/compare/deepseek-v3/deepseek-r1)
+- [https://venturebeat.com/ai/open-source-deepseek-r1-uses-pure-reinforcement-learning-to-match-openai-o1-at-95-less-cost/](https://venturebeat.com/ai/open-source-deepseek-r1-uses-pure-reinforcement-learning-to-match-openai-o1-at-95-less-cost/)
+- [https://gradientflow.com/deepseek-what-you-need-to-know/](https://gradientflow.com/deepseek-what-you-need-to-know/)
+- [https://slashdot.org/software/comparison/DeepSeek-R1-vs-DeepSeek-V3/](https://slashdot.org/software/comparison/DeepSeek-R1-vs-DeepSeek-V3/)
+- [https://medium.com/@lmpo/exploring-deepseek-version-3-a-technical-deep-dive-0b3d2c78b777](https://medium.com/@lmpo/exploring-deepseek-version-3-a-technical-deep-dive-0b3d2c78b777)
+- [https://bottr.me/blog/deepseek](https://bottr.me/blog/deepseek)
+- [https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3](https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3)
+- [https://www.analyticsvidhya.com/blog/2025/01/deepseek-r1-vs-openai-o1/](https://www.analyticsvidhya.com/blog/2025/01/deepseek-r1-vs-openai-o1/)
+- [https://www.zdnet.com/article/i-tested-deepseeks-r1-and-v3-coding-skills-and-were-not-all-doomed-yet/](https://www.zdnet.com/article/i-tested-deepseeks-r1-and-v3-coding-skills-and-were-not-all-doomed-yet/)
+- [https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/](https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/)
+- [https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1](https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1)
+- [https://www.reuters.com/technology/artificial-intelligence/what-is-deepseek-why-is-it-disrupting-ai-sector-2025-01-27/](https://www.reuters.com/technology/artificial-intelligence/what-is-deepseek-why-is-it-disrupting-ai-sector-2025-01-27/)
+- [https://nexustrade.io/blog/the-chinese-obliterated-openai-a-side-by-side-comparison-of-deepseek-r1-vs-openai-o1-for-finance-20250121](https://nexustrade.io/blog/the-chinese-obliterated-openai-a-side-by-side-comparison-of-deepseek-r1-vs-openai-o1-for-finance-20250121)
+- [https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/](https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/)
diff --git a/outputs/task_1738168903_Give me a detailed research report about Dee.pdf b/outputs/task_1738168903_Give me a detailed research report about Dee.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..049e7ea97f2e33b829b31f868e052411836f3753
Binary files /dev/null and b/outputs/task_1738168903_Give me a detailed research report about Dee.pdf differ
diff --git a/outputs/task_1738168903_Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.json b/outputs/task_1738168903_Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.json
new file mode 100644
index 0000000000000000000000000000000000000000..29a96b51da82878ca1ee8f167eb6aa725abb7902
--- /dev/null
+++ b/outputs/task_1738168903_Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry.json
@@ -0,0 +1,2656 @@
+{
+ "timestamp": "2025-01-29T22:11:43.633727",
+ "events": [
+ {
+ "timestamp": "2025-01-29T22:11:47.953503",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:11:47.968602",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83e\udd16 AI Research Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:11:47.986478",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. ...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:11:55.070179",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:11:57.399647",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['Deepseek v3 R1 model architecture and performance benchmarks', \"Impact of Deepseek's open-source model R1 on AI industry competition\", 'Comparison of Deepseek v3 R1 with OpenAI GPT series and other large language models', 'Deepseek R1 cost analysis and implications for AI development accessibility', 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. ']...",
+ "metadata": [
+ "Deepseek v3 R1 model architecture and performance benchmarks",
+ "Impact of Deepseek's open-source model R1 on AI industry competition",
+ "Comparison of Deepseek v3 R1 with OpenAI GPT series and other large language models",
+ "Deepseek R1 cost analysis and implications for AI development accessibility",
+ "Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. "
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:11:57.414562",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Deepseek v3 R1 model architecture and performance benchmarks'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:11:57.429877",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Impact of Deepseek's open-source model R1 on AI industry competition'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:11:57.512498",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Comparison of Deepseek v3 R1 with OpenAI GPT series and other large language models'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:11:57.520770",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Deepseek R1 cost analysis and implications for AI development accessibility'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:11:57.534587",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.134978",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\n",
+ "metadata": "https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.149891",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/@lmpo/exploring-deepseek-version-3-a-technical-deep-dive-0b3d2c78b777\n",
+ "metadata": "https://medium.com/@lmpo/exploring-deepseek-version-3-a-technical-deep-dive-0b3d2c78b777"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.162414",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arxiv.org/pdf/2412.19437\n",
+ "metadata": "https://arxiv.org/pdf/2412.19437"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.170171",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.deeplearning.ai/the-batch/deepseek-v3-redefines-llm-performance-and-cost-efficiency/\n",
+ "metadata": "https://www.deeplearning.ai/the-batch/deepseek-v3-redefines-llm-performance-and-cost-efficiency/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.179235",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arxiv.org/abs/2412.19437\n",
+ "metadata": "https://arxiv.org/abs/2412.19437"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.196328",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.203521",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.463836",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.473100",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.482703",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.499847",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Deepseek v3 R1 model architecture and performance benchmarks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.597634",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://c3.unu.edu/blog/deepseek-r1-pioneering-open-source-thinking-model-and-its-impact-on-the-llm-landscape\n",
+ "metadata": "https://c3.unu.edu/blog/deepseek-r1-pioneering-open-source-thinking-model-and-its-impact-on-the-llm-landscape"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.605055",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.technewsday.com/2025/01/26/chinas-deepseek-r1-ai-model-cuts-costs-by-over-98-challenging-u-s-tech-giants/\n",
+ "metadata": "https://www.technewsday.com/2025/01/26/chinas-deepseek-r1-ai-model-cuts-costs-by-over-98-challenging-u-s-tech-giants/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.618102",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://venturebeat.com/ai/deepseek-r1s-bold-bet-on-reinforcement-learning-how-it-outpaced-openai-at-3-of-the-cost/\n",
+ "metadata": "https://venturebeat.com/ai/deepseek-r1s-bold-bet-on-reinforcement-learning-how-it-outpaced-openai-at-3-of-the-cost/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.633337",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.bbntimes.com/technology/deeply-seeking-ai-deepseek-r1-shocks-the-ai-world\n",
+ "metadata": "https://www.bbntimes.com/technology/deeply-seeking-ai-deepseek-r1-shocks-the-ai-world"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.646358",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\n",
+ "metadata": "https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.655825",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:00.667175",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.279600",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.290713",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://venturebeat.com/wp-content/uploads/2025/01/Screenshot-2025-01-25-at-6.06.56%E2%80%AFPM.png?w=800"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.306946",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.318605",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Deepseek R1 cost analysis and implications for AI development accessibility...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.444052",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://cointelegraph.com/news/release-deep-seek-shatters-long-held-assumptions-ai\n",
+ "metadata": "https://cointelegraph.com/news/release-deep-seek-shatters-long-held-assumptions-ai"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.496061",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\n",
+ "metadata": "https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.510729",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.business-standard.com/world-news/deepseek-r1-chinese-ai-research-breakthrough-challenging-openai-explained-125012700327_1.html\n",
+ "metadata": "https://www.business-standard.com/world-news/deepseek-r1-chinese-ai-research-breakthrough-challenging-openai-explained-125012700327_1.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.522094",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.532976",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 3 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.656687",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.671318",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.681832",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.693979",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Impact of Deepseek's open-source model R1 on AI industry competition...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.744238",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://docsbot.ai/models/compare/deepseek-v3/gpt-4o\n",
+ "metadata": "https://docsbot.ai/models/compare/deepseek-v3/gpt-4o"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.751130",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.analyticsvidhya.com/blog/2025/01/deepseek-r1-vs-openai-o1/\n",
+ "metadata": "https://www.analyticsvidhya.com/blog/2025/01/deepseek-r1-vs-openai-o1/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.766355",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://docsbot.ai/models/compare/gpt-4o/deepseek-v3\n",
+ "metadata": "https://docsbot.ai/models/compare/gpt-4o/deepseek-v3"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.781936",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/@mike.lydick/comparative-analysis-of-reasoning-approaches-openai-vs-deepseek-44e384b67b31\n",
+ "metadata": "https://medium.com/@mike.lydick/comparative-analysis-of-reasoning-approaches-openai-vs-deepseek-44e384b67b31"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.796665",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/\n",
+ "metadata": "https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.811671",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:02.819803",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:04.635305",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:04.653491",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 11 total images",
+ "metadata": [
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T173004.195.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172902.179.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172837.890.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172812.190.webp"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:04.678549",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:04.690216",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Comparison of Deepseek v3 R1 with OpenAI GPT series and other large language models...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:05.080885",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\n",
+ "metadata": "https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:05.102539",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.aicommission.org/2025/01/deepseek-r1s-bold-bet-on-reinforcement-learning-how-it-outpaced-openai-at-3-of-the-cost/\n",
+ "metadata": "https://www.aicommission.org/2025/01/deepseek-r1s-bold-bet-on-reinforcement-learning-how-it-outpaced-openai-at-3-of-the-cost/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:05.113650",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.nytimes.com/2025/01/27/technology/what-is-deepseek-china-ai.html\n",
+ "metadata": "https://www.nytimes.com/2025/01/27/technology/what-is-deepseek-china-ai.html"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:05.135062",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://bdtechtalks.com/2025/01/29/deepseek-r1-winners-losers/\n",
+ "metadata": "https://bdtechtalks.com/2025/01/29/deepseek-r1-winners-losers/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:05.147058",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reuters.com/technology/artificial-intelligence/what-is-deepseek-why-is-it-disrupting-ai-sector-2025-01-27/\n",
+ "metadata": "https://www.reuters.com/technology/artificial-intelligence/what-is-deepseek-why-is-it-disrupting-ai-sector-2025-01-27/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:05.161312",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:05.168073",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:11.217686",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:11.230612",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 7 total images",
+ "metadata": [
+ "https://felloai.com/wp-content/uploads/2025/01/All-About-DeepSeek-Company-and-their-revolutionary-R1-and-V3-models-that-are-disruption-AI-Industry.jpg",
+ "https://felloai.com/wp-content/uploads/2025/01/deepseek-officially-tops-the-appstore-v0-eb8nxvvptdfe1.jpeg-831x1024.webp",
+ "https://www.aicommission.org/wp-content/uploads/2023/01/logomain.png",
+ "https://venturebeat.com/wp-content/uploads/2025/01/DALL%C2%B7E-2025-01-25-08.38.46-A-minimalistic-vector-style-illustration-symbolizing-Deepseek-R1s-AI-innovation.-The-design-includes-a-single-glowing-node-connected-by-a-few-thin-.webp"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:11.246415",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:11.260582",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. ...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:11.431646",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: on the cusp of an AI price war. Even Sam Altman, OpenAI CEO, acknowledged in a tweet late yesterday that \u201cDeepSeek\u2019s R1 is an impressive model, particularly around what they\u2019re able to deliver for the price.\u201d Andy Thurai, VP and principal analyst at Constellation Research, noted in his Weekly Tech Bytes newsletter on LinkedIn that DeepSeek\u2019s efficiency will inevitably put downward pressure on AI costs. \u201cIf it is proven that the entire AI software supply chain can be done cheaply using open-source software, many startups will take a hit. VCs will stop writing blank checks to start-ups that have generative AI on their pitch deck.\u201d Venture-backed AI firms that rely on closed-source models to justify their high valuations could take a devastating hit in the aftermath of the DeepSeek tsunami. Companies that fail to differentiate themselves beyond the mere ability to train LLMs could face significant funding challenges. Privacy And Security Concerns However, not everyone is enthusiastic\n\nSource: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: on the cusp of an AI price war. Even Sam Altman, OpenAI CEO, acknowledged in a tweet late yesterday that \u201cDeepSeek\u2019s R1 is an impressive model, particularly around what they\u2019re able to deliver for the price.\u201d Andy Thurai, VP and principal analyst at Constellation Research, noted in his Weekly Tech Bytes newsletter on LinkedIn that DeepSeek\u2019s efficiency will inevitably put downward pressure on AI costs. \u201cIf it is proven that the entire AI software supply chain can be done cheaply using open-source software, many startups will take a hit. VCs will stop writing blank checks to start-ups that have generative AI on their pitch deck.\u201d Venture-backed AI firms that rely on closed-source models to justify their high valuations could take a devastating hit in the aftermath of the DeepSeek tsunami. Companies that fail to differentiate themselves beyond the mere ability to train LLMs could face significant funding challenges. Privacy And Security Concerns However, not everyone is enthusiastic\n\nSource: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: on the cusp of an AI price war. Even Sam Altman, OpenAI CEO, acknowledged in a tweet late yesterday that \u201cDeepSeek\u2019s R1 is an impressive model, particularly around what they\u2019re able to deliver for the price.\u201d Andy Thurai, VP and principal analyst at Constellation Research, noted in his Weekly Tech Bytes newsletter on LinkedIn that DeepSeek\u2019s efficiency will inevitably put downward pressure on AI costs. \u201cIf it is proven that the entire AI software supply chain can be done cheaply using open-source software, many startups will take a hit. VCs will stop writing blank checks to start-ups that have generative AI on their pitch deck.\u201d Venture-backed AI firms that rely on closed-source models to justify their high valuations could take a devastating hit in the aftermath of the DeepSeek tsunami. Companies that fail to differentiate themselves beyond the mere ability to train LLMs could face significant funding challenges. Privacy And Security Concerns However, not everyone is enthusiastic\n\nSource: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: ForbesInnovationAIEditors' PickThe Biggest Winner In The DeepSeek Disruption Story Is Open Source AIKolawole Samuel AdebayoContributorOpinions expressed by Forbes Contributors are their own.I write about the economics of AI.FollowingJan 28, 2025,04:36pm ESTUpdated Jan 29, 2025, 02:12am ESTShare to FacebookShare to TwitterShare to LinkedinWhen the news about DeepSeek-R1 broke, the AI world was quick to frame it as yet another flashpoint ... [+] in the ongoing U.S.-China AI rivalry. But the real story, according to experts like Yann LeCun, is about the value of open source AI. (Photo by NICOLAS TUCAT/AFP via Getty Images)AFP via Getty Images DeepSeek-R1 \u2014 the AI model created by DeepSeek, a little known Chinese company, at a fraction of what it cost OpenAI to build its own models \u2014 has sent the AI industry into a frenzy for the last couple of days. When the news about DeepSeek-R1 broke, the AI world was quick to frame it as yet another flashpoint in the ongoing U.S.-China AI rivalry.\n\nSource: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: ForbesInnovationAIEditors' PickThe Biggest Winner In The DeepSeek Disruption Story Is Open Source AIKolawole Samuel AdebayoContributorOpinions expressed by Forbes Contributors are their own.I write about the economics of AI.FollowingJan 28, 2025,04:36pm ESTUpdated Jan 29, 2025, 02:12am ESTShare to FacebookShare to TwitterShare to LinkedinWhen the news about DeepSeek-R1 broke, the AI world was quick to frame it as yet another flashpoint ... [+] in the ongoing U.S.-China AI rivalry. But the real story, according to experts like Yann LeCun, is about the value of open source AI. (Photo by NICOLAS TUCAT/AFP via Getty Images)AFP via Getty Images DeepSeek-R1 \u2014 the AI model created by DeepSeek, a little known Chinese company, at a fraction of what it cost OpenAI to build its own models \u2014 has sent the AI industry into a frenzy for the last couple of days. When the news about DeepSeek-R1 broke, the AI world was quick to frame it as yet another flashpoint in the ongoing U.S.-China AI rivalry.\n\nSource: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: ForbesInnovationAIEditors' PickThe Biggest Winner In The DeepSeek Disruption Story Is Open Source AIKolawole Samuel AdebayoContributorOpinions expressed by Forbes Contributors are their own.I write about the economics of AI.FollowingJan 28, 2025,04:36pm ESTUpdated Jan 29, 2025, 02:12am ESTShare to FacebookShare to TwitterShare to LinkedinWhen the news about DeepSeek-R1 broke, the AI world was quick to frame it as yet another flashpoint ... [+] in the ongoing U.S.-China AI rivalry. But the real story, according to experts like Yann LeCun, is about the value of open source AI. (Photo by NICOLAS TUCAT/AFP via Getty Images)AFP via Getty Images DeepSeek-R1 \u2014 the AI model created by DeepSeek, a little known Chinese company, at a fraction of what it cost OpenAI to build its own models \u2014 has sent the AI industry into a frenzy for the last couple of days. When the news about DeepSeek-R1 broke, the AI world was quick to frame it as yet another flashpoint in the ongoing U.S.-China AI rivalry.\n\nSource: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: A Looming AI Price War\nDeepSeek\u2019s AI model undoubtedly raises a valid question about whether we are on the cusp of an AI price war. Even Sam Altman, OpenAI CEO, acknowledged in a tweet late yesterday that \u201cDeepSeek\u2019s R1 is an impressive model, particularly around what they\u2019re able to deliver for the price.\u201d\nAndy Thurai, VP and principal analyst at Constellation Research, noted in his Weekly Tech Bytes newsletter on LinkedIn that DeepSeek\u2019s efficiency will inevitably put downward pressure on AI costs. \u201cIf it is proven that the entire AI software supply chain can be done cheaply using open-source software, many startups will take a hit. VCs will stop writing blank checks to start-ups that have generative AI on their pitch deck.\u201d\n\nSource: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: piece of technology in AI is open source and has gained large mindshare.\u201d Sharma believes we are witnessing the same trend in AI that we saw with databases and operating systems, where open solutions eventually dominated the industry. With proprietary models requiring massive investment in compute and data acquisition, open-source alternatives offer more attractive options to companies seeking cost-effective AI solutions. DeepSeek-R1\u2019s training cost \u2014 reportedly just $6 million \u2014 has shocked industry insiders, especially when compared to the billions spent by OpenAI, Google and Anthropic on their frontier models. Kevin Surace, CEO of Appvance, called it a \u201cwake-up call,\u201d proving that \u201cChina has focused on low-cost rapid models while the U.S. has focused on huge models at a huge cost.\u201d A Looming AI Price War DeepSeek\u2019s AI model undoubtedly raises a valid question about whether we are on the cusp of an AI price war. Even Sam Altman, OpenAI CEO, acknowledged in a tweet late yesterday\n\nSource: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: piece of technology in AI is open source and has gained large mindshare.\u201d Sharma believes we are witnessing the same trend in AI that we saw with databases and operating systems, where open solutions eventually dominated the industry. With proprietary models requiring massive investment in compute and data acquisition, open-source alternatives offer more attractive options to companies seeking cost-effective AI solutions. DeepSeek-R1\u2019s training cost \u2014 reportedly just $6 million \u2014 has shocked industry insiders, especially when compared to the billions spent by OpenAI, Google and Anthropic on their frontier models. Kevin Surace, CEO of Appvance, called it a \u201cwake-up call,\u201d proving that \u201cChina has focused on low-cost rapid models while the U.S. has focused on huge models at a huge cost.\u201d A Looming AI Price War DeepSeek\u2019s AI model undoubtedly raises a valid question about whether we are on the cusp of an AI price war. Even Sam Altman, OpenAI CEO, acknowledged in a tweet late yesterday\n\nSource: https://www.forbes.com/sites/kolawolesamueladebayo/2025/01/28/the-biggest-winner-in-the-deepseek-disruption-story-is-open-source-ai/\nTitle: The Biggest Winner In The DeepSeek Disruption Story Is Open Source AI\nContent: piece of technology in AI is open source and has gained large mindshare.\u201d Sharma believes we are witnessing the same trend in AI that we saw with databases and operating systems, where open solutions eventually dominated the industry. With proprietary models requiring massive investment in compute and data acquisition, open-source alternatives offer more attractive options to companies seeking cost-effective AI solutions. DeepSeek-R1\u2019s training cost \u2014 reportedly just $6 million \u2014 has shocked industry insiders, especially when compared to the billions spent by OpenAI, Google and Anthropic on their frontier models. Kevin Surace, CEO of Appvance, called it a \u201cwake-up call,\u201d proving that \u201cChina has focused on low-cost rapid models while the U.S. has focused on huge models at a huge cost.\u201d A Looming AI Price War DeepSeek\u2019s AI model undoubtedly raises a valid question about whether we are on the cusp of an AI price war. Even Sam Altman, OpenAI CEO, acknowledged in a tweet late yesterday\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:12.360803",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: Model PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-R1 and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nModel PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-R1 and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nCompare performance metrics between DeepSeek-R1 and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: Model PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-R1 and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.BenchmarkDeepSeek-R1DeepSeek-V3MMLUMassive Multitask Language Understanding - Tests knowledge across 57 subjects including mathematics, history, law, and more90.8%Pass@1Source88.5%EMSourceMMLU-ProA more robust MMLU benchmark with harder, reasoning-focused questions, a larger choice set, and reduced prompt sensitivity84%EMSource75.9%EMSourceMMMUMassive Multitask Multimodal Understanding - Tests understanding across text, images, audio, and videoNot availableNot availableHellaSwagA challenging sentence completion benchmarkNot available88.9%10-shotSourceHumanEvalEvaluates code generation and problem-solving capabilitiesNot available82.6%pass@1SourceMATHTests mathematical problem-solving abilities across various difficulty levelsNot available61.6%4-shotSourceGPQATests PhD-level knowledge in\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: Model PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-R1 and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.BenchmarkDeepSeek-R1DeepSeek-V3MMLUMassive Multitask Language Understanding - Tests knowledge across 57 subjects including mathematics, history, law, and more90.8%Pass@1Source88.5%EMSourceMMLU-ProA more robust MMLU benchmark with harder, reasoning-focused questions, a larger choice set, and reduced prompt sensitivity84%EMSource75.9%EMSourceMMMUMassive Multitask Multimodal Understanding - Tests understanding across text, images, audio, and videoNot availableNot availableHellaSwagA challenging sentence completion benchmarkNot available88.9%10-shotSourceHumanEvalEvaluates code generation and problem-solving capabilitiesNot available82.6%pass@1SourceMATHTests mathematical problem-solving abilities across various difficulty levelsNot available61.6%4-shotSourceGPQATests PhD-level knowledge in\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: How do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU-Pro benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the GPQA benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the GPQA benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the IFEval benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the IFEval benchmark?\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: Price ComparisonCost comparison with other models (per million tokens).Scale:LogarithmicLinearInput Token CostsOutput Token CostsCalculate and Compare Model PricesModel PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-R1 and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.BenchmarkDeepSeek-R1DeepSeek-V3MMLUMassive Multitask Language Understanding - Tests knowledge across 57 subjects including mathematics, history, law, and more90.8%Pass@1Source88.5%EMSourceMMLU-ProA more robust MMLU benchmark with harder, reasoning-focused questions, a larger choice set, and reduced prompt sensitivity84%EMSource75.9%EMSourceMMMUMassive Multitask Multimodal Understanding - Tests understanding across text, images, audio, and videoNot availableNot availableHellaSwagA challenging sentence completion benchmarkNot available88.9%10-shotSourceHumanEvalEvaluates code generation and problem-solving capabilitiesNot\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: DeepSeek-V3DeepSeek-V3 is a Open-Source 671B parameter Mixture-of-Experts (MoE) model with 37B activated parameters per token. It features innovative load balancing and multi-token prediction, trained on 14.8T tokens. The model achieves state-of-the-art performance across benchmarks while maintaining efficient training costs of only 2.788M H800 GPU hours. It incorporates reasoning capabilities distilled from DeepSeek-R1 and supports a 128K context window.\nDeepSeek-V3 is a Open-Source 671B parameter Mixture-of-Experts (MoE) model with 37B activated parameters per token. It features innovative load balancing and multi-token prediction, trained on 14.8T tokens. The model achieves state-of-the-art performance across benchmarks while maintaining efficient training costs of only 2.788M H800 GPU hours. It incorporates reasoning capabilities distilled from DeepSeek-R1 and supports a 128K context window.\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: DeepSeek-R1DeepSeek-R1 is a 671B parameter Mixture-of-Experts (MoE) model with 37B activated parameters per token, trained via large-scale reinforcement learning with a focus on reasoning capabilities. It incorporates two RL stages for discovering improved reasoning patterns and aligning with human preferences, along with two SFT stages for seeding reasoning and non-reasoning capabilities. The model achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks.DeepSeek-V3DeepSeek-V3 is a Open-Source 671B parameter Mixture-of-Experts (MoE) model with 37B activated parameters per token. It features innovative load balancing and multi-token prediction, trained on 14.8T tokens. The model achieves state-of-the-art performance across benchmarks while maintaining efficient training costs of only 2.788M H800 GPU hours. It incorporates reasoning capabilities distilled from DeepSeek-R1 and supports a 128K context window.Model OverviewFeatureDeepSeek-R1DeepSeek-V3Input\n\nSource: https://www.deeplearning.ai/the-batch/deepseek-v3-redefines-llm-performance-and-cost-efficiency/\nTitle: DeepSeek-V3 Redefines LLM Performance and Cost Efficiency\nContent: at any moment. The team trained the model in 2.79 million GPU hours \u2014 less than 1/10 the time required to train Llama 3.1 405B, which DeepSeek-V3 substantially outperforms \u2014 at an extraordinarily low cost of $5.6 million.The developers trained it on roughly 15 trillion tokens, including a larger percentage of coding and math data relative to DeepSeek-V2. They fine-tuned it on a wide variety of tasks using output generated by DeepSeek-R1 and DeepSeek-V2.5. They further sharpened its performance across diverse domains using the reinforcement learning algorithm known as group relative policy optimization. Earlier work showed that training to predict the next two tokens would improve performance over learning to predict just one. The authors implemented this procedure. The model learned to predict the first token as usual and used an additional set of layers to learn to predict the second token. The additional layers aren\u2019t used at inference.Following DeepSeek-V2, DeepSeek-V3 uses\n\nSource: https://www.deeplearning.ai/the-batch/deepseek-v3-redefines-llm-performance-and-cost-efficiency/\nTitle: DeepSeek-V3 Redefines LLM Performance and Cost Efficiency\nContent: at any moment. The team trained the model in 2.79 million GPU hours \u2014 less than 1/10 the time required to train Llama 3.1 405B, which DeepSeek-V3 substantially outperforms \u2014 at an extraordinarily low cost of $5.6 million.The developers trained it on roughly 15 trillion tokens, including a larger percentage of coding and math data relative to DeepSeek-V2. They fine-tuned it on a wide variety of tasks using output generated by DeepSeek-R1 and DeepSeek-V2.5. They further sharpened its performance across diverse domains using the reinforcement learning algorithm known as group relative policy optimization. Earlier work showed that training to predict the next two tokens would improve performance over learning to predict just one. The authors implemented this procedure. The model learned to predict the first token as usual and used an additional set of layers to learn to predict the second token. The additional layers aren\u2019t used at inference.Following DeepSeek-V2, DeepSeek-V3 uses\n\nSource: https://medium.com/@lmpo/exploring-deepseek-version-3-a-technical-deep-dive-0b3d2c78b777\nTitle: Exploring DeepSeek-V3: A Technical Overview | by LM Po | Dec, 2024 | Medium\nContent: Overview of DeepSeek-V3DeepSeek V3 is an open-weight large language model that leverages a Mixture of Experts (MoE) architecture, a cutting-edge approach designed to enhance efficiency and performance. The MoE framework employs multiple specialized \u201cexperts\u201d or smaller models, each optimized for specific tasks. This modular design allows the model to dynamically activate only the relevant subset of parameters during processing, significantly reducing computational overhead while maintaining high accuracy and adaptability.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:22.408978",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: Previously, the high costs associated with leading AI models limited access to advanced machine learning features to a select few organizations. However, with DeepSeek-R1's affordability, smaller developers and startups can now compete on a more level playing field, fostering innovation and increasing diversity in AI applications. This democratization of AI empowers developers from varied backgrounds to contribute to the field, potentially leading to more creative and diverse applications.Learn to use AI like a ProGet the latest AI workflows to boost your productivity and business performance, delivered weekly by expert consultants. Enjoy step-by-step guides, weekly Q&A sessions, and full access to our AI workflow archive.Learn More (And Unlock 50% off!)\n\nSource: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: Learn to use AI like a Pro\nGet the latest AI workflows to boost your productivity and business performance, delivered weekly by expert consultants. Enjoy step-by-step guides, weekly Q&A sessions, and full access to our AI workflow archive.\nLearn More (And Unlock 50% off!)\nThe financial implications of DeepSeek-R1's training cost are profound, primarily influencing the competitive dynamics among major AI players. While established firms like OpenAI and Anthropic might experience a decline in their market dominance due to the cost-efficiency of DeepSeek's model, the development has conversely empowered smaller AI firms and individual developers by democratizing access to advanced AI technologies. This democratization is particularly evident in the open-source movement, where the release of model weights has catalyzed innovation and collaboration, despite some details remaining proprietary.\n\nSource: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: Previously, the high costs associated with leading AI models limited access to advanced machine learning features to a select few organizations. However, with DeepSeek-R1's affordability, smaller developers and startups can now compete on a more level playing field, fostering innovation and increasing diversity in AI applications. This democratization of AI empowers developers from varied backgrounds to contribute to the field, potentially leading to more creative and diverse applications.\nPreviously, the high costs associated with leading AI models limited access to advanced machine learning features to a select few organizations. However, with DeepSeek-R1's affordability, smaller developers and startups can now compete on a more level playing field, fostering innovation and increasing diversity in AI applications. This democratization of AI empowers developers from varied backgrounds to contribute to the field, potentially leading to more creative and diverse applications.\n\nSource: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: standards and the careful management of accessible AI technologies to mitigate potential risks.In conclusion, DeepSeek-R1 serves as a catalyst for change in the AI industry, necessitating adaptation and innovation among major players while simultaneously accelerating democratization within the AI development space. As the industry evolves to accommodate these shifts, strategic collaboration and a focus on balancing accessibility with security will be crucial to navigate the emerging AI landscape successfully.Comparative Performance AnalysisDeepSeek-R1 has garnered significant attention in the AI industry due to its impressive performance relative to its cost. While the model doesn't surpass OpenAI's o1, its performance is nearly equivalent, provided at a fraction of the price. This affordability is achieved through DeepSeek's cloud offerings, which significantly undercut the costs associated with U.S. cloud providers. The model's clear reasoning chain is a substantial advantage for\n\nSource: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: standards and the careful management of accessible AI technologies to mitigate potential risks.In conclusion, DeepSeek-R1 serves as a catalyst for change in the AI industry, necessitating adaptation and innovation among major players while simultaneously accelerating democratization within the AI development space. As the industry evolves to accommodate these shifts, strategic collaboration and a focus on balancing accessibility with security will be crucial to navigate the emerging AI landscape successfully.Comparative Performance AnalysisDeepSeek-R1 has garnered significant attention in the AI industry due to its impressive performance relative to its cost. While the model doesn't surpass OpenAI's o1, its performance is nearly equivalent, provided at a fraction of the price. This affordability is achieved through DeepSeek's cloud offerings, which significantly undercut the costs associated with U.S. cloud providers. The model's clear reasoning chain is a substantial advantage for\n\nSource: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: The controversy surrounding DeepSeek-R1's purported $6 million training cost raises questions about transparency in AI development. Critics argue that this figure only covers hardware expenses, neglecting other critical costs such as data acquisition and personnel. Despite this, the model remains a cost-effective alternative to more expensive industry counterparts, challenging the norm of high expenditure in AI model development.\nThe controversy surrounding DeepSeek-R1's purported $6 million training cost raises questions about transparency in AI development. Critics argue that this figure only covers hardware expenses, neglecting other critical costs such as data acquisition and personnel. Despite this, the model remains a cost-effective alternative to more expensive industry counterparts, challenging the norm of high expenditure in AI model development.\n\nSource: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: AI application developers stand to greatly benefit from the reduced costs associated with DeepSeek-R1. The introduction of an advanced, yet economically accessible model facilitates the widespread incorporation of cutting-edge AI language capabilities into various applications, broadening the scope and potential impact of AI technologies across sectors.\nAI application developers stand to greatly benefit from the reduced costs associated with DeepSeek-R1. The introduction of an advanced, yet economically accessible model facilitates the widespread incorporation of cutting-edge AI language capabilities into various applications, broadening the scope and potential impact of AI technologies across sectors.\n\nSource: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: More (And Unlock 50% off!)The financial implications of DeepSeek-R1's training cost are profound, primarily influencing the competitive dynamics among major AI players. While established firms like OpenAI and Anthropic might experience a decline in their market dominance due to the cost-efficiency of DeepSeek's model, the development has conversely empowered smaller AI firms and individual developers by democratizing access to advanced AI technologies. This democratization is particularly evident in the open-source movement, where the release of model weights has catalyzed innovation and collaboration, despite some details remaining proprietary.DeepSeek-R1's emergence aligns closely with broader industry trends emphasizing efficiency over sheer scale in AI development. This shift towards cost-effective AI solutions resonates within the community, as evidenced by the formation of alliances like the 'Efficient AI Alliance' and the opening of new fronts in AI research previously\n\nSource: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: More (And Unlock 50% off!)The financial implications of DeepSeek-R1's training cost are profound, primarily influencing the competitive dynamics among major AI players. While established firms like OpenAI and Anthropic might experience a decline in their market dominance due to the cost-efficiency of DeepSeek's model, the development has conversely empowered smaller AI firms and individual developers by democratizing access to advanced AI technologies. This democratization is particularly evident in the open-source movement, where the release of model weights has catalyzed innovation and collaboration, despite some details remaining proprietary.DeepSeek-R1's emergence aligns closely with broader industry trends emphasizing efficiency over sheer scale in AI development. This shift towards cost-effective AI solutions resonates within the community, as evidenced by the formation of alliances like the 'Efficient AI Alliance' and the opening of new fronts in AI research previously\n\nSource: https://opentools.ai/news/deepseek-r1-disrupts-ai-industry-with-low-cost-high-performance-model\nTitle: DeepSeek-R1 Disrupts AI Industry with Low-Cost, High-Performance Model | AI News\nContent: The financial implications of DeepSeek-R1's training cost are profound, primarily influencing the competitive dynamics among major AI players. While established firms like OpenAI and Anthropic might experience a decline in their market dominance due to the cost-efficiency of DeepSeek's model, the development has conversely empowered smaller AI firms and individual developers by democratizing access to advanced AI technologies. This democratization is particularly evident in the open-source movement, where the release of model weights has catalyzed innovation and collaboration, despite some details remaining proprietary.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:30.731238",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: Multi-Head Latent Attention (MLA): Enhanced the model\u2019s ability to process nuanced relationships and manage multiple inputs simultaneously, making it highly effective for tasks requiring contextual depth.\nWhile overshadowed by high-profile releases from OpenAI and Meta, DeepSeek V3 quietly gained respect in research circles for its combination of scale, cost efficiency, and architectural innovation. It also laid the technical foundation for DeepSeek\u2019s most significant achievement to date: DeepSeek R1..\nDeepSeek took its boldest step yet with DeepSeek R1, launched on January 21, 2025. This open-source AI model has become the startup\u2019s most serious challenge to American tech giants, owing to its formidable reasoning power, lower operating costs, and developer-friendly features.\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: What\u2019s Next for DeepSeek\nDeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time.\nDeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets.\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: about AI development. Models like DeepSeek V3 and the groundbreaking DeepSeek R1 prove that success in AI doesn\u2019t always require billion-dollar budgets. Instead, efficiency, adaptability, and strategic partnerships can deliver results that rival even the most expensive models. What makes DeepSeek\u2019s journey even more extraordinary is the sheer shock it has generated within the AI community. Industry experts and researchers have been vocal about their amazement at how a smaller player has managed to compete with\u2014and even outperform\u2014some of the most advanced models developed by vastly better-funded organizations. DeepSeek is showing no signs of slowing down. Its recent launch of DeepThink + Web Search, which enables real-time online lookups, places it ahead of even OpenAI in some capabilities. Looking forward, the company is likely to focus on: Refining reinforcement learning pipelines to further enhance reasoning capabilities. Developing industry-specific models tailored for fields like\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: and competitive capabilities, DeepSeek has managed to thrive in a market dominated by tech giants, proving that innovation and efficiency can rival even the largest budgets. What\u2019s Next for DeepSeek DeepSeek\u2019s rapid rise comes with challenges that could shape its future. U.S. export controls restrict access to advanced GPUs, creating a compute gap that could hinder its ability to scale models like R1. While its MoE architecture maximizes efficiency, competing with firms that have access to cutting-edge hardware may become more difficult over time. DeepSeek also faces hurdles in market perception. To gain international trust, it must consistently prove its reliability, especially for enterprise-grade deployments. Meanwhile, the fast-evolving AI landscape means competitors like OpenAI or Meta could outpace it with new innovations. Additionally, operating under Chinese regulatory frameworks imposes content restrictions that may limit its appeal in open markets. Despite these challenges,\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: multiple inputs simultaneously, making it highly effective for tasks requiring contextual depth. While overshadowed by high-profile releases from OpenAI and Meta, DeepSeek V3 quietly gained respect in research circles for its combination of scale, cost efficiency, and architectural innovation. It also laid the technical foundation for DeepSeek\u2019s most significant achievement to date: DeepSeek R1.. DeepSeek R1 DeepSeek took its boldest step yet with DeepSeek R1, launched on January 21, 2025. This open-source AI model has become the startup\u2019s most serious challenge to American tech giants, owing to its formidable reasoning power, lower operating costs, and developer-friendly features. \ud83d\ude80 DeepSeek-R1 is here!\u26a1 Performance on par with OpenAI-o1\ud83d\udcd6 Fully open-source model & technical report\ud83c\udfc6 MIT licensed: Distill & commercialize freely!\ud83c\udf10 Website & API are live now! Try DeepThink at https://t.co/v1TFy7LHNy today!\ud83d\udc0b 1/n pic.twitter.com/7BlpWAPu6y\u2014 DeepSeek (@deepseek_ai) January 20, 2025 Key\n\nSource: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\nTitle: All About DeepSeek: The Rising AI Powerhouse Challenging Industry Giants | Fello\u00a0AI\nContent: Beyond MoE, Multi-Head Latent Attention (MLA) boosts the models\u2019 ability to process multiple data streams at once. By distributing focus across several \u201cattention heads,\u201d they can better identify contextual relationships and handle nuanced inputs\u2014even when processing tens of thousands of tokens in a single request.\nDeepSeek\u2019s innovations also extend to model distillation, where knowledge from its larger models is transferred to smaller, more efficient versions, such as DeepSeek-R1-Distill. These compact models retain much of the reasoning power of their larger counterparts but require significantly fewer computational resources, making advanced AI more accessible.\nReactions from the AI Community\nSeveral prominent figures in AI have weighed in on the disruptive potential of DeepSeek R1:\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:51.310594",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://docsbot.ai/models/compare/deepseek-v3/gpt-4o\nTitle: DeepSeek-V3 vs GPT-4o - Detailed Performance & Feature Comparison\nContent: Model PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-V3 and GPT-4o. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nModel PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-V3 and GPT-4o. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nCompare performance metrics between DeepSeek-V3 and GPT-4o. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\n\nSource: https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/\nTitle: DeepSeek V3: $5.5M Trained Model Beats GPT-4o & Llama 3.1\nContent: Also Read: DeepSeek V3 vs GPT-4o: Can Open-Source AI Compete with GPT-4o\u2019s Power?\nDeepSeek V3 stands as a monumental achievement in the evolution of large-scale AI models, combining unprecedented scale with unmatched efficiency. With its innovative architecture, cost-effective training, and impressive 685 billion parameters, DeepSeek V3 redefines what\u2019s possible in the AI space. The model\u2019s ability to excel in diverse benchmarks, outperforming both open-source and closed-source competitors, highlights its extraordinary capabilities.\nNot only does DeepSeek V3 deliver state-of-the-art performance in tasks like coding, reasoning, and mathematical problem-solving, but it also democratizes access to cutting-edge AI with its open-source availability. Developers, researchers, and businesses alike can leverage its immense power, supported by a permissive license that fosters innovation and collaboration.\n\nSource: https://docsbot.ai/models/compare/gpt-4o/deepseek-v3\nTitle: GPT-4o vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: Model PerformanceBenchmark ComparisonCompare performance metrics between GPT-4o and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nModel PerformanceBenchmark ComparisonCompare performance metrics between GPT-4o and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nCompare performance metrics between GPT-4o and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/gpt-4o\nTitle: DeepSeek-V3 vs GPT-4o - Detailed Performance & Feature Comparison\nContent: benchmark88.9%10-shotSourceNot availableHumanEvalEvaluates code generation and problem-solving capabilities82.6%pass@1Source90.2%0-shotSourceMATHTests mathematical problem-solving abilities across various difficulty levels61.6%4-shotSource75.9%0-shotSourceGPQATests PhD-level knowledge in chemistry, biology, and physics through multiple choice questions that require deep domain expertise59.1%pass@1Source53.6%0-shotSourceIFEvalTests model's ability to accurately follow explicit formatting instructions, generate appropriate outputs, and maintain consistent instruction adherence across different tasks86.1%Prompt StrictSourceNot availableFrequently Asked QuestionsWhat are the key differences between DeepSeek-V3 and GPT-4o?When were DeepSeek-V3 and GPT-4o released?How does DeepSeek-V3's context window compare to GPT-4o's?How do DeepSeek-V3 and GPT-4o's prices compare?Is DeepSeek-V3 or GPT-4o open source?What is the maximum output length of DeepSeek-V3 compared to GPT-4o?Which providers\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/gpt-4o\nTitle: DeepSeek-V3 vs GPT-4o - Detailed Performance & Feature Comparison\nContent: CompareDeepSeek-V3 vs GPT-4oGet a detailed comparison of AI language models DeepSeek's DeepSeek-V3 and OpenAI's GPT-4o, including model features, token pricing, API costs, performance benchmarks, and real-world capabilities to help you choose the right LLM for your needs.\nDeepSeek-V3 vs GPT-4o\nGet a detailed comparison of AI language models DeepSeek's DeepSeek-V3 and OpenAI's GPT-4o, including model features, token pricing, API costs, performance benchmarks, and real-world capabilities to help you choose the right LLM for your needs.\nHomeModelsCompareDeepSeek-V3 vs GPT-4o\nDeepSeek-V3 vs GPT-4o\nDeepSeek-V3 vs GPT-4o\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/gpt-4o\nTitle: DeepSeek-V3 vs GPT-4o - Detailed Performance & Feature Comparison\nContent: CompareDeepSeek-V3 vs GPT-4oGet a detailed comparison of AI language models DeepSeek's DeepSeek-V3 and OpenAI's GPT-4o, including model features, token pricing, API costs, performance benchmarks, and real-world capabilities to help you choose the right LLM for your needs.HomeModelsCompareDeepSeek-V3 vs GPT-4oCompareClaude 3.5 HaikuClaude 3.5 SonnetClaude 3.5 Sonnet (Oct 2024)Claude 3.5 Sonnet (Jun 2024)Claude 3 HaikuClaude 3 SonnetClaude 3 OpusClaude 2.1Claude Instant 1.2Claude 2Amazon Nova MicroAmazon Nova LiteAmazon Nova ProCommand R+ (Aug 2024)Command R (Aug 2024)Gemini 2.0 Flash Thinking (Experimental)Gemini 2.0 Flash (Experimental)Gemini 1.5 Pro (002)Gemini 1.5 Flash (002)Gemini 1.5 Flash-8BGemma 2 27BGemma 2 9BGemini 1.5 Flash (001)Gemini 1.5 Pro (001)Gemini 1.0 UltraGemini 1.0 ProLlama 3.3 70B InstructLlama 3.2 90B Vision InstructLlama 3.2 11B Vision InstructLlama 3.1 8B InstructLlama 3.1 70B InstructLlama 3.1 405B InstructLlama 3 8B InstructLlama 3 70B InstructLlama 2 Chat\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/gpt-4o\nTitle: DeepSeek-V3 vs GPT-4o - Detailed Performance & Feature Comparison\nContent: CompareDeepSeek-V3 vs GPT-4oGet a detailed comparison of AI language models DeepSeek's DeepSeek-V3 and OpenAI's GPT-4o, including model features, token pricing, API costs, performance benchmarks, and real-world capabilities to help you choose the right LLM for your needs.HomeModelsCompareDeepSeek-V3 vs GPT-4oCompareClaude 3.5 HaikuClaude 3.5 SonnetClaude 3.5 Sonnet (Oct 2024)Claude 3.5 Sonnet (Jun 2024)Claude 3 HaikuClaude 3 SonnetClaude 3 OpusClaude 2.1Claude Instant 1.2Claude 2Amazon Nova MicroAmazon Nova LiteAmazon Nova ProCommand R+ (Aug 2024)Command R (Aug 2024)Gemini 2.0 Flash Thinking (Experimental)Gemini 2.0 Flash (Experimental)Gemini 1.5 Pro (002)Gemini 1.5 Flash (002)Gemini 1.5 Flash-8BGemma 2 27BGemma 2 9BGemini 1.5 Flash (001)Gemini 1.5 Pro (001)Gemini 1.0 UltraGemini 1.0 ProLlama 3.3 70B InstructLlama 3.2 90B Vision InstructLlama 3.2 11B Vision InstructLlama 3.1 8B InstructLlama 3.1 70B InstructLlama 3.1 405B InstructLlama 3 8B InstructLlama 3 70B InstructLlama 2 Chat\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/gpt-4o\nTitle: DeepSeek-V3 vs GPT-4o - Detailed Performance & Feature Comparison\nContent: CompareDeepSeek-V3 vs GPT-4oGet a detailed comparison of AI language models DeepSeek's DeepSeek-V3 and OpenAI's GPT-4o, including model features, token pricing, API costs, performance benchmarks, and real-world capabilities to help you choose the right LLM for your needs.HomeModelsCompareDeepSeek-V3 vs GPT-4oCompareClaude 3.5 HaikuClaude 3.5 SonnetClaude 3.5 Sonnet (Oct 2024)Claude 3.5 Sonnet (Jun 2024)Claude 3 HaikuClaude 3 SonnetClaude 3 OpusClaude 2.1Claude Instant 1.2Claude 2Amazon Nova MicroAmazon Nova LiteAmazon Nova ProCommand R+ (Aug 2024)Command R (Aug 2024)Gemini 2.0 Flash Thinking (Experimental)Gemini 2.0 Flash (Experimental)Gemini 1.5 Pro (002)Gemini 1.5 Flash (002)Gemini 1.5 Flash-8BGemma 2 27BGemma 2 9BGemini 1.5 Flash (001)Gemini 1.5 Pro (001)Gemini 1.0 UltraGemini 1.0 ProLlama 3.3 70B InstructLlama 3.2 90B Vision InstructLlama 3.2 11B Vision InstructLlama 3.1 8B InstructLlama 3.1 70B InstructLlama 3.1 405B InstructLlama 3 8B InstructLlama 3 70B InstructLlama 2 Chat\n\nSource: https://docsbot.ai/models/compare/gpt-4o/deepseek-v3\nTitle: GPT-4o vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: CompareGPT-4o vs DeepSeek-V3Get a detailed comparison of AI language models OpenAI's GPT-4o and DeepSeek's DeepSeek-V3, including model features, token pricing, API costs, performance benchmarks, and real-world capabilities to help you choose the right LLM for your needs.HomeModelsCompareGPT-4o vs DeepSeek-V3CompareClaude 3.5 HaikuClaude 3.5 SonnetClaude 3.5 Sonnet (Oct 2024)Claude 3.5 Sonnet (Jun 2024)Claude 3 HaikuClaude 3 SonnetClaude 3 OpusClaude 2.1Claude Instant 1.2Claude 2Amazon Nova MicroAmazon Nova LiteAmazon Nova ProCommand R+ (Aug 2024)Command R (Aug 2024)Gemini 2.0 Flash Thinking (Experimental)Gemini 2.0 Flash (Experimental)Gemini 1.5 Pro (002)Gemini 1.5 Flash (002)Gemini 1.5 Flash-8BGemma 2 27BGemma 2 9BGemini 1.5 Flash (001)Gemini 1.5 Pro (001)Gemini 1.0 UltraGemini 1.0 ProLlama 3.3 70B InstructLlama 3.2 90B Vision InstructLlama 3.2 11B Vision InstructLlama 3.1 8B InstructLlama 3.1 70B InstructLlama 3.1 405B InstructLlama 3 8B InstructLlama 3 70B InstructLlama 2 Chat\n\nSource: https://docsbot.ai/models/compare/gpt-4o/deepseek-v3\nTitle: GPT-4o vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: CompareGPT-4o vs DeepSeek-V3Get a detailed comparison of AI language models OpenAI's GPT-4o and DeepSeek's DeepSeek-V3, including model features, token pricing, API costs, performance benchmarks, and real-world capabilities to help you choose the right LLM for your needs.HomeModelsCompareGPT-4o vs DeepSeek-V3CompareClaude 3.5 HaikuClaude 3.5 SonnetClaude 3.5 Sonnet (Oct 2024)Claude 3.5 Sonnet (Jun 2024)Claude 3 HaikuClaude 3 SonnetClaude 3 OpusClaude 2.1Claude Instant 1.2Claude 2Amazon Nova MicroAmazon Nova LiteAmazon Nova ProCommand R+ (Aug 2024)Command R (Aug 2024)Gemini 2.0 Flash Thinking (Experimental)Gemini 2.0 Flash (Experimental)Gemini 1.5 Pro (002)Gemini 1.5 Flash (002)Gemini 1.5 Flash-8BGemma 2 27BGemma 2 9BGemini 1.5 Flash (001)Gemini 1.5 Pro (001)Gemini 1.0 UltraGemini 1.0 ProLlama 3.3 70B InstructLlama 3.2 90B Vision InstructLlama 3.2 11B Vision InstructLlama 3.1 8B InstructLlama 3.1 70B InstructLlama 3.1 405B InstructLlama 3 8B InstructLlama 3 70B InstructLlama 2 Chat\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:51.327481",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.02069424",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:51.344069",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_subtopics",
+ "output": "\ud83c\udf33 Generating subtopics for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:56.142596",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subtopics_generated",
+ "output": "\ud83d\udcca Subtopics generated for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:12:56.159247",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_introduction",
+ "output": "\u270d\ufe0f Writing introduction for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:10.644177",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "introduction_written",
+ "output": "\ud83d\udcdd Introduction written for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:10.679332",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'DeepSeek V3: Model Architecture and Performance'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:10.693792",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83e\udd16 AI Research Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:10.712232",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: DeepSeek V3: Model Architecture and Performance...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:15.851198",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:19.204260",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['\"DeepSeek V3 R1\" architecture performance benchmarks', '\"DeepSeek V3 R1\" impact AI industry applications', '\"DeepSeek V3 R1\" training methodology multi-token prediction MoE', '\"DeepSeek V3 R1\" comparison GPT-4 Claude 3.5 cost efficiency']...",
+ "metadata": [
+ "\"DeepSeek V3 R1\" architecture performance benchmarks",
+ "\"DeepSeek V3 R1\" impact AI industry applications",
+ "\"DeepSeek V3 R1\" training methodology multi-token prediction MoE",
+ "\"DeepSeek V3 R1\" comparison GPT-4 Claude 3.5 cost efficiency"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:19.215028",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"DeepSeek V3 R1\" architecture performance benchmarks'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:19.236844",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"DeepSeek V3 R1\" impact AI industry applications'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:19.251806",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"DeepSeek V3 R1\" training methodology multi-token prediction MoE'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:19.265656",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"DeepSeek V3 R1\" comparison GPT-4 Claude 3.5 cost efficiency'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:24.035715",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://x.com/_philschmid/status/1884526087540990319\n",
+ "metadata": "https://x.com/_philschmid/status/1884526087540990319"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:24.045223",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reddit.com/r/singularity/comments/1icnvun/denials_about_deepseeks_low_cost_training_put_to/\n",
+ "metadata": "https://www.reddit.com/r/singularity/comments/1icnvun/denials_about_deepseeks_low_cost_training_put_to/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:24.064964",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\n",
+ "metadata": "https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:24.078753",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA\n",
+ "metadata": "https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:24.095651",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://x.com/ArtificialAnlys/status/1883867748998197327\n",
+ "metadata": "https://x.com/ArtificialAnlys/status/1883867748998197327"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:24.111020",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:24.127444",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.163320",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.174696",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.194714",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.212084",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"DeepSeek V3 R1\" training methodology multi-token prediction MoE...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.289435",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\n",
+ "metadata": "https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.299669",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.youtube.com/watch?v=sRxQBmHNbnU\n",
+ "metadata": "https://www.youtube.com/watch?v=sRxQBmHNbnU"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.323989",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://play.ht/blog/deepseek-v3-vs-r1-vs-coder/\n",
+ "metadata": "https://play.ht/blog/deepseek-v3-vs-r1-vs-coder/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.336996",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\n",
+ "metadata": "https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.342351",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\n",
+ "metadata": "https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.372573",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:25.387854",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:26.964518",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:26.975019",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 2 new images from 2 total images",
+ "metadata": [
+ "https://arbisoft.com/_next/image?url=%2F_next%2Fstatic%2Fmedia%2Fcontact.c5602fd6.png&w=1440&q=75",
+ "https://arbisoft.com/_next/image?url=https%3A%2F%2Fd1foa0aaimjyw4.cloudfront.net%2FBlog_Image_1_b56afb0c54.png&w=1920&q=75"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:26.996798",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:27.016325",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"DeepSeek V3 R1\" architecture performance benchmarks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:27.166704",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.facebook.com/groups/aifire.co/posts/1616826995589270/\n",
+ "metadata": "https://www.facebook.com/groups/aifire.co/posts/1616826995589270/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:27.181113",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.threads.net/tag/o1\n",
+ "metadata": "https://www.threads.net/tag/o1"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:27.197129",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\n",
+ "metadata": "https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:27.213419",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://forum.cursor.com/t/cursor-deepseek/43261\n",
+ "metadata": "https://forum.cursor.com/t/cursor-deepseek/43261"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:27.230254",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reddit.com/r/Kenya/comments/1ibx2eg/lets_talk_about_deepseek/\n",
+ "metadata": "https://www.reddit.com/r/Kenya/comments/1ibx2eg/lets_talk_about_deepseek/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:27.247510",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:27.261095",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:28.825753",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:28.842342",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://us1.discourse-cdn.com/flex020/uploads/cursor1/optimized/3X/0/d/0df9e1f23791d1b8e362cb2bcf434e2bac1e7a09_2_419x500.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:28.857440",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:28.859398",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"DeepSeek V3 R1\" comparison GPT-4 Claude 3.5 cost efficiency...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:28.962254",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/pulse/market-implications-deepseeks-emergence-ai-landscape-sadagopan-s-n8ylf\n",
+ "metadata": "https://www.linkedin.com/pulse/market-implications-deepseeks-emergence-ai-landscape-sadagopan-s-n8ylf"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:28.972767",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://deepseek-ai.pro/\n",
+ "metadata": "https://deepseek-ai.pro/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:28.998765",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1\n",
+ "metadata": "https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:29.022846",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.capacitymedia.com/article/behind-the-deepseek-hype-costs-safety-risks-and-censorship-explained\n",
+ "metadata": "https://www.capacitymedia.com/article/behind-the-deepseek-hype-costs-safety-risks-and-censorship-explained"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:29.039214",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:29.046306",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:30.770532",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:30.788942",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 12 total images",
+ "metadata": [
+ "https://deepseek-ai.pro/wp-content/uploads/2025/01/Deep-Seek-Ai-1024x495.jpeg",
+ "https://deepseek-ai.pro/wp-content/uploads/2025/01/Deep-Seek-graph-1-1024x748.webp",
+ "https://deepseek-ai.pro/wp-content/uploads/2025/01/DeepSeek-Stats-1-1024x505.webp",
+ "https://helios-i.mashable.com/imagery/articles/01ywQklBcfNJQHo7KRl3DJe/hero-image.fill.size_1248x702.v1738094497.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:30.811862",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:30.845051",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"DeepSeek V3 R1\" impact AI industry applications...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:36.504210",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA\nTitle: Philipp Schmid on LinkedIn: Does Deepseek impact how the next iteration of models are built as Llama\u2026 | 54 comments\nContent: token generation, improving throughput by 2-3x. 4\ufe0f\u20e3 FP8 Quantization: Provides up to 75% memory reduction compared to FP32 while maintaining stability through adaptive bit-width scaling and loss-aware quantization techniques. DeepSeek's architectural innovations (MoE, MLA, MTP, and FP8 Quantization) focus on optimizing large-scale training and deployment and serving efficiency. Not single-user or local runtime performance, e.g., MoE requires the same memory footprint as the Dense model despite using fewer parameters per inference, MTP's parallel token generation mainly benefits high-throughput scenarios. The real innovation comes from its training methodology. The team managed to independently find some of the core ideas from OpenAI o1. (Confirmed by Mark Chen Chief Research Officer at @OpenAI). Deepseek used Group Relative Policy Optimization (GRPO) - A more efficient alternative to PPO/DPO for reinforcement learning in a multi-stage training approach combining SFT and RL. The\n\nSource: https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA\nTitle: Philipp Schmid on LinkedIn: Does Deepseek impact how the next iteration of models are built as Llama\u2026 | 54 comments\nContent: compared to FP32 while maintaining stability through adaptive bit-width scaling and loss-aware quantization techniques. DeepSeek's architectural innovations (MoE, MLA, MTP, and FP8 Quantization) focus on optimizing large-scale training and deployment and serving efficiency. Not single-user or local runtime performance, e.g., MoE requires the same memory footprint as the Dense model despite using fewer parameters per inference, MTP's parallel token generation mainly benefits high-throughput scenarios. The real innovation comes from its training methodology. The team managed to independently find some of the core ideas from OpenAI o1. (Confirmed by Mark Chen Chief Research Officer at @OpenAI). Deepseek used Group Relative Policy Optimization (GRPO) - A more efficient alternative to PPO/DPO for reinforcement learning in a multi-stage training approach combining SFT and RL. The reasoning capabilities emerge through reinforcement learning. Read more here: https://lnkd.in/eKnT7bHC I am\n\nSource: https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA\nTitle: Philipp Schmid on LinkedIn: Does Deepseek impact how the next iteration of models are built as Llama\u2026 | 54 comments\nContent: compared to FP32 while maintaining stability through adaptive bit-width scaling and loss-aware quantization techniques. DeepSeek's architectural innovations (MoE, MLA, MTP, and FP8 Quantization) focus on optimizing large-scale training and deployment and serving efficiency. Not single-user or local runtime performance, e.g., MoE requires the same memory footprint as the Dense model despite using fewer parameters per inference, MTP's parallel token generation mainly benefits high-throughput scenarios. The real innovation comes from its training methodology. The team managed to independently find some of the core ideas from OpenAI o1. (Confirmed by Mark Chen Chief Research Officer at @OpenAI). Deepseek used Group Relative Policy Optimization (GRPO) - A more efficient alternative to PPO/DPO for reinforcement learning in a multi-stage training approach combining SFT and RL. The reasoning capabilities emerge through reinforcement learning. Read more here: https://lnkd.in/eKnT7bHC I am\n\nSource: https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA\nTitle: Philipp Schmid on LinkedIn: Does Deepseek impact how the next iteration of models are built as Llama\u2026 | 54 comments\nContent: Does Deepseek impact how the next iteration of models are built as Llama did? Deepseek shocked the world with its performance, is it because of architectural changes? \ud83e\udd14 Deepseek V3/R1 includes multiple innovations compared to traditional LLM architecture we know from Llama or other open Models. Here are the main differences and what they mean: Main Architectural Differences: 1\ufe0f\u20e3 Mixture of Experts (MoE): Uses only selected parameters per token, reducing computation while maintaining model quality. Implemented special load balancing loss to ensure even expert utilization of distributed Hardware. 2\ufe0f\u20e3 Multihead Latent Attention (MLA): Reduces memory and computational costs by projecting KQV matrices into a lower-dimensional space. 3\ufe0f\u20e3 Multi-Token Prediction (MTP): Allows parallel token generation, improving throughput by 2-3x. 4\ufe0f\u20e3 FP8 Quantization: Provides up to 75% memory reduction compared to FP32 while maintaining stability through adaptive bit-width scaling and loss-aware\n\nSource: https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA\nTitle: Philipp Schmid on LinkedIn: Does Deepseek impact how the next iteration of models are built as Llama\u2026 | 54 comments\nContent: Does Deepseek impact how the next iteration of models are built as Llama did? Deepseek shocked the world with its performance, is it because of architectural changes? \ud83e\udd14 Deepseek V3/R1 includes multiple innovations compared to traditional LLM architecture we know from Llama or other open Models. Here are the main differences and what they mean: Main Architectural Differences: 1\ufe0f\u20e3 Mixture of Experts (MoE): Uses only selected parameters per token, reducing computation while maintaining model quality. Implemented special load balancing loss to ensure even expert utilization of distributed Hardware. 2\ufe0f\u20e3 Multihead Latent Attention (MLA): Reduces memory and computational costs by projecting KQV matrices into a lower-dimensional space. 3\ufe0f\u20e3 Multi-Token Prediction (MTP): Allows parallel token generation, improving throughput by 2-3x. 4\ufe0f\u20e3 FP8 Quantization: Provides up to 75% memory reduction compared to FP32 while maintaining stability through adaptive bit-width scaling and loss-aware\n\nSource: https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA\nTitle: Philipp Schmid on LinkedIn: Does Deepseek impact how the next iteration of models are built as Llama\u2026 | 54 comments\nContent: Philipp Schmid\u2019s Post Philipp Schmid Technical Lead & LLMs at Hugging Face \ud83e\udd17 | AWS ML HERO \ud83e\uddb8\ud83c\udffb\u2642\ufe0f 7h Report this post Does Deepseek impact how the next iteration of models are built as Llama did? Deepseek shocked the world with its performance, is it because of architectural changes? \ud83e\udd14 Deepseek V3/R1 includes multiple innovations compared to traditional LLM architecture we know from Llama or other open Models. Here are the main differences and what they mean: Main Architectural Differences: 1\ufe0f\u20e3 Mixture of Experts (MoE): Uses only selected parameters per token, reducing computation while maintaining model quality. Implemented special load balancing loss to ensure even expert utilization of distributed Hardware. 2\ufe0f\u20e3 Multihead Latent Attention (MLA): Reduces memory and computational costs by projecting KQV matrices into a lower-dimensional space. 3\ufe0f\u20e3 Multi-Token Prediction (MTP): Allows parallel token generation, improving throughput by 2-3x. 4\ufe0f\u20e3 FP8 Quantization: Provides up to 75% memory\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: 3.1. DeepSeek utilizes \"inference-time computing,\" activating only necessary parts of its model for each query, which is more cost and energy efficient. This has garnered praise from tech figures like Marc Andreessen, who called it a \"profound gift to the world.\" DeepSeek, a Chinese AI startup specializing in open-source large language models (LLMs), has released two notable models: DeepSeek-V3 and DeepSeek-R1. DeepSeek-V3 LLM utilizes a Mixture of Experts (MoE) architecture, combining several smaller models with a total of 671 billion parameters, but activating only 37 billion parameters for each token during inference. This approach significantly enhances efficiency, estimated to be 10x better than some peers and 3-7x better considering other innovations. V3 incorporates further advancements like multi-head latent attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: 3.1. DeepSeek utilizes \"inference-time computing,\" activating only necessary parts of its model for each query, which is more cost and energy efficient. This has garnered praise from tech figures like Marc Andreessen, who called it a \"profound gift to the world.\" DeepSeek, a Chinese AI startup specializing in open-source large language models (LLMs), has released two notable models: DeepSeek-V3 and DeepSeek-R1. DeepSeek-V3 LLM utilizes a Mixture of Experts (MoE) architecture, combining several smaller models with a total of 671 billion parameters, but activating only 37 billion parameters for each token during inference. This approach significantly enhances efficiency, estimated to be 10x better than some peers and 3-7x better considering other innovations. V3 incorporates further advancements like multi-head latent attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: model for each query, which is more cost and energy efficient. This has garnered praise from tech figures like Marc Andreessen, who called it a \"profound gift to the world.\" DeepSeek, a Chinese AI startup specializing in open-source large language models (LLMs), has released two notable models: DeepSeek-V3 and DeepSeek-R1. DeepSeek-V3 LLM utilizes a Mixture of Experts (MoE) architecture, combining several smaller models with a total of 671 billion parameters, but activating only 37 billion parameters for each token during inference. This approach significantly enhances efficiency, estimated to be 10x better than some peers and 3-7x better considering other innovations. V3 incorporates further advancements like multi-head latent attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: model for each query, which is more cost and energy efficient. This has garnered praise from tech figures like Marc Andreessen, who called it a \"profound gift to the world.\" DeepSeek, a Chinese AI startup specializing in open-source large language models (LLMs), has released two notable models: DeepSeek-V3 and DeepSeek-R1. DeepSeek-V3 LLM utilizes a Mixture of Experts (MoE) architecture, combining several smaller models with a total of 671 billion parameters, but activating only 37 billion parameters for each token during inference. This approach significantly enhances efficiency, estimated to be 10x better than some peers and 3-7x better considering other innovations. V3 incorporates further advancements like multi-head latent attention (MHLA) for reduced memory usage, mixed precision computation on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:42.019000",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: DeepSeek V3 and Claude 3.5 lead in the English MMLU benchmark, with scores of 88.5% and 88.3% respectively. In coding tasks, DeepSeek V3 achieves the highest pass rate on the HumanEval-Mul benchmark at 82.6%. Efficiency: DeepSeek V3\u2019s MoE architecture allows it to maintain high performance with fewer activated parameters, resulting in lower computational costs. Its training process is notably efficient, both in terms of time and financial investment. Multilingual Capabilities: Llama 3.1 supports eight languages, enhancing its applicability in diverse linguistic contexts. Ethical Considerations: Claude 3.5 places a strong emphasis on safety and ethical AI interactions, which may be a deciding factor for applications where these considerations are paramount. Side-by-Side Comparison Table FeatureDeepSeek V3Llama 3.1Claude 3.5ChatGPT 4oArchitectureMixture of Experts (MoE)Transformer-basedTransformer-basedTransformer-basedTotal Parameters671 billion405 billionNot disclosedNot\n\nSource: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: DeepSeek V3 and Claude 3.5 lead in the English MMLU benchmark, with scores of 88.5% and 88.3% respectively. In coding tasks, DeepSeek V3 achieves the highest pass rate on the HumanEval-Mul benchmark at 82.6%. Efficiency: DeepSeek V3\u2019s MoE architecture allows it to maintain high performance with fewer activated parameters, resulting in lower computational costs. Its training process is notably efficient, both in terms of time and financial investment. Multilingual Capabilities: Llama 3.1 supports eight languages, enhancing its applicability in diverse linguistic contexts. Ethical Considerations: Claude 3.5 places a strong emphasis on safety and ethical AI interactions, which may be a deciding factor for applications where these considerations are paramount. Side-by-Side Comparison Table FeatureDeepSeek V3Llama 3.1Claude 3.5ChatGPT 4oArchitectureMixture of Experts (MoE)Transformer-basedTransformer-basedTransformer-basedTotal Parameters671 billion405 billionNot disclosedNot\n\nSource: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: DeepSeek V3 and Claude 3.5 lead in the English MMLU benchmark, with scores of 88.5% and 88.3% respectively. In coding tasks, DeepSeek V3 achieves the highest pass rate on the HumanEval-Mul benchmark at 82.6%. Efficiency: DeepSeek V3\u2019s MoE architecture allows it to maintain high performance with fewer activated parameters, resulting in lower computational costs. Its training process is notably efficient, both in terms of time and financial investment. Multilingual Capabilities: Llama 3.1 supports eight languages, enhancing its applicability in diverse linguistic contexts. Ethical Considerations: Claude 3.5 places a strong emphasis on safety and ethical AI interactions, which may be a deciding factor for applications where these considerations are paramount. Side-by-Side Comparison Table FeatureDeepSeek V3Llama 3.1Claude 3.5ChatGPT 4oArchitectureMixture of Experts (MoE)Transformer-basedTransformer-basedTransformer-basedTotal Parameters671 billion405 billionNot disclosedNot\n\nSource: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: DeepSeek V3 and Claude 3.5 lead in the English MMLU benchmark, with scores of 88.5% and 88.3% respectively. In coding tasks, DeepSeek V3 achieves the highest pass rate on the HumanEval-Mul benchmark at 82.6%. Efficiency: DeepSeek V3\u2019s MoE architecture allows it to maintain high performance with fewer activated parameters, resulting in lower computational costs. Its training process is notably efficient, both in terms of time and financial investment. Multilingual Capabilities: Llama 3.1 supports eight languages, enhancing its applicability in diverse linguistic contexts. Ethical Considerations: Claude 3.5 places a strong emphasis on safety and ethical AI interactions, which may be a deciding factor for applications where these considerations are paramount. Side-by-Side Comparison Table FeatureDeepSeek V3Llama 3.1Claude 3.5ChatGPT 4oArchitectureMixture of Experts (MoE)Transformer-basedTransformer-basedTransformer-basedTotal Parameters671 billion405 billionNot disclosedNot\n\nSource: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: 3.1 supports eight languages, enhancing its applicability in diverse linguistic contexts. Ethical Considerations: Claude 3.5 places a strong emphasis on safety and ethical AI interactions, which may be a deciding factor for applications where these considerations are paramount. Side-by-Side Comparison Table FeatureDeepSeek V3Llama 3.1Claude 3.5ChatGPT 4oArchitectureMixture of Experts (MoE)Transformer-basedTransformer-basedTransformer-basedTotal Parameters671 billion405 billionNot disclosedNot disclosedActivated Parameters37 billionNot applicableNot disclosedNot disclosedLanguages Supported1 primary (English)8MultilingualMultilingualTraining Data14.8 trillion tokensNot disclosedNot disclosedNot disclosedEnglish MMLU Accuracy88.5%Not disclosed88.3%87.2%Coding Benchmark (HumanEval-Mul)82.6%Not disclosed81.7%80.5%EfficiencyHighly efficient MoEModerateModerateModerateContext WindowStandardExtendedExtendedExtendedKey StrengthsHigh efficiency, top MMLUAdvanced math & codingSafety &\n\nSource: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: When comparing these models, several factors come into play:\nPerformance: DeepSeek V3 and Claude 3.5 lead in the English MMLU benchmark, with scores of 88.5% and 88.3% respectively. In coding tasks, DeepSeek V3 achieves the highest pass rate on the HumanEval-Mul benchmark at 82.6%.\nEfficiency: DeepSeek V3\u2019s MoE architecture allows it to maintain high performance with fewer activated parameters, resulting in lower computational costs. Its training process is notably efficient, both in terms of time and financial investment.\nMultilingual Capabilities: Llama 3.1 supports eight languages, enhancing its applicability in diverse linguistic contexts.\nEthical Considerations: Claude 3.5 places a strong emphasis on safety and ethical AI interactions, which may be a deciding factor for applications where these considerations are paramount.\nSide-by-Side Comparison Table\n\nSource: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: Parameters671 billion405 billionNot disclosedNot disclosedActivated Parameters37 billionNot applicableNot disclosedNot disclosedLanguages Supported1 primary (English)8MultilingualMultilingualTraining Data14.8 trillion tokensNot disclosedNot disclosedNot disclosedEnglish MMLU Accuracy88.5%Not disclosed88.3%87.2%Coding Benchmark (HumanEval-Mul)82.6%Not disclosed81.7%80.5%EfficiencyHighly efficient MoEModerateModerateModerateContext WindowStandardExtendedExtendedExtendedKey StrengthsHigh efficiency, top MMLUAdvanced math & codingSafety & interpretabilityVersatility, wide adoptionTraining Cost~$5.576M (2.788M GPU hrs)Not disclosedNot disclosedNot disclosed Each of these models brings unique strengths to the table. DeepSeek V3 stands out for its efficient architecture and high performance in both language and coding tasks. Llama 3.1 offers robust multilingual support, making it suitable for diverse applications. Claude 3.5\u2019s focus on safety and ethics makes it a compelling choice for\n\nSource: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: Parameters671 billion405 billionNot disclosedNot disclosedActivated Parameters37 billionNot applicableNot disclosedNot disclosedLanguages Supported1 primary (English)8MultilingualMultilingualTraining Data14.8 trillion tokensNot disclosedNot disclosedNot disclosedEnglish MMLU Accuracy88.5%Not disclosed88.3%87.2%Coding Benchmark (HumanEval-Mul)82.6%Not disclosed81.7%80.5%EfficiencyHighly efficient MoEModerateModerateModerateContext WindowStandardExtendedExtendedExtendedKey StrengthsHigh efficiency, top MMLUAdvanced math & codingSafety & interpretabilityVersatility, wide adoptionTraining Cost~$5.576M (2.788M GPU hrs)Not disclosedNot disclosedNot disclosed Each of these models brings unique strengths to the table. DeepSeek V3 stands out for its efficient architecture and high performance in both language and coding tasks. Llama 3.1 offers robust multilingual support, making it suitable for diverse applications. Claude 3.5\u2019s focus on safety and ethics makes it a compelling choice for\n\nSource: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: Parameters671 billion405 billionNot disclosedNot disclosedActivated Parameters37 billionNot applicableNot disclosedNot disclosedLanguages Supported1 primary (English)8MultilingualMultilingualTraining Data14.8 trillion tokensNot disclosedNot disclosedNot disclosedEnglish MMLU Accuracy88.5%Not disclosed88.3%87.2%Coding Benchmark (HumanEval-Mul)82.6%Not disclosed81.7%80.5%EfficiencyHighly efficient MoEModerateModerateModerateContext WindowStandardExtendedExtendedExtendedKey StrengthsHigh efficiency, top MMLUAdvanced math & codingSafety & interpretabilityVersatility, wide adoptionTraining Cost~$5.576M (2.788M GPU hrs)Not disclosedNot disclosedNot disclosed Each of these models brings unique strengths to the table. DeepSeek V3 stands out for its efficient architecture and high performance in both language and coding tasks. Llama 3.1 offers robust multilingual support, making it suitable for diverse applications. Claude 3.5\u2019s focus on safety and ethics makes it a compelling choice for\n\nSource: https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/\nTitle: DeepSeek Vs Claude Vs Llama Vs ChatGPT: Ready To Rumble\nContent: Parameters671 billion405 billionNot disclosedNot disclosedActivated Parameters37 billionNot applicableNot disclosedNot disclosedLanguages Supported1 primary (English)8MultilingualMultilingualTraining Data14.8 trillion tokensNot disclosedNot disclosedNot disclosedEnglish MMLU Accuracy88.5%Not disclosed88.3%87.2%Coding Benchmark (HumanEval-Mul)82.6%Not disclosed81.7%80.5%EfficiencyHighly efficient MoEModerateModerateModerateContext WindowStandardExtendedExtendedExtendedKey StrengthsHigh efficiency, top MMLUAdvanced math & codingSafety & interpretabilityVersatility, wide adoptionTraining Cost~$5.576M (2.788M GPU hrs)Not disclosedNot disclosedNot disclosed Each of these models brings unique strengths to the table. DeepSeek V3 stands out for its efficient architecture and high performance in both language and coding tasks. Llama 3.1 offers robust multilingual support, making it suitable for diverse applications. Claude 3.5\u2019s focus on safety and ethics makes it a compelling choice for\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:47.011014",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.linkedin.com/pulse/market-implications-deepseeks-emergence-ai-landscape-sadagopan-s-n8ylf\nTitle: Market Implications of DeepSeek's Emergence in the AI Landscape\nContent: Development: DeepSeek V3/R1 reduces entry barriers, enabling broader experimentation and innovation. Technical Capabilities: Evolution in foundation models expands application development possibilities. Market Penetration: Cost optimization enables expansion into previously underserved segments. Strategic Implementation Framework: Application Specialization: Develop targeted solutions leveraging AI core capabilities. Ecosystem Development: Implement integrated solutions through strategic partnerships and API infrastructure. Value Differentiation: Enhance base models with industry-specific optimizations. Organizational Agility: Maintain competitive advantage through continuous capability development. Risk Mitigation: Implement multi-vendor strategies to ensure operational resilience. Governance Framework: Integrate ethical considerations including privacy, bias mitigation, and transparency. Forward Outlook The DeepSeek market correction indicates a strategic shift toward\n\nSource: https://deepseek-ai.pro/\nTitle: DeepSeek AI R1 and V3 Use Fully Unlocked Features of DeepSeek New Model\nContent: Core innovations include Monte Carlo Tree Search (MCTS), Process Reward Models (PRMs), cold-start fine-tuning, and rejection sampling for improved safety and performance.\nWhat industries can benefit from DeepSeek V3 and R1? Industries such as healthcare, education, finance, e-commerce, and software development can significantly benefit from the capabilities of DeepSeek models.\nWhat industries can benefit from DeepSeek V3 and R1?\nWhat industries can benefit from DeepSeek V3 and R1?\nWhat industries can benefit from DeepSeek V3 and R1?\nIndustries such as healthcare, education, finance, e-commerce, and software development can significantly benefit from the capabilities of DeepSeek models.\nIndustries such as healthcare, education, finance, e-commerce, and software development can significantly benefit from the capabilities of DeepSeek models.\n\nSource: https://www.linkedin.com/pulse/market-implications-deepseeks-emergence-ai-landscape-sadagopan-s-n8ylf\nTitle: Market Implications of DeepSeek's Emergence in the AI Landscape\nContent: Application Layer Impact:\nApplication Layer Impact:\nApplication Layer Impact\nOperational Efficiency: Enterprise companies will leverage DeepSeek's innovations to optimize both model training costs and data utilization strategies. Cost Structure Evolution: Reduced model costs cascade through the ecosystem via lower API fees, enhancing margins for application-layer enterprises. Industry Collaboration: DeepSeek's open-source methodology catalyzes knowledge sharing and accelerates industry-wide innovation through practitioner engagement. Ecosystem Development: The collaborative framework drives cumulative advancement, benefiting the broader AI community through shared learning and iteration.\n\nSource: https://www.linkedin.com/pulse/market-implications-deepseeks-emergence-ai-landscape-sadagopan-s-n8ylf\nTitle: Market Implications of DeepSeek's Emergence in the AI Landscape\nContent: by U.S. enterprises. This exemplifies how regulatory constraints can stimulate breakthrough innovations more effectively than unrestricted environments. Learning Architecture Evolution: DeepSeek's implementation of distillation techniques represents progress toward more efficient learning architectures. While not yet achieving human-like learning efficiency, these developments indicate a shift away from conventional scaling approaches. Revenue Generation Framework: Primary value creation in generative AI will emerge from: Market Impact Analysis The DeepSeek market correction signifies a structural shift in AI industry dynamics: Core Developments: Market Disruption via DeepSeek V3/R1: Introduction of cost-competitive, high-performance models necessitates portfolio reassessment across the technology sector. Foundation Model Evolution: Strategic Application Layer Implications: Democratized Development: DeepSeek V3/R1 reduces entry barriers, enabling broader experimentation and\n\nSource: https://deepseek-ai.pro/\nTitle: DeepSeek AI R1 and V3 Use Fully Unlocked Features of DeepSeek New Model\nContent: V3 and R1? For Businesses: Scale operations with AI-driven insights. Automate repetitive tasks, reducing costs and improving efficiency. Deliver personalized customer experiences across languages and regions. For Researchers: Access a model built on the latest advancements in machine learning. Dive into interpretable AI with tools for debugging and iterative testing. Collaborate in a secure, ethical environment. For Developers: Build next-gen applications with minimal effort. Leverage fine-grained API controls for custom deployments. Utilize pre-built modules for coding, debugging, and testing. Testimonials and Case Studies Enterprise Success: A global retail company boosted sales forecasting accuracy by 22% using DeepSeek V3. STEM Education: An EdTech startup integrated DeepSeek R1, improving student performance in competitive math exams by 30%. Software Development: A SaaS firm reduced debugging time by 40%, thanks to LiveCodeBench optimization Join the Revolution DeepSeek V3 and R1\n\nSource: https://deepseek-ai.pro/\nTitle: DeepSeek AI R1 and V3 Use Fully Unlocked Features of DeepSeek New Model\nContent: V3 and R1? For Businesses: Scale operations with AI-driven insights. Automate repetitive tasks, reducing costs and improving efficiency. Deliver personalized customer experiences across languages and regions. For Researchers: Access a model built on the latest advancements in machine learning. Dive into interpretable AI with tools for debugging and iterative testing. Collaborate in a secure, ethical environment. For Developers: Build next-gen applications with minimal effort. Leverage fine-grained API controls for custom deployments. Utilize pre-built modules for coding, debugging, and testing. Testimonials and Case Studies Enterprise Success: A global retail company boosted sales forecasting accuracy by 22% using DeepSeek V3. STEM Education: An EdTech startup integrated DeepSeek R1, improving student performance in competitive math exams by 30%. Software Development: A SaaS firm reduced debugging time by 40%, thanks to LiveCodeBench optimization Join the Revolution DeepSeek V3 and R1\n\nSource: https://deepseek-ai.pro/\nTitle: DeepSeek AI R1 and V3 Use Fully Unlocked Features of DeepSeek New Model\nContent: V3 and R1? For Businesses: Scale operations with AI-driven insights. Automate repetitive tasks, reducing costs and improving efficiency. Deliver personalized customer experiences across languages and regions. For Researchers: Access a model built on the latest advancements in machine learning. Dive into interpretable AI with tools for debugging and iterative testing. Collaborate in a secure, ethical environment. For Developers: Build next-gen applications with minimal effort. Leverage fine-grained API controls for custom deployments. Utilize pre-built modules for coding, debugging, and testing. Testimonials and Case Studies Enterprise Success: A global retail company boosted sales forecasting accuracy by 22% using DeepSeek V3. STEM Education: An EdTech startup integrated DeepSeek R1, improving student performance in competitive math exams by 30%. Software Development: A SaaS firm reduced debugging time by 40%, thanks to LiveCodeBench optimization Join the Revolution DeepSeek V3 and R1\n\nSource: https://deepseek-ai.pro/\nTitle: DeepSeek AI R1 and V3 Use Fully Unlocked Features of DeepSeek New Model\nContent: V3 and R1? For Businesses: Scale operations with AI-driven insights. Automate repetitive tasks, reducing costs and improving efficiency. Deliver personalized customer experiences across languages and regions. For Researchers: Access a model built on the latest advancements in machine learning. Dive into interpretable AI with tools for debugging and iterative testing. Collaborate in a secure, ethical environment. For Developers: Build next-gen applications with minimal effort. Leverage fine-grained API controls for custom deployments. Utilize pre-built modules for coding, debugging, and testing. Testimonials and Case Studies Enterprise Success: A global retail company boosted sales forecasting accuracy by 22% using DeepSeek V3. STEM Education: An EdTech startup integrated DeepSeek R1, improving student performance in competitive math exams by 30%. Software Development: A SaaS firm reduced debugging time by 40%, thanks to LiveCodeBench optimization Join the Revolution DeepSeek V3 and R1\n\nSource: https://deepseek-ai.pro/\nTitle: DeepSeek AI R1 and V3 Use Fully Unlocked Features of DeepSeek New Model\nContent: V3 and R1? For Businesses: Scale operations with AI-driven insights. Automate repetitive tasks, reducing costs and improving efficiency. Deliver personalized customer experiences across languages and regions. For Researchers: Access a model built on the latest advancements in machine learning. Dive into interpretable AI with tools for debugging and iterative testing. Collaborate in a secure, ethical environment. For Developers: Build next-gen applications with minimal effort. Leverage fine-grained API controls for custom deployments. Utilize pre-built modules for coding, debugging, and testing. Testimonials and Case Studies Enterprise Success: A global retail company boosted sales forecasting accuracy by 22% using DeepSeek V3. STEM Education: An EdTech startup integrated DeepSeek R1, improving student performance in competitive math exams by 30%. Software Development: A SaaS firm reduced debugging time by 40%, thanks to LiveCodeBench optimization Join the Revolution DeepSeek V3 and R1\n\nSource: https://deepseek-ai.pro/\nTitle: DeepSeek AI R1 and V3 Use Fully Unlocked Features of DeepSeek New Model\nContent: V3 and R1? For Businesses: Scale operations with AI-driven insights. Automate repetitive tasks, reducing costs and improving efficiency. Deliver personalized customer experiences across languages and regions. For Researchers: Access a model built on the latest advancements in machine learning. Dive into interpretable AI with tools for debugging and iterative testing. Collaborate in a secure, ethical environment. For Developers: Build next-gen applications with minimal effort. Leverage fine-grained API controls for custom deployments. Utilize pre-built modules for coding, debugging, and testing. Testimonials and Case Studies Enterprise Success: A global retail company boosted sales forecasting accuracy by 22% using DeepSeek V3. STEM Education: An EdTech startup integrated DeepSeek R1, improving student performance in competitive math exams by 30%. Software Development: A SaaS firm reduced debugging time by 40%, thanks to LiveCodeBench optimization Join the Revolution DeepSeek V3 and R1\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:49.514867",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\nTitle: DeepSeek-R1: The Next Leap in AI Reasoning and Logical Inference\nContent: Like its predecessor DeepSeek-V3, R1 utilizes a Mixture-of-Experts (MoE) architecture, which allows it to activate only a portion of its network per query. The benefits include:\nLike its predecessor DeepSeek-V3, R1 utilizes a\narchitecture, which allows it to\nactivate only a portion of its network per query\n. The benefits include:\nLower computational costs during inference. Higher efficiency in processing reasoning tasks. The ability to scale effectively without requiring expensive hardware upgrades.\nLower computational costs during inference. Higher efficiency in processing reasoning tasks. The ability to scale effectively without requiring expensive hardware upgrades.\nLower computational costs during inference. Higher efficiency in processing reasoning tasks. The ability to scale effectively without requiring expensive hardware upgrades.\nLower computational costs during inference.\nLower computational costs\nHigher efficiency in processing reasoning tasks.\n\nSource: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\nTitle: DeepSeek-R1: The Next Leap in AI Reasoning and Logical Inference\nContent: DeepSeek\u2019s progress shows that startups can compete with AI giants effectively\nLike Reply 1 Reaction\nDEEPANSHU KUMAR Attended Desh Bhagat University 2d Report this comment The balance of performance and efficiency in R1 is inspiring for AI researchers. Like Reply 1 Reaction\nDEEPANSHU KUMAR Attended Desh Bhagat University 2d Report this comment The balance of performance and efficiency in R1 is inspiring for AI researchers.\nDEEPANSHU KUMAR Attended Desh Bhagat University 2d Report this comment\nAttended Desh Bhagat University\nReport this comment\nReport this comment\nReport this comment\nThe balance of performance and efficiency in R1 is inspiring for AI researchers.\nThe balance of performance and efficiency in R1 is inspiring for AI researchers.\nLike Reply 1 Reaction\nSee more comments\nTo view or add a comment, sign in\nMore articles by Dileep Kumar Pandiya\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: The DeepSeek AI situation in 60 seconds: 1/ \ud835\udc0c\ud835\udc28\ud835\udc1d\ud835\udc1e\ud835\udc25 \ud835\udc11\ud835\udc1e\ud835\udc25\ud835\udc1e\ud835\udc1a\ud835\udc2c\ud835\udc1e\ud835\udc2c: DeepSeek, AI startup based in China, released two major models: DeepSeek-V3, a 671-billion-parameter Mixture of Experts (MoE) base model, and DeepSeek-R1, a reasoning-focused model built upon V3. Additionally, the company has released smaller, distilled versions of these models, ranging from 1.5 billion to 70 billion parameters, to enhance accessibility across various applications. The company reports development costs of approximately $6 million, though this figure is debated given their parent company's broader AI investments. 2/ \ud835\udc0f\ud835\udc1e\ud835\udc2b\ud835\udc1f\ud835\udc28\ud835\udc2b\ud835\udc26\ud835\udc1a\ud835\udc27\ud835\udc1c\ud835\udc1e \ud835\udc1a\ud835\udc27\ud835\udc1d \ud835\udc01\ud835\udc1e\ud835\udc27\ud835\udc1c\ud835\udc21\ud835\udc26\ud835\udc1a\ud835\udc2b\ud835\udc24\ud835\udc2c: R1 demonstrates impressive performance, achieving a 79.8% Pass@1 on AIME 2024, 97.3% on MATH-500, and a 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants. That said, it\u2019s worth noting that benchmarks like these can be gamed or skewed by including test data in training. Always take these results with a healthy degree of skepticism. It's also worth\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: The DeepSeek AI situation in 60 seconds: 1/ \ud835\udc0c\ud835\udc28\ud835\udc1d\ud835\udc1e\ud835\udc25 \ud835\udc11\ud835\udc1e\ud835\udc25\ud835\udc1e\ud835\udc1a\ud835\udc2c\ud835\udc1e\ud835\udc2c: DeepSeek, AI startup based in China, released two major models: DeepSeek-V3, a 671-billion-parameter Mixture of Experts (MoE) base model, and DeepSeek-R1, a reasoning-focused model built upon V3. Additionally, the company has released smaller, distilled versions of these models, ranging from 1.5 billion to 70 billion parameters, to enhance accessibility across various applications. The company reports development costs of approximately $6 million, though this figure is debated given their parent company's broader AI investments. 2/ \ud835\udc0f\ud835\udc1e\ud835\udc2b\ud835\udc1f\ud835\udc28\ud835\udc2b\ud835\udc26\ud835\udc1a\ud835\udc27\ud835\udc1c\ud835\udc1e \ud835\udc1a\ud835\udc27\ud835\udc1d \ud835\udc01\ud835\udc1e\ud835\udc27\ud835\udc1c\ud835\udc21\ud835\udc26\ud835\udc1a\ud835\udc2b\ud835\udc24\ud835\udc2c: R1 demonstrates impressive performance, achieving a 79.8% Pass@1 on AIME 2024, 97.3% on MATH-500, and a 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants. That said, it\u2019s worth noting that benchmarks like these can be gamed or skewed by including test data in training. Always take these results with a healthy degree of skepticism. It's also worth\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: Head of AI Security & Strategy @ Aon 1d Edited Report this post The DeepSeek AI situation in 60 seconds: 1/ \ud835\udc0c\ud835\udc28\ud835\udc1d\ud835\udc1e\ud835\udc25 \ud835\udc11\ud835\udc1e\ud835\udc25\ud835\udc1e\ud835\udc1a\ud835\udc2c\ud835\udc1e\ud835\udc2c: DeepSeek, AI startup based in China, released two major models: DeepSeek-V3, a 671-billion-parameter Mixture of Experts (MoE) base model, and DeepSeek-R1, a reasoning-focused model built upon V3. Additionally, the company has released smaller, distilled versions of these models, ranging from 1.5 billion to 70 billion parameters, to enhance accessibility across various applications. The company reports development costs of approximately $6 million, though this figure is debated given their parent company's broader AI investments. 2/ \ud835\udc0f\ud835\udc1e\ud835\udc2b\ud835\udc1f\ud835\udc28\ud835\udc2b\ud835\udc26\ud835\udc1a\ud835\udc27\ud835\udc1c\ud835\udc1e \ud835\udc1a\ud835\udc27\ud835\udc1d \ud835\udc01\ud835\udc1e\ud835\udc27\ud835\udc1c\ud835\udc21\ud835\udc26\ud835\udc1a\ud835\udc2b\ud835\udc24\ud835\udc2c: R1 demonstrates impressive performance, achieving a 79.8% Pass@1 on AIME 2024, 97.3% on MATH-500, and a 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants. That said, it\u2019s worth noting that benchmarks like these can be gamed or skewed by including test data in training. Always take\n\nSource: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\nTitle: DeepSeek-R1: The Next Leap in AI Reasoning and Logical Inference\nContent: DeepSeek-V3, R1 utilizes a Mixture-of-Experts (MoE) architecture, which allows it to activate only a portion of its network per query. The benefits include: Lower computational costs during inference. Higher efficiency in processing reasoning tasks. The ability to scale effectively without requiring expensive hardware upgrades. 4. Open-Sourced to Accelerate AI Research DeepSeek has made DeepSeek-R1 and six distilled models available to researchers and developers worldwide. This means that the global AI community can: Analyze and refine the model's reasoning capabilities. Develop specialized versions tailored for different industries. Experiment with new reinforcement learning techniques. DeepSeek-R1 vs. OpenAI and Other LLMs DeepSeek-R1 has positioned itself as a direct competitor to models developed by OpenAI, Google DeepMind, and Meta. Here\u2019s how it stacks up: Key Takeaways: DeepSeek-R1 has exceptional reasoning skills, even outperforming OpenAI\u2019s o1 in some benchmarks. It is more\n\nSource: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\nTitle: DeepSeek-R1: The Next Leap in AI Reasoning and Logical Inference\nContent: logical inference, mathematical reasoning, and real-time problem-solving capabilities\nDeepSeek-R1's emergence is particularly intriguing because it achieves performance comparable to OpenAI\u2019s latest o1 model while being open-source and optimized for efficiency. Built on top of DeepSeek\u2019s V3-Base, R1 is reshaping how AI models approach reasoning and setting a new benchmark in the AI arms race.\nDeepSeek-R1's emergence is particularly intriguing because it achieves performance comparable to OpenAI\u2019s latest o1 model while being open-source and optimized for efficiency. Built on top of DeepSeek\u2019s V3-Base, R1 is reshaping how AI models approach reasoning and setting a new benchmark in the AI arms race.\nDeepSeek-R1's emergence is particularly intriguing because it achieves performance comparable to\nOpenAI\u2019s latest o1 model\nwhile being open-source and optimized for efficiency. Built on top of DeepSeek\u2019s V3-Base, R1 is\nreshaping how AI models approach reasoning\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: memory efficiency, he also expressed doubt regarding the reported quantity of chips used. DeepSeek launched an AI model on January 20th that rivals OpenAI's ChatGPT and Meta's Llama 3.1. DeepSeek utilizes \"inference-time computing,\" activating only necessary parts of its model for each query, which is more cost and energy efficient. This has garnered praise from tech figures like Marc Andreessen, who called it a \"profound gift to the world.\" DeepSeek, a Chinese AI startup specializing in open-source large language models (LLMs), has released two notable models: DeepSeek-V3 and DeepSeek-R1. DeepSeek-V3 LLM utilizes a Mixture of Experts (MoE) architecture, combining several smaller models with a total of 671 billion parameters, but activating only 37 billion parameters for each token during inference. This approach significantly enhances efficiency, estimated to be 10x better than some peers and 3-7x better considering other innovations. V3 incorporates further advancements like\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: 1/ \ud835\udc0c\ud835\udc28\ud835\udc1d\ud835\udc1e\ud835\udc25 \ud835\udc11\ud835\udc1e\ud835\udc25\ud835\udc1e\ud835\udc1a\ud835\udc2c\ud835\udc1e\ud835\udc2c: DeepSeek, AI startup based in China, released two major models: DeepSeek-V3, a 671-billion-parameter Mixture of Experts (MoE) base model, and DeepSeek-R1, a reasoning-focused model built upon V3. Additionally, the company has released smaller, distilled versions of these models, ranging from 1.5 billion to 70 billion parameters, to enhance accessibility across various applications. The company reports development costs of approximately $6 million, though this figure is debated given their parent company's broader AI investments. 2/ \ud835\udc0f\ud835\udc1e\ud835\udc2b\ud835\udc1f\ud835\udc28\ud835\udc2b\ud835\udc26\ud835\udc1a\ud835\udc27\ud835\udc1c\ud835\udc1e \ud835\udc1a\ud835\udc27\ud835\udc1d \ud835\udc01\ud835\udc1e\ud835\udc27\ud835\udc1c\ud835\udc21\ud835\udc26\ud835\udc1a\ud835\udc2b\ud835\udc24\ud835\udc2c: R1 demonstrates impressive performance, achieving a 79.8% Pass@1 on AIME 2024, 97.3% on MATH-500, and a 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants. That said, it\u2019s worth noting that benchmarks like these can be gamed or skewed by including test data in training. Always take these results with a healthy degree of skepticism. It's also worth noting that DeepSeek's models exhibit\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: 4. Technical Efficiency\nDeepSeek-R1 Benchmark Brilliance\nDeepSeek-R1 Benchmark Brilliance\nWhat Sets DeepSeek-R1 Apart1. The RL-First Approach2. Efficiency 3. Scalability 4. Independent Reasoning\nWhat Sets DeepSeek-R1 Apart\n1. The RL-First Approach\n1. The RL-First Approach\n4. Independent Reasoning\n4. Independent Reasoning\nWhere DeepSeek-R1 Excels1. Education2. Software Development3. Business Insights with Data Analysis4. Customer Experiences5. Solving Complex Global Problems\nWhere DeepSeek-R1 Excels\n2. Software Development\n2. Software Development\n3. Business Insights with Data Analysis\n3. Business Insights with Data Analysis\n4. Customer Experiences\n4. Customer Experiences\n5. Solving Complex Global Problems\n5. Solving Complex Global Problems\nIn The End\nIn The End\nWork With UsOur innovative and reliable tech solutions deliver the value you're looking for.Contact Us\nWork With Us\nOur innovative and reliable tech solutions deliver the value you're looking for.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:49.536293",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.0136348",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:49.558319",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_draft_sections",
+ "output": "\ud83d\udcd1 Generating draft section titles for 'DeepSeek V3: Model Architecture and Performance'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:54.062302",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "draft_sections_generated",
+ "output": "\ud83d\uddc2\ufe0f Draft section titles generated for 'DeepSeek V3: Model Architecture and Performance'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:54.081908",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_relevant_written_content",
+ "output": "\ud83d\udd0e Getting relevant written content based on query: DeepSeek V3: Model Architecture and Performance...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:13:54.121046",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'DeepSeek V3: Model Architecture and Performance'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:32.624084",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'DeepSeek V3: Model Architecture and Performance'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:32.662870",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'Impact on the AI Industry: Cost Efficiency and Democratization'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:32.681998",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83e\udd16 AI Research Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:32.704956",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: Impact on the AI Industry: Cost Efficiency and Democratization...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:37.193771",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:40.474162",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['\"Deepseek v3 R1\" cost efficiency analysis', '\"Deepseek v3 R1\" democratization impact case studies', '\"Deepseek v3 R1\" comparison with other AI models cost and accessibility', '\"Deepseek v3 R1\" impact on AI development barriers']...",
+ "metadata": [
+ "\"Deepseek v3 R1\" cost efficiency analysis",
+ "\"Deepseek v3 R1\" democratization impact case studies",
+ "\"Deepseek v3 R1\" comparison with other AI models cost and accessibility",
+ "\"Deepseek v3 R1\" impact on AI development barriers"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:40.499519",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" cost efficiency analysis'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:40.522411",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" democratization impact case studies'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:40.541906",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" comparison with other AI models cost and accessibility'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:40.557605",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for '\"Deepseek v3 R1\" impact on AI development barriers'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:45.202644",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://feedland.org/?river=true&screenname=gwthompson&catname=ai\n",
+ "metadata": "https://feedland.org/?river=true&screenname=gwthompson&catname=ai"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:45.221693",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://news.ycombinator.com/item?id=42849536\n",
+ "metadata": "https://news.ycombinator.com/item?id=42849536"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:45.240002",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://news.ycombinator.com/item?id=42823568\n",
+ "metadata": "https://news.ycombinator.com/item?id=42823568"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:45.257763",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:45.277098",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 3 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.354663",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.373836",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.391379",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.408272",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" democratization impact case studies...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.743081",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\n",
+ "metadata": "https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.758907",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\n",
+ "metadata": "https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.778607",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.capacitymedia.com/article/behind-the-deepseek-hype-costs-safety-risks-and-censorship-explained\n",
+ "metadata": "https://www.capacitymedia.com/article/behind-the-deepseek-hype-costs-safety-risks-and-censorship-explained"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.798886",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\n",
+ "metadata": "https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.817759",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://bdtechtalks.com/2025/01/29/deepseek-r1-winners-losers/\n",
+ "metadata": "https://bdtechtalks.com/2025/01/29/deepseek-r1-winners-losers/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.835626",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:51.854551",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:54.877499",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:54.894781",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 9 total images",
+ "metadata": [
+ "https://arbisoft.com/_next/image?url=%2F_next%2Fstatic%2Fmedia%2Fcontact.c5602fd6.png&w=1440&q=75",
+ "https://assets.euromoneydigital.com/dims4/default/92e4280/2147483647/strip/true/crop/840x472+0+0/resize/840x472!/quality/90/?url=http%3A%2F%2Feuromoney-brightspot.s3.amazonaws.com%2Ffa%2F68%2F0fff83364b6c8639e07d1ebe49bc%2Fnews-images-34.png",
+ "https://assets.euromoneydigital.com/dims4/default/6441e6b/2147483647/strip/true/crop/840x472+0+0/resize/800x450!/quality/90/?url=http%3A%2F%2Feuromoney-brightspot.s3.amazonaws.com%2F17%2F2e%2F3cb492cd4d28a0f46ae5245bc160%2Fnews-images-30.png",
+ "https://assets.euromoneydigital.com/dims4/default/781f3e0/2147483647/strip/true/crop/840x472+0+0/resize/800x450!/quality/90/?url=http%3A%2F%2Feuromoney-brightspot.s3.amazonaws.com%2F20%2Fcf%2F0b69364d4c129b2cb67afaa7e881%2Fnews-images-37.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:54.903264",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:54.935411",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" impact on AI development barriers...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:55.149232",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1\n",
+ "metadata": "https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:55.171057",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reddit.com/r/OpenAI/comments/1ibe3n7/deepseek_ai_agents_vs_chatgpt_openai_still_better/\n",
+ "metadata": "https://www.reddit.com/r/OpenAI/comments/1ibe3n7/deepseek_ai_agents_vs_chatgpt_openai_still_better/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:55.191975",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:55.209381",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 2 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:55.894556",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:55.913699",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://helios-i.mashable.com/imagery/articles/01ywQklBcfNJQHo7KRl3DJe/hero-image.fill.size_1248x702.v1738094497.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:55.930852",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:55.951051",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" comparison with other AI models cost and accessibility...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:55.996678",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.yeschat.ai/features/deepseek\n",
+ "metadata": "https://www.yeschat.ai/features/deepseek"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:56.016629",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:56.035189",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 1 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:58.935061",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 1 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:58.962025",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:58.974593",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:14:58.997834",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: \"Deepseek v3 R1\" cost efficiency analysis...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:01.190918",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1\nTitle: DeepSeek R1: Why AI experts think it's so special | Mashable\nContent: But R1 causing such a frenzy because of how little it cost to make. \"It's not smarter than earlier models, just trained more cheaply,\" said AI research scientist Gary Marcus.\nThe fact that DeepSeek was able to build a model that competes with OpenAI's models is pretty remarkable. Andrej Karpathy who co-founded OpenAI, posted on X, \"Does this mean you don't need large GPU clusters for frontier LLMs? No, but you have to ensure that you're not wasteful with what you have, and this looks like a nice demonstration that there's still a lot to get through with both data and algorithms.\"\nRelated Stories Here's what DeepSeek AI does better than OpenAI's ChatGPT What DeepSeek AI won't tell you OpenAI announces new ChatGPT product amid DeepSeek AI news DeepSeek AI: How to try DeepSeek R1 right now DeepSeek collects keystroke data and more, storing it in Chinese servers\nHere's what DeepSeek AI does better than OpenAI's ChatGPT\nWhat DeepSeek AI won't tell you\n\nSource: https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1\nTitle: DeepSeek R1: Why AI experts think it's so special | Mashable\nContent: VIDEOS Mashable Shows All Videos\nMashable Shows All Videos\nHome > Tech\nHome > Tech\nHome > Tech\nWhat AI experts are saying about DeepSeek R1\nThe experts explain why DeepSeek R1 is truly a big deal.\nBy Cecily Mauran on January 28, 2025 Share on Facebook Share on Twitter Share on Flipboard\nBy Cecily Mauran on January 28, 2025\nShare on Facebook Share on Twitter Share on Flipboard\nShare on Facebook\nShare on Twitter\nShare on Flipboard\nExperts share their views on DeepSeek. Credit: CFOTO / Future Publishing / Getty Images\nExperts share their views on DeepSeek. Credit: CFOTO / Future Publishing / Getty Images\nExperts share their views on DeepSeek. Credit: CFOTO / Future Publishing / Getty Images\nExperts share their views on DeepSeek.\nCredit: CFOTO / Future Publishing / Getty Images\nAll of a sudden, DeepSeek is everywhere.\nIts R1 model is open source, allegedly trained for a fraction of the cost of other AI models, and is just as good, if not better than ChatGPT.\n\nSource: https://www.reddit.com/r/OpenAI/comments/1ibe3n7/deepseek_ai_agents_vs_chatgpt_openai_still_better/\nTitle: Reddit - Dive into anything\nContent: For context, I\u2019ve been comparing these to OpenAI\u2019s APIs, which excel in areas like:\nA large ecosystem (e.g., plugins, third-party integrations)\nRobustness for creative tasks like storytelling\nWell-defined safety guardrails for sensitive applications\nThat said, here are some questions for discussion:\nHave you tested DeepSeek in production environments? How does it handle scale and reliability?\nHave you tested DeepSeek in production environments? How does it handle scale and reliability?\nHow does its code generation compare to ChatGPT\u2019s tools like the Code Interpreter?\nHow does its code generation compare to ChatGPT\u2019s tools like the Code Interpreter?\nIs the pricing difference meaningful for your projects?\nIs the pricing difference meaningful for your projects?\nAre there any trade-offs, like regional access, support quality, or rate limits?\nAre there any trade-offs, like regional access, support quality, or rate limits?\n\nSource: https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1\nTitle: DeepSeek R1: Why AI experts think it's so special | Mashable\nContent: This lethal combination hit Wall Street hard, causing tech stocks to tumble, and making investors question how much money is needed to develop good AI models. DeepSeek engineers claim R1 was trained on 2,788 GPUs which cost around $6 million, compared to OpenAI's GPT-4 which reportedly cost $100 million to train.\nDeepSeek's cost efficiency also challenges the idea that larger models and more data leads to better performance. Amidst the frenzied conversation about DeepSeek's capabilities, its threat to AI companies like OpenAI, and spooked investors, it can be hard to make sense of what's going on. But AI experts with veteran experience have weighed in with valuable perspectives.\nDeepSeek proves what AI experts have been saying for years: bigger isn't better\n\nSource: https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1\nTitle: DeepSeek R1: Why AI experts think it's so special | Mashable\nContent: Here's what DeepSeek AI does better than OpenAI's ChatGPT\nWhat DeepSeek AI won't tell you\nOpenAI announces new ChatGPT product amid DeepSeek AI news\nDeepSeek AI: How to try DeepSeek R1 right now\nDeepSeek collects keystroke data and more, storing it in Chinese servers\nWharton AI professor Ethan Mollick said it's not about it's capabilities, but models that people currently have access to. \"DeepSeek is a really good model, but it is not generally a better model than o1 or Claude\" he said. \"But since it is both free and getting a ton of attention, I think a lot of people who were using free 'mini' models are being exposed to what a early 2025 reasoner AI can do and are surprised.\"\nScore one for open source AI models\n\nSource: https://www.reddit.com/r/OpenAI/comments/1ibe3n7/deepseek_ai_agents_vs_chatgpt_openai_still_better/\nTitle: Reddit - Dive into anything\nContent: I\u2019ve been diving into different AI APIs recently and came across DeepSeek AI agents, which seem to be an interesting alternative to OpenAI\u2019s ChatGPT. While ChatGPT is widely used and well-documented, DeepSeek introduces some unique claims that got me curious\u2014especially around API performance, customization, and use cases.\nHere are some aspects of DeepSeek I found worth exploring:\nPerformance Claims: Documentation mentions sub-500ms response times, even for complex tasks. Anyone validated this in real-world use?\nPerformance Claims: Documentation mentions sub-500ms response times, even for complex tasks. Anyone validated this in real-world use?\nPricing: The token-based pricing model seems 20\u201330% cheaper than GPT-4\u2019s API, which could matter for high-volume users.\nPricing: The token-based pricing model seems 20\u201330% cheaper than GPT-4\u2019s API, which could matter for high-volume users.\n\nSource: https://www.reddit.com/r/OpenAI/comments/1ibe3n7/deepseek_ai_agents_vs_chatgpt_openai_still_better/\nTitle: Reddit - Dive into anything\nContent: pricing model seems 20\u201330% cheaper than GPT-4\u2019s API, which could matter for high-volume users. Context Handling: Offers extended or unlimited context windows when self-hosting. I\u2019m wondering how practical this is for heavy workflows. Pre-Built Agents: Includes tools designed for coding, data analysis, and research. Has anyone tested how effective these are out of the box? Stateful Workflows: Features memory management for multi-step interactions. This could be helpful for developers building conversational apps or automations. For context, I\u2019ve been comparing these to OpenAI\u2019s APIs, which excel in areas like: A large ecosystem (e.g., plugins, third-party integrations) Robustness for creative tasks like storytelling Well-defined safety guardrails for sensitive applications That said, here are some questions for discussion: Have you tested DeepSeek in production environments? How does it handle scale and reliability? How does its code generation compare to ChatGPT\u2019s tools like the Code\n\nSource: https://www.reddit.com/r/OpenAI/comments/1ibe3n7/deepseek_ai_agents_vs_chatgpt_openai_still_better/\nTitle: Reddit - Dive into anything\nContent: pricing model seems 20\u201330% cheaper than GPT-4\u2019s API, which could matter for high-volume users. Context Handling: Offers extended or unlimited context windows when self-hosting. I\u2019m wondering how practical this is for heavy workflows. Pre-Built Agents: Includes tools designed for coding, data analysis, and research. Has anyone tested how effective these are out of the box? Stateful Workflows: Features memory management for multi-step interactions. This could be helpful for developers building conversational apps or automations. For context, I\u2019ve been comparing these to OpenAI\u2019s APIs, which excel in areas like: A large ecosystem (e.g., plugins, third-party integrations) Robustness for creative tasks like storytelling Well-defined safety guardrails for sensitive applications That said, here are some questions for discussion: Have you tested DeepSeek in production environments? How does it handle scale and reliability? How does its code generation compare to ChatGPT\u2019s tools like the Code\n\nSource: https://www.reddit.com/r/OpenAI/comments/1ibe3n7/deepseek_ai_agents_vs_chatgpt_openai_still_better/\nTitle: Reddit - Dive into anything\nContent: pricing model seems 20\u201330% cheaper than GPT-4\u2019s API, which could matter for high-volume users. Context Handling: Offers extended or unlimited context windows when self-hosting. I\u2019m wondering how practical this is for heavy workflows. Pre-Built Agents: Includes tools designed for coding, data analysis, and research. Has anyone tested how effective these are out of the box? Stateful Workflows: Features memory management for multi-step interactions. This could be helpful for developers building conversational apps or automations. For context, I\u2019ve been comparing these to OpenAI\u2019s APIs, which excel in areas like: A large ecosystem (e.g., plugins, third-party integrations) Robustness for creative tasks like storytelling Well-defined safety guardrails for sensitive applications That said, here are some questions for discussion: Have you tested DeepSeek in production environments? How does it handle scale and reliability? How does its code generation compare to ChatGPT\u2019s tools like the Code\n\nSource: https://www.reddit.com/r/OpenAI/comments/1ibe3n7/deepseek_ai_agents_vs_chatgpt_openai_still_better/\nTitle: Reddit - Dive into anything\nContent: handle scale and reliability? How does its code generation compare to ChatGPT\u2019s tools like the Code Interpreter? Is the pricing difference meaningful for your projects? Are there any trade-offs, like regional access, support quality, or rate limits? I\u2019ll share some of my own observations in the comments, but I\u2019d love to hear from anyone who\u2019s worked with these APIs. Are these new players like DeepSeek serious contenders in the LLM space, or are they still catching up to the incumbents? Read more Top 1% Rank by size Public Anyone can view, post, and comment to this community TOPICS Internet Culture (Viral) Amazing Animals & Pets Cringe & Facepalm Funny Interesting Memes Oddly Satisfying Reddit Meta Wholesome & Heartwarming Games Action Games Adventure Games Esports Gaming Consoles & Gear Gaming News & Discussion Mobile Games Other Games Role-Playing Games Simulation Games Sports & Racing Games Strategy Games Tabletop Games Q&As Q&As Stories & Confessions Technology 3D\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:07.960409",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: Discover the Core Capabilities of DeepSeek V3 & R1\nDiscover the Core Capabilities of DeepSeek V3 & R1\nAdvanced Information Retrieval (DeepSeek V3)Efficiently process and retrieve data from extensive knowledge bases and real-time web sources. DeepSeek R1 provides detailed answers, article summaries, and relevant, up-to-date information.Advanced Information Retrieval (DeepSeek V3)Efficiently process and retrieve data from extensive knowledge bases and real-time web sources. DeepSeek R1 provides detailed answers, article summaries, and relevant, up-to-date information.\nAdvanced Information Retrieval (DeepSeek V3)Efficiently process and retrieve data from extensive knowledge bases and real-time web sources. DeepSeek R1 provides detailed answers, article summaries, and relevant, up-to-date information.\n\nSource: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: Deepseek v3 paperThe DeepSeek V3 paper provides an in-depth look into the research and development process behind this state-of-the-art AI platform. Covering topics such as advanced neural architectures, training methodologies, and real-world applications, the paper offers valuable insights for academics, researchers, and industry professionals. It showcases the technological innovations that make DeepSeek V3 a leading choice for AI-powered solutions, setting a benchmark for the future of artificial intelligence.\nDeepseek v3 paper\n\nSource: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: your projects and initiatives.Deepseek v3 paperThe DeepSeek V3 paper provides an in-depth look into the research and development process behind this state-of-the-art AI platform. Covering topics such as advanced neural architectures, training methodologies, and real-world applications, the paper offers valuable insights for academics, researchers, and industry professionals. It showcases the technological innovations that make DeepSeek V3 a leading choice for AI-powered solutions, setting a benchmark for the future of artificial intelligence.\n\nSource: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: your projects and initiatives.Deepseek v3 paperThe DeepSeek V3 paper provides an in-depth look into the research and development process behind this state-of-the-art AI platform. Covering topics such as advanced neural architectures, training methodologies, and real-world applications, the paper offers valuable insights for academics, researchers, and industry professionals. It showcases the technological innovations that make DeepSeek V3 a leading choice for AI-powered solutions, setting a benchmark for the future of artificial intelligence.\n\nSource: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: The DeepSeek V3 paper provides an in-depth look into the research and development process behind this state-of-the-art AI platform. Covering topics such as advanced neural architectures, training methodologies, and real-world applications, the paper offers valuable insights for academics, researchers, and industry professionals. It showcases the technological innovations that make DeepSeek V3 a leading choice for AI-powered solutions, setting a benchmark for the future of artificial intelligence.\n\nSource: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: Deepseek v3 paper\nThe DeepSeek V3 paper provides an in-depth look into the research and development process behind this state-of-the-art AI platform. Covering topics such as advanced neural architectures, training methodologies, and real-world applications, the paper offers valuable insights for academics, researchers, and industry professionals. It showcases the technological innovations that make DeepSeek V3 a leading choice for AI-powered solutions, setting a benchmark for the future of artificial intelligence.\n\nSource: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: R1 platform, optimized for lightweight deployment and resource efficiency. Designed for edge computing and low-power environments, DeepSeek R1 Zero ensures that even constrained devices can harness the power of AI. It is ideal for IoT applications, mobile devices, and on-site data processing, bringing the capabilities of DeepSeek to a broader range of use cases without compromising performance or reliability.DeepSeek-aiDeepSeek AI represents the forefront of artificial intelligence innovation, offering a suite of tools and platforms designed to revolutionize the way we interact with technology. From natural language processing to predictive analytics, DeepSeek AI combines cutting-edge algorithms with user-centric design to provide unparalleled functionality. Whether you're a developer, researcher, or business professional, DeepSeek AI empowers you to harness the power of advanced AI for your projects and initiatives.Deepseek v3 paperThe DeepSeek V3 paper provides an in-depth look into\n\nSource: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: R1 platform, optimized for lightweight deployment and resource efficiency. Designed for edge computing and low-power environments, DeepSeek R1 Zero ensures that even constrained devices can harness the power of AI. It is ideal for IoT applications, mobile devices, and on-site data processing, bringing the capabilities of DeepSeek to a broader range of use cases without compromising performance or reliability.DeepSeek-aiDeepSeek AI represents the forefront of artificial intelligence innovation, offering a suite of tools and platforms designed to revolutionize the way we interact with technology. From natural language processing to predictive analytics, DeepSeek AI combines cutting-edge algorithms with user-centric design to provide unparalleled functionality. Whether you're a developer, researcher, or business professional, DeepSeek AI empowers you to harness the power of advanced AI for your projects and initiatives.Deepseek v3 paperThe DeepSeek V3 paper provides an in-depth look into\n\nSource: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: Discover the Core Capabilities of DeepSeek V3 & R1Advanced Information Retrieval (DeepSeek V3)Efficiently process and retrieve data from extensive knowledge bases and real-time web sources. DeepSeek R1 provides detailed answers, article summaries, and relevant, up-to-date information.Advanced Information Retrieval (DeepSeek V3)Efficiently process and retrieve data from extensive knowledge bases and real-time web sources. DeepSeek R1 provides detailed answers, article summaries, and relevant, up-to-date information.Problem-Solving Expertise (DeepSeek R1)Optimized for technical support, DeepSeek chat simplifies complex issues, assists in coding challenges, and provides actionable solutions for technical problems.Problem-Solving Expertise (DeepSeek R1)Optimized for technical support, DeepSeek chat simplifies complex issues, assists in coding challenges, and provides actionable solutions for technical problems.Creative Content Creation & SummarizationGenerate engaging content, from blog\n\nSource: https://www.yeschat.ai/features/deepseek\nTitle: DeepSeek chat V3 & R1 | Free AI-Powered Solutions\nContent: Discover the Core Capabilities of DeepSeek V3 & R1Advanced Information Retrieval (DeepSeek V3)Efficiently process and retrieve data from extensive knowledge bases and real-time web sources. DeepSeek R1 provides detailed answers, article summaries, and relevant, up-to-date information.Advanced Information Retrieval (DeepSeek V3)Efficiently process and retrieve data from extensive knowledge bases and real-time web sources. DeepSeek R1 provides detailed answers, article summaries, and relevant, up-to-date information.Problem-Solving Expertise (DeepSeek R1)Optimized for technical support, DeepSeek chat simplifies complex issues, assists in coding challenges, and provides actionable solutions for technical problems.Problem-Solving Expertise (DeepSeek R1)Optimized for technical support, DeepSeek chat simplifies complex issues, assists in coding challenges, and provides actionable solutions for technical problems.Creative Content Creation & SummarizationGenerate engaging content, from blog\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:24.424388",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: environmental models and predict long-term impacts of climate change.Healthcare providers may rely on its reasoning abilities to analyze patient data and suggest personalized treatment plans. DeepSeek ChallengesDeepSeek-R1 has made a big impact, but it\u2019s not perfect. Here are some challenges it faces that could affect its growth and use. Businesses and developers might doubt DeepSeek\u2019s reliability and long-term support since it\u2019s new to the global AI market.DeepSeek\u2019s low-cost strategy could struggle with highly complex or resource-heavy tasks.Its open-source nature under the MIT license could lead to misuse or unethical applications.Training on local or limited datasets might cause cultural or contextual biases, making it less effective globally.Being a Chinese product, it may face restrictions or scrutiny in Western markets due to political tensions.Unlike OpenAI, DeepSeek lacks strong partnerships and platform integrations, which could limit its appeal to developers.Competing with\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: DeepSeek-R1 has made a big impact, but it\u2019s not perfect. Here are some challenges it faces that could affect its growth and use.\nBusinesses and developers might doubt DeepSeek\u2019s reliability and long-term support since it\u2019s new to the global AI market.\nDeepSeek\u2019s low-cost strategy could struggle with highly complex or resource-heavy tasks.\nIts open-source nature under the MIT license could lead to misuse or unethical applications.\nTraining on local or limited datasets might cause cultural or contextual biases, making it less effective globally.\nBeing a Chinese product, it may face restrictions or scrutiny in Western markets due to political tensions.\nUnlike OpenAI, DeepSeek lacks strong partnerships and platform integrations, which could limit its appeal to developers.\nCompeting with well-funded giants like OpenAI and Google could make it tough for DeepSeek to succeed outside China.\nIn The End\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives express doubt about DeepSeek's adoption by major U.S. businesses, citing concerns about using a Chinese startup for critical AI infrastructure and Nvidia's\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives express doubt about DeepSeek's adoption by major U.S. businesses, citing concerns about using a Chinese startup for critical AI infrastructure and Nvidia's\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: faster support, reducing wait times and boosting customer satisfaction. 5. Solving Complex Global ProblemsDeepSeek-R1 is a tool for businesses with the potential to address many challenges. Climate research teams may use it to simulate environmental models and predict long-term impacts of climate change.Healthcare providers may rely on its reasoning abilities to analyze patient data and suggest personalized treatment plans. DeepSeek ChallengesDeepSeek-R1 has made a big impact, but it\u2019s not perfect. Here are some challenges it faces that could affect its growth and use. Businesses and developers might doubt DeepSeek\u2019s reliability and long-term support since it\u2019s new to the global AI market.DeepSeek\u2019s low-cost strategy could struggle with highly complex or resource-heavy tasks.Its open-source nature under the MIT license could lead to misuse or unethical applications.Training on local or limited datasets might cause cultural or contextual biases, making it less effective globally.Being\n\nSource: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\nTitle: DeepSeek-R1: The Next Leap in AI Reasoning and Logical Inference\nContent: and does not require\nhigh-end GPU clusters\nlike OpenAI\u2019s proprietary models.\nUnlike OpenAI and Google, DeepSeek-R1 is open-source, which fosters community-driven advancements.\nUnlike OpenAI and Google,\nDeepSeek-R1 is open-source\n, which fosters community-driven advancements.\nImplications of DeepSeek-R1 for the AI Industry\nImplications of DeepSeek-R1 for the AI Industry\nImplications of DeepSeek-R1 for the AI Industry\nDeepSeek-R1\u2019s release has major implications for AI development, research, and adoption:\nDeepSeek-R1\u2019s release has major implications for AI development, research, and adoption:\nDeepSeek-R1\u2019s release has\nfor AI development, research, and adoption:\n1. Democratizing High-Level Reasoning AI\n1. Democratizing High-Level Reasoning AI\n1. Democratizing High-Level Reasoning AI\n\nSource: https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost\nTitle: DeepSeek-R1 - The Chinese AI Powerhouse Outperforming OpenAI's o1 \u2014 at 95% Less Cost\nContent: limited datasets might cause cultural or contextual biases, making it less effective globally.Being a Chinese product, it may face restrictions or scrutiny in Western markets due to political tensions.Unlike OpenAI, DeepSeek lacks strong partnerships and platform integrations, which could limit its appeal to developers.Competing with well-funded giants like OpenAI and Google could make it tough for DeepSeek to succeed outside China. In The EndDeepSeek-R1\u2019s emergence signals a major shift in the global AI horizon, with China solidifying its position as a leader in advanced technology. By offering a model that rivals the best in the industry\u2014at a fraction of the cost\u2014China is not only transforming its domestic AI ecosystem but also making a bold statement on the global stage. The ripple effects will touch industries like education, healthcare, software development, and customer service, pushing growth. More importantly, DeepSeek-R1 is forcing worldwide players to reconsider the way they\n\nSource: https://www.linkedin.com/pulse/deepseek-r1-next-leap-ai-reasoning-logical-inference-pandiya-fwlqe\nTitle: DeepSeek-R1: The Next Leap in AI Reasoning and Logical Inference\nContent: AI without prohibitive costs. This could lead to faster innovation in industries like finance, healthcare, and education. 2. Disrupting AI Hardware Demand DeepSeek\u2019s ability to develop high-performing AI models without massive computational resources challenges the dominance of GPU manufacturers like Nvidia. Future AI models may prioritize efficiency over sheer parameter size. 3. Strengthening China\u2019s AI Influence DeepSeek\u2019s rapid progress signals China\u2019s growing role in global AI research. U.S.-based AI labs may face increased competition, potentially leading to tighter AI regulations in Western countries. 4. Future of Reinforcement Learning in LLMs If DeepSeek-R1 continues to outperform traditional AI models, more companies may shift towards reinforcement learning approaches. This could reduce dependency on massive labeled datasets, leading to more generalizable AI systems. What\u2019s Next for DeepSeek? DeepSeek-R1 is just the beginning. Looking ahead, DeepSeek is likely to: Expand R1\u2019s\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives express doubt about DeepSeek's adoption by major U.S. businesses,\n\nSource: https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/\nTitle: Dominant Nvidia tested by DeepSeek | LinkedIn\nContent: on FP8, and a post-training re-architecture. While MoE models inherently offer efficiency advantages, V3's performance is particularly noteworthy. DeepSeek-R1 reinforcement learning reasoning model is benchmarked against OpenAI's GPT models. A key claim is that DeepSeek-R1 has eliminated the need for supervised fine-tuning, suggesting a novel approach to training. While DeepSeek's models demonstrate impressive performance and incorporate innovative techniques, including more standard optimization methods, there's ongoing discussion about the extent of DeepSeek's original contributions versus their reliance on existing open-source LLMs. The emergence of a low-cost, high-performing AI tool from China has sparked discussions on Wall Street about the long-term impact on the AI market and whether U.S. companies are overspending on AI development. Despite the technological advancements, some analysts like Wedbush's Ives express doubt about DeepSeek's adoption by major U.S. businesses,\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:48.105173",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://news.ycombinator.com/item?id=42849536\nTitle: Open-R1: an open reproduction of DeepSeek-R1 | Hacker News\nContent: Babawomba 1 day ago | prev | next [\u2013]\n1 day ago\n| prev | next [\u2013]\nsuper cool to see an open initiative like this\u2014love the idea of replicating DeepSeek-R1 in a transparent way.I do like the idea of making these reasoning techniques accessible to everyone. If they really manage to replicate the results of DeepSeek-R1, especially on a smaller budget, that\u2019s a huge win for open-source AI.I\u2019m all for projects that push innovation and share the process with others, even if it\u2019s messy.But yeah\u2014lots of hurdles. They might hit a wall because they don\u2019t have DeepSeek\u2019s original datasets. reply\n\nSource: https://news.ycombinator.com/item?id=42849536\nTitle: Open-R1: an open reproduction of DeepSeek-R1 | Hacker News\nContent: super cool to see an open initiative like this\u2014love the idea of replicating DeepSeek-R1 in a transparent way.I do like the idea of making these reasoning techniques accessible to everyone. If they really manage to replicate the results of DeepSeek-R1, especially on a smaller budget, that\u2019s a huge win for open-source AI.I\u2019m all for projects that push innovation and share the process with others, even if it\u2019s messy.But yeah\u2014lots of hurdles. They might hit a wall because they don\u2019t have DeepSeek\u2019s original datasets.\nI do like the idea of making these reasoning techniques accessible to everyone. If they really manage to replicate the results of DeepSeek-R1, especially on a smaller budget, that\u2019s a huge win for open-source AI.\nI\u2019m all for projects that push innovation and share the process with others, even if it\u2019s messy.\nBut yeah\u2014lots of hurdles. They might hit a wall because they don\u2019t have DeepSeek\u2019s original datasets.\nfl4tul4 1 day ago | prev | next [\u2013]\n\nSource: https://news.ycombinator.com/item?id=42849536\nTitle: Open-R1: an open reproduction of DeepSeek-R1 | Hacker News\nContent: This nitpicking is pointless.DeepSeek's gifts to the world of its open weights, public research and OSS code of its SOTA models are all any reasonable person should expect given no organization is going to release their dataset and open themselves up to criticism and legal exposure.You shouldn't expect to any to see datasets behind any SOTA models until they're able to be synthetically generated from larger models. Models only trained on sanctioned \"public\" datasets are not going to perform as well which makes them a lot less interesting and practically useful.Yes it would be great for their to be open models containing original datasets and a working pipeline to recreate models from scratch. But when few people would even have the resources to train the models and the huge training costs just result in worse performing models, it's only academically interesting to a few research labs.Open model releases should be celebrated, not criticized with unreasonable nitpicking and\n\nSource: https://news.ycombinator.com/item?id=42849536\nTitle: Open-R1: an open reproduction of DeepSeek-R1 | Hacker News\nContent: This nitpicking is pointless.DeepSeek's gifts to the world of its open weights, public research and OSS code of its SOTA models are all any reasonable person should expect given no organization is going to release their dataset and open themselves up to criticism and legal exposure.You shouldn't expect to any to see datasets behind any SOTA models until they're able to be synthetically generated from larger models. Models only trained on sanctioned \"public\" datasets are not going to perform as well which makes them a lot less interesting and practically useful.Yes it would be great for their to be open models containing original datasets and a working pipeline to recreate models from scratch. But when few people would even have the resources to train the models and the huge training costs just result in worse performing models, it's only academically interesting to a few research labs.Open model releases should be celebrated, not criticized with unreasonable nitpicking and\n\nSource: https://news.ycombinator.com/item?id=42823568\nTitle: DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via RL | Hacker News\nContent: as DeepSeek wasn't among China's major AI players before the R1 release, having maintained a relatively low profile. In fact, both DeepSeek-V2 and V3 had outperformed many competitors, I've seen some posts about that. However, these achievements received limited mainstream attention prior to their breakthrough release.\naprilthird2021 3 days ago | parent | prev | next [\u2013]\naprilthird2021 3 days ago | parent | prev | next [\u2013]\n3 days ago\n| parent | prev | next [\u2013]\n> If it turns out that you, in fact, don't need a gazillion GPUs to build SOTA models it destroys a lot of perceived value.Correct me if I'm wrong, but couldn't you take the optimization and tricks for training, inference, etc. from this model and apply to the Big Corps' huge AI data centers and get an even better model?I'll preface this by saying, better and better models may not actually unlock the economic value they are hoping for. It might be a thing where the last 10% takes 90% of the effort so to speak reply\n\nSource: https://news.ycombinator.com/item?id=42849536\nTitle: Open-R1: an open reproduction of DeepSeek-R1 | Hacker News\nContent: cadamsdotcom 1 day ago | prev | next [\u2013]\n1 day ago\n| prev | next [\u2013]\nExciting to see this being reproduced, loving the hyper-fast movement in open source!This is exactly why it is not \u201cUS vs China\u201d, the battle is between heavily-capitalized Silicon Valley companies versus open source.Every believer in this tech owes DeepSeek some gratitude, but even they stand on shoulders of giants in the form of everyone else who pushed the frontier forward and chose to publish, rather than exploit, what they learned. reply\nExciting to see this being reproduced, loving the hyper-fast movement in open source!This is exactly why it is not \u201cUS vs China\u201d, the battle is between heavily-capitalized Silicon Valley companies versus open source.Every believer in this tech owes DeepSeek some gratitude, but even they stand on shoulders of giants in the form of everyone else who pushed the frontier forward and chose to publish, rather than exploit, what they learned.\n\nSource: https://news.ycombinator.com/item?id=42849536\nTitle: Open-R1: an open reproduction of DeepSeek-R1 | Hacker News\nContent: DeepSeek's gifts to the world of its open weights, public research and OSS code of its SOTA models are all any reasonable person should expect given no organization is going to release their dataset and open themselves up to criticism and legal exposure.\nYou shouldn't expect to any to see datasets behind any SOTA models until they're able to be synthetically generated from larger models. Models only trained on sanctioned \"public\" datasets are not going to perform as well which makes them a lot less interesting and practically useful.\nYes it would be great for their to be open models containing original datasets and a working pipeline to recreate models from scratch. But when few people would even have the resources to train the models and the huge training costs just result in worse performing models, it's only academically interesting to a few research labs.\n\nSource: https://news.ycombinator.com/item?id=42849536\nTitle: Open-R1: an open reproduction of DeepSeek-R1 | Hacker News\nContent: From that article:> The release of DeepSeek-R1 is an amazing boon for the community, but they didn\u2019t release everything\u2014although the model weights are open, the datasets and code used to train the model are not.> The goal of Open-R1 is to build these last missing pieces so that the whole research and industry community can build similar or better models using these recipes and datasets.\n> The release of DeepSeek-R1 is an amazing boon for the community, but they didn\u2019t release everything\u2014although the model weights are open, the datasets and code used to train the model are not.\n> The goal of Open-R1 is to build these last missing pieces so that the whole research and industry community can build similar or better models using these recipes and datasets.\nboznz 1 day ago | root | parent | next [\u2013]\nboznz 1 day ago | root | parent | next [\u2013]\n1 day ago\n| root | parent | next [\u2013]\n\nSource: https://news.ycombinator.com/item?id=42849536\nTitle: Open-R1: an open reproduction of DeepSeek-R1 | Hacker News\nContent: It also needs to incorporate some deduplication approach as I notice the same data is often repackaged with variations in format or specification.\nfreddealmeida 1 day ago | prev | next [\u2013]\nfreddealmeida 1 day ago | prev | next [\u2013]\n1 day ago\n| prev | next [\u2013]\nhow is this open vs whatdeepseek did? reply\nhow is this open vs whatdeepseek did?\nsimonw 1 day ago | parent | next [\u2013]\nsimonw 1 day ago | parent | next [\u2013]\n1 day ago\n| parent | next [\u2013]\nFrom that article:> The release of DeepSeek-R1 is an amazing boon for the community, but they didn\u2019t release everything\u2014although the model weights are open, the datasets and code used to train the model are not.> The goal of Open-R1 is to build these last missing pieces so that the whole research and industry community can build similar or better models using these recipes and datasets. reply\n\nSource: https://news.ycombinator.com/item?id=42823568\nTitle: DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via RL | Hacker News\nContent: 3 days ago\n| root | parent | prev | next [\u2013]\ni can\u2019t think of a single commercial use case, outside of education, where that\u2019s even relevant. But i agree it\u2019s messed up from an ethical / moral perspective. reply\ni can\u2019t think of a single commercial use case, outside of education, where that\u2019s even relevant. But i agree it\u2019s messed up from an ethical / moral perspective.\nbrookst 3 days ago | root | parent | next [\u2013]\nbrookst 3 days ago | root | parent | next [\u2013]\n3 days ago\n| root | parent | next [\u2013]\nWell those are the overt political biases. Would you trust DeepSeek to advise on negotiating with a Chinese business?I\u2019m no xenophobe, but seeing the internal reasoning of DeepSeek explicitly planning to ensure alignment with the government give me pause. reply\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:48.131490",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.014451039999999998",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:48.152874",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_draft_sections",
+ "output": "\ud83d\udcd1 Generating draft section titles for 'Impact on the AI Industry: Cost Efficiency and Democratization'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:55.183352",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "draft_sections_generated",
+ "output": "\ud83d\uddc2\ufe0f Draft section titles generated for 'Impact on the AI Industry: Cost Efficiency and Democratization'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:55.214148",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_relevant_written_content",
+ "output": "\ud83d\udd0e Getting relevant written content based on query: Impact on the AI Industry: Cost Efficiency and Democratization...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:56.230896",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "relevant_contents_context",
+ "output": "\ud83d\udcc3 Title: Training Methodology and Efficiency\nContent: DeepSeek-V3's training process is remarkably efficient, both in terms of time and cost. The company reports a development cost of approximately $6 million, significantly lower than the development costs of many comparable large language models. (https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/) This cost-effectiveness is attributed to the model's efficient architecture and training methodology. DeepSeek utilizes a multi-stage training approach combining Supervised Fine-tuning (SFT) and Reinforcement Learning (RL). Specifically, they employ Group Relative Policy Optimization (GRPO), a more efficient alternative to Proximal Policy Optimization (PPO) and Detached Policy Optimization (DPO) for reinforcement learning. (https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA) This innovative training approach allows DeepSeek to achieve high performance with fewer computational\n\nTitle: Mixture of Experts (MoE) Architecture and its Advantages\nContent: This efficiency gain is particularly significant for large language models, which often contain hundreds of billions or even trillions of parameters. DeepSeek implemented a specialized load balancing loss function to ensure even utilization of experts across distributed hardware, further optimizing performance and preventing bottlenecks. (https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA)\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:15:56.286705",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'Impact on the AI Industry: Cost Efficiency and Democratization'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:21.618668",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'Impact on the AI Industry: Cost Efficiency and Democratization'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:21.647228",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'Comparison with DeepSeek R1 and Other Models'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:21.671776",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83e\udd16 AI Research Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:21.695917",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: Comparison with DeepSeek R1 and Other Models...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:25.936242",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:29.720830",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['Deepseek v3 R1 model specifications, benchmarks, capabilities', 'Deepseek R1 vs Deepseek v3 R1 comparison performance, cost, features', 'Deepseek v3 R1 vs OpenAI o1, Claude 3.5, other LLMs comparison reasoning, coding, math, cost', 'Deepseek v3 R1 impact AI industry applications, research, trends']...",
+ "metadata": [
+ "Deepseek v3 R1 model specifications, benchmarks, capabilities",
+ "Deepseek R1 vs Deepseek v3 R1 comparison performance, cost, features",
+ "Deepseek v3 R1 vs OpenAI o1, Claude 3.5, other LLMs comparison reasoning, coding, math, cost",
+ "Deepseek v3 R1 impact AI industry applications, research, trends"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:29.746498",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Deepseek v3 R1 model specifications, benchmarks, capabilities'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:29.767524",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Deepseek R1 vs Deepseek v3 R1 comparison performance, cost, features'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:29.789093",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Deepseek v3 R1 vs OpenAI o1, Claude 3.5, other LLMs comparison reasoning, coding, math, cost'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:29.810199",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Deepseek v3 R1 impact AI industry applications, research, trends'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:32.341530",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\n",
+ "metadata": "https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:32.369225",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://slashdot.org/software/comparison/DeepSeek-R1-vs-DeepSeek-V3/\n",
+ "metadata": "https://slashdot.org/software/comparison/DeepSeek-R1-vs-DeepSeek-V3/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:32.390534",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.prompthackers.co/compare/deepseek-v3/deepseek-r1\n",
+ "metadata": "https://www.prompthackers.co/compare/deepseek-v3/deepseek-r1"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:32.412184",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://venturebeat.com/ai/calm-down-deepseek-r1-is-great-but-chatgpts-product-advantage-is-far-from-over/\n",
+ "metadata": "https://venturebeat.com/ai/calm-down-deepseek-r1-is-great-but-chatgpts-product-advantage-is-far-from-over/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:32.436130",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.zdnet.com/article/i-tested-deepseeks-r1-and-v3-coding-skills-and-were-not-all-doomed-yet/\n",
+ "metadata": "https://www.zdnet.com/article/i-tested-deepseeks-r1-and-v3-coding-skills-and-were-not-all-doomed-yet/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:32.448685",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:32.481500",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.063481",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.083514",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 3 new images from 4 total images",
+ "metadata": [
+ "https://venturebeat.com/wp-content/uploads/2025/01/IMG_0975.png?w=276",
+ "https://venturebeat.com/wp-content/uploads/2025/01/IMG_1030.png?w=276",
+ "https://www.zdnet.com/article/i-tested-deepseeks-r1-and-v3-coding-skills-and-were-not-all-doomed-yet/"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.100156",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.123977",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Deepseek R1 vs Deepseek v3 R1 comparison performance, cost, features...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.290685",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1\n",
+ "metadata": "https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.310544",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/\n",
+ "metadata": "https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.335665",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/@lmpo/exploring-deepseek-version-3-a-technical-deep-dive-0b3d2c78b777\n",
+ "metadata": "https://medium.com/@lmpo/exploring-deepseek-version-3-a-technical-deep-dive-0b3d2c78b777"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.353111",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://gradientflow.com/deepseek-what-you-need-to-know/\n",
+ "metadata": "https://gradientflow.com/deepseek-what-you-need-to-know/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.385775",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://docsbot.ai/models/deepseek-v3\n",
+ "metadata": "https://docsbot.ai/models/deepseek-v3"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.411864",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.431691",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.895446",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.920852",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 7 total images",
+ "metadata": [
+ "https://docsbot.ai/_next/static/media/docsbot-logo.5cd91e1f.svg",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2024/12/unnamed-2024-12-27T175610.470.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2024/12/unnamed-2024-12-27T175650.395.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2024/12/unnamed-2024-12-27T180050.778.webp"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.940672",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:33.998813",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Deepseek v3 R1 model specifications, benchmarks, capabilities...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:34.214997",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://bottr.me/blog/deepseek\n",
+ "metadata": "https://bottr.me/blog/deepseek"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:34.238914",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://venturebeat.com/ai/open-source-deepseek-r1-uses-pure-reinforcement-learning-to-match-openai-o1-at-95-less-cost/\n",
+ "metadata": "https://venturebeat.com/ai/open-source-deepseek-r1-uses-pure-reinforcement-learning-to-match-openai-o1-at-95-less-cost/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:34.264892",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.analyticsvidhya.com/blog/2025/01/deepseek-r1-vs-openai-o1/\n",
+ "metadata": "https://www.analyticsvidhya.com/blog/2025/01/deepseek-r1-vs-openai-o1/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:34.279941",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/@mike.lydick/comparative-analysis-of-reasoning-approaches-openai-vs-deepseek-44e384b67b31\n",
+ "metadata": "https://medium.com/@mike.lydick/comparative-analysis-of-reasoning-approaches-openai-vs-deepseek-44e384b67b31"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:34.304806",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://nexustrade.io/blog/the-chinese-obliterated-openai-a-side-by-side-comparison-of-deepseek-r1-vs-openai-o1-for-finance-20250121\n",
+ "metadata": "https://nexustrade.io/blog/the-chinese-obliterated-openai-a-side-by-side-comparison-of-deepseek-r1-vs-openai-o1-for-finance-20250121"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:34.328443",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:34.353339",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:35.399510",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:35.419964",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 8 total images",
+ "metadata": [
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T173004.195.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172902.179.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172837.890.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172812.190.webp"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:35.441149",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:35.469203",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Deepseek v3 R1 vs OpenAI o1, Claude 3.5, other LLMs comparison reasoning, coding, math, cost...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:36.010702",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/\n",
+ "metadata": "https://felloai.com/2025/01/all-about-deepseek-the-rising-ai-powerhouse-challenging-industry-giants/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:36.045135",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reuters.com/technology/artificial-intelligence/what-is-deepseek-why-is-it-disrupting-ai-sector-2025-01-27/\n",
+ "metadata": "https://www.reuters.com/technology/artificial-intelligence/what-is-deepseek-why-is-it-disrupting-ai-sector-2025-01-27/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:36.066788",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/\n",
+ "metadata": "https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:36.087568",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.forwardfuture.ai/p/deepseek-s-open-source-ai-model-emerges-as-a-top-challenger\n",
+ "metadata": "https://www.forwardfuture.ai/p/deepseek-s-open-source-ai-model-emerges-as-a-top-challenger"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:36.110365",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://pub.towardsai.net/the-deepseek-revolution-why-this-ai-model-is-outperforming-tech-giants-in-85-of-enterprise-tasks-8fa3fd1284a2\n",
+ "metadata": "https://pub.towardsai.net/the-deepseek-revolution-why-this-ai-model-is-outperforming-tech-giants-in-85-of-enterprise-tasks-8fa3fd1284a2"
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:36.132426",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:36.155059",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:38.321101",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:38.340349",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 4 total images",
+ "metadata": [
+ "https://felloai.com/wp-content/uploads/2025/01/All-About-DeepSeek-Company-and-their-revolutionary-R1-and-V3-models-that-are-disruption-AI-Industry.jpg",
+ "https://felloai.com/wp-content/uploads/2025/01/deepseek-officially-tops-the-appstore-v0-eb8nxvvptdfe1.jpeg-831x1024.webp",
+ "https://felloai.com/wp-content/uploads/2025/01/Screenshot-2025-01-27-at-11.28.00-1-1024x387.png",
+ "https://theoutpost.ai/_next/image/?url=https%3A%2F%2Fcdn.theoutpost.ai%2Ffiles%2Fnews_story_image_9972_149006_4c6fa4690b.jpeg&w=3840&q=20"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:38.366189",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:38.391596",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Deepseek v3 R1 impact AI industry applications, research, trends...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:16:55.071294",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: When were DeepSeek-R1 and DeepSeek-V3 released?\nWhen were DeepSeek-R1 and DeepSeek-V3 released?\nHow does DeepSeek-R1's context window compare to DeepSeek-V3's?\nHow does DeepSeek-R1's context window compare to DeepSeek-V3's?\nHow do DeepSeek-R1 and DeepSeek-V3's prices compare?\nHow do DeepSeek-R1 and DeepSeek-V3's prices compare?\nIs DeepSeek-R1 or DeepSeek-V3 open source?\nIs DeepSeek-R1 or DeepSeek-V3 open source?\nWhat is the maximum output length of DeepSeek-R1 compared to DeepSeek-V3?\nWhat is the maximum output length of DeepSeek-R1 compared to DeepSeek-V3?\nWhich providers offer DeepSeek-R1 and DeepSeek-V3?\nWhich providers offer DeepSeek-R1 and DeepSeek-V3?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU-Pro benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU-Pro benchmark?\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: Frequently Asked QuestionsWhat are the key differences between DeepSeek-R1 and DeepSeek-V3?When were DeepSeek-R1 and DeepSeek-V3 released?How does DeepSeek-R1's context window compare to DeepSeek-V3's?How do DeepSeek-R1 and DeepSeek-V3's prices compare?Is DeepSeek-R1 or DeepSeek-V3 open source?What is the maximum output length of DeepSeek-R1 compared to DeepSeek-V3?Which providers offer DeepSeek-R1 and DeepSeek-V3?How do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU benchmark?How do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU-Pro benchmark?How do DeepSeek-R1 and DeepSeek-V3 compare on the GPQA benchmark?How do DeepSeek-R1 and DeepSeek-V3 compare on the IFEval benchmark?\nFrequently Asked Questions\nWhat are the key differences between DeepSeek-R1 and DeepSeek-V3?\nWhat are the key differences between DeepSeek-R1 and DeepSeek-V3?\nWhen were DeepSeek-R1 and DeepSeek-V3 released?\nWhen were DeepSeek-R1 and DeepSeek-V3 released?\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: More Model ComparisonsDeepSeek-R1 vs Claude Instant 1.2DeepSeek-R1 vs Claude 3.5 SonnetDeepSeek-R1 vs Claude 3.5 Sonnet (Oct 2024)DeepSeek-R1 vs Claude 3.5 Sonnet (Jun 2024)DeepSeek-R1 vs Claude 3 SonnetDeepSeek-R1 vs Claude 3 OpusDeepSeek-R1 vs Claude 3.5 HaikuDeepSeek-R1 vs Claude 3 HaikuDeepSeek-R1 vs Claude 2.1DeepSeek-R1 vs Claude 2DeepSeek-R1 vs Amazon Nova MicroDeepSeek-R1 vs Amazon Nova LiteDeepSeek-R1 vs Amazon Nova ProDeepSeek-R1 vs Command R+ (Aug 2024)DeepSeek-R1 vs Command R (Aug 2024)DeepSeek-R1 vs Gemma 2 27BDeepSeek-R1 vs Gemma 2 9BDeepSeek-R1 vs Gemini 1.0 UltraDeepSeek-R1 vs Gemini 1.0 ProDeepSeek-R1 vs Gemini 1.5 Pro (002)DeepSeek-R1 vs Gemini 1.5 Pro (001)DeepSeek-R1 vs Gemini 2.0 Flash Thinking (Experimental)DeepSeek-R1 vs Gemini 2.0 Flash (Experimental)DeepSeek-R1 vs Gemini 1.5 Flash (002)DeepSeek-R1 vs Gemini 1.5 Flash (001)DeepSeek-R1 vs Gemini 1.5 Flash-8BDeepSeek-R1 vs Llama 3.3 70B InstructDeepSeek-R1 vs Llama 3.2 90B Vision InstructDeepSeek-R1 vs Llama 3.2\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: More Model ComparisonsDeepSeek-R1 vs Claude Instant 1.2DeepSeek-R1 vs Claude 3.5 SonnetDeepSeek-R1 vs Claude 3.5 Sonnet (Oct 2024)DeepSeek-R1 vs Claude 3.5 Sonnet (Jun 2024)DeepSeek-R1 vs Claude 3 SonnetDeepSeek-R1 vs Claude 3 OpusDeepSeek-R1 vs Claude 3.5 HaikuDeepSeek-R1 vs Claude 3 HaikuDeepSeek-R1 vs Claude 2.1DeepSeek-R1 vs Claude 2DeepSeek-R1 vs Amazon Nova MicroDeepSeek-R1 vs Amazon Nova LiteDeepSeek-R1 vs Amazon Nova ProDeepSeek-R1 vs Command R+ (Aug 2024)DeepSeek-R1 vs Command R (Aug 2024)DeepSeek-R1 vs Gemma 2 27BDeepSeek-R1 vs Gemma 2 9BDeepSeek-R1 vs Gemini 1.0 UltraDeepSeek-R1 vs Gemini 1.0 ProDeepSeek-R1 vs Gemini 1.5 Pro (002)DeepSeek-R1 vs Gemini 1.5 Pro (001)DeepSeek-R1 vs Gemini 2.0 Flash Thinking (Experimental)DeepSeek-R1 vs Gemini 2.0 Flash (Experimental)DeepSeek-R1 vs Gemini 1.5 Flash (002)DeepSeek-R1 vs Gemini 1.5 Flash (001)DeepSeek-R1 vs Gemini 1.5 Flash-8BDeepSeek-R1 vs Llama 3.3 70B InstructDeepSeek-R1 vs Llama 3.2 90B Vision InstructDeepSeek-R1 vs Llama 3.2\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: Frequently Asked QuestionsWhat are the key differences between DeepSeek-R1 and DeepSeek-V3?When were DeepSeek-R1 and DeepSeek-V3 released?How does DeepSeek-R1's context window compare to DeepSeek-V3's?How do DeepSeek-R1 and DeepSeek-V3's prices compare?Is DeepSeek-R1 or DeepSeek-V3 open source?What is the maximum output length of DeepSeek-R1 compared to DeepSeek-V3?Which providers offer DeepSeek-R1 and DeepSeek-V3?How do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU benchmark?How do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU-Pro benchmark?How do DeepSeek-R1 and DeepSeek-V3 compare on the GPQA benchmark?How do DeepSeek-R1 and DeepSeek-V3 compare on the IFEval benchmark?\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: More Model Comparisons\nDeepSeek-R1 vs Claude Instant 1.2\nDeepSeek-R1 vs Claude 3.5 Sonnet\nDeepSeek-R1 vs Claude 3.5 Sonnet (Oct 2024)\nDeepSeek-R1 vs Claude 3.5 Sonnet (Jun 2024)\nDeepSeek-R1 vs Claude 3 Sonnet\nDeepSeek-R1 vs Claude 3 Opus\nDeepSeek-R1 vs Claude 3.5 Haiku\nDeepSeek-R1 vs Claude 3 Haiku\nDeepSeek-R1 vs Claude 2.1\nDeepSeek-R1 vs Claude 2\nDeepSeek-R1 vs Amazon Nova Micro\nDeepSeek-R1 vs Amazon Nova Lite\nDeepSeek-R1 vs Amazon Nova Pro\nDeepSeek-R1 vs Command R+ (Aug 2024)\nDeepSeek-R1 vs Command R (Aug 2024)\nDeepSeek-R1 vs Gemma 2 27B\nDeepSeek-R1 vs Gemma 2 9B\nDeepSeek-R1 vs Gemini 1.0 Ultra\nDeepSeek-R1 vs Gemini 1.0 Pro\nDeepSeek-R1 vs Gemini 1.5 Pro (002)\nDeepSeek-R1 vs Gemini 1.5 Pro (001)\nDeepSeek-R1 vs Gemini 2.0 Flash Thinking (Experimental)\nDeepSeek-R1 vs Gemini 2.0 Flash (Experimental)\nDeepSeek-R1 vs Gemini 1.5 Flash (002)\nDeepSeek-R1 vs Gemini 1.5 Flash (001)\nDeepSeek-R1 vs Gemini 1.5 Flash-8B\nDeepSeek-R1 vs Llama 3.3 70B Instruct\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: CompareDeepSeek-R1 vs DeepSeek-V3Get a detailed comparison of AI language models DeepSeek's DeepSeek-R1 and DeepSeek's DeepSeek-V3, including model features, token pricing, API costs, performance benchmarks, and real-world capabilities to help you choose the right LLM for your needs.\nDeepSeek-R1 vs DeepSeek-V3\nGet a detailed comparison of AI language models DeepSeek's DeepSeek-R1 and DeepSeek's DeepSeek-V3, including model features, token pricing, API costs, performance benchmarks, and real-world capabilities to help you choose the right LLM for your needs.\nHomeModelsCompareDeepSeek-R1 vs DeepSeek-V3\nDeepSeek-R1 vs DeepSeek-V3\nDeepSeek-R1 vs DeepSeek-V3\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: Model PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-R1 and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nModel PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-R1 and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nCompare performance metrics between DeepSeek-R1 and DeepSeek-V3. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: DeepSeek-R1 or DeepSeek-V3 open source?What is the maximum output length of DeepSeek-R1 compared to DeepSeek-V3?Which providers offer DeepSeek-R1 and DeepSeek-V3?How do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU benchmark?How do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU-Pro benchmark?How do DeepSeek-R1 and DeepSeek-V3 compare on the GPQA benchmark?How do DeepSeek-R1 and DeepSeek-V3 compare on the IFEval benchmark?\n\nSource: https://docsbot.ai/models/compare/deepseek-r1/deepseek-v3\nTitle: DeepSeek-R1 vs DeepSeek-V3 - Detailed Performance & Feature Comparison\nContent: How do DeepSeek-R1 and DeepSeek-V3 compare on the MMLU-Pro benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the GPQA benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the GPQA benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the IFEval benchmark?\nHow do DeepSeek-R1 and DeepSeek-V3 compare on the IFEval benchmark?\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:02.871531",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: the top 500 US students in the AIME (American Invitational Mathematics Examination)7. Coding and Programming DeepSeek R1: Scores 96.3% in the Codeforces competition, slightly behind OpenAI o1 (96.6%) but ahead of Llama 3.1 (89%)415.OpenAI o1: Demonstrates proficiency in code generation and debugging, with a ranking in the 89th percentile on Codeforces7. Logical Problem-Solving DeepSeek R1: Uses a chain-of-thought approach to verify intermediate steps, making it highly effective for tasks requiring deep reasoning11.OpenAI o1: Leverages reasoning tokens to break down tasks and generate refined outputs7. 4.\u00c2 Cost Efficiency and Accessibility Pricing DeepSeek R1: Costs\u00c2 0.14permillioninputtokens(cachehit)and0.14permillioninputtokens(cachehit)and2.19 per million output tokens, making it 97% cheaper than Claude Sonnet 3.5 and 93% cheaper than OpenAI o124.OpenAI o1: Priced at\u00c2 1.50\u00e2\u0080\u00931.50\u00e2\u0080\u009360 per million input tokens and $60 per million output tokens, reflecting its premium capabilities2.\n\nSource: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: 37 billion per token. This design ensures efficiency and scalability411.OpenAI o1: Features a monolithic architecture with a 200,000-token context window, optimized for scientific reasoning and coding tasks7.Claude Sonnet 3.5: Focuses on ethical alignment and safety, using supervised fine-tuning and reinforcement learning with human feedback (RLHF)2. Training Approach DeepSeek R1: Employs reinforcement learning (RL) with minimal supervised data, emphasizing reasoning and interpretability24.OpenAI o1: Combines supervised fine-tuning (SFT) and RLHF for versatility and alignment7.Meta Llama 3.1: Uses a multilingual training approach, focusing on general-purpose tasks14. 3.\u00c2 Performance Benchmarks Mathematical Reasoning DeepSeek R1: Achieves 97.3% on the MATH-500 benchmark, outperforming OpenAI o1 (96.4%) and Llama 3.1 (69.3%)415.OpenAI o1: Excels in complex equations and ranks among the top 500 US students in the AIME (American Invitational Mathematics Examination)7. Coding and\n\nSource: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: outputs2. 6.\u00c2 User Experiences and Applications DeepSeek R1 Strengths: Praised for its \u00e2\u0080\u009cthinking out loud\u00e2\u0080\u009d approach, providing visibility into its reasoning process11.Weaknesses: Some users report slower processing speeds for specific tasks7. OpenAI o1 Strengths: Excels in high-stakes academic and professional tasks, with detailed explanations7.Weaknesses: Higher computational costs and slower response times7. Claude Sonnet 3.5 Strengths: Balanced reasoning and ethical alignment make it ideal for safety-critical applications2.Weaknesses: Limited versatility compared to DeepSeek R1 and OpenAI o12. 7.\u00c2 Future Implications and Industry Impact DeepSeek R1\u00e2\u0080\u0099s open-source nature and cost efficiency could democratize AI development, enabling smaller teams to compete with tech giants. Its success despite US export controls highlights the importance of resource efficiency and innovation1215.OpenAI o1, while proprietary, continues to set benchmarks in scientific reasoning and coding. Its\n\nSource: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: OpenAI o1: Leverages reasoning tokens to break down tasks and generate refined outputs7.\n: Leverages reasoning tokens to break down tasks and generate refined outputs\n4.\u00c2 Cost Efficiency and Accessibility\n4.\u00c2 Cost Efficiency and Accessibility\n4.\u00c2 Cost Efficiency and Accessibility\n4.\u00c2 Cost Efficiency and Accessibility\nCost Efficiency and Accessibility\nDeepSeek R1: Costs\u00c2 0.14permillioninputtokens(cachehit)and0.14permillioninputtokens(cachehit)and2.19 per million output tokens, making it 97% cheaper than Claude Sonnet 3.5 and 93% cheaper than OpenAI o124.OpenAI o1: Priced at\u00c2 1.50\u00e2\u0080\u00931.50\u00e2\u0080\u009360 per million input tokens and $60 per million output tokens, reflecting its premium capabilities2.\n\nSource: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: The competition between these models is driving rapid advancements in AI, benefiting researchers, developers, and end-users alike.\nDeepSeek R1 and OpenAI o1 represent two distinct approaches to advancing AI capabilities. While DeepSeek R1 excels in cost efficiency, accessibility, and reasoning tasks, OpenAI o1 leads in scientific reasoning and coding benchmarks. Other models like Claude Sonnet 3.5 and Meta Llama 3.1 offer unique strengths in ethical alignment and multilingual capabilities.\nDeepSeek R1 and OpenAI o1 represent two distinct approaches to advancing AI capabilities. While DeepSeek R1 excels in cost efficiency, accessibility, and reasoning tasks, OpenAI o1 leads in scientific reasoning and coding benchmarks. Other models like Claude Sonnet 3.5 and Meta Llama 3.1 offer unique strengths in ethical alignment and multilingual capabilities.\n\nSource: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: DeepSeek R1 and OpenAI o1 represent two distinct approaches to advancing AI capabilities. While DeepSeek R1 excels in cost efficiency, accessibility, and reasoning tasks, OpenAI o1 leads in scientific reasoning and coding benchmarks. Other models like Claude Sonnet 3.5 and Meta Llama 3.1 offer unique strengths in ethical alignment and multilingual capabilities.\nAs the AI landscape evolves, the choice between these models will depend on specific use cases, budget constraints, and the need for customization. DeepSeek R1\u00e2\u0080\u0099s open-source model and affordability make it a game-changer, while OpenAI o1\u00e2\u0080\u0099s advanced features ensure its place at the forefront of AI innovation.\nAs the AI landscape evolves, the choice between these models will depend on specific use cases, budget constraints, and the need for customization. DeepSeek R1\u00e2\u0080\u0099s open-source model and affordability make it a game-changer, while OpenAI o1\u00e2\u0080\u0099s advanced features ensure its place at the forefront of AI innovation.\n\nSource: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: per million input tokens and $60 per million output tokens, reflecting its premium capabilities2. Accessibility DeepSeek R1: Open-source under an MIT license, allowing researchers to study, modify, and build on the model411.OpenAI o1: Proprietary, with limited access through pay-per-use APIs7. 5.\u00c2 Unique Features and Innovations DeepSeek R1 Open-Source Nature: Enables widespread adoption and customization411.Distilled Models: Offers six smaller versions (1.5B to 70B parameters) for local deployment and specific use cases4.Reinforcement Learning: Focuses on reasoning and interpretability, reducing reliance on supervised data2. OpenAI o1 Chain-of-Thought Mechanism: Enhances logical coherence and problem-solving accuracy7.Vision API Integration: Supports image analysis, expanding its application scope7. Claude Sonnet 3.5 Ethical Alignment: Prioritizes safety and ethical considerations in AI outputs2. 6.\u00c2 User Experiences and Applications DeepSeek R1 Strengths: Praised for its \u00e2\u0080\u009cthinking\n\nSource: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: o1, while proprietary, continues to set benchmarks in scientific reasoning and coding. Its integration with vision APIs and other advanced features ensures its relevance in high-stakes applications7.The competition between these models is driving rapid advancements in AI, benefiting researchers, developers, and end-users alike. 8.\u00c2 Conclusion DeepSeek R1 and OpenAI o1 represent two distinct approaches to advancing AI capabilities. While DeepSeek R1 excels in cost efficiency, accessibility, and reasoning tasks, OpenAI o1 leads in scientific reasoning and coding benchmarks. Other models like Claude Sonnet 3.5 and Meta Llama 3.1 offer unique strengths in ethical alignment and multilingual capabilities.As the AI landscape evolves, the choice between these models will depend on specific use cases, budget constraints, and the need for customization. DeepSeek R1\u00e2\u0080\u0099s open-source model and affordability make it a game-changer, while OpenAI o1\u00e2\u0080\u0099s advanced features ensure its place at the\n\nSource: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: DeepSeek R1: Costs\u00c2 0.14permillioninputtokens(cachehit)and0.14permillioninputtokens(cachehit)and2.19 per million output tokens, making it 97% cheaper than Claude Sonnet 3.5 and 93% cheaper than OpenAI o124.OpenAI o1: Priced at\u00c2 1.50\u00e2\u0080\u00931.50\u00e2\u0080\u009360 per million input tokens and $60 per million output tokens, reflecting its premium capabilities2.\nDeepSeek R1: Costs\u00c2 0.14permillioninputtokens(cachehit)and0.14permillioninputtokens(cachehit)and2.19 per million output tokens, making it 97% cheaper than Claude Sonnet 3.5 and 93% cheaper than OpenAI o124.\n2.19 per million output tokens, making it 97% cheaper than Claude Sonnet 3.5 and 93% cheaper than OpenAI o1\nOpenAI o1: Priced at\u00c2 1.50\u00e2\u0080\u00931.50\u00e2\u0080\u009360 per million input tokens and $60 per million output tokens, reflecting its premium capabilities2.\n: Priced at\n60 per million input tokens and $60 per million output tokens, reflecting its premium capabilities\n\nSource: https://bottr.me/blog/deepseek\nTitle: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs\nContent: DeepSeek R1 vs OpenAI o1 vs Other Leading LLMs: A Comprehensive Comparison The AI landscape is evolving rapidly, with new models pushing the boundaries of reasoning, coding, and problem-solving capabilities. Among the latest entrants, DeepSeek R1 has emerged as a formidable competitor to OpenAI\u00e2\u0080\u0099s o1 and other leading large language models (LLMs). This blog provides a detailed comparison of DeepSeek R1, OpenAI o1, and other prominent models, analyzing their technical specifications, performance benchmarks, cost efficiency, and unique features. 1.\u00c2 Introduction to the Contenders DeepSeek R1 DeepSeek R1, released in January 2025, is a reasoning-focused LLM developed by the Chinese AI startup DeepSeek. Built on the DeepSeek V3 architecture, it emphasizes logical reasoning, problem-solving, and interpretability. With 671 billion parameters and a Mixture-of-Experts (MoE) design, it activates only 37 billion parameters per token, ensuring efficiency. The model is open-source under an MIT\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:04.447795",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1\nTitle: DeepSeek-V3 vs DeepSeek-R1 - Detailed Performance & Feature Comparison\nContent: Model PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-V3 and DeepSeek-R1. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nModel PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-V3 and DeepSeek-R1. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\nCompare performance metrics between DeepSeek-V3 and DeepSeek-R1. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1\nTitle: DeepSeek-V3 vs DeepSeek-R1 - Detailed Performance & Feature Comparison\nContent: Model PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-V3 and DeepSeek-R1. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.BenchmarkDeepSeek-V3DeepSeek-R1MMLUMassive Multitask Language Understanding - Tests knowledge across 57 subjects including mathematics, history, law, and more88.5%EMSource90.8%Pass@1SourceMMLU-ProA more robust MMLU benchmark with harder, reasoning-focused questions, a larger choice set, and reduced prompt sensitivity75.9%EMSource84%EMSourceMMMUMassive Multitask Multimodal Understanding - Tests understanding across text, images, audio, and videoNot availableNot availableHellaSwagA challenging sentence completion benchmark88.9%10-shotSourceNot availableHumanEvalEvaluates code generation and problem-solving capabilities82.6%pass@1SourceNot availableMATHTests mathematical problem-solving abilities across various difficulty levels61.6%4-shotSourceNot availableGPQATests PhD-level knowledge in\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1\nTitle: DeepSeek-V3 vs DeepSeek-R1 - Detailed Performance & Feature Comparison\nContent: Model PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-V3 and DeepSeek-R1. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.BenchmarkDeepSeek-V3DeepSeek-R1MMLUMassive Multitask Language Understanding - Tests knowledge across 57 subjects including mathematics, history, law, and more88.5%EMSource90.8%Pass@1SourceMMLU-ProA more robust MMLU benchmark with harder, reasoning-focused questions, a larger choice set, and reduced prompt sensitivity75.9%EMSource84%EMSourceMMMUMassive Multitask Multimodal Understanding - Tests understanding across text, images, audio, and videoNot availableNot availableHellaSwagA challenging sentence completion benchmark88.9%10-shotSourceNot availableHumanEvalEvaluates code generation and problem-solving capabilities82.6%pass@1SourceNot availableMATHTests mathematical problem-solving abilities across various difficulty levels61.6%4-shotSourceNot availableGPQATests PhD-level knowledge in\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1\nTitle: DeepSeek-V3 vs DeepSeek-R1 - Detailed Performance & Feature Comparison\nContent: Price ComparisonCost comparison with other models (per million tokens).Scale:LogarithmicLinearInput Token CostsOutput Token CostsCalculate and Compare Model PricesModel PerformanceBenchmark ComparisonCompare performance metrics between DeepSeek-V3 and DeepSeek-R1. See how each model performs on key benchmarks measuring reasoning, knowledge and capabilities.BenchmarkDeepSeek-V3DeepSeek-R1MMLUMassive Multitask Language Understanding - Tests knowledge across 57 subjects including mathematics, history, law, and more88.5%EMSource90.8%Pass@1SourceMMLU-ProA more robust MMLU benchmark with harder, reasoning-focused questions, a larger choice set, and reduced prompt sensitivity75.9%EMSource84%EMSourceMMMUMassive Multitask Multimodal Understanding - Tests understanding across text, images, audio, and videoNot availableNot availableHellaSwagA challenging sentence completion benchmark88.9%10-shotSourceNot availableHumanEvalEvaluates code generation and problem-solving\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1\nTitle: DeepSeek-V3 vs DeepSeek-R1 - Detailed Performance & Feature Comparison\nContent: benchmark88.9%10-shotSourceNot availableHumanEvalEvaluates code generation and problem-solving capabilities82.6%pass@1SourceNot availableMATHTests mathematical problem-solving abilities across various difficulty levels61.6%4-shotSourceNot availableGPQATests PhD-level knowledge in chemistry, biology, and physics through multiple choice questions that require deep domain expertise59.1%pass@1Source71.5%Pass@1SourceIFEvalTests model's ability to accurately follow explicit formatting instructions, generate appropriate outputs, and maintain consistent instruction adherence across different tasks86.1%Prompt StrictSource83.3%Prompt StrictSourceFrequently Asked QuestionsWhat are the key differences between DeepSeek-V3 and DeepSeek-R1?When were DeepSeek-V3 and DeepSeek-R1 released?How does DeepSeek-V3's context window compare to DeepSeek-R1's?How do DeepSeek-V3 and DeepSeek-R1's prices compare?Is DeepSeek-V3 or DeepSeek-R1 open source?What is the maximum output length of DeepSeek-V3 compared to\n\nSource: https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/\nTitle: DeepSeek V3: $5.5M Trained Model Beats GPT-4o & Llama 3.1\nContent: length are meticulously controlled to ensure versatility and consistency across tasks. 4. Unmatched Performance and Stability Extensive evaluations confirm that DeepSeek V3 outperforms all open-source models and rivals leading closed-source AI systems. Despite its massive scale and complexity, the training process was exceptionally stable, with no irrecoverable loss spikes or rollbacks throughout the entire cycle. DeepSeek V3 is a testament to the power of innovation and collaboration, offering developers and researchers a powerful, scalable, and cost-effective tool to tackle a wide range of challenges in AI and beyond. Its open-source nature ensures accessibility, paving the way for breakthroughs in coding, reasoning, and multi-modal applications. Here are the links to download: ModelTotal ParametersContext LengthDownloadDeepSeek-V3-Base671B128KHuggingFaceDeepSeek-V3671B128KHuggingFace Evaluation of DeepSeek V3 on Different Benchmarks Benchmarks Evaluated MMLU-Pro (Exact Match \u2013 EM):\n\nSource: https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/\nTitle: DeepSeek V3: $5.5M Trained Model Beats GPT-4o & Llama 3.1\nContent: length are meticulously controlled to ensure versatility and consistency across tasks. 4. Unmatched Performance and Stability Extensive evaluations confirm that DeepSeek V3 outperforms all open-source models and rivals leading closed-source AI systems. Despite its massive scale and complexity, the training process was exceptionally stable, with no irrecoverable loss spikes or rollbacks throughout the entire cycle. DeepSeek V3 is a testament to the power of innovation and collaboration, offering developers and researchers a powerful, scalable, and cost-effective tool to tackle a wide range of challenges in AI and beyond. Its open-source nature ensures accessibility, paving the way for breakthroughs in coding, reasoning, and multi-modal applications. Here are the links to download: ModelTotal ParametersContext LengthDownloadDeepSeek-V3-Base671B128KHuggingFaceDeepSeek-V3671B128KHuggingFace Evaluation of DeepSeek V3 on Different Benchmarks Benchmarks Evaluated MMLU-Pro (Exact Match \u2013 EM):\n\nSource: https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/\nTitle: DeepSeek V3: $5.5M Trained Model Beats GPT-4o & Llama 3.1\nContent: length are meticulously controlled to ensure versatility and consistency across tasks. 4. Unmatched Performance and Stability Extensive evaluations confirm that DeepSeek V3 outperforms all open-source models and rivals leading closed-source AI systems. Despite its massive scale and complexity, the training process was exceptionally stable, with no irrecoverable loss spikes or rollbacks throughout the entire cycle. DeepSeek V3 is a testament to the power of innovation and collaboration, offering developers and researchers a powerful, scalable, and cost-effective tool to tackle a wide range of challenges in AI and beyond. Its open-source nature ensures accessibility, paving the way for breakthroughs in coding, reasoning, and multi-modal applications. Here are the links to download: ModelTotal ParametersContext LengthDownloadDeepSeek-V3-Base671B128KHuggingFaceDeepSeek-V3671B128KHuggingFace Evaluation of DeepSeek V3 on Different Benchmarks Benchmarks Evaluated MMLU-Pro (Exact Match \u2013 EM):\n\nSource: https://www.analyticsvidhya.com/blog/2024/12/deepseek-v3/\nTitle: DeepSeek V3: $5.5M Trained Model Beats GPT-4o & Llama 3.1\nContent: 3. Post-Training Enhancements: Knowledge Distillation for Reasoning Mastery\nDeepSeek V3 integrates an innovative knowledge distillation pipeline, leveraging reasoning capabilities from DeepSeek R1 series models. This pipeline incorporates advanced verification and reflection patterns into the model, dramatically improving its reasoning performance. Additionally, the output style and length are meticulously controlled to ensure versatility and consistency across tasks.\n4. Unmatched Performance and Stability\nExtensive evaluations confirm that DeepSeek V3 outperforms all open-source models and rivals leading closed-source AI systems. Despite its massive scale and complexity, the training process was exceptionally stable, with no irrecoverable loss spikes or rollbacks throughout the entire cycle.\n\nSource: https://docsbot.ai/models/compare/deepseek-v3/deepseek-r1\nTitle: DeepSeek-V3 vs DeepSeek-R1 - Detailed Performance & Feature Comparison\nContent: per token. It features innovative load balancing and multi-token prediction, trained on 14.8T tokens. The model achieves state-of-the-art performance across benchmarks while maintaining efficient training costs of only 2.788M H800 GPU hours. It incorporates reasoning capabilities distilled from DeepSeek-R1 and supports a 128K context window.DeepSeek-R1DeepSeek-R1 is a 671B parameter Mixture-of-Experts (MoE) model with 37B activated parameters per token, trained via large-scale reinforcement learning with a focus on reasoning capabilities. It incorporates two RL stages for discovering improved reasoning patterns and aligning with human preferences, along with two SFT stages for seeding reasoning and non-reasoning capabilities. The model achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks.Model OverviewFeatureDeepSeek-V3DeepSeek-R1Input Context WindowThe number of tokens supported by the input context window.128Ktokens128KtokensMaximum Output TokensThe\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:18.091829",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/\nTitle: DeepSeek V3: Open-Source AI Model Challenges Industry Giants with Impressive Performance\nContent: v3 will play a crucial role in driving advancements in accessibility, collaboration, and technical innovation. By combining innovative features with an open-weight design, DeepSeek v3 sets a new standard for what open source AI can achieve. Its ability to deliver high performance at a fraction of the cost of proprietary models makes it a compelling choice for developers, researchers, and organizations worldwide. As challenges are addressed and the model continues to evolve, its impact on the AI ecosystem is likely to grow, shaping the future of artificial intelligence for years to come.\n\nSource: https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/\nTitle: DeepSeek V3: Open-Source AI Model Challenges Industry Giants with Impressive Performance\nContent: v3 will play a crucial role in driving advancements in accessibility, collaboration, and technical innovation. By combining innovative features with an open-weight design, DeepSeek v3 sets a new standard for what open source AI can achieve. Its ability to deliver high performance at a fraction of the cost of proprietary models makes it a compelling choice for developers, researchers, and organizations worldwide. As challenges are addressed and the model continues to evolve, its impact on the AI ecosystem is likely to grow, shaping the future of artificial intelligence for years to come.\n\nSource: https://www.forwardfuture.ai/p/deepseek-s-open-source-ai-model-emerges-as-a-top-challenger\nTitle: DeepSeek V3: A 685B-Parameter Open-Source AI Powerhouse\nContent: Forward Future Takeaways:DeepSeek V3's release marks a significant milestone in the AI industry, showcasing that high-performance models can be developed efficiently and cost-effectively. Its open-source nature is poised to accelerate innovation and competition, potentially leading to more accessible and advanced AI applications across various sectors. However, the model's compliance with Chinese regulatory standards highlights the ongoing challenges of balancing technological advancement with governmental policies. \u2192 Read the full article here.\nForward Future Takeaways:\n\nSource: https://www.forwardfuture.ai/p/deepseek-s-open-source-ai-model-emerges-as-a-top-challenger\nTitle: DeepSeek V3: A 685B-Parameter Open-Source AI Powerhouse\nContent: Forward Future Takeaways:\nDeepSeek V3's release marks a significant milestone in the AI industry, showcasing that high-performance models can be developed efficiently and cost-effectively. Its open-source nature is poised to accelerate innovation and competition, potentially leading to more accessible and advanced AI applications across various sectors. However, the model's compliance with Chinese regulatory standards highlights the ongoing challenges of balancing technological advancement with governmental policies. \u2192\nRead the full article here\n\nSource: https://www.forwardfuture.ai/p/deepseek-s-open-source-ai-model-emerges-as-a-top-challenger\nTitle: DeepSeek V3: A 685B-Parameter Open-Source AI Powerhouse\nContent: Forward Future Takeaways:DeepSeek V3's release marks a significant milestone in the AI industry, showcasing that high-performance models can be developed efficiently and cost-effectively. Its open-source nature is poised to accelerate innovation and competition, potentially leading to more accessible and advanced AI applications across various sectors. However, the model's compliance with Chinese regulatory standards highlights the ongoing challenges of balancing technological advancement with governmental policies. \u2192 Read the full article here.\n\nSource: https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/\nTitle: DeepSeek V3: Open-Source AI Model Challenges Industry Giants with Impressive Performance\nContent: in AI deployment and use. Hardware requirements for running such a large model efficiently [5]. Additionally, being a Chinese company, DeepSeek is subject to regulatory oversight, which may influence certain model responses on sensitive topics [5]. Future Implications DeepSeek V3's success highlights the potential of open-source innovation in AI. As the model continues to evolve and address challenges, it could significantly impact the AI ecosystem, driving advancements in accessibility, collaboration, and technical innovation [1][4]. This development may also intensify competition in the AI industry, potentially leading to more rapid advancements and reduced costs for AI technologies [4][5].\n\nSource: https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/\nTitle: DeepSeek V3: Open-Source AI Model Challenges Industry Giants with Impressive Performance\nContent: These technical advancements highlight the innovative approach behind DeepSeek v3, setting a benchmark for future AI development practices. By addressing the challenges of scalability and cost, it paves the way for more accessible and efficient AI solutions. Independent benchmarks reveal that DeepSeek v3 performs on par with or surpasses proprietary models in several critical domains. Its performance highlights include: These results underscore the model's potential for applications requiring nuanced decision-making, problem-solving, and technical expertise. Its ability to deliver consistent results across diverse tasks makes it a reliable choice for both research and industry use. DeepSeek v3 is designed with accessibility and flexibility in mind, offering multiple ways for users to interact with and deploy the model. Key features include: This accessibility makes DeepSeek v3 an attractive option for developers, researchers, and organizations looking to explore new use cases or\n\nSource: https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/\nTitle: DeepSeek V3: Open-Source AI Model Challenges Industry Giants with Impressive Performance\nContent: These technical advancements highlight the innovative approach behind DeepSeek v3, setting a benchmark for future AI development practices. By addressing the challenges of scalability and cost, it paves the way for more accessible and efficient AI solutions. Independent benchmarks reveal that DeepSeek v3 performs on par with or surpasses proprietary models in several critical domains. Its performance highlights include: These results underscore the model's potential for applications requiring nuanced decision-making, problem-solving, and technical expertise. Its ability to deliver consistent results across diverse tasks makes it a reliable choice for both research and industry use. DeepSeek v3 is designed with accessibility and flexibility in mind, offering multiple ways for users to interact with and deploy the model. Key features include: This accessibility makes DeepSeek v3 an attractive option for developers, researchers, and organizations looking to explore new use cases or\n\nSource: https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/\nTitle: DeepSeek V3: Open-Source AI Model Challenges Industry Giants with Impressive Performance\nContent: crucial role in driving advancements in accessibility, collaboration, and technical innovation. By combining innovative features with an open-weight design, DeepSeek v3 sets a new standard for what open source AI can achieve. Its ability to deliver high performance at a fraction of the cost of proprietary models makes it a compelling choice for developers, researchers, and organizations worldwide. As challenges are addressed and the model continues to evolve, its impact on the AI ecosystem is likely to grow, shaping the future of artificial intelligence for years to come.\n\nSource: https://theoutpost.ai/news-story/deep-seek-v3-a-powerful-open-source-ai-model-challenges-industry-leaders-9972/\nTitle: DeepSeek V3: Open-Source AI Model Challenges Industry Giants with Impressive Performance\nContent: practices. By addressing the challenges of scalability and cost, it paves the way for more accessible and efficient AI solutions. Independent benchmarks reveal that DeepSeek v3 performs on par with or surpasses proprietary models in several critical domains. Its performance highlights include: These results underscore the model's potential for applications requiring nuanced decision-making, problem-solving, and technical expertise. Its ability to deliver consistent results across diverse tasks makes it a reliable choice for both research and industry use. DeepSeek v3 is designed with accessibility and flexibility in mind, offering multiple ways for users to interact with and deploy the model. Key features include: This accessibility makes DeepSeek v3 an attractive option for developers, researchers, and organizations looking to explore new use cases or enhance existing systems. Its flexibility ensures that it can be seamlessly integrated into a wide range of applications. While\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:18.119937",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.019601880000000002",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:18.145091",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "generating_draft_sections",
+ "output": "\ud83d\udcd1 Generating draft section titles for 'Comparison with DeepSeek R1 and Other Models'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:24.672900",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "draft_sections_generated",
+ "output": "\ud83d\uddc2\ufe0f Draft section titles generated for 'Comparison with DeepSeek R1 and Other Models'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:24.698477",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_relevant_written_content",
+ "output": "\ud83d\udd0e Getting relevant written content based on query: Comparison with DeepSeek R1 and Other Models...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:25.836035",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "relevant_contents_context",
+ "output": "\ud83d\udcc3 Title: Redefining Cost-Performance Ratios in Large Language Models\nContent: DeepSeek-R1's development cost of approximately $6 million (https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/) significantly challenges the prevailing notion that cutting-edge AI requires exorbitant expenditure. This contrasts sharply with the estimated $100 million development cost of OpenAI's GPT-4 (https://mashable.com/article/what-ai-experts-saying-about-deepseek-r1), highlighting DeepSeek's disruptive approach to cost efficiency. This achievement is attributed not only to architectural innovations like the Mixture of Experts (MoE) and Multihead Latent Attention (MLA) but also to the strategic application of reinforcement learning with Group Relative Policy Optimization (GRPO) (https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA). This combination allows DeepSeek to achieve comparable or superior performance to its competitors while drastically reducing the financial\n\nTitle: Multi-Token Prediction (MTP) and FP8 Quantization: Enhancing Throughput and Memory Efficiency\nContent: achieves this by employing adaptive bit-width scaling and loss-aware quantization techniques, ensuring stability and minimizing performance degradation. (https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA)\n\nTitle: Performance Benchmarks and Comparisons\nContent: DeepSeek-V3 boasts impressive performance across various benchmarks. In the English Massive Multitask Language Understanding (MMLU) benchmark, it achieves an accuracy of 88.5%, surpassing several other leading large language models. (https://play.ht/blog/deepseek-vs-claude-vs-llama-vs-chatgpt/) On the HumanEval-Mul coding benchmark, it achieves a pass rate of 82.6%, demonstrating its strong coding capabilities. These results indicate that DeepSeek-V3's architectural innovations, combined with its efficient training methodology, translate into tangible performance gains. It\u2019s important to note that while these benchmarks provide valuable insights, they should be interpreted with caution, as factors like data selection and evaluation metrics can influence the results. Furthermore, comparisons across different models should consider variations in training data, model size, and evaluation protocols.\n\nTitle: Training Methodology and Efficiency\nContent: innovative training approach allows DeepSeek to achieve high performance with fewer computational resources, contributing to the model's overall efficiency. DeepSeek-V3's training data comprises 14.8 trillion tokens, a substantial dataset that contributes to its broad knowledge base and strong performance across various tasks. The combination of a large training dataset, efficient architecture, and innovative training methodology positions DeepSeek-V3 as a highly competitive model in the large language model landscape.\n\nTitle: Potential Geopolitical Implications and Market Dynamics\nContent: DeepSeek-R1's emergence as a strong contender in the AI landscape has geopolitical implications, particularly concerning the balance of power in AI development. Its origin in China challenges the dominance of U.S.-based companies like OpenAI and Google, potentially leading to a more multipolar AI landscape. This shift could influence international collaborations, data sharing agreements, and the development of AI regulations. Furthermore, DeepSeek's cost-effective approach could pressure established players to re-evaluate their pricing strategies and invest in more efficient training methodologies. This increased competition could ultimately benefit consumers and businesses by driving down the cost of AI services and accelerating the development of more accessible and powerful AI solutions. However, concerns about data security, intellectual property, and potential biases in models trained on specific datasets remain important considerations as the global AI landscape evolves.\n\nTitle: Mixture of Experts (MoE) Architecture and its Advantages\nContent: DeepSeek-V3 employs a Mixture of Experts (MoE) architecture, a crucial element contributing to its efficiency and performance. Unlike traditional monolithic models, MoE divides the model into a collection of \"expert\" networks, each specializing in different aspects of the data. For each input token, a \"gating network\" decides which experts are most relevant and activates only those, leaving the rest dormant. This selective activation drastically reduces computational costs during inference, as only a fraction of the model's parameters are engaged for each token. (https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/) DeepSeek claims this approach makes V3 10x more efficient than some peers and 3-7x better considering other innovations. (https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/?utm_source=rss&utm_campaign=storylines_en) This efficiency gain is particularly significant for large language models, which often contain\n\nTitle: Mixture of Experts (MoE) Architecture and its Advantages\nContent: This efficiency gain is particularly significant for large language models, which often contain hundreds of billions or even trillions of parameters. DeepSeek implemented a specialized load balancing loss function to ensure even utilization of experts across distributed hardware, further optimizing performance and preventing bottlenecks. (https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA)\n\nTitle: Fostering Innovation in Resource-Constrained Environments\nContent: DeepSeek-R1's efficiency opens up new possibilities for AI deployment in resource-constrained environments, such as edge devices and mobile platforms. Its optimized architecture and reduced computational demands make it suitable for applications where processing power and memory are limited. This expands the potential reach of AI beyond traditional data centers, enabling innovative applications in areas like IoT, mobile computing, and on-device personalized AI experiences. This focus on efficiency could drive the development of specialized hardware and software solutions tailored for resource-constrained deployments, further accelerating the adoption of AI in diverse contexts.\n\nTitle: Training Methodology and Efficiency\nContent: DeepSeek-V3's training process is remarkably efficient, both in terms of time and cost. The company reports a development cost of approximately $6 million, significantly lower than the development costs of many comparable large language models. (https://www.linkedin.com/news/story/dominant-nvidia-tested-by-deepseek-7138610/) This cost-effectiveness is attributed to the model's efficient architecture and training methodology. DeepSeek utilizes a multi-stage training approach combining Supervised Fine-tuning (SFT) and Reinforcement Learning (RL). Specifically, they employ Group Relative Policy Optimization (GRPO), a more efficient alternative to Proximal Policy Optimization (PPO) and Detached Policy Optimization (DPO) for reinforcement learning. (https://www.linkedin.com/posts/philipp-schmid-a6a2bb196_does-deepseek-impact-how-the-next-iteration-activity-7290291368923459584-XpcA) This innovative training approach allows DeepSeek to achieve high performance with fewer computational\n\nTitle: Open-Source Paradigm Shift and Collaborative Development\nContent: DeepSeek-R1's open-source nature under the MIT license (https://arbisoft.com/blogs/deep-seek-r1-the-chinese-ai-powerhouse-outperforming-open-ai-s-o1-at-95-less-cost) represents a significant departure from the closed-source models prevalent in the industry. This open approach fosters community involvement, allowing researchers and developers to scrutinize, modify, and build upon the model's architecture and training methods. This transparency promotes rapid iteration and collaborative innovation, potentially accelerating the overall pace of AI development. While previous open-source LLMs have existed, DeepSeek-R1's competitive performance combined with its open availability distinguishes it as a potential catalyst for a broader shift towards community-driven AI development. This open-source strategy also democratizes access to advanced AI capabilities, empowering smaller companies and individual researchers who may lack the resources to develop such models independently.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:25.884118",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'Comparison with DeepSeek R1 and Other Models'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:34.475598",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'Comparison with DeepSeek R1 and Other Models'",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:34.504828",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_conclusion",
+ "output": "\u270d\ufe0f Writing conclusion for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-01-29T22:17:38.761218",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "conclusion_written",
+ "output": "\ud83d\udcdd Conclusion written for 'Give me a detailed research report about Deepseek v3 R1 model and how its impacting the AI industry. '",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0,
+ "type": "images",
+ "output": "[\"https://venturebeat.com/wp-content/uploads/2025/01/IMG_0975.png?w=276\", \"https://venturebeat.com/wp-content/uploads/2025/01/IMG_1030.png?w=276\", \"https://www.zdnet.com/article/i-tested-deepseeks-r1-and-v3-coding-skills-and-were-not-all-doomed-yet/\", \"https://docsbot.ai/_next/static/media/docsbot-logo.5cd91e1f.svg\", \"https://cdn.analyticsvidhya.com/wp-content/uploads/2024/12/unnamed-2024-12-27T175610.470.webp\", \"https://cdn.analyticsvidhya.com/wp-content/uploads/2024/12/unnamed-2024-12-27T175650.395.webp\", \"https://cdn.analyticsvidhya.com/wp-content/uploads/2024/12/unnamed-2024-12-27T180050.778.webp\", \"https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T173004.195.webp\", \"https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172902.179.webp\", \"https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172837.890.webp\"]",
+ "content": "selected_images",
+ "metadata": [
+ "https://venturebeat.com/wp-content/uploads/2025/01/IMG_0975.png?w=276",
+ "https://venturebeat.com/wp-content/uploads/2025/01/IMG_1030.png?w=276",
+ "https://www.zdnet.com/article/i-tested-deepseeks-r1-and-v3-coding-skills-and-were-not-all-doomed-yet/",
+ "https://docsbot.ai/_next/static/media/docsbot-logo.5cd91e1f.svg",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2024/12/unnamed-2024-12-27T175610.470.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2024/12/unnamed-2024-12-27T175650.395.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2024/12/unnamed-2024-12-27T180050.778.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T173004.195.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172902.179.webp",
+ "https://cdn.analyticsvidhya.com/wp-content/uploads/2025/01/unnamed-2025-01-21T172837.890.webp"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1738538904_How did world acceptance corp perform during its december ending period Q3 2025.json b/outputs/task_1738538904_How did world acceptance corp perform during its december ending period Q3 2025.json
new file mode 100644
index 0000000000000000000000000000000000000000..dd964e1badff334ff490101c67f9afaa13779066
--- /dev/null
+++ b/outputs/task_1738538904_How did world acceptance corp perform during its december ending period Q3 2025.json
@@ -0,0 +1,660 @@
+{
+ "timestamp": "2025-02-03T04:58:24.304427",
+ "events": [
+ {
+ "timestamp": "2025-02-03T04:58:29.403124",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'How did world acceptance corp perform during its december ending period Q3 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:29.412391",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83d\udcb0 Finance Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:29.422789",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: How did world acceptance corp perform during its december ending period Q3 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:33.584291",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:36.771672",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['World Acceptance Corporation Q3 2025 earnings report', 'WRLD Q3 2025 financial results', 'World Acceptance Corp investor relations Q3 2025', 'World Acceptance Corporation Q3 2025 SEC filings', 'How did world acceptance corp perform during its december ending period Q3 2025']...",
+ "metadata": [
+ "World Acceptance Corporation Q3 2025 earnings report",
+ "WRLD Q3 2025 financial results",
+ "World Acceptance Corp investor relations Q3 2025",
+ "World Acceptance Corporation Q3 2025 SEC filings",
+ "How did world acceptance corp perform during its december ending period Q3 2025"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:36.785123",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'World Acceptance Corporation Q3 2025 earnings report'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:36.801028",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'WRLD Q3 2025 financial results'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:36.817605",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'World Acceptance Corp investor relations Q3 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:36.832569",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'World Acceptance Corporation Q3 2025 SEC filings'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:36.849363",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'How did world acceptance corp perform during its december ending period Q3 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:39.370727",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.stocktitan.net/news/WRLD/world-acceptance-corporation-reports-fiscal-2025-third-quarter-xcogb138wqvw.html\n",
+ "metadata": "https://www.stocktitan.net/news/WRLD/world-acceptance-corporation-reports-fiscal-2025-third-quarter-xcogb138wqvw.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:39.382802",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.investing.com/news/transcripts/earnings-call-transcript-world-acceptance-beats-q3-2025-forecasts-93CH-3834989\n",
+ "metadata": "https://www.investing.com/news/transcripts/earnings-call-transcript-world-acceptance-beats-q3-2025-forecasts-93CH-3834989"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:39.395255",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.marketbeat.com/earnings/reports/2025-1-28-world-acceptance-co-stock/\n",
+ "metadata": "https://www.marketbeat.com/earnings/reports/2025-1-28-world-acceptance-co-stock/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:39.408080",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://finance.yahoo.com/news/world-acceptance-corporation-reports-fiscal-123000861.html\n",
+ "metadata": "https://finance.yahoo.com/news/world-acceptance-corporation-reports-fiscal-123000861.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:39.420378",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.marketscreener.com/quote/stock/WORLD-ACCEPTANCE-CORPORAT-11409/news/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results-48889868/\n",
+ "metadata": "https://www.marketscreener.com/quote/stock/WORLD-ACCEPTANCE-CORPORAT-11409/news/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results-48889868/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:39.431316",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:39.444118",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:40.734312",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:40.744987",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 6 total images",
+ "metadata": [
+ "https://www.marketbeat.com/logos/articles/thumb_20241104115452_options-traders-bet-big-on-these-3-tech-stocks.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20241101152430_how-to-play-new-options-trading-with-bitcoin-etfs.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240718150215_how-to-execute-the-wheel-strategy-to-generate-opti.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240626075418_3-options-strategies-to-play-a-stocks-uptrend-if-b.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:40.758914",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:40.769477",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: World Acceptance Corporation Q3 2025 earnings report...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:41.210590",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://grafa.com/news/technology-world-acceptance-corporation-reports-mixed-q3-2025-results-358917\n",
+ "metadata": "https://grafa.com/news/technology-world-acceptance-corporation-reports-mixed-q3-2025-results-358917"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:41.284484",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\n",
+ "metadata": "https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:41.298921",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://finance.yahoo.com/news/world-acceptance-corp-wrld-q3-070740803.html\n",
+ "metadata": "https://finance.yahoo.com/news/world-acceptance-corp-wrld-q3-070740803.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:41.310383",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:41.321321",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 3 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:43.046288",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:43.061175",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:43.072518",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:43.083286",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: WRLD Q3 2025 financial results...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:43.163223",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.businesswire.com/news/home/20250128675749/en/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results/\n",
+ "metadata": "https://www.businesswire.com/news/home/20250128675749/en/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:43.175627",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm\n",
+ "metadata": "https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:43.185698",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://finance.yahoo.com/news/q3-2025-world-acceptance-corp-053847809.html\n",
+ "metadata": "https://finance.yahoo.com/news/q3-2025-world-acceptance-corp-053847809.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:43.195791",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:43.208382",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 3 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:47.325234",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:47.335510",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:47.345243",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:47.361402",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: World Acceptance Corporation Q3 2025 SEC filings...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:47.523669",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://investorshangout.com/world-acceptance-corporations-q3-results-showcase-steady-progress-196687-/\n",
+ "metadata": "https://investorshangout.com/world-acceptance-corporations-q3-results-showcase-steady-progress-196687-/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:47.535529",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results\n",
+ "metadata": "https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:47.545797",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:47.558946",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 2 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:50.941607",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:50.951100",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://investorshangout.com/images/blog/ihnews-World%20Acceptance%20Corporation%27s%20Q3%20Results%20Showcase%20Steady%20Progress.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:50.961395",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:50.974165",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: How did world acceptance corp perform during its december ending period Q3 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:51.115801",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\n",
+ "metadata": "https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:51.128734",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:51.140461",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 1 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:52.405692",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 1 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:52.414687",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 2 new images from 6 total images",
+ "metadata": [
+ "https://www.marketbeat.com/logos/articles/thumb_20241210135548_neos-sp-500r-high-income-etf-harnasses-the-power-o.png",
+ "https://www.marketbeat.com/logos/articles/thumb_20241028111137_albemarle-stock-call-options-surge-what-it-means-f.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:52.426219",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:52.438206",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: World Acceptance Corp investor relations Q3 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:58:54.842914",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\nTitle: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Be\nContent: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Beats Estimate, Revenue Surpasses Expectations at $138.6 Million\nAnalyzing World Acceptance Corp's Third Quarter Financial Performance\nGuruFocus News 5 days ago\nGuruFocus News 5 days ago\nGuruFocus News 5 days ago\n5 days ago\n5 days ago\n\nSource: https://finance.yahoo.com/news/world-acceptance-corp-wrld-q3-070740803.html\nTitle: World Acceptance Corp (WRLD) Q3 2025 Earnings Call Highlights: Strong Portfolio and Customer ...\nContent: World Acceptance Corp (WRLD) Q3 2025 Earnings Call Highlights: Strong Portfolio and Customer ...\nWorld Acceptance Corp (WRLD) Q3 2025 Earnings Call Highlights: Strong Portfolio and Customer ...\nWorld Acceptance Corp (WRLD) Q3 2025 Earnings Call Highlights: Strong Portfolio and Customer ...\nGuruFocus News Fri, Jan 31, 2025, 12:37 PM 3 min read\nGuruFocus News Fri, Jan 31, 2025, 12:37 PM 3 min read\nGuruFocus News Fri, Jan 31, 2025, 12:37 PM 3 min read\nFri, Jan 31, 2025, 12:37 PM 3 min read\n3 min read\n\nSource: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\nTitle: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Be\nContent: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Beats Estimate, Revenue Surpasses Expectations at $138.6 Million Analyzing World Acceptance Corp's Third Quarter Financial Performance GuruFocus News 5 days ago Summary Revenue: Achieved $138.6 million, surpassing the estimated $131.79 million.Earnings Per Share (EPS): Reported at $2.45, significantly exceeding the estimated $1.23.Net Income: Decreased to $13.4 million from $16.7 million in the same quarter of the previous year, impacted by increased credit loss provisions.Loan Portfolio: Gross loans outstanding decreased by 1.4% year-over-year to $1.38 billion, but increased sequentially by 6.6% from the previous quarter.Customer Growth: Unique borrowers increased by 6.2% compared to the previous year, with new customer loan volume rising by 22.6%.Branch Performance: Same-store gross loans decreased by 0.2% over the past year, showing improvement from an 8.2% decrease in the prior year.Credit Quality: Recency delinquency\n\nSource: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\nTitle: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Be\nContent: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Beats Estimate, Revenue Surpasses Expectations at $138.6 Million Analyzing World Acceptance Corp's Third Quarter Financial Performance GuruFocus News 5 days ago Summary Revenue: Achieved $138.6 million, surpassing the estimated $131.79 million.Earnings Per Share (EPS): Reported at $2.45, significantly exceeding the estimated $1.23.Net Income: Decreased to $13.4 million from $16.7 million in the same quarter of the previous year, impacted by increased credit loss provisions.Loan Portfolio: Gross loans outstanding decreased by 1.4% year-over-year to $1.38 billion, but increased sequentially by 6.6% from the previous quarter.Customer Growth: Unique borrowers increased by 6.2% compared to the previous year, with new customer loan volume rising by 22.6%.Branch Performance: Same-store gross loans decreased by 0.2% over the past year, showing improvement from an 8.2% decrease in the prior year.Credit Quality: Recency delinquency\n\nSource: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\nTitle: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Be\nContent: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Beats Estimate, Revenue Surpasses Expectations at $138.6 Million Analyzing World Acceptance Corp's Third Quarter Financial Performance GuruFocus News 5 days ago Summary Revenue: Achieved $138.6 million, surpassing the estimated $131.79 million.Earnings Per Share (EPS): Reported at $2.45, significantly exceeding the estimated $1.23.Net Income: Decreased to $13.4 million from $16.7 million in the same quarter of the previous year, impacted by increased credit loss provisions.Loan Portfolio: Gross loans outstanding decreased by 1.4% year-over-year to $1.38 billion, but increased sequentially by 6.6% from the previous quarter.Customer Growth: Unique borrowers increased by 6.2% compared to the previous year, with new customer loan volume rising by 22.6%.Branch Performance: Same-store gross loans decreased by 0.2% over the past year, showing improvement from an 8.2% decrease in the prior year.Credit Quality: Recency delinquency\n\nSource: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\nTitle: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Be\nContent: On January 28, 2025, World Acceptance Corp (WRLD, Financial) released its 8-K filing detailing the financial results for its third quarter of fiscal 2025. The company, which operates a small-loan consumer finance business, reported a mixed performance with revenue exceeding expectations but earnings per share (EPS) falling short.\nRevenue and Earnings Overview\nWorld Acceptance Corp reported total revenues of $138.6 million for the third quarter, surpassing the analyst estimate of $131.79 million. This marks a 0.6% increase from the $137.7 million reported in the same quarter of the previous year. However, the company's diluted net income per share was $2.45, which is above the analyst estimate of $1.23, but a decrease from $2.84 in the prior year.\nOperational Highlights and Challenges\n\nSource: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\nTitle: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Be\nContent: Add-On API Manual of Stocks DCF Calculator WACC Calculator Interactive Chart Maps Fund Letters Archive Stock Comparison Table Mobile App Discussion Board Financial Calendar Embed Widgets Stock Market Holidays Discussion All Topics General Discussions Investment Ideas Strategies Article Comments Books Feedback Data Pricing Tutorials Tutorials Demo Center Financial Glossary FAQ Change Log Contact Us Support Chat Support Create a ticket Book Demo User Engagement Meeting 469-248-6885 Status Take Survey Subscribe Free Trial Group Subscription Refer a Friend and Earn One Month of Free Membership GURUFOCUS.COM Latest News U.S. Credit Services WRLD World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Beats Estimate, Revenue Surpasses Expectations at $138.6 Million Analyzing World Acceptance Corp's Third Quarter Financial Performance GuruFocus News 5 days ago Summary Revenue: Achieved $138.6 million, surpassing the estimated $131.79 million.Earnings Per Share (EPS): Reported at $2.45,\n\nSource: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\nTitle: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Be\nContent: Add-On API Manual of Stocks DCF Calculator WACC Calculator Interactive Chart Maps Fund Letters Archive Stock Comparison Table Mobile App Discussion Board Financial Calendar Embed Widgets Stock Market Holidays Discussion All Topics General Discussions Investment Ideas Strategies Article Comments Books Feedback Data Pricing Tutorials Tutorials Demo Center Financial Glossary FAQ Change Log Contact Us Support Chat Support Create a ticket Book Demo User Engagement Meeting 469-248-6885 Status Take Survey Subscribe Free Trial Group Subscription Refer a Friend and Earn One Month of Free Membership GURUFOCUS.COM Latest News U.S. Credit Services WRLD World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Beats Estimate, Revenue Surpasses Expectations at $138.6 Million Analyzing World Acceptance Corp's Third Quarter Financial Performance GuruFocus News 5 days ago Summary Revenue: Achieved $138.6 million, surpassing the estimated $131.79 million.Earnings Per Share (EPS): Reported at $2.45,\n\nSource: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\nTitle: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Be\nContent: On January 28, 2025, World Acceptance Corp (WRLD, Financial) released its 8-K filing detailing the financial results for its third quarter of fiscal 2025. The company, which operates a small-loan consumer finance business, reported a mixed performance with revenue exceeding expectations but earnings per share (EPS) falling short.Revenue and Earnings OverviewWorld Acceptance Corp reported total revenues of $138.6 million for the third quarter, surpassing the analyst estimate of $131.79 million. This marks a 0.6% increase from the $137.7 million reported in the same quarter of the previous year. However, the company's diluted net income per share was $2.45, which is above the analyst estimate of $1.23, but a decrease from $2.84 in the prior year.Operational Highlights and ChallengesThe company achieved improved loan growth, with gross loans outstanding increasing sequentially by 6.6% to $1.38 billion as of December 31, 2024. Despite this growth, the gross loans outstanding decreased by\n\nSource: https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million\nTitle: World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Be\nContent: On January 28, 2025, World Acceptance Corp (WRLD, Financial) released its 8-K filing detailing the financial results for its third quarter of fiscal 2025. The company, which operates a small-loan consumer finance business, reported a mixed performance with revenue exceeding expectations but earnings per share (EPS) falling short.Revenue and Earnings OverviewWorld Acceptance Corp reported total revenues of $138.6 million for the third quarter, surpassing the analyst estimate of $131.79 million. This marks a 0.6% increase from the $137.7 million reported in the same quarter of the previous year. However, the company's diluted net income per share was $2.45, which is above the analyst estimate of $1.23, but a decrease from $2.84 in the prior year.Operational Highlights and ChallengesThe company achieved improved loan growth, with gross loans outstanding increasing sequentially by 6.6% to $1.38 billion as of December 31, 2024. Despite this growth, the gross loans outstanding decreased by\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:59:00.480158",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: Earnings Stock AnalysisAnalyst ForecastsChartCompetitorsEarningsFinancialsHeadlinesInsider TradesOwnershipSEC FilingsShort InterestSustainabilityTrends World Acceptance Latest Earnings SummaryLatest Q3 2025 Earnings DateJan. 28ConfirmedActual EPS (Jan. 28) $2.45 Beat By $1.22 Consensus EPS (Jan. 28) $1.23 World Acceptance released Q3 2025 earnings on January 28, 2025, reporting an EPS of $2.45, which beat the consensus estimate of $1.23 by $1.22. With a trailing EPS of $14.32 and a P/E Ratio of 9.86, World Acceptance's earnings are expected to grow 7.71% next year, from $13.61 to $14.66 per share. Conference CallConference Call Transcript Get World Acceptance Earnings Alerts Want to stay updated on the latest earnings announcements and upcoming reports? Sign up for Earnings360's daily newsletter to receive timely earnings updates on World Acceptance and other key companies, straight to your inbox. Enter your email to sign up for newsletter Sign Up Skip Charts & View Estimated and\n\nSource: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: Earnings Stock AnalysisAnalyst ForecastsChartCompetitorsEarningsFinancialsHeadlinesInsider TradesOwnershipSEC FilingsShort InterestSustainabilityTrends World Acceptance Latest Earnings SummaryLatest Q3 2025 Earnings DateJan. 28ConfirmedActual EPS (Jan. 28) $2.45 Beat By $1.22 Consensus EPS (Jan. 28) $1.23 World Acceptance released Q3 2025 earnings on January 28, 2025, reporting an EPS of $2.45, which beat the consensus estimate of $1.23 by $1.22. With a trailing EPS of $14.32 and a P/E Ratio of 9.86, World Acceptance's earnings are expected to grow 7.71% next year, from $13.61 to $14.66 per share. Conference CallConference Call Transcript Get World Acceptance Earnings Alerts Want to stay updated on the latest earnings announcements and upcoming reports? Sign up for Earnings360's daily newsletter to receive timely earnings updates on World Acceptance and other key companies, straight to your inbox. Enter your email to sign up for newsletter Sign Up Skip Charts & View Estimated and\n\nSource: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: World Acceptance Latest Earnings Summary\nWorld Acceptance Latest Earnings Summary\nLatest Q3 2025 Earnings DateJan. 28ConfirmedActual EPS (Jan. 28) $2.45 Beat By $1.22 Consensus EPS (Jan. 28) $1.23 World Acceptance released Q3 2025 earnings on January 28, 2025, reporting an EPS of $2.45, which beat the consensus estimate of $1.23 by $1.22. With a trailing EPS of $14.32 and a P/E Ratio of 9.86, World Acceptance's earnings are expected to grow 7.71% next year, from $13.61 to $14.66 per share.\nLatest Q3 2025 Earnings DateJan. 28ConfirmedActual EPS (Jan. 28) $2.45 Beat By $1.22 Consensus EPS (Jan. 28) $1.23\nLatest Q3 2025 Earnings DateJan. 28ConfirmedActual EPS (Jan. 28) $2.45 Beat By $1.22 Consensus EPS (Jan. 28) $1.23\nLatest Q3 2025 Earnings DateJan. 28Confirmed\nActual EPS (Jan. 28) $2.45 Beat By $1.22\nBeat By $1.22\nConsensus EPS (Jan. 28) $1.23\n\nSource: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: World Acceptance Latest Earnings SummaryLatest Q3 2025 Earnings DateJan. 28ConfirmedActual EPS (Jan. 28) $2.45 Beat By $1.22 Consensus EPS (Jan. 28) $1.23 World Acceptance released Q3 2025 earnings on January 28, 2025, reporting an EPS of $2.45, which beat the consensus estimate of $1.23 by $1.22. With a trailing EPS of $14.32 and a P/E Ratio of 9.86, World Acceptance's earnings are expected to grow 7.71% next year, from $13.61 to $14.66 per share. Conference CallConference Call Transcript\nWorld Acceptance Latest Earnings SummaryLatest Q3 2025 Earnings DateJan. 28ConfirmedActual EPS (Jan. 28) $2.45 Beat By $1.22 Consensus EPS (Jan. 28) $1.23 World Acceptance released Q3 2025 earnings on January 28, 2025, reporting an EPS of $2.45, which beat the consensus estimate of $1.23 by $1.22. With a trailing EPS of $14.32 and a P/E Ratio of 9.86, World Acceptance's earnings are expected to grow 7.71% next year, from $13.61 to $14.66 per share. Conference CallConference Call Transcript\n\nSource: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: Actual EPS (Jan. 28) $2.45 Beat By $1.22\nBeat By $1.22\nConsensus EPS (Jan. 28) $1.23\nWorld Acceptance released Q3 2025 earnings on January 28, 2025, reporting an EPS of $2.45, which beat the consensus estimate of $1.23 by $1.22. With a trailing EPS of $14.32 and a P/E Ratio of 9.86, World Acceptance's earnings are expected to grow 7.71% next year, from $13.61 to $14.66 per share.\nConference CallConference Call Transcript\nConference CallConference Call Transcript\nConference Call Transcript\nGet World Acceptance Earnings Alerts Want to stay updated on the latest earnings announcements and upcoming reports? Sign up for Earnings360's daily newsletter to receive timely earnings updates on World Acceptance and other key companies, straight to your inbox. Enter your email to sign up for newsletter Sign Up\n\nSource: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: World Acceptance Latest Earnings SummaryLatest Q3 2025 Earnings DateJan. 28ConfirmedActual EPS (Jan. 28) $2.45 Beat By $1.22 Consensus EPS (Jan. 28) $1.23 World Acceptance released Q3 2025 earnings on January 28, 2025, reporting an EPS of $2.45, which beat the consensus estimate of $1.23 by $1.22. With a trailing EPS of $14.32 and a P/E Ratio of 9.86, World Acceptance's earnings are expected to grow 7.71% next year, from $13.61 to $14.66 per share. Conference CallConference Call Transcript Get World Acceptance Earnings Alerts Want to stay updated on the latest earnings announcements and upcoming reports? Sign up for Earnings360's daily newsletter to receive timely earnings updates on World Acceptance and other key companies, straight to your inbox. Enter your email to sign up for newsletter Sign Up Skip Charts & View Estimated and Actual Earnings DataWRLD Earnings Estimates and Actuals by QuarterEarnings Estimates and Acutals by Quarter Chart DescriptionThe chart below shows up to\n\nSource: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: Conference CallConference Call Transcript Get World Acceptance Earnings Alerts Want to stay updated on the latest earnings announcements and upcoming reports? Sign up for Earnings360's daily newsletter to receive timely earnings updates on World Acceptance and other key companies, straight to your inbox. Enter your email to sign up for newsletter Sign Up Skip Charts & View Estimated and Actual Earnings DataWRLD Earnings Estimates and Actuals by QuarterEarnings Estimates and Acutals by Quarter Chart DescriptionThe chart below shows up to four years of a company's earnings history. The dark blue line represents the company's actual earnings per share. The light blue area represents the range of Wall Street analysts' earnings estimates for each quarter.WRLD Estimated and Actual Revenue by QuarterEstimated Revenue and Actual Revenue by Quarter Chart DescriptionThe chart below shows up to four years of a company's revenue history. The dark blue line represents the company's actual revenue.\n\nSource: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: 2023$1.62$2.84+$1.22$2.84$134.95M$137.75M10/20/2023Q2 2024$1.44$2.71+$1.27$2.71$136.00M$136.88M7/21/2023Q2 2023$1.13$1.62+$0.49$1.62$139.02M$139.32M5/4/2023Q4 2023$1.49$1.97+$0.48-$0.43$148.57M$160.84M World Acceptance Earnings - Frequently Asked Questions When did World Acceptance announce their last quarterly earnings? World Acceptance (NASDAQ:WRLD) last announced its quarterly earning data on Tuesday, January 28, 2025. Learn more on WRLD's earnings history. Did World Acceptance beat their earnings estimates last quarter? In the previous quarter, World Acceptance (NASDAQ:WRLD) reported $2.45 earnings per share (EPS) to beat the analysts' consensus estimate of $1.23 by $1.22. Learn more on analysts' earnings estimate vs. WRLD's actual earnings. How can I listen to World Acceptance's earnings conference call? The conference call for World Acceptance's latest earnings report can be listened to online. Listen to Conference Call How can I read World Acceptance's conference call\n\nSource: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: 2023$1.62$2.84+$1.22$2.84$134.95M$137.75M10/20/2023Q2 2024$1.44$2.71+$1.27$2.71$136.00M$136.88M7/21/2023Q2 2023$1.13$1.62+$0.49$1.62$139.02M$139.32M5/4/2023Q4 2023$1.49$1.97+$0.48-$0.43$148.57M$160.84M World Acceptance Earnings - Frequently Asked Questions When did World Acceptance announce their last quarterly earnings? World Acceptance (NASDAQ:WRLD) last announced its quarterly earning data on Tuesday, January 28, 2025. Learn more on WRLD's earnings history. Did World Acceptance beat their earnings estimates last quarter? In the previous quarter, World Acceptance (NASDAQ:WRLD) reported $2.45 earnings per share (EPS) to beat the analysts' consensus estimate of $1.23 by $1.22. Learn more on analysts' earnings estimate vs. WRLD's actual earnings. How can I listen to World Acceptance's earnings conference call? The conference call for World Acceptance's latest earnings report can be listened to online. Listen to Conference Call How can I read World Acceptance's conference call\n\nSource: https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/\nTitle: \r\n\tWorld Acceptance (WRLD) Earnings Date and Reports 2025\r\n\nContent: 04:00 PM EasternExtended Trading$141.16 0.00 (0.00%) As of 01/31/2025 05:30 PM Eastern Extended trading is trading that happens on electronic markets outside of regular trading hours. This is a fair market value extended hours price provided by Polygon.io. Learn more. Add Compare Share Share Earnings Stock AnalysisAnalyst ForecastsChartCompetitorsEarningsFinancialsHeadlinesInsider TradesOwnershipSEC FilingsShort InterestSustainabilityTrends World Acceptance Latest Earnings SummaryLatest Q3 2025 Earnings DateJan. 28ConfirmedActual EPS (Jan. 28) $2.45 Beat By $1.22 Consensus EPS (Jan. 28) $1.23 World Acceptance released Q3 2025 earnings on January 28, 2025, reporting an EPS of $2.45, which beat the consensus estimate of $1.23 by $1.22. With a trailing EPS of $14.32 and a P/E Ratio of 9.86, World Acceptance's earnings are expected to grow 7.71% next year, from $13.61 to $14.66 per share. Conference CallConference Call Transcript Get World Acceptance Earnings Alerts Want to stay updated\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:59:09.211810",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results | Morningstar\nContent: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nWorld Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nWorld Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nWorld Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nProvided by Business Wire Jan 28, 2025 12:30pm\nProvided by Business Wire Jan 28, 2025 12:30pm\nProvided by Business Wire\nJan 28, 2025 12:30pm\n\nSource: https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results | Morningstar\nContent: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nWorld Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025.\nThird fiscal quarter highlights\nDuring its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year.\nHighlights from the third quarter include:\nIncrease in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year\nNet income of $13.4 million\nDiluted net income per share of $2.45\nRecency delinquency on accounts 90+ days past due improved to 3.4% at December 31, 2024, from 3.7% at December 31, 2023\n\nSource: https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results | Morningstar\nContent: Home News Business Wire World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results Provided by Business Wire Jan 28, 2025 12:30pm World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year. Highlights from the third quarter include: Increase in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year Net income of $13.4 million\n\nSource: https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results | Morningstar\nContent: Home News Business Wire World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results Provided by Business Wire Jan 28, 2025 12:30pm World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year. Highlights from the third quarter include: Increase in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year Net income of $13.4 million\n\nSource: https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results | Morningstar\nContent: Home News Business Wire World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results Provided by Business Wire Jan 28, 2025 12:30pm World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year. Highlights from the third quarter include: Increase in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year Net income of $13.4 million\n\nSource: https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results | Morningstar\nContent: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year. Highlights from the third quarter include: Increase in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year Net income of $13.4 million Diluted net income per share of $2.45 Recency delinquency on accounts 90+ days past due improved to 3.4% at December 31, 2024, from 3.7% at December 31, 2023 Portfolio results Gross loans outstanding were $1.38\n\nSource: https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results | Morningstar\nContent: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year. Highlights from the third quarter include: Increase in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year Net income of $13.4 million Diluted net income per share of $2.45 Recency delinquency on accounts 90+ days past due improved to 3.4% at December 31, 2024, from 3.7% at December 31, 2023 Portfolio results Gross loans outstanding were $1.38\n\nSource: https://investorshangout.com/world-acceptance-corporations-q3-results-showcase-steady-progress-196687-/\nTitle: World Acceptance Corporation's Q3 Results Showcase Steady Progress - Investors Hangout\nContent: quarter of fiscal year 2025, demonstrating notable improvement in loan growth alongside a steadfast commitment to credit quality. In the increasingly competitive landscape of installment lending, the company\u2019s strategy to invest smartly in its core customer base is yielding positive results.Q3 Fiscal HighlightsDuring this quarter, total revenues reached $138.6 million\u2014a milestone reflecting a base point yield increase of 208 when compared year-over-year. However, net income saw a reduction to $13.4 million, a drop from $16.7 million in the same quarter last year, which the management attributed to an increase in the provision for credit losses related to expanding business operations.Key Financial MetricsThe metrics showcasing the company\u2019s financial performance are quite illuminating:Net Income: $13.4 millionDiluted Net Income per Share: $2.45Delinquency Rate: Improved from 3.7% to 3.4% for accounts 90+ days past dueGrowth in Lending PortfolioAs of December 31, 2024, World Acceptance\n\nSource: https://investorshangout.com/world-acceptance-corporations-q3-results-showcase-steady-progress-196687-/\nTitle: World Acceptance Corporation's Q3 Results Showcase Steady Progress - Investors Hangout\nContent: World Acceptance Corporation's Impressive Q3 Performance\nWorld Acceptance Corporation (NASDAQ: WRLD) has shared its financial results for the third quarter of fiscal year 2025, demonstrating notable improvement in loan growth alongside a steadfast commitment to credit quality. In the increasingly competitive landscape of installment lending, the company\u2019s strategy to invest smartly in its core customer base is yielding positive results.\nQ3 Fiscal Highlights\nDuring this quarter, total revenues reached $138.6 million\u2014a milestone reflecting a base point yield increase of 208 when compared year-over-year. However, net income saw a reduction to $13.4 million, a drop from $16.7 million in the same quarter last year, which the management attributed to an increase in the provision for credit losses related to expanding business operations.\nKey Financial Metrics\nThe metrics showcasing the company\u2019s financial performance are quite illuminating:\nNet Income: $13.4 million\n\nSource: https://investorshangout.com/world-acceptance-corporations-q3-results-showcase-steady-progress-196687-/\nTitle: World Acceptance Corporation's Q3 Results Showcase Steady Progress - Investors Hangout\nContent: World Acceptance Corporation's Impressive Q3 PerformanceWorld Acceptance Corporation (NASDAQ: WRLD) has shared its financial results for the third quarter of fiscal year 2025, demonstrating notable improvement in loan growth alongside a steadfast commitment to credit quality. In the increasingly competitive landscape of installment lending, the company\u2019s strategy to invest smartly in its core customer base is yielding positive results.Q3 Fiscal HighlightsDuring this quarter, total revenues reached $138.6 million\u2014a milestone reflecting a base point yield increase of 208 when compared year-over-year. However, net income saw a reduction to $13.4 million, a drop from $16.7 million in the same quarter last year, which the management attributed to an increase in the provision for credit losses related to expanding business operations.Key Financial MetricsThe metrics showcasing the company\u2019s financial performance are quite illuminating:Net Income: $13.4 millionDiluted Net Income per Share:\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:59:09.847530",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://finance.yahoo.com/news/q3-2025-world-acceptance-corp-053847809.html\nTitle: Q3 2025 World Acceptance Corp Earnings Call\nContent: Good morning, and welcome to World Acceptance Corporation's third quarter 2025 earnings conference call. This call is being recorded. (Operator Instructions) Before we begin, the corporation has requested that I make the following announcement.The comments made during this conference call may contain certain forward-looking statements within the meaning of Section 21E of the Securities Exchange Act of 1934 that represent the corporation's expectations and beliefs concerning future events. Such forward-looking statements are about matters that are inherently subject to risks and uncertainties.Statements other than those of historical fact as well as those identified by the words anticipate, estimate, intend, plan, expect, believe, may, will and should or any variation of the foregoing and similar expressions are forward-looking statements.Additional information regarding forward-looking statements and any factors that could cause actual results or performance to differ from the\n\nSource: https://finance.yahoo.com/news/q3-2025-world-acceptance-corp-053847809.html\nTitle: Q3 2025 World Acceptance Corp Earnings Call\nContent: Q3 2025 World Acceptance Corp Earnings Call\nQ3 2025 World Acceptance Corp Earnings Call\nQ3 2025 World Acceptance Corp Earnings Call\nThomson Reuters StreetEvents Wed, Jan 29, 2025, 11:08 AM 12 min read\nThomson Reuters StreetEvents Wed, Jan 29, 2025, 11:08 AM 12 min read\nThomson Reuters StreetEvents Wed, Jan 29, 2025, 11:08 AM 12 min read\nThomson Reuters StreetEvents\nWed, Jan 29, 2025, 11:08 AM 12 min read\n12 min read\n\nSource: https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm\nTitle: World Acceptance Corp (WRLD) 8-K Earnings Release, Regulated Disclosure - Jan 2025\nContent: These and other factors are discussed in greater detail in Part I, Item 1A,\u201cRisk Factors\u201d in the Company\u2019s most recent annual report on Form 10-K for the fiscal year ended March 31, 2024, as filed with the SEC and the Company\u2019s other reports filed with, or furnished to, the SEC from time to time. World Acceptance Corporation does not undertake any obligation to update any forward-looking statements it makes. The Company is also not responsible for updating the information contained in this press release beyond the publication date, or for changes made to this document by wire services or Internet services.\n-MORE- WRLD Reports Fiscal 2025 Third Quarter Results Page 6\nWRLD Reports Fiscal 2025 Third Quarter Results Page 6\nWRLD Reports Fiscal 2025 Third Quarter Results Page 6\nWRLD Reports Fiscal 2025 Third Quarter Results Page 6\nWRLD Reports Fiscal 2025 Third Quarter Results\nWORLD ACCEPTANCE CORPORATION AND SUBSIDIARIES\nCONSOLIDATED STATEMENTS OF OPERATIONS\n\nSource: https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm\nTitle: World Acceptance Corp (WRLD) 8-K Earnings Release, Regulated Disclosure - Jan 2025\nContent: Exhibit 99.1 NEWS RELEASE For Immediate Release Contact: John L. Calmes, Jr. Executive VP, Chief Financial & Strategy Officer, and Treasurer (864) 298-9800 GREENVILLE, S.C. (January 28, 2025) - World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. WORLD ACCEPTANCE CORPORATION REPORTS FISCAL 2025 THIRD QUARTER RESULTS Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year. Highlights from the third quarter include: \u2022 Increase in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year \u2022 Net income of $13.4 million \u2022 Diluted net\n\nSource: https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm\nTitle: World Acceptance Corp (WRLD) 8-K Earnings Release, Regulated Disclosure - Jan 2025\nContent: Exhibit 99.1 NEWS RELEASE For Immediate Release Contact: John L. Calmes, Jr. Executive VP, Chief Financial & Strategy Officer, and Treasurer (864) 298-9800 GREENVILLE, S.C. (January 28, 2025) - World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. WORLD ACCEPTANCE CORPORATION REPORTS FISCAL 2025 THIRD QUARTER RESULTS Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year. Highlights from the third quarter include: \u2022 Increase in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year \u2022 Net income of $13.4 million \u2022 Diluted net\n\nSource: https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm\nTitle: World Acceptance Corp (WRLD) 8-K Earnings Release, Regulated Disclosure - Jan 2025\nContent: Exhibit 99.1 NEWS RELEASE For Immediate Release Contact: John L. Calmes, Jr. Executive VP, Chief Financial & Strategy Officer, and Treasurer (864) 298-9800 GREENVILLE, S.C. (January 28, 2025) - World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. WORLD ACCEPTANCE CORPORATION REPORTS FISCAL 2025 THIRD QUARTER RESULTS Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year. Highlights from the third quarter include: \u2022 Increase in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year \u2022 Net income of $13.4 million \u2022 Diluted net\n\nSource: https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm\nTitle: World Acceptance Corp (WRLD) 8-K Earnings Release, Regulated Disclosure - Jan 2025\nContent: affecting delinquency and charge-offs); the impact of extreme weather events and natural disasters; changes in the Company\u2019s markets and general changes in the economy (particularly in the markets served by the Company). These and other factors are discussed in greater detail in Part I, Item 1A,\u201cRisk Factors\u201d in the Company\u2019s most recent annual report on Form 10-K for the fiscal year ended March 31, 2024, as filed with the SEC and the Company\u2019s other reports filed with, or furnished to, the SEC from time to time. World Acceptance Corporation does not undertake any obligation to update any forward-looking statements it makes. The Company is also not responsible for updating the information contained in this press release beyond the publication date, or for changes made to this document by wire services or Internet services. -MORE- WRLD Reports Fiscal 2025 Third Quarter Results Page 6 WORLD ACCEPTANCE CORPORATION AND SUBSIDIARIES CONSOLIDATED STATEMENTS OF OPERATIONS (unaudited and in\n\nSource: https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm\nTitle: World Acceptance Corp (WRLD) 8-K Earnings Release, Regulated Disclosure - Jan 2025\nContent: affecting delinquency and charge-offs); the impact of extreme weather events and natural disasters; changes in the Company\u2019s markets and general changes in the economy (particularly in the markets served by the Company). These and other factors are discussed in greater detail in Part I, Item 1A,\u201cRisk Factors\u201d in the Company\u2019s most recent annual report on Form 10-K for the fiscal year ended March 31, 2024, as filed with the SEC and the Company\u2019s other reports filed with, or furnished to, the SEC from time to time. World Acceptance Corporation does not undertake any obligation to update any forward-looking statements it makes. The Company is also not responsible for updating the information contained in this press release beyond the publication date, or for changes made to this document by wire services or Internet services. -MORE- WRLD Reports Fiscal 2025 Third Quarter Results Page 6 WORLD ACCEPTANCE CORPORATION AND SUBSIDIARIES CONSOLIDATED STATEMENTS OF OPERATIONS (unaudited and in\n\nSource: https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm\nTitle: World Acceptance Corp (WRLD) 8-K Earnings Release, Regulated Disclosure - Jan 2025\nContent: affecting delinquency and charge-offs); the impact of extreme weather events and natural disasters; changes in the Company\u2019s markets and general changes in the economy (particularly in the markets served by the Company). These and other factors are discussed in greater detail in Part I, Item 1A,\u201cRisk Factors\u201d in the Company\u2019s most recent annual report on Form 10-K for the fiscal year ended March 31, 2024, as filed with the SEC and the Company\u2019s other reports filed with, or furnished to, the SEC from time to time. World Acceptance Corporation does not undertake any obligation to update any forward-looking statements it makes. The Company is also not responsible for updating the information contained in this press release beyond the publication date, or for changes made to this document by wire services or Internet services. -MORE- WRLD Reports Fiscal 2025 Third Quarter Results Page 6 WORLD ACCEPTANCE CORPORATION AND SUBSIDIARIES CONSOLIDATED STATEMENTS OF OPERATIONS (unaudited and in\n\nSource: https://last10k.com/sec-filings/wrld/0001437749-25-001945.htm\nTitle: World Acceptance Corp (WRLD) 8-K Earnings Release, Regulated Disclosure - Jan 2025\nContent: Third quarter conference call\nThe senior management of World Acceptance Corporation will be discussing these results in its quarterly conference call to be held at 10:00 a.m. Eastern Time today. A simulcast of the conference call will be available on the Internet at https://event.choruscall.com/mediaframe/webcast.html?webcastid=1DhfUuWc. The call will be available for replay on the Internet for approximately 30 days.\n-MORE- WRLD Reports Fiscal 2025 Third Quarter Results Page 5\nWRLD Reports Fiscal 2025 Third Quarter Results Page 5\nWRLD Reports Fiscal 2025 Third Quarter Results Page 5\nWRLD Reports Fiscal 2025 Third Quarter Results Page 5\nWRLD Reports Fiscal 2025 Third Quarter Results\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:59:28.082579",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://finance.yahoo.com/news/world-acceptance-corporation-reports-fiscal-123000861.html\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nContent: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nWorld Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nWorld Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nBusiness Wire Tue, Jan 28, 2025, 6:00 PM 20 min read\nBusiness Wire Tue, Jan 28, 2025, 6:00 PM 20 min read\nBusiness Wire Tue, Jan 28, 2025, 6:00 PM 20 min read\nTue, Jan 28, 2025, 6:00 PM 20 min read\n20 min read\n\nSource: https://www.marketscreener.com/quote/stock/WORLD-ACCEPTANCE-CORPORAT-11409/news/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results-48889868/\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results -January 28, 2025 at 07:31 am EST | MarketScreener\nContent: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nJanuary 28, 2025 at 07:31 am EST Share\nJanuary 28, 2025 at 07:31 am EST\n\nSource: https://www.stocktitan.net/news/WRLD/world-acceptance-corporation-reports-fiscal-2025-third-quarter-xcogb138wqvw.html\nTitle: World Acceptance Q3 Earnings: Revenue Up, EPS Declines to $2.45 Amid Mixed Results | WRLD Stock News\nContent: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\n\nSource: https://www.marketscreener.com/quote/stock/WORLD-ACCEPTANCE-CORPORAT-11409/news/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results-48889868/\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results -January 28, 2025 at 07:31 am EST | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results January 28, 2025 at 07:31 am EST Share World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring\n\nSource: https://www.marketscreener.com/quote/stock/WORLD-ACCEPTANCE-CORPORAT-11409/news/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results-48889868/\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results -January 28, 2025 at 07:31 am EST | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results January 28, 2025 at 07:31 am EST Share World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring\n\nSource: https://www.marketscreener.com/quote/stock/WORLD-ACCEPTANCE-CORPORAT-11409/news/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results-48889868/\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results -January 28, 2025 at 07:31 am EST | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results January 28, 2025 at 07:31 am EST Share World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring\n\nSource: https://www.marketscreener.com/quote/stock/WORLD-ACCEPTANCE-CORPORAT-11409/news/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results-48889868/\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results -January 28, 2025 at 07:31 am EST | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results January 28, 2025 at 07:31 am EST Share World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring\n\nSource: https://www.marketscreener.com/quote/stock/WORLD-ACCEPTANCE-CORPORAT-11409/news/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results-48889868/\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results -January 28, 2025 at 07:31 am EST | MarketScreener\nContent: Income Statement Balance Sheet Cash flow Financial Ratios Business Segments Valuation Valuation ratios Dividend Consensus Analysts' Opinion Estimates Revisions Ratings Calendar Sector Sector performance Sector valuations Sector dividends Financial comparisons Sector ratings Sector consensus Sector revisions ETFs All News Analyst Reco. Highlights Insiders Transcripts Press Releases Official Publications Other languages Trading Ideas MarketScreener Editorial Features MarketScreener Strategies World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results January 28, 2025 at 07:31 am EST Share World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025. Third fiscal quarter highlights During its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring\n\nSource: https://www.marketscreener.com/quote/stock/WORLD-ACCEPTANCE-CORPORAT-11409/news/World-Acceptance-Corporation-Reports-Fiscal-2025-Third-Quarter-Results-48889868/\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results -January 28, 2025 at 07:31 am EST | MarketScreener\nContent: World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025.\nThird fiscal quarter highlights\nDuring its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year.\nHighlights from the third quarter include:\nIncrease in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year\nNet income of $13.4 million\nDiluted net income per share of $2.45\nRecency delinquency on accounts 90+ days past due improved to 3.4% at December 31, 2024, from 3.7% at December 31, 2023\n\nSource: https://finance.yahoo.com/news/world-acceptance-corporation-reports-fiscal-123000861.html\nTitle: World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results\nContent: GREENVILLE, S.C., January 28, 2025--(BUSINESS WIRE)--World Acceptance Corporation (NASDAQ: WRLD) today reported financial results for its third quarter of fiscal 2025.\nThird fiscal quarter highlights\nDuring its third fiscal quarter, World Acceptance Corporation achieved improved loan growth while continuing to focus on credit quality. Management believes that continuing to carefully invest in our best customers and closely monitoring performance has strengthened the Company's financial position and positioned us well for the remainder of the fiscal year.\nHighlights from the third quarter include:\nIncrease in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year\nIncrease in total revenues to $138.6 million, including a 208 basis point yield increase compared to the same quarter in the prior year\nNet income of $13.4 million\nNet income of $13.4 million\nDiluted net income per share of $2.45\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:59:28.096750",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.01785",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T04:59:28.124450",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'How did world acceptance corp perform during its december ending period Q3 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T05:00:11.500768",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'How did world acceptance corp perform during its december ending period Q3 2025'",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0,
+ "type": "report",
+ "content": "selected_images",
+ "output": "47809.html](https://finance.yahoo.com/news/q3-2025-world-acceptance-corp-053847809.html)",
+ "metadata": [
+ "https://www.marketbeat.com/logos/articles/thumb_20241104115452_options-traders-bet-big-on-these-3-tech-stocks.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20241101152430_how-to-play-new-options-trading-with-bitcoin-etfs.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240718150215_how-to-execute-the-wheel-strategy-to-generate-opti.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240626075418_3-options-strategies-to-play-a-stocks-uptrend-if-b.jpg",
+ "https://investorshangout.com/images/blog/ihnews-World%20Acceptance%20Corporation%27s%20Q3%20Results%20Showcase%20Steady%20Progress.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20241210135548_neos-sp-500r-high-income-etf-harnasses-the-power-o.png",
+ "https://www.marketbeat.com/logos/articles/thumb_20241028111137_albemarle-stock-call-options-surge-what-it-means-f.png"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1738538904_How did world acceptance corp perform during.docx b/outputs/task_1738538904_How did world acceptance corp perform during.docx
new file mode 100644
index 0000000000000000000000000000000000000000..e0a91f060c6b76f2c5f77567c5312a99a663f7d1
Binary files /dev/null and b/outputs/task_1738538904_How did world acceptance corp perform during.docx differ
diff --git a/outputs/task_1738538904_How did world acceptance corp perform during.md b/outputs/task_1738538904_How did world acceptance corp perform during.md
new file mode 100644
index 0000000000000000000000000000000000000000..a1a4e6c46a9fb22ec1750585120905ad94e3f909
--- /dev/null
+++ b/outputs/task_1738538904_How did world acceptance corp perform during.md
@@ -0,0 +1,38 @@
+## World Acceptance Corporation's Q3 2025 Performance: A Detailed Analysis
+
+World Acceptance Corporation (WRLD) reported a mixed performance for its third quarter of fiscal year 2025, ending December 31, 2024. While the company demonstrated improved loan growth and a strengthened focus on credit quality, net income declined year-over-year. This report provides a detailed analysis of WRLD's Q3 2025 performance, drawing upon information from multiple reputable financial news sources.
+
+**Revenue and Earnings:**
+
+WRLD reported total revenues of $138.6 million, exceeding analyst estimates of $131.79 million and representing a 0.6% increase compared to the same quarter of the previous year ([GuruFocus](https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million), [Yahoo Finance](https://finance.yahoo.com/news/world-acceptance-corporation-reports-fiscal-123000861.html)). This revenue growth was driven by a 208 basis point increase in yield. However, net income decreased to $13.4 million compared to $16.7 million in Q3 2024, primarily due to increased credit loss provisions ([GuruFocus](https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million)). Diluted earnings per share (EPS) came in at $2.45, significantly beating the consensus estimate of $1.23 but still lower than the $2.84 reported in the prior year ([MarketBeat](https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/)).
+
+**Loan Portfolio and Customer Growth:**
+
+WRLD achieved improved loan growth during the quarter. Gross loans outstanding increased sequentially by 6.6% to $1.38 billion as of December 31, 2024. However, on a year-over-year basis, gross loans outstanding decreased by 1.4% ([GuruFocus](https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million)). This suggests a shift in the company's lending strategy, potentially focusing on higher-quality borrowers. Supporting this observation is the growth in unique borrowers, which increased by 6.2% compared to the previous year, with new customer loan volume rising by 22.6% ([GuruFocus](https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million)). This indicates a successful customer acquisition strategy, attracting new borrowers to WRLD's services.
+
+**Credit Quality and Delinquency:**
+
+WRLD management emphasized their focus on credit quality throughout the quarter ([Yahoo Finance](https://finance.yahoo.com/news/world-acceptance-corporation-reports-fiscal-123000861.html)). This focus appears to be yielding positive results, as recency delinquency on accounts 90+ days past due improved to 3.4% at December 31, 2024, down from 3.7% at December 31, 2023 ([Morningstar](https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results)). This improvement in delinquency rates suggests that WRLD's efforts to manage credit risk are effective.
+
+**Branch Performance and Operational Efficiency:**Same-store gross loans decreased by 0.2% over the past year, a significant improvement from the 8.2% decrease in the prior year ([GuruFocus](https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million)). This suggests that WRLD's existing branches are stabilizing and performing better than in the previous year. While specific details on operational efficiency were not provided in the available sources, the improvement in same-store loan performance hints at potential operational improvements within the branches.
+
+**Management Commentary and Future Outlook:**
+
+WRLD management expressed confidence in the company's financial position and its outlook for the remainder of the fiscal year ([Yahoo Finance](https://finance.yahoo.com/news/world-acceptance-corporation-reports-fiscal-123000861.html)). They attributed the company's strengthened position to their focus on investing in their best customers and closely monitoring performance. While the decrease in net income raises some concerns, the improved loan growth, customer acquisition, and delinquency rates suggest that WRLD is pursuing a sustainable growth strategy. Further insights into the management's perspective can be gleaned from the Q3 2025 earnings conference call transcript ([MarketBeat](https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/), [Yahoo Finance](https://finance.yahoo.com/news/q3-2025-world-acceptance-corp-053847809.html)).
+
+**Overall Assessment:**
+
+WRLD's Q3 2025 performance presents a mixed picture. The company delivered strong revenue growth and demonstrated a commitment to credit quality, resulting in improved delinquency rates and customer growth. However, the decline in net income year-over-year requires further scrutiny. The increased provision for credit losses, while potentially a short-term expense related to business expansion, warrants monitoring in future quarters. Overall, WRLD appears to be navigating a competitive lending environment by focusing on sustainable growth and prudent risk management. The company's success in attracting new customers and improving its existing branch performance suggests a positive trajectory for future growth. However, the profitability challenge highlighted by the lower net income needs to be addressed to ensure long-term shareholder value.
+
+
+**References**
+
+GuruFocus News. (2025, January 29). *World Acceptance Corp (WRLD) Q3 FY2025 Earnings: EPS of $2.45 Beats Estimate, Revenue Surpasses Expectations at $138.6 Million*. [https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million](https://www.gurufocus.com/news/2669812/world-acceptance-corp-wrld-q3-fy2025-earnings-eps-of-245-beats-estimate-revenue-surpasses-expectations-at-1386-million)
+
+MarketBeat. (2025, January 31). *World Acceptance (WRLD) Earnings Date and Reports 2025*. [https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/](https://www.marketbeat.com/stocks/NASDAQ/WRLD/earnings/)
+
+Morningstar. (2025, January 28). *World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results*. [https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results](https://www.morningstar.com/news/business-wire/20250128675749/world-acceptance-corporation-reports-fiscal-2025-third-quarter-results)
+
+Yahoo Finance. (2025, January 28). *World Acceptance Corporation Reports Fiscal 2025 Third Quarter Results*. [https://finance.yahoo.com/news/world-acceptance-corporation-reports-fiscal-123000861.html](https://finance.yahoo.com/news/world-acceptance-corporation-reports-fiscal-123000861.html)
+
+Yahoo Finance. (2025, January 29). *Q3 2025 World Acceptance Corp Earnings Call*. [https://finance.yahoo.com/news/q3-2025-world-acceptance-corp-053847809.html](https://finance.yahoo.com/news/q3-2025-world-acceptance-corp-053847809.html)
\ No newline at end of file
diff --git a/outputs/task_1738538904_How did world acceptance corp perform during.pdf b/outputs/task_1738538904_How did world acceptance corp perform during.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..ebaf38c60d32c5e04c2b0e7e4e6c8bfe002c9420
Binary files /dev/null and b/outputs/task_1738538904_How did world acceptance corp perform during.pdf differ
diff --git a/outputs/task_1738560819_What is distillation in LLM and how did deep.docx b/outputs/task_1738560819_What is distillation in LLM and how did deep.docx
new file mode 100644
index 0000000000000000000000000000000000000000..7ff10c6ce3190f3ca50fbef5622e9d41b596724b
Binary files /dev/null and b/outputs/task_1738560819_What is distillation in LLM and how did deep.docx differ
diff --git a/outputs/task_1738560819_What is distillation in LLM and how did deep.md b/outputs/task_1738560819_What is distillation in LLM and how did deep.md
new file mode 100644
index 0000000000000000000000000000000000000000..04a2add59b36026cbd97fc71b7d913b604b60b59
--- /dev/null
+++ b/outputs/task_1738560819_What is distillation in LLM and how did deep.md
@@ -0,0 +1,50 @@
+## DeepSeek-R1: Distillation, Training, and the Flawed Panic in the AI Industry
+
+**Abstract**
+
+The emergence of DeepSeek-R1 has sparked concerns within the AI industry, primarily due to claims of significantly lower training costs compared to competitors like OpenAI. This report delves into the concept of knowledge distillation in Large Language Models (LLMs), specifically how it was employed in training DeepSeek-R1. Furthermore, it analyzes the validity of the industry's cost-related panic, arguing that while DeepSeek has achieved notable efficiency gains, the narrative surrounding its disruptive potential is overblown and based on incomplete information. The report critically evaluates DeepSeek's training methodology, cost-saving strategies, and performance benchmarks, concluding that while the model represents a step forward in efficiency, it doesn't constitute a paradigm shift that threatens the established players in the AI landscape.
+
+**Introduction**
+
+The development of LLMs has been marked by a relentless pursuit of scale, with models growing increasingly larger and more computationally expensive. This trend has raised concerns about accessibility and sustainability, prompting research into model compression techniques like knowledge distillation. Knowledge distillation involves transferring the "knowledge" from a large, complex "teacher" model to a smaller, more efficient "student" model ([Knowledge distillation: a way to make a large model more efficient and accessible](https://toloka.ai/blog/knowledge-distillation/)). DeepSeek-R1 leverages this technique, alongside reinforcement learning, to achieve impressive performance while maintaining lower training costs. However, the narrative surrounding DeepSeek’s cost efficiency and its potential to disrupt the AI industry warrants a critical examination.
+
+**Knowledge Distillation in LLMs**
+
+Knowledge distillation allows smaller models to inherit the complex behaviors and reasoning capabilities of larger models without the associated computational burden. Instead of simply mimicking the teacher model's outputs, distillation aims to replicate its underlying "thought processes" ([Knowledge distillation: a way to make a large model more efficient and accessible](https://toloka.ai/blog/knowledge-distillation/)). This is achieved by training the student model not just on the ground truth labels but also on the softer probability distributions outputted by the teacher model. This allows the student to learn from the teacher's nuanced understanding of the data, including its uncertainties and biases. In the context of LLMs, this translates to transferring stylistic nuances, reasoning abilities, and even alignment with human values ([Knowledge distillation: a way to make a large model more efficient and accessible](https://toloka.ai/blog/knowledge-distillation/)).
+
+**DeepSeek-R1's Training Methodology**
+
+DeepSeek-R1's training employed a multi-stage approach combining supervised fine-tuning (SFT) and reinforcement learning (RL) ([Bite: How Deepseek R1 was trained](https://www.philschmid.de/deepseek-r1)). Initially, the base model, DeepSeek V3, underwent SFT using a dataset of chain-of-thought (CoT) data generated by both the R1-zero model and human annotators. This stage focused on improving readability and coherence. Subsequently, RL was applied, concentrating on reasoning-intensive tasks like coding and mathematics, utilizing rule-based reward models and an additional reward for language consistency ([Bite: How Deepseek R1 was trained](https://www.philschmid.de/deepseek-r1)). This multi-stage process, alternating between SFT and RL, allowed DeepSeek-R1 to refine its reasoning capabilities while maintaining readability and alignment with human preferences ([DeepSeek R1: It’s All About Architecture and Training Approach](https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8)). Distillation played a crucial role in creating smaller, more efficient versions of the R1 model without requiring the computationally expensive RL training ([DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation](https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627)).
+
+**The Flawed Panic: Deconstructing the Cost Narrative**
+
+While DeepSeek has publicized a significantly lower training cost for its models (e.g., $6 million for V3), the narrative surrounding this figure is misleading. Reports suggest DeepSeek leveraged a substantial stockpile of Nvidia chips (potentially costing around $1 billion), which were not accounted for in their publicized figures due to U.S. export control restrictions ([Is the DeepSeek Panic Overblown?](https://time.com/7211646/is-deepseek-panic-overblown/)). This omission significantly skews the cost comparison with competitors like OpenAI, who reportedly spent over $100 million training GPT-4 ([Is the DeepSeek Panic Overblown?](https://time.com/7211646/is-deepseek-panic-overblown/)).
+
+Furthermore, DeepSeek’s lower model access fees ($2.19 per million tokens compared to OpenAI's $60) don't necessarily reflect superior cost efficiency. Experts suggest this pricing strategy could be a loss-leader tactic to gain market share, potentially operating at a loss on inference ([Is the DeepSeek Panic Overblown?](https://time.com/7211646/is-deepseek-panic-overblown/)). DeepSeek’s cost-saving measures, such as proprietary energy-efficient accelerators and data optimization, contribute to their lower expenses ([DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency](https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8)). However, these efficiencies, while noteworthy, do not represent a fundamental technological breakthrough that invalidates the investments made by other leading AI companies.
+
+**DeepSeek-R1's Performance: A Balanced Perspective**
+
+DeepSeek-R1 has demonstrated impressive performance on various benchmarks, including reasoning tasks (AIME 2024, MATH-500), general QA, and long-context understanding (AlpacaEval, ArenaHard) ([DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation](https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627)). However, experts caution against overinterpreting these results. While R1 showcases advancements in efficiency and specific task performance, it's not considered a groundbreaking leap in AI capabilities ([Is the DeepSeek Panic Overblown?](https://time.com/7211646/is-deepseek-panic-overblown/)).
+
+**Conclusion**
+
+DeepSeek-R1’s utilization of knowledge distillation and reinforcement learning represents a significant step forward in LLM training efficiency. However, the narrative surrounding its disruptive potential based on significantly lower training costs is flawed and incomplete. While DeepSeek has undoubtedly achieved notable efficiency gains, the undisclosed hardware investments and potentially loss-making pricing strategy cast doubt on the true extent of their cost advantage. Furthermore, while R1 performs well on various benchmarks, it doesn't represent a paradigm shift in AI capabilities that threatens the established players in the industry. The current panic within the AI industry is therefore overblown and warrants a more nuanced understanding of DeepSeek’s achievements and limitations.
+
+
+**References**
+
+Gore, N. (2025, January). DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency. Medium. [https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8](https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8)
+
+Maheshwarappa, A. (2025, January). DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation. Medium. [https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627](https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627)
+
+Odunola, J. (2023, November 15). Exploring Knowledge Distillation in Large Language Models. Medium. [https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669](https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669)
+
+Schmid, P. (n.d.). Bite: How Deepseek R1 was trained. [https://www.philschmid.de/deepseek-r1](https://www.philschmid.de/deepseek-r1)
+
+Singh, D. (2025, February 1). DeepSeek-R1: Redefining Open-Source Reasoning in LLMs. Medium. [https://medium.com/@deepankar080892/deepseek-r1-redefining-open-source-reasoning-in-llms-89f09250afed](https://medium.com/@deepankar080892/deepseek-r1-redefining-open-source-reasoning-in-llms-89f09250afed)
+
+TeqnoVerse. (2025, January). DeepSeek R1: It’s All About Architecture and Training Approach. Medium. [https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8](https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8)
+
+TIME. (n.d.). Is the DeepSeek Panic Overblown?. [https://time.com/7211646/is-deepseek-panic-overblown/](https://time.com/7211646/is-deepseek-panic-overblown/)
+
+Toloka. (n.d.). Knowledge distillation: a way to make a large model more efficient and accessible. [https://toloka.ai/blog/knowledge-distillation/](https://toloka.ai/blog/knowledge-distillation/)
\ No newline at end of file
diff --git a/outputs/task_1738560819_What is distillation in LLM and how did deep.pdf b/outputs/task_1738560819_What is distillation in LLM and how did deep.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..3251da2d1a12daa396551f646ee3a4baee9e9a5c
Binary files /dev/null and b/outputs/task_1738560819_What is distillation in LLM and how did deep.pdf differ
diff --git a/outputs/task_1738560819_What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed.json b/outputs/task_1738560819_What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed.json
new file mode 100644
index 0000000000000000000000000000000000000000..cfa0328e5acb2cee0b718c06d6fd9fd4c22e2530
--- /dev/null
+++ b/outputs/task_1738560819_What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed.json
@@ -0,0 +1,760 @@
+{
+ "timestamp": "2025-02-03T11:03:39.183012",
+ "events": [
+ {
+ "timestamp": "2025-02-03T11:03:43.296198",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:43.304335",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83e\udd16 AI Research Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:43.315154",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:48.087750",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:51.501301",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['distillation in large language models knowledge transfer techniques', 'deepseek r1 training process reinforcement learning from deepseek v3 base', 'deepseek r1 training cost analysis compared to openai models', 'flaws in arguments about ai industry panic due to deepseek r1 cost', 'What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed']...",
+ "metadata": [
+ "distillation in large language models knowledge transfer techniques",
+ "deepseek r1 training process reinforcement learning from deepseek v3 base",
+ "deepseek r1 training cost analysis compared to openai models",
+ "flaws in arguments about ai industry panic due to deepseek r1 cost",
+ "What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:51.509927",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'distillation in large language models knowledge transfer techniques'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:51.520342",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'deepseek r1 training process reinforcement learning from deepseek v3 base'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:51.528869",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'deepseek r1 training cost analysis compared to openai models'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:51.540629",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'flaws in arguments about ai industry panic due to deepseek r1 cost'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:51.549683",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:53.899264",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://bottr.me/blog/deepseek\n",
+ "metadata": "https://bottr.me/blog/deepseek"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:53.909625",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://blog.promptlayer.com/openai-vs-deepseek-an-analysis-of-r1-and-o1-models/\n",
+ "metadata": "https://blog.promptlayer.com/openai-vs-deepseek-an-analysis-of-r1-and-o1-models/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:53.922153",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.promptlayer.com/blog/openai-vs-deepseek-an-analysis-of-r1-and-o1-models\n",
+ "metadata": "https://www.promptlayer.com/blog/openai-vs-deepseek-an-analysis-of-r1-and-o1-models"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:53.933943",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\n",
+ "metadata": "https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:53.945131",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://venturebeat.com/ai/deepseek-r1s-bold-bet-on-reinforcement-learning-how-it-outpaced-openai-at-3-of-the-cost/\n",
+ "metadata": "https://venturebeat.com/ai/deepseek-r1s-bold-bet-on-reinforcement-learning-how-it-outpaced-openai-at-3-of-the-cost/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:53.955124",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:53.968315",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.589679",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.608939",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://venturebeat.com/wp-content/uploads/2025/01/Screenshot-2025-01-25-at-6.06.56%E2%80%AFPM.png?w=800"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.619026",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.628539",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: deepseek r1 training cost analysis compared to openai models...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.858936",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://jessiecortes.medium.com/how-deepseeks-strategy-exposes-flaws-in-modern-ai-and-what-comes-next-6b122c28d556\n",
+ "metadata": "https://jessiecortes.medium.com/how-deepseeks-strategy-exposes-flaws-in-modern-ai-and-what-comes-next-6b122c28d556"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.883479",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://theconversation.com/deepseek-shatters-beliefs-about-the-cost-of-ai-leaving-us-tech-giants-reeling-248424\n",
+ "metadata": "https://theconversation.com/deepseek-shatters-beliefs-about-the-cost-of-ai-leaving-us-tech-giants-reeling-248424"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.893701",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://nymag.com/intelligencer/article/deepseek-r1-ai-panic-impact-commentary-analysis.html\n",
+ "metadata": "https://nymag.com/intelligencer/article/deepseek-r1-ai-panic-impact-commentary-analysis.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.902967",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://time.com/7211646/is-deepseek-panic-overblown/\n",
+ "metadata": "https://time.com/7211646/is-deepseek-panic-overblown/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.913541",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arstechnica.com/ai/2025/01/deepseek-spooks-american-tech-industry-as-it-tops-the-apple-app-store/\n",
+ "metadata": "https://arstechnica.com/ai/2025/01/deepseek-spooks-american-tech-industry-as-it-tops-the-apple-app-store/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.922618",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:03:58.932438",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.647767",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.660461",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 2 new images from 2 total images",
+ "metadata": [
+ "https://cdn.arstechnica.net/wp-content/uploads/2025/01/chinese_ai_flag_2-1152x648.jpg",
+ "https://cdn.arstechnica.net/wp-content/uploads/2025/01/yann_post_screenshot.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.674976",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.686809",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: flaws in arguments about ai industry panic due to deepseek r1 cost...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.912673",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8\n",
+ "metadata": "https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.922208",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.philschmid.de/deepseek-r1\n",
+ "metadata": "https://www.philschmid.de/deepseek-r1"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.933369",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://unfoldai.com/deepseek-r1/\n",
+ "metadata": "https://unfoldai.com/deepseek-r1/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.943195",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it\n",
+ "metadata": "https://www.vellum.ai/blog/the-training-of-deepseek-r1-and-ways-to-use-it"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.952606",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://newsletter.languagemodels.co/p/the-illustrated-deepseek-r1\n",
+ "metadata": "https://newsletter.languagemodels.co/p/the-illustrated-deepseek-r1"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.963560",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:00.973804",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.004639",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.014875",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 14 total images",
+ "metadata": [
+ "https://unfoldai.com/storage/2025/01/lm-studio-deepseek-r1.jpg",
+ "https://unfoldai.com/storage/2025/01/DeepSeek-R1-performance.jpg",
+ "https://unfoldai.com/storage/2025/01/distill-models-deepseek-r1-performance.jpg",
+ "https://substackcdn.com/image/fetch/w_1456,c_limit,f_auto,q_auto:good,fl_progressive:steep/https%3A%2F%2Fsubstack-post-media.s3.amazonaws.com%2Fpublic%2Fimages%2Fe48af6fa-8956-44b0-84cf-915e607f3b5e_1546x884.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.027052",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.037495",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: deepseek r1 training process reinforcement learning from deepseek v3 base...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.286694",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://zilliz.com/learn/knowledge-distillation-from-large-language-models-deep-dive\n",
+ "metadata": "https://zilliz.com/learn/knowledge-distillation-from-large-language-models-deep-dive"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.298721",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://toloka.ai/blog/knowledge-distillation/\n",
+ "metadata": "https://toloka.ai/blog/knowledge-distillation/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.307910",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.datacamp.com/blog/distillation-llm\n",
+ "metadata": "https://www.datacamp.com/blog/distillation-llm"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.318662",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\n",
+ "metadata": "https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.329293",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arxiv.org/abs/2306.08543\n",
+ "metadata": "https://arxiv.org/abs/2306.08543"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.340115",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:02.350219",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:03.647114",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:03.657294",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:03.668290",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:03.679253",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: distillation in large language models knowledge transfer techniques...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.028660",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://medium.com/@deepankar080892/deepseek-r1-redefining-open-source-reasoning-in-llms-89f09250afed\n",
+ "metadata": "https://medium.com/@deepankar080892/deepseek-r1-redefining-open-source-reasoning-in-llms-89f09250afed"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.040241",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\n",
+ "metadata": "https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.049847",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arxiv.org/pdf/2501.12948\n",
+ "metadata": "https://arxiv.org/pdf/2501.12948"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.060144",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://arxiv.org/abs/2501.12619\n",
+ "metadata": "https://arxiv.org/abs/2501.12619"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.071163",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.082015",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.225303",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 2 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.236355",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.247746",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:04.258453",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:12.162542",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\nTitle: DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation | by Abhishek Maheshwarappa | Jan, 2025 | Medium\nContent: requiring RL training. This approach democratizes access to advanced reasoning capabilities for research and industry applications.DeepSeek-R1 EvaluationDeepSeek-R1\u2019s performance is benchmarked against industry-leading models, showcasing:Reasoning Tasks: Achieving superior accuracy in benchmarks like AIME 2024 and MATH-500.General QA: Outperforming competitors like GPT-4o and Claude in creative writing and instruction-following tasks.Long-Context Understanding: Excelling in tasks requiring extended reasoning, such as AlpacaEval and ArenaHard.These results highlight the effectiveness of RL in improving reasoning and generalization across diverse tasks.Distillation vs. Reinforcement LearningDistillation vs. RLAdvantages of Distillation:Distillation achieves better performance for smaller models with less computational effort compared to RL.DeepSeek-R1 distilled models outperform traditional RL-trained compact architectures, such as QwQ-32B.Challenges with RL:RL for smaller models is\n\nSource: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\nTitle: DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation | by Abhishek Maheshwarappa | Jan, 2025 | Medium\nContent: requiring RL training. This approach democratizes access to advanced reasoning capabilities for research and industry applications.DeepSeek-R1 EvaluationDeepSeek-R1\u2019s performance is benchmarked against industry-leading models, showcasing:Reasoning Tasks: Achieving superior accuracy in benchmarks like AIME 2024 and MATH-500.General QA: Outperforming competitors like GPT-4o and Claude in creative writing and instruction-following tasks.Long-Context Understanding: Excelling in tasks requiring extended reasoning, such as AlpacaEval and ArenaHard.These results highlight the effectiveness of RL in improving reasoning and generalization across diverse tasks.Distillation vs. Reinforcement LearningDistillation vs. RLAdvantages of Distillation:Distillation achieves better performance for smaller models with less computational effort compared to RL.DeepSeek-R1 distilled models outperform traditional RL-trained compact architectures, such as QwQ-32B.Challenges with RL:RL for smaller models is\n\nSource: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\nTitle: DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation | by Abhishek Maheshwarappa | Jan, 2025 | Medium\nContent: requiring RL training. This approach democratizes access to advanced reasoning capabilities for research and industry applications.DeepSeek-R1 EvaluationDeepSeek-R1\u2019s performance is benchmarked against industry-leading models, showcasing:Reasoning Tasks: Achieving superior accuracy in benchmarks like AIME 2024 and MATH-500.General QA: Outperforming competitors like GPT-4o and Claude in creative writing and instruction-following tasks.Long-Context Understanding: Excelling in tasks requiring extended reasoning, such as AlpacaEval and ArenaHard.These results highlight the effectiveness of RL in improving reasoning and generalization across diverse tasks.Distillation vs. Reinforcement LearningDistillation vs. RLAdvantages of Distillation:Distillation achieves better performance for smaller models with less computational effort compared to RL.DeepSeek-R1 distilled models outperform traditional RL-trained compact architectures, such as QwQ-32B.Challenges with RL:RL for smaller models is\n\nSource: https://medium.com/@deepankar080892/deepseek-r1-redefining-open-source-reasoning-in-llms-89f09250afed\nTitle: DeepSeek-R1: Transforming AI Reasoning with Reinforcement Learning and Efficient Distillation | by Deepankar Singh | Feb, 2025 | Medium\nContent: DeepSeek-R1: Transforming AI Reasoning with Reinforcement Learning and Efficient DistillationDeepankar Singh\u00b7Follow29 min read\u00b71 day ago--Share\nDeepSeek-R1: Transforming AI Reasoning with Reinforcement Learning and Efficient Distillation\nDeepankar Singh\u00b7Follow29 min read\u00b71 day ago--Share\nDeepankar Singh\u00b7Follow29 min read\u00b71 day ago--Share\nDeepankar Singh\u00b7Follow29 min read\u00b71 day ago--Share\nDeepankar Singh\u00b7Follow29 min read\u00b71 day ago\nDeepankar Singh\u00b7Follow29 min read\u00b71 day ago\n29 min read\u00b71 day ago\n29 min read\u00b71 day ago\n29 min read\u00b71 day ago\n29 min read\u00b71 day ago\n29 min read\u00b71 day ago\n29 min read\nDeepSeek-R1: Redefining Open-Source Reasoning in LLMs\n\nSource: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\nTitle: DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation | by Abhishek Maheshwarappa | Jan, 2025 | Medium\nContent: This approach democratizes access to advanced reasoning capabilities for research and industry applications.DeepSeek-R1 EvaluationDeepSeek-R1\u2019s performance is benchmarked against industry-leading models, showcasing:Reasoning Tasks: Achieving superior accuracy in benchmarks like AIME 2024 and MATH-500.General QA: Outperforming competitors like GPT-4o and Claude in creative writing and instruction-following tasks.Long-Context Understanding: Excelling in tasks requiring extended reasoning, such as AlpacaEval and ArenaHard.These results highlight the effectiveness of RL in improving reasoning and generalization across diverse tasks.Distillation vs. Reinforcement LearningDistillation vs. RLAdvantages of Distillation:Distillation achieves better performance for smaller models with less computational effort compared to RL.DeepSeek-R1 distilled models outperform traditional RL-trained compact architectures, such as QwQ-32B.Challenges with RL:RL for smaller models is computationally intensive\n\nSource: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\nTitle: DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation | by Abhishek Maheshwarappa | Jan, 2025 | Medium\nContent: This approach democratizes access to advanced reasoning capabilities for research and industry applications.DeepSeek-R1 EvaluationDeepSeek-R1\u2019s performance is benchmarked against industry-leading models, showcasing:Reasoning Tasks: Achieving superior accuracy in benchmarks like AIME 2024 and MATH-500.General QA: Outperforming competitors like GPT-4o and Claude in creative writing and instruction-following tasks.Long-Context Understanding: Excelling in tasks requiring extended reasoning, such as AlpacaEval and ArenaHard.These results highlight the effectiveness of RL in improving reasoning and generalization across diverse tasks.Distillation vs. Reinforcement LearningDistillation vs. RLAdvantages of Distillation:Distillation achieves better performance for smaller models with less computational effort compared to RL.DeepSeek-R1 distilled models outperform traditional RL-trained compact architectures, such as QwQ-32B.Challenges with RL:RL for smaller models is computationally intensive\n\nSource: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\nTitle: DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation | by Abhishek Maheshwarappa | Jan, 2025 | Medium\nContent: This approach democratizes access to advanced reasoning capabilities for research and industry applications.DeepSeek-R1 EvaluationDeepSeek-R1\u2019s performance is benchmarked against industry-leading models, showcasing:Reasoning Tasks: Achieving superior accuracy in benchmarks like AIME 2024 and MATH-500.General QA: Outperforming competitors like GPT-4o and Claude in creative writing and instruction-following tasks.Long-Context Understanding: Excelling in tasks requiring extended reasoning, such as AlpacaEval and ArenaHard.These results highlight the effectiveness of RL in improving reasoning and generalization across diverse tasks.Distillation vs. Reinforcement LearningDistillation vs. RLAdvantages of Distillation:Distillation achieves better performance for smaller models with less computational effort compared to RL.DeepSeek-R1 distilled models outperform traditional RL-trained compact architectures, such as QwQ-32B.Challenges with RL:RL for smaller models is computationally intensive\n\nSource: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\nTitle: DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation | by Abhishek Maheshwarappa | Jan, 2025 | Medium\nContent: This approach democratizes access to advanced reasoning capabilities for research and industry applications.DeepSeek-R1 EvaluationDeepSeek-R1\u2019s performance is benchmarked against industry-leading models, showcasing:Reasoning Tasks: Achieving superior accuracy in benchmarks like AIME 2024 and MATH-500.General QA: Outperforming competitors like GPT-4o and Claude in creative writing and instruction-following tasks.Long-Context Understanding: Excelling in tasks requiring extended reasoning, such as AlpacaEval and ArenaHard.These results highlight the effectiveness of RL in improving reasoning and generalization across diverse tasks.Distillation vs. Reinforcement LearningDistillation vs. RLAdvantages of Distillation:Distillation achieves better performance for smaller models with less computational effort compared to RL.DeepSeek-R1 distilled models outperform traditional RL-trained compact architectures, such as QwQ-32B.Challenges with RL:RL for smaller models is computationally intensive\n\nSource: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\nTitle: DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation | by Abhishek Maheshwarappa | Jan, 2025 | Medium\nContent: This approach democratizes access to advanced reasoning capabilities for research and industry applications.DeepSeek-R1 EvaluationDeepSeek-R1\u2019s performance is benchmarked against industry-leading models, showcasing:Reasoning Tasks: Achieving superior accuracy in benchmarks like AIME 2024 and MATH-500.General QA: Outperforming competitors like GPT-4o and Claude in creative writing and instruction-following tasks.Long-Context Understanding: Excelling in tasks requiring extended reasoning, such as AlpacaEval and ArenaHard.These results highlight the effectiveness of RL in improving reasoning and generalization across diverse tasks.Distillation vs. Reinforcement LearningDistillation vs. RLAdvantages of Distillation:Distillation achieves better performance for smaller models with less computational effort compared to RL.DeepSeek-R1 distilled models outperform traditional RL-trained compact architectures, such as QwQ-32B.Challenges with RL:RL for smaller models is computationally intensive\n\nSource: https://abhishek-maheshwarappa.medium.com/deepseek-r1-revolutionizing-reasoning-with-reinforcement-learning-and-distillation-24f9e1877627\nTitle: DeepSeek-R1: Revolutionizing Reasoning with Reinforcement Learning and Distillation | by Abhishek Maheshwarappa | Jan, 2025 | Medium\nContent: This approach democratizes access to advanced reasoning capabilities for research and industry applications.DeepSeek-R1 EvaluationDeepSeek-R1\u2019s performance is benchmarked against industry-leading models, showcasing:Reasoning Tasks: Achieving superior accuracy in benchmarks like AIME 2024 and MATH-500.General QA: Outperforming competitors like GPT-4o and Claude in creative writing and instruction-following tasks.Long-Context Understanding: Excelling in tasks requiring extended reasoning, such as AlpacaEval and ArenaHard.These results highlight the effectiveness of RL in improving reasoning and generalization across diverse tasks.Distillation vs. Reinforcement LearningDistillation vs. RLAdvantages of Distillation:Distillation achieves better performance for smaller models with less computational effort compared to RL.DeepSeek-R1 distilled models outperform traditional RL-trained compact architectures, such as QwQ-32B.Challenges with RL:RL for smaller models is computationally intensive\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:14.338701",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.\nSmaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.\nFor instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to their efficiency-driven philosophy.\nComparative Cost Analysis\n\nSource: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: tasks.Cost Efficiency: How Does DeepSeek Keep Costs Low?DeepSeek has revolutionized cost management by addressing key areas where traditional development incurs significant expenditure:Energy Efficiency: Their proprietary accelerators consume up to 50% less power per training iteration compared to OpenAI\u2019s reliance on generic GPU clusters.Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.Smaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.For instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to their efficiency-driven philosophy.Comparative Cost\n\nSource: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: tasks.Cost Efficiency: How Does DeepSeek Keep Costs Low?DeepSeek has revolutionized cost management by addressing key areas where traditional development incurs significant expenditure:Energy Efficiency: Their proprietary accelerators consume up to 50% less power per training iteration compared to OpenAI\u2019s reliance on generic GPU clusters.Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.Smaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.For instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to their efficiency-driven philosophy.Comparative Cost\n\nSource: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: tasks.Cost Efficiency: How Does DeepSeek Keep Costs Low?DeepSeek has revolutionized cost management by addressing key areas where traditional development incurs significant expenditure:Energy Efficiency: Their proprietary accelerators consume up to 50% less power per training iteration compared to OpenAI\u2019s reliance on generic GPU clusters.Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.Smaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.For instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to their efficiency-driven philosophy.Comparative Cost\n\nSource: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: tasks.Cost Efficiency: How Does DeepSeek Keep Costs Low?DeepSeek has revolutionized cost management by addressing key areas where traditional development incurs significant expenditure:Energy Efficiency: Their proprietary accelerators consume up to 50% less power per training iteration compared to OpenAI\u2019s reliance on generic GPU clusters.Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.Smaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.For instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to their efficiency-driven philosophy.Comparative Cost\n\nSource: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: tasks.Cost Efficiency: How Does DeepSeek Keep Costs Low?DeepSeek has revolutionized cost management by addressing key areas where traditional development incurs significant expenditure:Energy Efficiency: Their proprietary accelerators consume up to 50% less power per training iteration compared to OpenAI\u2019s reliance on generic GPU clusters.Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.Smaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.For instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to their efficiency-driven philosophy.Comparative Cost\n\nSource: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: tasks.Cost Efficiency: How Does DeepSeek Keep Costs Low?DeepSeek has revolutionized cost management by addressing key areas where traditional development incurs significant expenditure:Energy Efficiency: Their proprietary accelerators consume up to 50% less power per training iteration compared to OpenAI\u2019s reliance on generic GPU clusters.Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.Smaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.For instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to their efficiency-driven philosophy.Comparative Cost\n\nSource: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: tasks.Cost Efficiency: How Does DeepSeek Keep Costs Low?DeepSeek has revolutionized cost management by addressing key areas where traditional development incurs significant expenditure:Energy Efficiency: Their proprietary accelerators consume up to 50% less power per training iteration compared to OpenAI\u2019s reliance on generic GPU clusters.Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.Smaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.For instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to their efficiency-driven philosophy.Comparative Cost\n\nSource: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: tasks.Cost Efficiency: How Does DeepSeek Keep Costs Low?DeepSeek has revolutionized cost management by addressing key areas where traditional development incurs significant expenditure:Energy Efficiency: Their proprietary accelerators consume up to 50% less power per training iteration compared to OpenAI\u2019s reliance on generic GPU clusters.Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.Smaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.For instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to their efficiency-driven philosophy.Comparative Cost\n\nSource: https://medium.com/@nrgore1/deepseek-vs-openai-a-comparative-analysis-of-llm-development-and-cost-efficiency-a8534f32c9a8\nTitle: DeepSeek Vs OpenAI: A comparative analysis of LLM development and cost efficiency | by Narendra Gore | Jan, 2025 | Medium\nContent: DeepSeek-R1 shines in domain-specific and instruction-following tasks.Cost Efficiency: How Does DeepSeek Keep Costs Low?DeepSeek has revolutionized cost management by addressing key areas where traditional development incurs significant expenditure:Energy Efficiency: Their proprietary accelerators consume up to 50% less power per training iteration compared to OpenAI\u2019s reliance on generic GPU clusters.Data Optimization: Instead of training on an exhaustive range of general-purpose data, DeepSeek-R1 focuses on carefully curated domain-specific datasets, achieving faster convergence and fewer training epochs.Smaller Teams, Bigger Impact: DeepSeek maintains a smaller team of AI engineers but employs automation tools for model testing and refinement, reducing human labor costs.For instance, while OpenAI reportedly spent upwards of $100 million on training GPT-4, DeepSeek achieved similar outcomes for DeepSeek-R1 with a budget closer to $15 million. This stark difference is a testament to\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:18.986600",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: of claims about the low cost of training its models, tech CEOs cited reports that DeepSeek actually had a stash of 50,000 Nvidia chips, which it could not talk about due to U.S. export controls. Those chips would cost somewhere in the region of $1 billion. It is, however, true that DeepSeek\u2019s new R1 model is far cheaper for users to access than its competitor model OpenAI o1, with its model access fees around 30 times lower ($2.19 per million \u201ctokens,\u201d or segments of words outputted, versus $60). That sparked worries among some investors of a looming price war in the American AI industry, which could reduce expected returns on investment and make it more difficult for U.S. companies to raise funds required to build new data centers to fuel their AI models.Oliver Stephenson, associate director of AI and emerging tech policy at the Federation of American Scientists, says that people shouldn\u2019t draw conclusions from this price point. \u201cWhile DeepSeek has made genuine efficiency gains,\n\nSource: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: To further obscure the picture, DeepSeek may also not be being entirely honest about its expenses. In the wake of claims about the low cost of training its models, tech CEOs cited reports that DeepSeek actually had a stash of 50,000 Nvidia chips, which it could not talk about due to U.S. export controls. Those chips would cost somewhere in the region of $1 billion.\nIt is, however, true that DeepSeek\u2019s new R1 model is far cheaper for users to access than its competitor model OpenAI o1, with its model access fees around 30 times lower ($2.19 per million \u201ctokens,\u201d or segments of words outputted, versus $60). That sparked worries among some investors of a looming price war in the American AI industry, which could reduce expected returns on investment and make it more difficult for U.S. companies to raise funds required to build new data centers to fuel their AI models.\n\nSource: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: actually had a stash of 50,000 Nvidia chips, which it could not talk about due to U.S. export controls. Those chips would cost somewhere in the region of $1 billion. It is, however, true that DeepSeek\u2019s new R1 model is far cheaper for users to access than its competitor model OpenAI o1, with its model access fees around 30 times lower ($2.19 per million \u201ctokens,\u201d or segments of words outputted, versus $60). That sparked worries among some investors of a looming price war in the American AI industry, which could reduce expected returns on investment and make it more difficult for U.S. companies to raise funds required to build new data centers to fuel their AI models.Oliver Stephenson, associate director of AI and emerging tech policy at the Federation of American Scientists, says that people shouldn\u2019t draw conclusions from this price point. \u201cWhile DeepSeek has made genuine efficiency gains, their pricing could be an attention-grabbing strategy,\u201d he says. \u201cThey could be making a loss\n\nSource: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: actually had a stash of 50,000 Nvidia chips, which it could not talk about due to U.S. export controls. Those chips would cost somewhere in the region of $1 billion. It is, however, true that DeepSeek\u2019s new R1 model is far cheaper for users to access than its competitor model OpenAI o1, with its model access fees around 30 times lower ($2.19 per million \u201ctokens,\u201d or segments of words outputted, versus $60). That sparked worries among some investors of a looming price war in the American AI industry, which could reduce expected returns on investment and make it more difficult for U.S. companies to raise funds required to build new data centers to fuel their AI models.Oliver Stephenson, associate director of AI and emerging tech policy at the Federation of American Scientists, says that people shouldn\u2019t draw conclusions from this price point. \u201cWhile DeepSeek has made genuine efficiency gains, their pricing could be an attention-grabbing strategy,\u201d he says. \u201cThey could be making a loss\n\nSource: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: not talk about due to U.S. export controls. Those chips would cost somewhere in the region of $1 billion. It is, however, true that DeepSeek\u2019s new R1 model is far cheaper for users to access than its competitor model OpenAI o1, with its model access fees around 30 times lower ($2.19 per million \u201ctokens,\u201d or segments of words outputted, versus $60). That sparked worries among some investors of a looming price war in the American AI industry, which could reduce expected returns on investment and make it more difficult for U.S. companies to raise funds required to build new data centers to fuel their AI models.Oliver Stephenson, associate director of AI and emerging tech policy at the Federation of American Scientists, says that people shouldn\u2019t draw conclusions from this price point. \u201cWhile DeepSeek has made genuine efficiency gains, their pricing could be an attention-grabbing strategy,\u201d he says. \u201cThey could be making a loss on inference.\u201d (Inference is the running of an already-formed\n\nSource: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: not talk about due to U.S. export controls. Those chips would cost somewhere in the region of $1 billion. It is, however, true that DeepSeek\u2019s new R1 model is far cheaper for users to access than its competitor model OpenAI o1, with its model access fees around 30 times lower ($2.19 per million \u201ctokens,\u201d or segments of words outputted, versus $60). That sparked worries among some investors of a looming price war in the American AI industry, which could reduce expected returns on investment and make it more difficult for U.S. companies to raise funds required to build new data centers to fuel their AI models.Oliver Stephenson, associate director of AI and emerging tech policy at the Federation of American Scientists, says that people shouldn\u2019t draw conclusions from this price point. \u201cWhile DeepSeek has made genuine efficiency gains, their pricing could be an attention-grabbing strategy,\u201d he says. \u201cThey could be making a loss on inference.\u201d (Inference is the running of an already-formed\n\nSource: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: \u201cIt\u2019s not a leap forward on AI frontier capabilities,\u201d says Lennart Heim, an AI researcher at RAND. \u201cI think the market just got it wrong.\u201d\nRead More: What to Know About DeepSeek, the Chinese AI Company Causing Stock Market Chaos\nHere are several claims being widely circulated about DeepSeek\u2019s implications, and why scientists say they\u2019re incomplete or outright wrong.\nClaim: DeepSeek is much cheaper than other models.\nIn December, DeepSeek reported that its V3 model cost just $6 million to train. This figure seemed startlingly low compared to the more than $100 million that OpenAI said it spent training GPT-4, or the \u201cfew tens of millions\u201d that Anthropic spent training a recent version of its Claude model.\n\nSource: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: it is not a massive technological breakthrough\u2014and that the American AI industry still has key advantages over China\u2019s.\u201cIt\u2019s not a leap forward on AI frontier capabilities,\u201d says Lennart Heim, an AI researcher at RAND. \u201cI think the market just got it wrong.\u201dRead More: What to Know About DeepSeek, the Chinese AI Company Causing Stock Market ChaosHere are several claims being widely circulated about DeepSeek\u2019s implications, and why scientists say they\u2019re incomplete or outright wrong. Claim: DeepSeek is much cheaper than other models. In December, DeepSeek reported that its V3 model cost just $6 million to train. This figure seemed startlingly low compared to the more than $100 million that OpenAI said it spent training GPT-4, or the \u201cfew tens of millions\u201d that Anthropic spent training a recent version of its Claude model.DeepSeek\u2019s lower price tag was thanks to some big efficiency gains that the company\u2019s researchers described in a paper accompanying their model\u2019s release. But were\n\nSource: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: it is not a massive technological breakthrough\u2014and that the American AI industry still has key advantages over China\u2019s.\u201cIt\u2019s not a leap forward on AI frontier capabilities,\u201d says Lennart Heim, an AI researcher at RAND. \u201cI think the market just got it wrong.\u201dRead More: What to Know About DeepSeek, the Chinese AI Company Causing Stock Market ChaosHere are several claims being widely circulated about DeepSeek\u2019s implications, and why scientists say they\u2019re incomplete or outright wrong. Claim: DeepSeek is much cheaper than other models. In December, DeepSeek reported that its V3 model cost just $6 million to train. This figure seemed startlingly low compared to the more than $100 million that OpenAI said it spent training GPT-4, or the \u201cfew tens of millions\u201d that Anthropic spent training a recent version of its Claude model.DeepSeek\u2019s lower price tag was thanks to some big efficiency gains that the company\u2019s researchers described in a paper accompanying their model\u2019s release. But were\n\nSource: https://time.com/7211646/is-deepseek-panic-overblown/\nTitle: Is the DeepSeek Panic Overblown? | TIME\nContent: They say that while DeepSeek does represent a genuine advancement in AI efficiency, it is not a massive technological breakthrough\u2014and that the American AI industry still has key advantages over China\u2019s.\u201cIt\u2019s not a leap forward on AI frontier capabilities,\u201d says Lennart Heim, an AI researcher at RAND. \u201cI think the market just got it wrong.\u201dRead More: What to Know About DeepSeek, the Chinese AI Company Causing Stock Market ChaosHere are several claims being widely circulated about DeepSeek\u2019s implications, and why scientists say they\u2019re incomplete or outright wrong. Claim: DeepSeek is much cheaper than other models. In December, DeepSeek reported that its V3 model cost just $6 million to train. This figure seemed startlingly low compared to the more than $100 million that OpenAI said it spent training GPT-4, or the \u201cfew tens of millions\u201d that Anthropic spent training a recent version of its Claude model.DeepSeek\u2019s lower price tag was thanks to some big efficiency gains that the\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:23.195901",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.philschmid.de/deepseek-r1\nTitle: Bite: How Deepseek R1 was trained\nContent: reaching performance levels comparable to OpenAI-o1-0912 alongside output token length per problem increasing, indicating the model naturally learns to solve tasks with more thinking time/token generation. This has the drawback of leading to poor readability and language mixing but it was solved for R1 using a multi-stage approach with alternating SFT \u2192 RL steps. The Multi-Stage Training of DeepSeek R1 To prevent the early unstable cold start phase of reinforcement training (RL) training from the base model, the team started with supervised fine-tuning. Stage 1/4 Base to Supervised Fine-Tuning (SFT) Collected up to 10k token-long chain-of-thought (CoT) using the fine-tuned models, R1-zero and human annotator. The data is used to fine-tune Deepseek V3 base to improve readbility and coherence. Stage 2/4 RL for Reasoning Used the same RL pipeline as R1-Zero, focusing on reasoning-intensive tasks such as coding and math using the same Rule-Based Reward Models. This time, an additional\n\nSource: https://www.philschmid.de/deepseek-r1\nTitle: Bite: How Deepseek R1 was trained\nContent: from 15.6% to 71.0%, reaching performance levels comparable to OpenAI-o1-0912 alongside output token length per problem increasing, indicating the model naturally learns to solve tasks with more thinking time/token generation. This has the drawback of leading to poor readability and language mixing but it was solved for R1 using a multi-stage approach with alternating SFT \u2192 RL steps. The Multi-Stage Training of DeepSeek R1 To prevent the early unstable cold start phase of reinforcement training (RL) training from the base model, the team started with supervised fine-tuning. Stage 1/4 Base to Supervised Fine-Tuning (SFT) Collected up to 10k token-long chain-of-thought (CoT) using the fine-tuned models, R1-zero and human annotator. The data is used to fine-tune Deepseek V3 base to improve readbility and coherence. Stage 2/4 RL for Reasoning Used the same RL pipeline as R1-Zero, focusing on reasoning-intensive tasks such as coding and math using the same Rule-Based Reward Models. This\n\nSource: https://www.philschmid.de/deepseek-r1\nTitle: Bite: How Deepseek R1 was trained\nContent: to OpenAI-o1-0912 alongside output token length per problem increasing, indicating the model naturally learns to solve tasks with more thinking time/token generation. This has the drawback of leading to poor readability and language mixing but it was solved for R1 using a multi-stage approach with alternating SFT \u2192 RL steps. The Multi-Stage Training of DeepSeek R1 To prevent the early unstable cold start phase of reinforcement training (RL) training from the base model, the team started with supervised fine-tuning. Stage 1/4 Base to Supervised Fine-Tuning (SFT) Collected up to 10k token-long chain-of-thought (CoT) using the fine-tuned models, R1-zero and human annotator. The data is used to fine-tune Deepseek V3 base to improve readbility and coherence. Stage 2/4 RL for Reasoning Used the same RL pipeline as R1-Zero, focusing on reasoning-intensive tasks such as coding and math using the same Rule-Based Reward Models. This time, an additional reward for \"language consistency\" is used\n\nSource: https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8\nTitle: DeepSeek R1: It\u2019s All About Architecture and Training Approach | by TeqnoVerse | Jan, 2025 | Medium\nContent: behavior by leveraging a diverse set of rewards. This includes reasoning-specific rewards for tasks with clear rules, like math or coding, and human preference rewards to align the model with values such as helpfulness and harmlessness. Additionally, the model is trained on a wide variety of prompts to enhance its generalizabilityThis multi-stage process allows DeepSeek R1 to leverage the power of both supervised learning and reinforcement learning, resulting in a model with strong reasoning capabilities, improved readability, and better alignment with human preferences.\n\nSource: https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8\nTitle: DeepSeek R1: It\u2019s All About Architecture and Training Approach | by TeqnoVerse | Jan, 2025 | Medium\nContent: behavior by leveraging a diverse set of rewards. This includes reasoning-specific rewards for tasks with clear rules, like math or coding, and human preference rewards to align the model with values such as helpfulness and harmlessness. Additionally, the model is trained on a wide variety of prompts to enhance its generalizabilityThis multi-stage process allows DeepSeek R1 to leverage the power of both supervised learning and reinforcement learning, resulting in a model with strong reasoning capabilities, improved readability, and better alignment with human preferences.\n\nSource: https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8\nTitle: DeepSeek R1: It\u2019s All About Architecture and Training Approach | by TeqnoVerse | Jan, 2025 | Medium\nContent: behavior by leveraging a diverse set of rewards. This includes reasoning-specific rewards for tasks with clear rules, like math or coding, and human preference rewards to align the model with values such as helpfulness and harmlessness. Additionally, the model is trained on a wide variety of prompts to enhance its generalizabilityThis multi-stage process allows DeepSeek R1 to leverage the power of both supervised learning and reinforcement learning, resulting in a model with strong reasoning capabilities, improved readability, and better alignment with human preferences.\n\nSource: https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8\nTitle: DeepSeek R1: It\u2019s All About Architecture and Training Approach | by TeqnoVerse | Jan, 2025 | Medium\nContent: behavior by leveraging a diverse set of rewards. This includes reasoning-specific rewards for tasks with clear rules, like math or coding, and human preference rewards to align the model with values such as helpfulness and harmlessness. Additionally, the model is trained on a wide variety of prompts to enhance its generalizabilityThis multi-stage process allows DeepSeek R1 to leverage the power of both supervised learning and reinforcement learning, resulting in a model with strong reasoning capabilities, improved readability, and better alignment with human preferences.\n\nSource: https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8\nTitle: DeepSeek R1: It\u2019s All About Architecture and Training Approach | by TeqnoVerse | Jan, 2025 | Medium\nContent: behavior by leveraging a diverse set of rewards. This includes reasoning-specific rewards for tasks with clear rules, like math or coding, and human preference rewards to align the model with values such as helpfulness and harmlessness. Additionally, the model is trained on a wide variety of prompts to enhance its generalizabilityThis multi-stage process allows DeepSeek R1 to leverage the power of both supervised learning and reinforcement learning, resulting in a model with strong reasoning capabilities, improved readability, and better alignment with human preferences.\n\nSource: https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8\nTitle: DeepSeek R1: It\u2019s All About Architecture and Training Approach | by TeqnoVerse | Jan, 2025 | Medium\nContent: behavior by leveraging a diverse set of rewards. This includes reasoning-specific rewards for tasks with clear rules, like math or coding, and human preference rewards to align the model with values such as helpfulness and harmlessness. Additionally, the model is trained on a wide variety of prompts to enhance its generalizabilityThis multi-stage process allows DeepSeek R1 to leverage the power of both supervised learning and reinforcement learning, resulting in a model with strong reasoning capabilities, improved readability, and better alignment with human preferences.\n\nSource: https://teqnoverse.medium.com/deepseek-r1-its-all-about-architecture-and-training-approach-50af74c223b8\nTitle: DeepSeek R1: It\u2019s All About Architecture and Training Approach | by TeqnoVerse | Jan, 2025 | Medium\nContent: behavior by leveraging a diverse set of rewards. This includes reasoning-specific rewards for tasks with clear rules, like math or coding, and human preference rewards to align the model with values such as helpfulness and harmlessness. Additionally, the model is trained on a wide variety of prompts to enhance its generalizabilityThis multi-stage process allows DeepSeek R1 to leverage the power of both supervised learning and reinforcement learning, resulting in a model with strong reasoning capabilities, improved readability, and better alignment with human preferences.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:31.465042",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://toloka.ai/blog/knowledge-distillation/\nTitle: Knowledge distillation: a way to make a large model more efficient and accessible\nContent: large language models (LLMs), knowledge distillation has facilitated the transfer of more abstract characteristics, such as the model's style, reasoning capabilities, and alignment with human preferences and values. Knowledge distillation techniques go beyond simply copying the outputs of teacher models; they strive to mimic the underlying \"thought processes\" of these models.Knowledge distillation represents one of the most effective ways to reduce the size of a model as well as its processing speed. As a result of model compression, the intricate and large deep neural network is condensed into a smaller and more simplified one, while preserving the accuracy and performance of the initial model.A scaled-down model, trained to replicate the behavior of a heavy and accurate teacher model, achieves similar results to the teacher model, significantly benefiting in terms of size and speed due to its simplified architecture. The student model may even outperform the teacher in some cases,\n\nSource: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\nTitle: Exploring Knowledge Distillation in Large Language Models | by Odunola Jenrola | Medium\nContent: Open in appSign upSign inWriteSign upSign inExploring Knowledge Distillation in Large Language ModelsOdunola Jenrola\u00b7Follow10 min read\u00b7Nov 15, 2023--1ListenSharePhoto by Growtika on UnsplashAI companies continue to scale up language models , yet practical deployment of these large models remains a formidable challenge. In this article, we delve into an invaluable technique to address this issue: Knowledge distillation.Knowledge distillation enables the transfer of expertise from a complex, large model, known as the \u201cteacher,\u201d to a more compact, lightweight model, the \u201cstudent.\u201d This is now a cornerstone in AI as it not only eases the computational burden of deploying complex models but also facilitates their application in real-world scenarios especially in resource constrained environment like edge devices and CPU-only environments.In this article, Our goal is to distill a large BERT transformer model I already finetuned for text classification. Our student model is a more compact\n\nSource: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\nTitle: Exploring Knowledge Distillation in Large Language Models | by Odunola Jenrola | Medium\nContent: Open in appSign upSign inWriteSign upSign inExploring Knowledge Distillation in Large Language ModelsOdunola Jenrola\u00b7Follow10 min read\u00b7Nov 15, 2023--1ListenSharePhoto by Growtika on UnsplashAI companies continue to scale up language models , yet practical deployment of these large models remains a formidable challenge. In this article, we delve into an invaluable technique to address this issue: Knowledge distillation.Knowledge distillation enables the transfer of expertise from a complex, large model, known as the \u201cteacher,\u201d to a more compact, lightweight model, the \u201cstudent.\u201d This is now a cornerstone in AI as it not only eases the computational burden of deploying complex models but also facilitates their application in real-world scenarios especially in resource constrained environment like edge devices and CPU-only environments.In this article, Our goal is to distill a large BERT transformer model I already finetuned for text classification. Our student model is a more compact\n\nSource: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\nTitle: Exploring Knowledge Distillation in Large Language Models | by Odunola Jenrola | Medium\nContent: Open in appSign upSign inWriteSign upSign inExploring Knowledge Distillation in Large Language ModelsOdunola Jenrola\u00b7Follow10 min read\u00b7Nov 15, 2023--1ListenSharePhoto by Growtika on UnsplashAI companies continue to scale up language models , yet practical deployment of these large models remains a formidable challenge. In this article, we delve into an invaluable technique to address this issue: Knowledge distillation.Knowledge distillation enables the transfer of expertise from a complex, large model, known as the \u201cteacher,\u201d to a more compact, lightweight model, the \u201cstudent.\u201d This is now a cornerstone in AI as it not only eases the computational burden of deploying complex models but also facilitates their application in real-world scenarios especially in resource constrained environment like edge devices and CPU-only environments.In this article, Our goal is to distill a large BERT transformer model I already finetuned for text classification. Our student model is a more compact\n\nSource: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\nTitle: Exploring Knowledge Distillation in Large Language Models | by Odunola Jenrola | Medium\nContent: Exploring Knowledge Distillation in Large Language ModelsOdunola Jenrola\u00b7Follow10 min read\u00b7Nov 15, 2023--1ListenSharePhoto by Growtika on UnsplashAI companies continue to scale up language models , yet practical deployment of these large models remains a formidable challenge. In this article, we delve into an invaluable technique to address this issue: Knowledge distillation.Knowledge distillation enables the transfer of expertise from a complex, large model, known as the \u201cteacher,\u201d to a more compact, lightweight model, the \u201cstudent.\u201d This is now a cornerstone in AI as it not only eases the computational burden of deploying complex models but also facilitates their application in real-world scenarios especially in resource constrained environment like edge devices and CPU-only environments.In this article, Our goal is to distill a large BERT transformer model I already finetuned for text classification. Our student model is a more compact model also based on BERT but having fewer\n\nSource: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\nTitle: Exploring Knowledge Distillation in Large Language Models | by Odunola Jenrola | Medium\nContent: Exploring Knowledge Distillation in Large Language ModelsOdunola Jenrola\u00b7Follow10 min read\u00b7Nov 15, 2023--1ListenSharePhoto by Growtika on UnsplashAI companies continue to scale up language models , yet practical deployment of these large models remains a formidable challenge. In this article, we delve into an invaluable technique to address this issue: Knowledge distillation.Knowledge distillation enables the transfer of expertise from a complex, large model, known as the \u201cteacher,\u201d to a more compact, lightweight model, the \u201cstudent.\u201d This is now a cornerstone in AI as it not only eases the computational burden of deploying complex models but also facilitates their application in real-world scenarios especially in resource constrained environment like edge devices and CPU-only environments.In this article, Our goal is to distill a large BERT transformer model I already finetuned for text classification. Our student model is a more compact model also based on BERT but having fewer\n\nSource: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\nTitle: Exploring Knowledge Distillation in Large Language Models | by Odunola Jenrola | Medium\nContent: Exploring Knowledge Distillation in Large Language ModelsOdunola Jenrola\u00b7Follow10 min read\u00b7Nov 15, 2023--1ListenSharePhoto by Growtika on UnsplashAI companies continue to scale up language models , yet practical deployment of these large models remains a formidable challenge. In this article, we delve into an invaluable technique to address this issue: Knowledge distillation.Knowledge distillation enables the transfer of expertise from a complex, large model, known as the \u201cteacher,\u201d to a more compact, lightweight model, the \u201cstudent.\u201d This is now a cornerstone in AI as it not only eases the computational burden of deploying complex models but also facilitates their application in real-world scenarios especially in resource constrained environment like edge devices and CPU-only environments.In this article, Our goal is to distill a large BERT transformer model I already finetuned for text classification. Our student model is a more compact model also based on BERT but having fewer\n\nSource: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\nTitle: Exploring Knowledge Distillation in Large Language Models | by Odunola Jenrola | Medium\nContent: Exploring Knowledge Distillation in Large Language ModelsOdunola Jenrola\u00b7Follow10 min read\u00b7Nov 15, 2023--1ListenSharePhoto by Growtika on UnsplashAI companies continue to scale up language models , yet practical deployment of these large models remains a formidable challenge. In this article, we delve into an invaluable technique to address this issue: Knowledge distillation.Knowledge distillation enables the transfer of expertise from a complex, large model, known as the \u201cteacher,\u201d to a more compact, lightweight model, the \u201cstudent.\u201d This is now a cornerstone in AI as it not only eases the computational burden of deploying complex models but also facilitates their application in real-world scenarios especially in resource constrained environment like edge devices and CPU-only environments.In this article, Our goal is to distill a large BERT transformer model I already finetuned for text classification. Our student model is a more compact model also based on BERT but having fewer\n\nSource: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\nTitle: Exploring Knowledge Distillation in Large Language Models | by Odunola Jenrola | Medium\nContent: Exploring Knowledge Distillation in Large Language ModelsOdunola Jenrola\u00b7Follow10 min read\u00b7Nov 15, 2023--1ListenSharePhoto by Growtika on UnsplashAI companies continue to scale up language models , yet practical deployment of these large models remains a formidable challenge. In this article, we delve into an invaluable technique to address this issue: Knowledge distillation.Knowledge distillation enables the transfer of expertise from a complex, large model, known as the \u201cteacher,\u201d to a more compact, lightweight model, the \u201cstudent.\u201d This is now a cornerstone in AI as it not only eases the computational burden of deploying complex models but also facilitates their application in real-world scenarios especially in resource constrained environment like edge devices and CPU-only environments.In this article, Our goal is to distill a large BERT transformer model I already finetuned for text classification. Our student model is a more compact model also based on BERT but having fewer\n\nSource: https://medium.com/@jenrola_odun/exploring-knowledge-distillation-in-large-language-models-9d9be2bff669\nTitle: Exploring Knowledge Distillation in Large Language Models | by Odunola Jenrola | Medium\nContent: Exploring Knowledge Distillation in Large Language ModelsOdunola Jenrola\u00b7Follow10 min read\u00b7Nov 15, 2023--1ListenSharePhoto by Growtika on UnsplashAI companies continue to scale up language models , yet practical deployment of these large models remains a formidable challenge. In this article, we delve into an invaluable technique to address this issue: Knowledge distillation.Knowledge distillation enables the transfer of expertise from a complex, large model, known as the \u201cteacher,\u201d to a more compact, lightweight model, the \u201cstudent.\u201d This is now a cornerstone in AI as it not only eases the computational burden of deploying complex models but also facilitates their application in real-world scenarios especially in resource constrained environment like edge devices and CPU-only environments.In this article, Our goal is to distill a large BERT transformer model I already finetuned for text classification. Our student model is a more compact model also based on BERT but having fewer\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:31.481780",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.01993298",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:04:31.506303",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T11:05:21.187079",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'What is distillation in LLM and how did deepseek r1 was trained on the same and why the current panic in AI industry led by deepseek r1 training cost is flawed'",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0,
+ "type": "report",
+ "content": "selected_images",
+ "output": "-distillation/](https://toloka.ai/blog/knowledge-distillation/)",
+ "metadata": [
+ "https://venturebeat.com/wp-content/uploads/2025/01/Screenshot-2025-01-25-at-6.06.56%E2%80%AFPM.png?w=800",
+ "https://cdn.arstechnica.net/wp-content/uploads/2025/01/chinese_ai_flag_2-1152x648.jpg",
+ "https://cdn.arstechnica.net/wp-content/uploads/2025/01/yann_post_screenshot.jpg",
+ "https://unfoldai.com/storage/2025/01/lm-studio-deepseek-r1.jpg",
+ "https://unfoldai.com/storage/2025/01/DeepSeek-R1-performance.jpg",
+ "https://unfoldai.com/storage/2025/01/distill-models-deepseek-r1-performance.jpg",
+ "https://substackcdn.com/image/fetch/w_1456,c_limit,f_auto,q_auto:good,fl_progressive:steep/https%3A%2F%2Fsubstack-post-media.s3.amazonaws.com%2Fpublic%2Fimages%2Fe48af6fa-8956-44b0-84cf-915e607f3b5e_1546x884.png"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1738581375_Whats the latest happening news in US economy and related to Banking sector.json b/outputs/task_1738581375_Whats the latest happening news in US economy and related to Banking sector.json
new file mode 100644
index 0000000000000000000000000000000000000000..5be33c2eabecb8ba6cb37552d56cee447a001030
--- /dev/null
+++ b/outputs/task_1738581375_Whats the latest happening news in US economy and related to Banking sector.json
@@ -0,0 +1,747 @@
+{
+ "timestamp": "2025-02-03T16:46:15.019625",
+ "events": [
+ {
+ "timestamp": "2025-02-03T16:46:17.567796",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'Whats the latest happening news in US economy and related to Banking sector '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:17.585853",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83d\udcf0 News Analyst Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:17.595551",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: Whats the latest happening news in US economy and related to Banking sector ...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:21.801050",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:25.047794",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['US economy news banking sector February 2025', 'Latest developments US banking sector interest rates February 2025', 'Impact of 2024 banking crisis on current US economy', 'Current state of major US banks financial performance', 'Whats the latest happening news in US economy and related to Banking sector ']...",
+ "metadata": [
+ "US economy news banking sector February 2025",
+ "Latest developments US banking sector interest rates February 2025",
+ "Impact of 2024 banking crisis on current US economy",
+ "Current state of major US banks financial performance",
+ "Whats the latest happening news in US economy and related to Banking sector "
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:25.067248",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'US economy news banking sector February 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:25.087881",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Latest developments US banking sector interest rates February 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:25.097897",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Impact of 2024 banking crisis on current US economy'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:25.106697",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Current state of major US banks financial performance'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:25.115705",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Whats the latest happening news in US economy and related to Banking sector '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:27.540130",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.brooklinebank.com/2025/02/01/economic-news-february-2025/\n",
+ "metadata": "https://www.brooklinebank.com/2025/02/01/economic-news-february-2025/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:27.554855",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.atlantafed.org/news/conferences-and-events/conferences/2025/02/27/banking-outlook-conference\n",
+ "metadata": "https://www.atlantafed.org/news/conferences-and-events/conferences/2025/02/27/banking-outlook-conference"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:27.563414",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.fitchratings.com/research/banks/us-banks-outlook-2025-03-12-2024\n",
+ "metadata": "https://www.fitchratings.com/research/banks/us-banks-outlook-2025-03-12-2024"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:27.572293",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\n",
+ "metadata": "https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:27.581103",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.spglobal.com/ratings/en/research/articles/250115-u-s-bank-outlook-2025-published-entering-a-new-phase-under-a-new-administration-13385413\n",
+ "metadata": "https://www.spglobal.com/ratings/en/research/articles/250115-u-s-bank-outlook-2025-published-entering-a-new-phase-under-a-new-administration-13385413"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:27.590873",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:27.600209",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:28.697392",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 4 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:28.706773",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://www.atlantafed.org/-/media/Images/news/conferences-and-events/conferences/2025/02/banking-outlook-conference/banner.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:28.716293",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:28.725978",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: US economy news banking sector February 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:29.070512",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.usbank.com/dam/en/documents/pdfs/wealth-management/situation-analysis-01-29-2025.pdf\n",
+ "metadata": "https://www.usbank.com/dam/en/documents/pdfs/wealth-management/situation-analysis-01-29-2025.pdf"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:29.170893",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.investopedia.com/interest-rates-outlook-2025-federal-reserve-mortgages-car-loans-credit-cards-8764416\n",
+ "metadata": "https://www.investopedia.com/interest-rates-outlook-2025-federal-reserve-mortgages-car-loans-credit-cards-8764416"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:29.181539",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.businessinsider.com/interest-rates-federal-reserve-holds-steady-powell-trump-inflation-tariffs-2025-1?op=1\n",
+ "metadata": "https://www.businessinsider.com/interest-rates-federal-reserve-holds-steady-powell-trump-inflation-tariffs-2025-1?op=1"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:29.191051",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html\n",
+ "metadata": "https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:29.201211",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:29.210271",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.216019",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.226956",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 2 total images",
+ "metadata": [
+ "https://www.investopedia.com/thmb/ZJ6oPtO2c4wyQBCT403FEfqXY50=/1500x0/filters:no_upscale():max_bytes(150000):strip_icc()/FedMeeting-0d70f08c723e4f84a28dc3acc86009d0.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.238107",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.247687",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Latest developments US banking sector interest rates February 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.297896",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\n",
+ "metadata": "https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.307812",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.spglobal.com/_assets/documents/ratings/research/101591972.pdf\n",
+ "metadata": "https://www.spglobal.com/_assets/documents/ratings/research/101591972.pdf"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.318795",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.reuters.com/business/finance/us-federal-reserve-releases-scenarios-2024-bank-stress-tests-2024-02-15/\n",
+ "metadata": "https://www.reuters.com/business/finance/us-federal-reserve-releases-scenarios-2024-bank-stress-tests-2024-02-15/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.329556",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://crsreports.congress.gov/product/pdf/IF/IF12649\n",
+ "metadata": "https://crsreports.congress.gov/product/pdf/IF/IF12649"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.339261",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://libertystreeteconomics.newyorkfed.org/2024/11/banking-system-vulnerability-2024-update/\n",
+ "metadata": "https://libertystreeteconomics.newyorkfed.org/2024/11/banking-system-vulnerability-2024-update/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.349403",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:31.359185",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.459678",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.470672",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 5 total images",
+ "metadata": [
+ "https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch1.png",
+ "https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch2.png",
+ "https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch3.png",
+ "https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch4.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.480187",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.491176",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Impact of 2024 banking crisis on current US economy...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.579227",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.fitchratings.com/research/banks/us-banks-4q24-results-reflect-positive-trends-24-01-2025\n",
+ "metadata": "https://www.fitchratings.com/research/banks/us-banks-4q24-results-reflect-positive-trends-24-01-2025"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.588920",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\n",
+ "metadata": "https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.599117",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.newyorkfed.org/medialibrary/media/research/banking_research/quarterlytrends2022q1.pdf?la=en\n",
+ "metadata": "https://www.newyorkfed.org/medialibrary/media/research/banking_research/quarterlytrends2022q1.pdf?la=en"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.608812",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.stlouisfed.org/on-the-economy/2024/dec/strong-earnings-asset-quality-put-us-banks-solid-ground\n",
+ "metadata": "https://www.stlouisfed.org/on-the-economy/2024/dec/strong-earnings-asset-quality-put-us-banks-solid-ground"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.620232",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:32.630664",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 4 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.140444",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 1 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.150712",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 0 new images from 0 total images",
+ "metadata": []
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.165285",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.178612",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Current state of major US banks financial performance...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.256912",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.wsj.com/finance/banking\n",
+ "metadata": "https://www.wsj.com/finance/banking"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.267449",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\n",
+ "metadata": "https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.277873",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.ft.com/us-banks\n",
+ "metadata": "https://www.ft.com/us-banks"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.287340",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html\n",
+ "metadata": "https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.298882",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.business-standard.com/topic/us-banks\n",
+ "metadata": "https://www.business-standard.com/topic/us-banks"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.309387",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:38.318907",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:39.926879",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:39.937487",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 16 total images",
+ "metadata": [
+ "https://media.cnn.com/api/v1/images/stellar/prod/e26ddeda-68ff-4213-9d35-d8d4d161bc8a.jpg?q=h_1996,w_3000,x_0,y_0/w_1280",
+ "https://media.cnn.com/api/v1/images/stellar/prod/230322171919-the-lead-richard-quest-00002603.png?c=16x9&q=w_1280,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/prod/65daac3c-c7b8-4fbe-aa65-7b70b8a87edf.jpg?q=h_1067,w_1600,x_0,y_0/w_1280",
+ "https://media.cnn.com/api/v1/images/stellar/prod/bc55cceb-a53b-497a-af43-ec9223279381.jpg?q=h_1067,w_1600,x_0,y_0/w_1280"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:39.947726",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:39.958461",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Whats the latest happening news in US economy and related to Banking sector ...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:41.480794",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.businessinsider.com/interest-rates-federal-reserve-holds-steady-powell-trump-inflation-tariffs-2025-1?op=1\nTitle: Fed Holds Interest Rates Steady in First Decision of 2025 - Business Insider\nContent: The Fed holds interest rates steady in its first decision of the year\nAyelet Sheffey 2025-01-29T19:00:29Z\nAyelet Sheffey 2025-01-29T19:00:29Z\nAyelet Sheffey 2025-01-29T19:00:29Z\nAyelet Sheffey 2025-01-29T19:00:29Z\nShare icon An curved arrow pointing right. Share Facebook Email X LinkedIn Copy Link lighning bolt icon An icon in the shape of a lightning bolt. Impact Link\nFacebook Email X LinkedIn Copy Link lighning bolt icon An icon in the shape of a lightning bolt. Impact Link\nlighning bolt icon An icon in the shape of a lightning bolt. Impact Link\nlighning bolt icon An icon in the shape of a lightning bolt. Impact Link\nlighning bolt icon An icon in the shape of a lightning bolt. Impact Link\nlighning bolt icon An icon in the shape of a lightning bolt.\nRead in app\nThe Federal Reserve held interest rates steady in its first decision of 2025. Alex Wong/Getty Images\nThe Federal Reserve held interest rates steady in its first decision of 2025.\nAlex Wong/Getty Images\nAlex Wong/Getty Images\n\nSource: https://www.businessinsider.com/interest-rates-federal-reserve-holds-steady-powell-trump-inflation-tariffs-2025-1?op=1\nTitle: Fed Holds Interest Rates Steady in First Decision of 2025 - Business Insider\nContent: Economy The Fed holds interest rates steady in its first decision of the year Ayelet Sheffey 2025-01-29T19:00:29Z Share icon An curved arrow pointing right. Share Facebook Email X LinkedIn Copy Link lighning bolt icon An icon in the shape of a lightning bolt. Impact Link Save Article Icon A bookmark Save Read in app The Federal Reserve held interest rates steady in its first decision of 2025. Alex Wong/Getty Images This story is available exclusively to Business Insider subscribers. Become an Insider and start reading now. Have an account? Log in. The Federal Reserve held interest rates steady on Wednesday.President Donald Trump has pressured the Fed to continue cutting rates.The Fed has two rate cuts penciled in for 2025, but Trump's trade policies could change its plans.The nation's central bank held interest rates steady in its first decision of the year. On Wednesday, the Federal Open Market Committee announced that it would maintain the Federal Reserve's target rate following a\n\nSource: https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html\nTitle: Fed holds on interest rates\u2014when borrowing costs could drop in 2025\nContent: The Federal Reserve left interest rates unchanged Wednesday amid uncertainty over when borrowing costs for loans, credit cards and auto financing might ease in 2025.The Fed's benchmark rate will stay in a range of 4.25% to 4.5%, keeping borrowing costs elevated in an effort to curb spending and bring down inflation.The central bank previously penciled in two 25-basis-point rate cuts as part of its projections for 2025, which would bring the benchmark rate to a range of 3.75% to 4% by year-end. However, the timing of these cuts has become less certain with year-over-year inflation ticking up from 2.5% to 2.9% since September, along with uncertainty about the inflationary effects of President Trump's proposed tariffs on foreign goods.A CNBC survey of economists published Tuesday shows that 65% think two rate cuts are expected, down from 78% in a previous survey conducted in December.Fed chair Jerome Powell said the central bank was entering a \"new phase\" where it would be \"cautious\n\nSource: https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html\nTitle: Fed holds on interest rates\u2014when borrowing costs could drop in 2025\nContent: The Federal Reserve left interest rates unchanged Wednesday amid uncertainty over when borrowing costs for loans, credit cards and auto financing might ease in 2025.The Fed's benchmark rate will stay in a range of 4.25% to 4.5%, keeping borrowing costs elevated in an effort to curb spending and bring down inflation.The central bank previously penciled in two 25-basis-point rate cuts as part of its projections for 2025, which would bring the benchmark rate to a range of 3.75% to 4% by year-end. However, the timing of these cuts has become less certain with year-over-year inflation ticking up from 2.5% to 2.9% since September, along with uncertainty about the inflationary effects of President Trump's proposed tariffs on foreign goods.A CNBC survey of economists published Tuesday shows that 65% think two rate cuts are expected, down from 78% in a previous survey conducted in December.Fed chair Jerome Powell said the central bank was entering a \"new phase\" where it would be \"cautious\n\nSource: https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html\nTitle: Fed holds on interest rates\u2014when borrowing costs could drop in 2025\nContent: The Federal Reserve left interest rates unchanged Wednesday amid uncertainty over when borrowing costs for loans, credit cards and auto financing might ease in 2025.\nThe Fed's benchmark rate will stay in a range of 4.25% to 4.5%, keeping borrowing costs elevated in an effort to curb spending and bring down inflation.\nThe central bank previously penciled in two 25-basis-point rate cuts as part of its projections for 2025, which would bring the benchmark rate to a range of 3.75% to 4% by year-end.\nHowever, the timing of these cuts has become less certain with year-over-year inflation ticking up from 2.5% to 2.9% since September, along with uncertainty about the inflationary effects of President Trump's proposed tariffs on foreign goods.\nA CNBC survey of economists published Tuesday shows that 65% think two rate cuts are expected, down from 78% in a previous survey conducted in December.\n\nSource: https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html\nTitle: Fed holds on interest rates\u2014when borrowing costs could drop in 2025\nContent: Related StoriesSave and InvestThe Fed cuts rates by another 25 basis points\u2014here\u2019s what just got cheaperSave and InvestThe Fed cut interest rates by another 25 basis points\u2014what will get cheaperSpendWhy mortgage rates keep climbing despite Fed interest rate cutsSpendWhy mortgage rates remain so high\u2014and what to expect in 2025SpendWhat's a 'tariff' again? And how they might end up raising prices for consumersSpendFed pauses interest rate cuts\u2014here\u2019s when borrowing costs might drop in 2025Published Wed, Jan 29 20252:05 PM ESTMike Winters@mike_wintrsShareShare Article via FacebookShare Article via TwitterShare Article via LinkedInShare Article via EmailUS Federal Reserve Chairman Jerome Powell speaks at a press conference after the Monetary Policy Committee meeting in Washington, DC, on December 18, 2024.Andrew Caballero-Reynolds | Getty ImagesThe Federal Reserve left interest rates unchanged Wednesday amid uncertainty over when borrowing costs for loans, credit cards and auto financing\n\nSource: https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html\nTitle: Fed holds on interest rates\u2014when borrowing costs could drop in 2025\nContent: Related StoriesSave and InvestThe Fed cuts rates by another 25 basis points\u2014here\u2019s what just got cheaperSave and InvestThe Fed cut interest rates by another 25 basis points\u2014what will get cheaperSpendWhy mortgage rates keep climbing despite Fed interest rate cutsSpendWhy mortgage rates remain so high\u2014and what to expect in 2025SpendWhat's a 'tariff' again? And how they might end up raising prices for consumersSpendFed pauses interest rate cuts\u2014here\u2019s when borrowing costs might drop in 2025Published Wed, Jan 29 20252:05 PM ESTMike Winters@mike_wintrsShareShare Article via FacebookShare Article via TwitterShare Article via LinkedInShare Article via EmailUS Federal Reserve Chairman Jerome Powell speaks at a press conference after the Monetary Policy Committee meeting in Washington, DC, on December 18, 2024.Andrew Caballero-Reynolds | Getty ImagesThe Federal Reserve left interest rates unchanged Wednesday amid uncertainty over when borrowing costs for loans, credit cards and auto financing\n\nSource: https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html\nTitle: Fed holds on interest rates\u2014when borrowing costs could drop in 2025\nContent: Related StoriesSave and InvestThe Fed cuts rates by another 25 basis points\u2014here\u2019s what just got cheaperSave and InvestThe Fed cut interest rates by another 25 basis points\u2014what will get cheaperSpendWhy mortgage rates keep climbing despite Fed interest rate cutsSpendWhy mortgage rates remain so high\u2014and what to expect in 2025SpendWhat's a 'tariff' again? And how they might end up raising prices for consumersSpendFed pauses interest rate cuts\u2014here\u2019s when borrowing costs might drop in 2025Published Wed, Jan 29 20252:05 PM ESTMike Winters@mike_wintrsShareShare Article via FacebookShare Article via TwitterShare Article via LinkedInShare Article via EmailUS Federal Reserve Chairman Jerome Powell speaks at a press conference after the Monetary Policy Committee meeting in Washington, DC, on December 18, 2024.Andrew Caballero-Reynolds | Getty ImagesThe Federal Reserve left interest rates unchanged Wednesday amid uncertainty over when borrowing costs for loans, credit cards and auto financing\n\nSource: https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html\nTitle: Fed holds on interest rates\u2014when borrowing costs could drop in 2025\nContent: Related StoriesSave and InvestThe Fed cuts rates by another 25 basis points\u2014here\u2019s what just got cheaperSave and InvestThe Fed cut interest rates by another 25 basis points\u2014what will get cheaperSpendWhy mortgage rates keep climbing despite Fed interest rate cutsSpendWhy mortgage rates remain so high\u2014and what to expect in 2025SpendWhat's a 'tariff' again? And how they might end up raising prices for consumersSpendFed pauses interest rate cuts\u2014here\u2019s when borrowing costs might drop in 2025Published Wed, Jan 29 20252:05 PM ESTMike Winters@mike_wintrsShareShare Article via FacebookShare Article via TwitterShare Article via LinkedInShare Article via EmailUS Federal Reserve Chairman Jerome Powell speaks at a press conference after the Monetary Policy Committee meeting in Washington, DC, on December 18, 2024.Andrew Caballero-Reynolds | Getty ImagesThe Federal Reserve left interest rates unchanged Wednesday amid uncertainty over when borrowing costs for loans, credit cards and auto financing\n\nSource: https://www.cnbc.com/2025/01/29/fed-holds-on-interest-rates-when-borrowing-costs-could-drop.html\nTitle: Fed holds on interest rates\u2014when borrowing costs could drop in 2025\nContent: Article via LinkedInShare Article via EmailUS Federal Reserve Chairman Jerome Powell speaks at a press conference after the Monetary Policy Committee meeting in Washington, DC, on December 18, 2024.Andrew Caballero-Reynolds | Getty ImagesThe Federal Reserve left interest rates unchanged Wednesday amid uncertainty over when borrowing costs for loans, credit cards and auto financing might ease in 2025.The Fed's benchmark rate will stay in a range of 4.25% to 4.5%, keeping borrowing costs elevated in an effort to curb spending and bring down inflation.The central bank previously penciled in two 25-basis-point rate cuts as part of its projections for 2025, which would bring the benchmark rate to a range of 3.75% to 4% by year-end. However, the timing of these cuts has become less certain with year-over-year inflation ticking up from 2.5% to 2.9% since September, along with uncertainty about the inflationary effects of President Trump's proposed tariffs on foreign goods.A CNBC survey of\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:46.635156",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: Why economists are warning of another US banking crisis Published: February 26, 2024 5.19pm GMT\nWhy economists are warning of another US banking crisis Published: February 26, 2024 5.19pm GMT\nWhy economists are warning of another US banking crisis\nWhy economists are warning of another US banking crisis\nWhy economists are warning of another US banking crisis\nPublished: February 26, 2024 5.19pm GMT\n\nSource: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: Why economists are warning of another US banking crisis Published: February 26, 2024 5.19pm GMT Ru Xie, University of Bath Author Ru Xie Associate Professor in Finance, University of Bath Disclosure statement Ru Xie does not work for, consult, own shares in or receive funding from any company or organisation that would benefit from this article, and has disclosed no relevant affiliations beyond their academic appointment. Partners University of Bath provides funding as a member of The Conversation UK. View all partners Fed Chair Jay Powell is betting that banks can withstand the end of his rescue programme in March. EPA Copy link Email X (Twitter) Bluesky Facebook LinkedIn WhatsApp Messenger https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092 Link copied Share article March 2024 is making investors nervous. A major scheme to prop up the US banking system is ending, while a second may be winding down. Some economic commentators fear another\n\nSource: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: Edition: Available editions Global Africa Australia Brasil Canada Canada (fran\u00e7ais) Espa\u00f1a Europe France Indonesia New Zealand United Kingdom United States Get newsletter Become an author Sign up as a reader Sign in Search Academic rigour, journalistic flair Why economists are warning of another US banking crisis Published: February 26, 2024 5.19pm GMT Ru Xie, University of Bath Author Ru Xie Associate Professor in Finance, University of Bath Disclosure statement Ru Xie does not work for, consult, own shares in or receive funding from any company or organisation that would benefit from this article, and has disclosed no relevant affiliations beyond their academic appointment. Partners University of Bath provides funding as a member of The Conversation UK. View all partners Fed Chair Jay Powell is betting that banks can withstand the end of his rescue programme in March. EPA Copy link Email X (Twitter) Bluesky Facebook LinkedIn WhatsApp Messenger\n\nSource: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: Ru Xie, University of Bath Author Ru Xie Associate Professor in Finance, University of Bath Disclosure statement Ru Xie does not work for, consult, own shares in or receive funding from any company or organisation that would benefit from this article, and has disclosed no relevant affiliations beyond their academic appointment. Partners University of Bath provides funding as a member of The Conversation UK. View all partners Fed Chair Jay Powell is betting that banks can withstand the end of his rescue programme in March. EPA Copy link Email X (Twitter) Bluesky Facebook LinkedIn WhatsApp Messenger https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092 Link copied Share article March 2024 is making investors nervous. A major scheme to prop up the US banking system is ending, while a second may be winding down. Some economic commentators fear another banking crisis. So how worried should we be? The red letter day is March 11, when US central bank the\n\nSource: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: Copy link Email X (Twitter) Bluesky Facebook LinkedIn WhatsApp Messenger https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092 Link copied Share article\nhttps://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092 Link copied Share article\nhttps://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092 Link copied\n\nSource: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: Fed Chair Jay Powell is betting that banks can withstand the end of his rescue programme in March. EPA Copy link Email X (Twitter) Bluesky Facebook LinkedIn WhatsApp Messenger https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092 Link copied Share article March 2024 is making investors nervous. A major scheme to prop up the US banking system is ending, while a second may be winding down. Some economic commentators fear another banking crisis. So how worried should we be? The red letter day is March 11, when US central bank the Federal Reserve will end the bank term funding program (BTFP), a year after it began in response to the failures of regional banks Signature, Silvergate and Silicon Valley. These banks were brought down by customers withdrawing deposits en masse, both because many were tech or crypto businesses that needed money to cover losses, and because there were better savings rates available elsewhere. This damaged the banks\u2019\n\nSource: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: programme in March. EPA Copy link Email X (Twitter) Bluesky Facebook LinkedIn WhatsApp Messenger https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092 Link copied Share article March 2024 is making investors nervous. A major scheme to prop up the US banking system is ending, while a second may be winding down. Some economic commentators fear another banking crisis. So how worried should we be? The red letter day is March 11, when US central bank the Federal Reserve will end the bank term funding program (BTFP), a year after it began in response to the failures of regional banks Signature, Silvergate and Silicon Valley. These banks were brought down by customers withdrawing deposits en masse, both because many were tech or crypto businesses that needed money to cover losses, and because there were better savings rates available elsewhere. This damaged the banks\u2019 profitability at a time when raised interest rates had already weakened their balance\n\nSource: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: March 2024 is making investors nervous. A major scheme to prop up the US banking system is ending, while a second may be winding down. Some economic commentators fear another banking crisis. So how worried should we be?\nThe red letter day is March 11, when US central bank the Federal Reserve will end the bank term funding program (BTFP), a year after it began in response to the failures of regional banks Signature, Silvergate and Silicon Valley. These banks were brought down by customers withdrawing deposits en masse, both because many were tech or crypto businesses that needed money to cover losses, and because there were better savings rates available elsewhere.\n\nSource: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: that programme. Nonetheless, the transition could be bumpy, with banks potentially raising lending rates and becoming less willing to lend. Many analysts expect the buffer to disappear in 2024, with a range of predictions from late in the year to as soon as March. Risky times Heightened interest rates have already led to the most stringent credit standards and weakest loan demand from consumers and businesses in a long time in the US. Meanwhile, banks are dealing with other major challenges such as the plunge in demand for office space as a result of home working. This has brought the medium-sized New York Community Bank to the brink in recent weeks, for instance. The closure of the BTFP and the end of the reverse repo buffer, particularly if they coincide, could clearly make banks even more risk averse and profit-hungry. The danger is that this all damages the economy to the point that bad debts pile up and we hit another 2008-style liquidity crisis where banks become wary of lending\n\nSource: https://theconversation.com/why-economists-are-warning-of-another-us-banking-crisis-224092\nTitle: Why economists are warning of another US banking crisis\nContent: will the system feel the full effect of QT. At this stage, the Fed has indicated it will slow and then end that programme. Nonetheless, the transition could be bumpy, with banks potentially raising lending rates and becoming less willing to lend. Many analysts expect the buffer to disappear in 2024, with a range of predictions from late in the year to as soon as March. Risky times Heightened interest rates have already led to the most stringent credit standards and weakest loan demand from consumers and businesses in a long time in the US. Meanwhile, banks are dealing with other major challenges such as the plunge in demand for office space as a result of home working. This has brought the medium-sized New York Community Bank to the brink in recent weeks, for instance. The closure of the BTFP and the end of the reverse repo buffer, particularly if they coincide, could clearly make banks even more risk averse and profit-hungry. The danger is that this all damages the economy to the\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:46:52.045184",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: banks, Goldman Sachs shares finished 2024 48% higher, while JPMorgan enjoyed a 41% gain and Wells Fargo shares climbed 43%.JPMorgan reported Wednesday that its interest income fell 3% to $23.5 billion, thanks to a downtick in interest rates.CEO Jamie Dimon said the bank got a boost from investment banking business, where fees rose 49% and markets revenue jumped 21%. The bank\u2019s consumer banking business also thrived, with clients opening nearly 2 million checking accounts.The New York bank set aside $2.6 billion to cover bad loans, down slightly from the same period a year ago.Dimon said the U.S. economy remains strong, noting the nation\u2019s low unemployment and strong consumer spending.\u201cBusinesses are more optimistic about the economy, and they are encouraged by expectations for a more pro-growth agenda and improved collaboration between government and business,\u201d said, alluding to the incoming Trump administration which has promised to cut regulations across industries. Dimon said that\n\nSource: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: Business JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024\nBusiness JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024\nJPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024\n\nSource: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: Citigroup, Wells Fargo and Goldman Sachs also issued strong results on Wednesday.\nThe country\u2019s biggest banks have benefitted from higher interest rates for the last two years, when the Federal Reserve jacked up rates to combat the inflation that took root in the wake of the COVID-19 pandemic.\nThe government\u2019s latest consumer prices report, also issued Wednesday, showed that prices for many essentials rose, pushing the consumer price index to 2.9% in December, the highest it has been since July. But underlying inflation trends \u2014 watched closely by the Fed \u2014 slowed to 3.2% in December, better than analysts expected and a good sign for consumers and the broader economy.\nprices for many essentials rose\n\nSource: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: Business JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 Pedestrians approach JP Morgan Chase headquarters, Wednesday, Dec. 29, 2023, in New York. (AP Photo/Peter Morgan, File) By MATT OTT Share Share Copy Link copied Email Facebook X Reddit LinkedIn Pinterest Flipboard Print WASHINGTON (AP) \u2014 JPMorgan\u2019s net income soared 50% to more than $14 billion in the fourth quarter as the bank\u2019s profit and revenue easily beat Wall Street forecasts, and other major banks reported banner earnings for the year as businesses and consumers continued to spend despite elevated interest rates. JPMorgan\u2019s earnings per share rose to $4.81 from $3.04 a year ago. The result beat Wall Street profit projections of $4.09 a share, according to the data firm FactSet. Total managed revenue hit $43.7 billion, up 10%, from $39.9 billion a year ago. Wall Street was expecting revenue of $41.9 billion.JPMorgan posted a record $54 billion profit for the year, or $18.22 per\n\nSource: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: By MATT OTT Share Share Copy Link copied Email Facebook X Reddit LinkedIn Pinterest Flipboard Print WASHINGTON (AP) \u2014 JPMorgan\u2019s net income soared 50% to more than $14 billion in the fourth quarter as the bank\u2019s profit and revenue easily beat Wall Street forecasts, and other major banks reported banner earnings for the year as businesses and consumers continued to spend despite elevated interest rates. JPMorgan\u2019s earnings per share rose to $4.81 from $3.04 a year ago. The result beat Wall Street profit projections of $4.09 a share, according to the data firm FactSet. Total managed revenue hit $43.7 billion, up 10%, from $39.9 billion a year ago. Wall Street was expecting revenue of $41.9 billion.JPMorgan posted a record $54 billion profit for the year, or $18.22 per share, adjusted for one-time expenses. JPMorgan shares rose just less than 1% in morning trading.Citigroup, Wells Fargo and Goldman Sachs also issued strong results on Wednesday. The country\u2019s biggest banks have benefitted\n\nSource: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: last year, the Nasdaq climbed more than 28% and the Dow finished up nearly 13%. As for the banks, Goldman Sachs shares finished 2024 48% higher, while JPMorgan enjoyed a 41% gain and Wells Fargo shares climbed 43%.JPMorgan reported Wednesday that its interest income fell 3% to $23.5 billion, thanks to a downtick in interest rates.CEO Jamie Dimon said the bank got a boost from investment banking business, where fees rose 49% and markets revenue jumped 21%. The bank\u2019s consumer banking business also thrived, with clients opening nearly 2 million checking accounts.The New York bank set aside $2.6 billion to cover bad loans, down slightly from the same period a year ago.Dimon said the U.S. economy remains strong, noting the nation\u2019s low unemployment and strong consumer spending.\u201cBusinesses are more optimistic about the economy, and they are encouraged by expectations for a more pro-growth agenda and improved collaboration between government and business,\u201d said, alluding to the incoming\n\nSource: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: WASHINGTON (AP) \u2014 JPMorgan\u2019s net income soared 50% to more than $14 billion in the fourth quarter as the bank\u2019s profit and revenue easily beat Wall Street forecasts, and other major banks reported banner earnings for the year as businesses and consumers continued to spend despite elevated interest rates. JPMorgan\u2019s earnings per share rose to $4.81 from $3.04 a year ago. The result beat Wall Street profit projections of $4.09 a share, according to the data firm FactSet. Total managed revenue hit $43.7 billion, up 10%, from $39.9 billion a year ago. Wall Street was expecting revenue of $41.9 billion.JPMorgan posted a record $54 billion profit for the year, or $18.22 per share, adjusted for one-time expenses. JPMorgan shares rose just less than 1% in morning trading.Citigroup, Wells Fargo and Goldman Sachs also issued strong results on Wednesday. The country\u2019s biggest banks have benefitted from higher interest rates for the last two years, when the Federal Reserve jacked up rates to\n\nSource: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: rates.CEO Jamie Dimon said the bank got a boost from investment banking business, where fees rose 49% and markets revenue jumped 21%. The bank\u2019s consumer banking business also thrived, with clients opening nearly 2 million checking accounts.The New York bank set aside $2.6 billion to cover bad loans, down slightly from the same period a year ago.Dimon said the U.S. economy remains strong, noting the nation\u2019s low unemployment and strong consumer spending.\u201cBusinesses are more optimistic about the economy, and they are encouraged by expectations for a more pro-growth agenda and improved collaboration between government and business,\u201d said, alluding to the incoming Trump administration which has promised to cut regulations across industries. Dimon said that any regulation should balance promoting growth and keeping the banking system safe. \u201cThis is not about weakening regulation ... but rather about setting rules that are transparent, fair and holistic in their approach and based on\n\nSource: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: Wednesday that its interest income fell 3% to $23.5 billion, thanks to a downtick in interest rates.CEO Jamie Dimon said the bank got a boost from investment banking business, where fees rose 49% and markets revenue jumped 21%. The bank\u2019s consumer banking business also thrived, with clients opening nearly 2 million checking accounts.The New York bank set aside $2.6 billion to cover bad loans, down slightly from the same period a year ago.Dimon said the U.S. economy remains strong, noting the nation\u2019s low unemployment and strong consumer spending.\u201cBusinesses are more optimistic about the economy, and they are encouraged by expectations for a more pro-growth agenda and improved collaboration between government and business,\u201d said, alluding to the incoming Trump administration which has promised to cut regulations across industries. Dimon said that any regulation should balance promoting growth and keeping the banking system safe. \u201cThis is not about weakening regulation ... but rather\n\nSource: https://apnews.com/article/jpmorgan-chase-bank-earnings-profit-38c5a832fdb4503d8483d6115b4a8ee1\nTitle: JPMorgan posts record annual profits as major US banks thrive in the final quarter of 2024 | AP News\nContent: as other US companies scale theirs back That, combined with the strong bank earnings, boosted markets, with the S&P 500 and Dow Jones Industrials each climbing 1.7% and the technology-heavy Nasdaq gaining 2.2%. As great as 2024 was for markets, bank stocks did even better, despite the Federal Reserve trimming its benchmark interest rate three times between September and December.When it issued its last cut in December, the Fed also trimmed its forecast for 2025 rate cuts to two from four as inflation remained stubbornly above the Fed\u2019s 2% target. That sent markets into a mini-slump, but not enough to dampen what was a spectacular 2024 run. The S&P gained 23% last year, the Nasdaq climbed more than 28% and the Dow finished up nearly 13%. As for the banks, Goldman Sachs shares finished 2024 48% higher, while JPMorgan enjoyed a 41% gain and Wells Fargo shares climbed 43%.JPMorgan reported Wednesday that its interest income fell 3% to $23.5 billion, thanks to a downtick in interest\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:47:02.163460",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html\nTitle: What\u2019s really going on with bank stocks | CNN Business\nContent: Markets DOW S&P 500 NASDAQ Hot Stocks Fear & Greed Index ----- is driving the US market Latest Market News Trump\u2019s under-the-radar Alaska order has environmentalists on edge Stock markets slide around the world as Trump\u2019s new trade war rattles investor confidence Target was one of the most outspoken supporters of DEI. It\u2019s changed its tune Hot Stocks ----- is driving the US market Something isn't loading properly. Please check back later. Ad Feedback Ad Feedback Business / Economy What\u2019s really going on with bank stocks By Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024 Link Copied! The New York Community Bancorp shares plunged by nearly 50% over two days after reporting a surprise loss tied to deteriorating credit quality and a cut to its dividend. Bing Guan/Bloomberg/Getty Images New York CNN \u2014 On Wednesday, the Federal Reserve ditched a line it used in every meeting statement since three banks failed last spring, which said that the \u201cUS banking\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: discussed current conditions in the banking sector and noted that while some institutions have come under stress, the US banking system remains sound and resilient,\u201d the readout said. It\u2019s not clear which banks in particular were discussed but the meeting comes after days of turbulence in the share price of regional banks and a plunge for Germany\u2019s biggest bank, Deutsche Bank. Treasury said regulators discussed ongoing efforts at agencies to monitor financial developments and also heard a presentation from staff at the New York Federal Reserve Bank on \u201cmarket developments.\u201d Link Copied! Stocks end Friday and the week higher despite lingering banking fears From CNN's Krystal Hur People pass the front of the New York Stock Exchange on March 21. (Peter Morgan/AP) Stocks closed higher Friday, recovering from earlier losses brought about by a plunge in Deutsche Bank stock. Shares of the German bank fell 8.5% after a surge in its bond insurances prices spiked investors\u2019 fears about the\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: discussed current conditions in the banking sector and noted that while some institutions have come under stress, the US banking system remains sound and resilient,\u201d the readout said. It\u2019s not clear which banks in particular were discussed but the meeting comes after days of turbulence in the share price of regional banks and a plunge for Germany\u2019s biggest bank, Deutsche Bank. Treasury said regulators discussed ongoing efforts at agencies to monitor financial developments and also heard a presentation from staff at the New York Federal Reserve Bank on \u201cmarket developments.\u201d Link Copied! Stocks end Friday and the week higher despite lingering banking fears From CNN's Krystal Hur People pass the front of the New York Stock Exchange on March 21. (Peter Morgan/AP) Stocks closed higher Friday, recovering from earlier losses brought about by a plunge in Deutsche Bank stock. Shares of the German bank fell 8.5% after a surge in its bond insurances prices spiked investors\u2019 fears about the\n\nSource: https://www.ft.com/us-banks\nTitle: US banks\nContent: to the troubled $6tn commercial real estate market weighs heavily on regional and community banks Save Tuesday, 21 January, 2025 LexGoldman SachsBanks find new weapon in private markets fight: the org chart Premium contentGoldman Sachs is perhaps best placed to think more creatively about collaboration Save Tuesday, 21 January, 2025 JPMorgan Chase & CoUS banks in \u2018go-mode\u2019 under Trump, says JPMorgan executiveWall Street bets that new administration\u2019s lighter-touch regulatory regime will spur dealmaking Save Friday, 17 January, 2025 US equitiesUS stocks post best week since Donald Trump\u2019s election winEasing underlying US inflation pressures and strong bank earnings \u2018emboldened the bulls\u2019 on Wall Street Save Friday, 17 January, 2025 On Wall StreetCraig CobenHow the bonus season unfoldsThe past dramas of \u2018comp days\u2019 have given way to more sanitised procedures Save Friday, 17 January, 2025 Biggest US banks notch up $142bn in profits in blockbuster 2024Robust performance in trading and\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: Latest on global markets and banking crisis By Krystal Hur and Nicole Goodkind, CNN Business Updated 5:59 PM EDT, Fri March 24, 2023 Link Copied!\nLatest on global markets and banking crisis\nLatest on global markets and banking crisis\nBy Krystal Hur and Nicole Goodkind, CNN Business Updated 5:59 PM EDT, Fri March 24, 2023 Link Copied!\nBy Krystal Hur and Nicole Goodkind, CNN Business Updated 5:59 PM EDT, Fri March 24, 2023\nBy Krystal Hur and Nicole Goodkind, CNN Business Updated 5:59 PM EDT, Fri March 24, 2023\nBy Krystal Hur and Nicole Goodkind, CNN Business\nBy Krystal Hur and Nicole Goodkind, CNN Business\nUpdated 5:59 PM EDT, Fri March 24, 2023\nUpdated 5:59 PM EDT, Fri March 24, 2023\nVideo Ad Feedback Richard Quest explains what the Federal Reserve's latest rate hike decision means for consumers 01:21 - Source: CNN\nVideo Ad Feedback Richard Quest explains what the Federal Reserve's latest rate hike decision means for consumers 01:21 - Source: CNN\nVideo Ad Feedback\nVideo Ad Feedback\n\nSource: https://www.ft.com/us-banks\nTitle: US banks\nContent: US banks Add to myFT Friday, 31 January, 2025 Goldman SachsPutin allows Goldman Sachs to quit RussiaSale to Armenian investment fund comes almost three years after US investment bank pledged to leave Save Monday, 27 January, 2025 Citigroup IncCiti loses head of private banking unitIda Liu was one of few senior female executives below chief executive Jane Fraser Save Thursday, 23 January, 2025 JPMorgan Chase & CoJPMorgan lifts chief executive Dimon\u2019s pay by 8% to $39mnWall Street bank says increase reflects his \u2018stewardship of the firm\u2019 Save Thursday, 23 January, 2025 LexRegional US bank stocks are still in the doghouse Premium contentExposure to the troubled $6tn commercial real estate market weighs heavily on regional and community banks Save Tuesday, 21 January, 2025 LexGoldman SachsBanks find new weapon in private markets fight: the org chart Premium contentGoldman Sachs is perhaps best placed to think more creatively about collaboration Save Tuesday, 21 January, 2025 JPMorgan\n\nSource: https://www.ft.com/us-banks\nTitle: US banks\nContent: US banks Add to myFT Friday, 31 January, 2025 Goldman SachsPutin allows Goldman Sachs to quit RussiaSale to Armenian investment fund comes almost three years after US investment bank pledged to leave Save Monday, 27 January, 2025 Citigroup IncCiti loses head of private banking unitIda Liu was one of few senior female executives below chief executive Jane Fraser Save Thursday, 23 January, 2025 JPMorgan Chase & CoJPMorgan lifts chief executive Dimon\u2019s pay by 8% to $39mnWall Street bank says increase reflects his \u2018stewardship of the firm\u2019 Save Thursday, 23 January, 2025 LexRegional US bank stocks are still in the doghouse Premium contentExposure to the troubled $6tn commercial real estate market weighs heavily on regional and community banks Save Tuesday, 21 January, 2025 LexGoldman SachsBanks find new weapon in private markets fight: the org chart Premium contentGoldman Sachs is perhaps best placed to think more creatively about collaboration Save Tuesday, 21 January, 2025 JPMorgan\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: come under stress, the US banking system remains sound and resilient,\u201d the readout said. It\u2019s not clear which banks in particular were discussed but the meeting comes after days of turbulence in the share price of regional banks and a plunge for Germany\u2019s biggest bank, Deutsche Bank. Treasury said regulators discussed ongoing efforts at agencies to monitor financial developments and also heard a presentation from staff at the New York Federal Reserve Bank on \u201cmarket developments.\u201d Link Copied! Stocks end Friday and the week higher despite lingering banking fears From CNN's Krystal Hur People pass the front of the New York Stock Exchange on March 21. (Peter Morgan/AP) Stocks closed higher Friday, recovering from earlier losses brought about by a plunge in Deutsche Bank stock. Shares of the German bank fell 8.5% after a surge in its bond insurances prices spiked investors\u2019 fears about the state of the financial sector. All three major indexes rose to end the week. The Dow Jones\n\nSource: https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html\nTitle: What\u2019s really going on with bank stocks | CNN Business\nContent: Hot Stocks ----- is driving the US market\n----- is driving the US market\nSomething isn't loading properly. Please check back later.\nSomething isn't loading properly. Please check back later.\nBusiness / Economy\nWhat\u2019s really going on with bank stocks By Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024 Link Copied!\nWhat\u2019s really going on with bank stocks\nWhat\u2019s really going on with bank stocks\nBy Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024 Link Copied!\nBy Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024\nBy Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024\nBy Elisabeth Buchwald, CNN\nBy Elisabeth Buchwald, CNN\n3 minute read Updated 8:15 AM EST, Fri February 2, 2024\n3 minute read\nUpdated 8:15 AM EST, Fri February 2, 2024\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: banks and a plunge for Germany\u2019s biggest bank, Deutsche Bank. Treasury said regulators discussed ongoing efforts at agencies to monitor financial developments and also heard a presentation from staff at the New York Federal Reserve Bank on \u201cmarket developments.\u201d Link Copied!\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:47:33.861687",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate,\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate,\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: industry outlooks Read more from the Deloitte Center for Financial Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: investments should keep expenses elevated.Credit quality is expected to normalize but could edge higher in 2025. Financial services industry outlooks Read more from the Deloitte Center for Financial Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully.\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: investments should keep expenses elevated.Credit quality is expected to normalize but could edge higher in 2025. Financial services industry outlooks Read more from the Deloitte Center for Financial Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully.\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate, and weak business investment could dampen\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate, and weak business investment could dampen\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate, and weak business investment could dampen\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects.\nThe US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: How\u2014and to what extent\u2014will macroeconomic shifts impact US banks in 2025? Key messages: Macroeconomic and geopolitical uncertainties should keep bank executives on their toes.Higher deposit costs will keep net interest income in check.Noninterest income could offer a bright spot for topline growth.Higher compensation expenses and technology investments should keep expenses elevated.Credit quality is expected to normalize but could edge higher in 2025.\nHow\u2014and to what extent\u2014will macroeconomic shifts impact US banks in 2025? Key messages: Macroeconomic and geopolitical uncertainties should keep bank executives on their toes.Higher deposit costs will keep net interest income in check.Noninterest income could offer a bright spot for topline growth.Higher compensation expenses and technology investments should keep expenses elevated.Credit quality is expected to normalize but could edge higher in 2025.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:47:33.878122",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.018781680000000002",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:47:33.901465",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'Whats the latest happening news in US economy and related to Banking sector '...",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0,
+ "type": "images",
+ "content": "selected_images",
+ "output": "[\"https://www.atlantafed.org/-/media/Images/news/conferences-and-events/conferences/2025/02/banking-outlook-conference/banner.png\", \"https://www.investopedia.com/thmb/ZJ6oPtO2c4wyQBCT403FEfqXY50=/1500x0/filters:no_upscale():max_bytes(150000):strip_icc()/FedMeeting-0d70f08c723e4f84a28dc3acc86009d0.jpg\", \"https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch1.png\", \"https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch2.png\", \"https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch3.png\", \"https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch4.png\", \"https://media.cnn.com/api/v1/images/stellar/prod/e26ddeda-68ff-4213-9d35-d8d4d161bc8a.jpg?q=h_1996,w_3000,x_0,y_0/w_1280\", \"https://media.cnn.com/api/v1/images/stellar/prod/230322171919-the-lead-richard-quest-00002603.png?c=16x9&q=w_1280,c_fill\", \"https://media.cnn.com/api/v1/images/stellar/prod/65daac3c-c7b8-4fbe-aa65-7b70b8a87edf.jpg?q=h_1067,w_1600,x_0,y_0/w_1280\", \"https://media.cnn.com/api/v1/images/stellar/prod/bc55cceb-a53b-497a-af43-ec9223279381.jpg?q=h_1067,w_1600,x_0,y_0/w_1280\"]",
+ "metadata": [
+ "https://www.atlantafed.org/-/media/Images/news/conferences-and-events/conferences/2025/02/banking-outlook-conference/banner.png",
+ "https://www.investopedia.com/thmb/ZJ6oPtO2c4wyQBCT403FEfqXY50=/1500x0/filters:no_upscale():max_bytes(150000):strip_icc()/FedMeeting-0d70f08c723e4f84a28dc3acc86009d0.jpg",
+ "https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch1.png",
+ "https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch2.png",
+ "https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch3.png",
+ "https://libertystreeteconomics.newyorkfed.org/wp-content/uploads/sites/2/2024/11/LSE_2024_fiva_crosignani_ch4.png",
+ "https://media.cnn.com/api/v1/images/stellar/prod/e26ddeda-68ff-4213-9d35-d8d4d161bc8a.jpg?q=h_1996,w_3000,x_0,y_0/w_1280",
+ "https://media.cnn.com/api/v1/images/stellar/prod/230322171919-the-lead-richard-quest-00002603.png?c=16x9&q=w_1280,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/prod/65daac3c-c7b8-4fbe-aa65-7b70b8a87edf.jpg?q=h_1067,w_1600,x_0,y_0/w_1280",
+ "https://media.cnn.com/api/v1/images/stellar/prod/bc55cceb-a53b-497a-af43-ec9223279381.jpg?q=h_1067,w_1600,x_0,y_0/w_1280"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/outputs/task_1738582091_Whats the latest happening news in US econom.docx b/outputs/task_1738582091_Whats the latest happening news in US econom.docx
new file mode 100644
index 0000000000000000000000000000000000000000..baa2b410d3fc2c8517efb704253690e06e6d9915
Binary files /dev/null and b/outputs/task_1738582091_Whats the latest happening news in US econom.docx differ
diff --git a/outputs/task_1738582091_Whats the latest happening news in US econom.md b/outputs/task_1738582091_Whats the latest happening news in US econom.md
new file mode 100644
index 0000000000000000000000000000000000000000..b720de4c9240f481e556bea107d8cea5ee6b4012
--- /dev/null
+++ b/outputs/task_1738582091_Whats the latest happening news in US econom.md
@@ -0,0 +1,44 @@
+## The State of the US Banking Sector and Economy in Early 2025: A Critical Analysis
+
+The US banking sector enters 2025 navigating a complex and uncertain landscape. While 2024 concluded with better-than-anticipated economic performance, the outlook for 2025 presents a mix of challenges and opportunities. This report analyzes the current state of the US economy and banking sector, drawing upon a range of reputable sources to provide a comprehensive overview.
+
+**Economic Outlook: A Soft Landing with Lingering Uncertainties**
+
+Deloitte projects a "soft landing" for the US economy in 2025, with GDP growth estimated at 1.5% in their baseline scenario ([Deloitte, 2025](https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html)). This deceleration from the estimated 2.7% GDP growth in 2024 signals a potential cooling of economic activity. Moderating consumer spending, a rising unemployment rate, and weak business investment are identified as contributing factors to this slowdown ([Deloitte, 2025](https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html)). While inflation appears to be subsiding and interest rates are dropping, these positive trends are tempered by subpar economic growth, ongoing geopolitical shocks, and regulatory uncertainty ([Deloitte, 2025](https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html)). This confluence of factors creates a challenging environment for bank CEOs, who face the difficult task of adapting to a low-growth, low-rate environment.
+
+**The Federal Reserve's Balancing Act**
+
+The Federal Reserve's monetary policy plays a crucial role in shaping the economic and banking landscape. Forbes anticipates interest rate cuts in 2025, albeit at a "relatively slow rate," with two cuts being the most likely scenario ([Moore, 2025](https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/)). The second half of the year presents greater uncertainty, as rising unemployment or disinflation could prompt more aggressive rate reductions. The Fed's actions are contingent upon several closely watched economic variables. Unemployment, currently at 4.2%, is expected to rise at a measured pace. Any abrupt increase could trigger more aggressive rate cuts ([Moore, 2025](https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/)). Inflation, while projected to remain above the Fed's 2% target, is not expected to accelerate significantly. A return to or below the 2% target could also lead to more substantial rate cuts ([Moore, 2025](https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/)). Investopedia highlights the uncertainty surrounding the Fed's plans, noting that officials are closely monitoring the potential impact of President Trump's policies, particularly his proposed tariffs, on the economy and inflation ([Investopedia, 2025](https://www.investopedia.com/what-s-ahead-for-the-fed-in-2025-8765271)). The potential for conflict between President Trump and Fed Chair Jerome Powell adds another layer of complexity to the situation ([Investopedia, 2025](https://www.investopedia.com/what-s-ahead-for-the-fed-in-2025-8765271)).
+
+**Banking Sector Trends: M&A, Regulation, and Technological Disruption**
+
+The return of Donald Trump to the White House is expected to significantly influence the banking sector. Banking Dive anticipates changes in regulation, an increase in mergers and acquisitions (M&A), and a potential boost to the profile of cryptocurrencies ([Banking Dive, 2025](https://www.bankingdive.com/news/2025-trump-cfpb-fed-fdic-merger-acquisition-capital-one-discover-crypto-occ-artificial-intelligence/736809/)). The banking sector experienced an M&A rebound in 2024, with six deals exceeding $1 billion ([Banking Dive, 2025](https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/)). The regulatory landscape is also poised for change, although analysts suggest that alterations to banking regulations and agency leadership may not be immediate ([Banking Dive, 2025](https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/)). Capital requirements, a key regulatory issue in 2024, are expected to undergo significant transformation in 2025 ([Banking Dive, 2025](https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/)). The unexpected backlash against environmental, social, and governance (ESG) issues in banking further complicates the outlook ([Banking Dive, 2025](https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/)).
+
+**Bank Performance: Mixed Signals**
+
+U.S. Bancorp's Q4 2024 earnings provide a glimpse into the performance of a major banking institution. The company reported an EPS of $1.07, exceeding analysts' estimates of $1.06 ([MarketBeat, 2025](https://www.marketbeat.com/stocks/NYSE/USB/earnings/)). Revenue for the quarter reached $7.01 billion, surpassing the consensus estimate of $7 billion ([MarketBeat, 2025](https://www.marketbeat.com/stocks/NYSE/USB/earnings/)). While these figures suggest positive performance, the broader context of the banking sector warrants caution. CNN Business reports on the challenges facing bank stocks, citing New York Community Bancorp's significant share price decline following a surprise loss and dividend cut ([Buchwald, 2024](https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html)). This incident highlights the vulnerability of banks to deteriorating credit quality and other economic headwinds.
+
+**Conclusion: Navigating a Complex Landscape**The US banking sector and economy face a complex and uncertain future in 2025. While the economy is expected to achieve a soft landing, the confluence of slowing growth, dropping interest rates, geopolitical uncertainties, and regulatory changes creates a challenging environment for banks. The Federal Reserve's monetary policy decisions will be critical in navigating this landscape. The potential for increased M&A activity, coupled with evolving regulations and technological disruption, further adds to the complexity. Banks will need to adapt strategically to these challenges to maintain profitability and stability in the years ahead. The performance of individual banks, while showing some positive signs, also underscores the potential for volatility and the need for careful risk management. Ongoing monitoring of economic indicators, regulatory developments, and geopolitical events will be crucial for understanding the evolving dynamics of the US banking sector and economy.
+
+
+**References**
+
+Banking Dive. (2025, January 8). 6 banking trends to watch in 2025. *Banking Dive*. [https://www.bankingdive.com/news/2025-trump-cfpb-fed-fdic-merger-acquisition-capital-one-discover-crypto-occ-artificial-intelligence/736809/](https://www.bankingdive.com/news/2025-trump-cfpb-fed-fdic-merger-acquisition-capital-one-discover-crypto-occ-artificial-intelligence/736809/)
+
+Banking Dive. (2025, January 31). What’s coming for the banking industry in 2025. *Banking Dive*. [https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/](https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/)
+
+Buchwald, E. (2024, February 2). What’s really going on with bank stocks. *CNN Business*. [https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html](https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html)
+
+CNN Business. (2023, March 24). *Live updates: Latest on global markets and banking crisis*. [https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html](https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html)
+
+Deloitte. (2025). *2025 banking industry outlook*. Deloitte Insights. [https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html](https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html)
+
+Financial Times. (2025, January 31). *US banks*. [https://www.ft.com/us-banks](https://www.ft.com/us-banks)
+
+Investopedia. (2025). *What's ahead for the Federal Reserve in 2025?*. [https://www.investopedia.com/what-s-ahead-for-the-fed-in-2025-8765271](https://www.investopedia.com/what-s-ahead-for-the-fed-in-2025-8765271)
+
+MarketBeat. (2025). *U.S. Bancorp (USB) earnings date and reports 2025*. [https://www.marketbeat.com/stocks/NYSE/USB/earnings/](https://www.marketbeat.com/stocks/NYSE/USB/earnings/)
+
+Moore, S. (2025, January 5). *Here’s the Fed’s 2025 meeting schedule and what to expect for interest rates*. Forbes. [https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/](https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/)
+
+TipRanks.com. (2025). *US Bancorp (USB) earnings dates, call summary & reports*. [https://www.tipranks.com/stocks/usb/earnings](https://www.tipranks.com/stocks/usb/earnings)
\ No newline at end of file
diff --git a/outputs/task_1738582091_Whats the latest happening news in US econom.pdf b/outputs/task_1738582091_Whats the latest happening news in US econom.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..5530e076fa709c9354093f969740e82ea3af5bd1
Binary files /dev/null and b/outputs/task_1738582091_Whats the latest happening news in US econom.pdf differ
diff --git a/outputs/task_1738582091_Whats the latest happening news in US economy and related to Banking sector.json b/outputs/task_1738582091_Whats the latest happening news in US economy and related to Banking sector.json
new file mode 100644
index 0000000000000000000000000000000000000000..607656f69d5b26aaa427eae440fc9ff61bf4d737
--- /dev/null
+++ b/outputs/task_1738582091_Whats the latest happening news in US economy and related to Banking sector.json
@@ -0,0 +1,784 @@
+{
+ "timestamp": "2025-02-03T16:58:11.129575",
+ "events": [
+ {
+ "timestamp": "2025-02-03T16:58:15.911183",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "starting_research",
+ "output": "\ud83d\udd0d Starting the research task for 'Whats the latest happening news in US economy and related to Banking sector '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:15.920497",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "agent_generated",
+ "output": "\ud83d\udcf0 News Analyst Agent",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:15.930539",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83c\udf10 Browsing the web to learn more about the task: Whats the latest happening news in US economy and related to Banking sector ...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:18.076072",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "planning_research",
+ "output": "\ud83e\udd14 Planning the research strategy and subtasks...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:20.028116",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subqueries",
+ "output": "\ud83d\uddc2\ufe0f I will conduct my research based on the following queries: ['US economy news February 2025 banking sector', 'latest developments US banking sector February 2025', 'Federal Reserve interest rate policy impact on banks 2025', 'US bank earnings reports Q1 2025', 'Whats the latest happening news in US economy and related to Banking sector ']...",
+ "metadata": [
+ "US economy news February 2025 banking sector",
+ "latest developments US banking sector February 2025",
+ "Federal Reserve interest rate policy impact on banks 2025",
+ "US bank earnings reports Q1 2025",
+ "Whats the latest happening news in US economy and related to Banking sector "
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:20.048030",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'US economy news February 2025 banking sector'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:20.057444",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'latest developments US banking sector February 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:20.066466",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Federal Reserve interest rate policy impact on banks 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:20.075550",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'US bank earnings reports Q1 2025'...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:20.085827",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "running_subquery_research",
+ "output": "\n\ud83d\udd0d Running research for 'Whats the latest happening news in US economy and related to Banking sector '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:22.550037",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.brooklinebank.com/2025/02/01/economic-news-february-2025/\n",
+ "metadata": "https://www.brooklinebank.com/2025/02/01/economic-news-february-2025/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:22.569493",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.atlantafed.org/news/conferences-and-events/conferences/2025/02/27/banking-outlook-conference\n",
+ "metadata": "https://www.atlantafed.org/news/conferences-and-events/conferences/2025/02/27/banking-outlook-conference"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:22.578163",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.usbank.com/financialiq/invest-your-money/market-perspectives/economic-forecast.html\n",
+ "metadata": "https://www.usbank.com/financialiq/invest-your-money/market-perspectives/economic-forecast.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:22.586315",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\n",
+ "metadata": "https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:22.595750",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.usbank.com/about-us-bank/company-blog/article-library/2025-outlook-strong-economy-inflation-policy-pivot-on-the-horizon.html\n",
+ "metadata": "https://www.usbank.com/about-us-bank/company-blog/article-library/2025-outlook-strong-economy-inflation-policy-pivot-on-the-horizon.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:22.605312",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:22.614384",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.289033",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.299359",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 1 new images from 1 total images",
+ "metadata": [
+ "https://www.atlantafed.org/-/media/Images/news/conferences-and-events/conferences/2025/02/banking-outlook-conference/banner.png"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.309863",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.320453",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: US economy news February 2025 banking sector...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.736901",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.acuitykp.com/market-guide/2025-banking-sector-outlook-cautious-optimism-amid-fast-changing-environment/\n",
+ "metadata": "https://www.acuitykp.com/market-guide/2025-banking-sector-outlook-cautious-optimism-amid-fast-changing-environment/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.866303",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.bankingdive.com/news/2025-trump-cfpb-fed-fdic-merger-acquisition-capital-one-discover-crypto-occ-artificial-intelligence/736809/\n",
+ "metadata": "https://www.bankingdive.com/news/2025-trump-cfpb-fed-fdic-merger-acquisition-capital-one-discover-crypto-occ-artificial-intelligence/736809/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.876310",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.forbes.com/sites/michaelabbott/2025/01/13/top-10-trends-for-banking-in-2025--the-future-is-back/\n",
+ "metadata": "https://www.forbes.com/sites/michaelabbott/2025/01/13/top-10-trends-for-banking-in-2025--the-future-is-back/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.886530",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://experianacademy.com/blog/2025/01/29/banking-and-financial-services-predictions-2025/\n",
+ "metadata": "https://experianacademy.com/blog/2025/01/29/banking-and-financial-services-predictions-2025/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.895351",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\n",
+ "metadata": "https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.905863",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:24.916324",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.145549",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.157633",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 19 total images",
+ "metadata": [
+ "https://experianacademy.com/wp-content/uploads/2025/01/GettyImages-485618716-e1738140763381.jpg?w=1050&h=340&crop=1",
+ "https://experianacademy.com/wp-content/uploads/2025/01/Cover-image.png?w=1024",
+ "https://experianacademy.com/wp-content/uploads/2025/01/Section-2-image.png?w=1024",
+ "https://experianacademy.com/wp-content/uploads/2025/01/section-4-image.png?w=1024"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.170904",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.181008",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: latest developments US banking sector February 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.382158",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.tipranks.com/stocks/usb/earnings\n",
+ "metadata": "https://www.tipranks.com/stocks/usb/earnings"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.393523",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.zacks.com/stock/research/USB/earnings-calendar\n",
+ "metadata": "https://www.zacks.com/stock/research/USB/earnings-calendar"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.403687",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.morningstar.com/stocks/us-bancorp-earnings-net-interest-income-flat-guided-positive-operating-leverage-2025\n",
+ "metadata": "https://www.morningstar.com/stocks/us-bancorp-earnings-net-interest-income-flat-guided-positive-operating-leverage-2025"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.412382",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.wallstreetzen.com/stocks/us/nyse/usb/earnings\n",
+ "metadata": "https://www.wallstreetzen.com/stocks/us/nyse/usb/earnings"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.423497",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.marketbeat.com/stocks/NYSE/USB/earnings/\n",
+ "metadata": "https://www.marketbeat.com/stocks/NYSE/USB/earnings/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.434443",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:27.443800",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.089913",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.100970",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 6 total images",
+ "metadata": [
+ "https://www.marketbeat.com/logos/articles/thumb_20241104115452_options-traders-bet-big-on-these-3-tech-stocks.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20241101152430_how-to-play-new-options-trading-with-bitcoin-etfs.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240718150215_how-to-execute-the-wheel-strategy-to-generate-opti.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240626075418_3-options-strategies-to-play-a-stocks-uptrend-if-b.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.112075",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.122764",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: US bank earnings reports Q1 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.291039",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.wsj.com/finance/banking\n",
+ "metadata": "https://www.wsj.com/finance/banking"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.302134",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\n",
+ "metadata": "https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.312613",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.ft.com/us-banks\n",
+ "metadata": "https://www.ft.com/us-banks"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.323060",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html\n",
+ "metadata": "https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.333240",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.business-standard.com/topic/us-banks\n",
+ "metadata": "https://www.business-standard.com/topic/us-banks"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.344051",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:31.353862",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.632207",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 3 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.643523",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 4 new images from 16 total images",
+ "metadata": [
+ "https://media.cnn.com/api/v1/images/stellar/prod/gettyimages-1970581575.jpg?c=16x9&q=h_833,w_1480,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/prod/gettyimages-1969097224.jpg?c=16x9&q=h_144,w_256,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/prod/london-housing028.jpg?c=16x9&q=h_144,w_256,c_fill",
+ "https://media.cnn.com/api/v1/images/stellar/prod/enten-20250203000145051.jpg?c=16x9&q=h_144,w_256,c_fill"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.653040",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.664162",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Whats the latest happening news in US economy and related to Banking sector ...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.810658",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.investopedia.com/what-s-ahead-for-the-fed-in-2025-8765271\n",
+ "metadata": "https://www.investopedia.com/what-s-ahead-for-the-fed-in-2025-8765271"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.822027",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.investopedia.com/interest-rates-outlook-2025-federal-reserve-mortgages-car-loans-credit-cards-8764416\n",
+ "metadata": "https://www.investopedia.com/interest-rates-outlook-2025-federal-reserve-mortgages-car-loans-credit-cards-8764416"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.832850",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/\n",
+ "metadata": "https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.843143",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://apnews.com/article/federal-reserve-interest-rates-loans-consumers-borrowing-f2cc94978bb7909de9fe49a3280473dd\n",
+ "metadata": "https://apnews.com/article/federal-reserve-interest-rates-loans-consumers-borrowing-f2cc94978bb7909de9fe49a3280473dd"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.853400",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "added_source_url",
+ "output": "\u2705 Added source url to research: https://www.investopedia.com/will-the-fed-cut-borrowing-costs-in-2025-8771153\n",
+ "metadata": "https://www.investopedia.com/will-the-fed-cut-borrowing-costs-in-2025-8771153"
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.863827",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "researching",
+ "output": "\ud83e\udd14 Researching for relevant information across multiple sources...\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:32.873871",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_urls",
+ "output": "\ud83c\udf10 Scraping content from 5 URLs...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:34.471367",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_content",
+ "output": "\ud83d\udcc4 Scraped 5 pages of content",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:34.482309",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_images",
+ "output": "\ud83d\uddbc\ufe0f Selected 3 new images from 6 total images",
+ "metadata": [
+ "https://www.investopedia.com/thmb/ZJ6oPtO2c4wyQBCT403FEfqXY50=/1500x0/filters:no_upscale():max_bytes(150000):strip_icc()/FedMeeting-0d70f08c723e4f84a28dc3acc86009d0.jpg",
+ "https://www.investopedia.com/thmb/sNK1O1rCqCSVIHZ1vHcvJ5znUcI=/1500x0/filters:no_upscale():max_bytes(150000):strip_icc()/GettyImages-2188256134-5d824e1c4d044068ad1b8156be430d10.jpg",
+ "https://www.investopedia.com/thmb/Jr12NDpg471vvPWKYO43HO5egPo=/1500x0/filters:no_upscale():max_bytes(150000):strip_icc()/GettyImages-2189982968-58952878f0554f4dbaa8b28c7a3c3a74.jpg"
+ ]
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:34.492640",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "scraping_complete",
+ "output": "\ud83c\udf10 Scraping complete",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:34.505904",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "fetching_query_content",
+ "output": "\ud83d\udcda Getting relevant content based on query: Federal Reserve interest rate policy impact on banks 2025...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:54.027977",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html\nTitle: What\u2019s really going on with bank stocks | CNN Business\nContent: Markets DOW S&P 500 NASDAQ Hot Stocks Fear & Greed Index ----- is driving the US market Latest Market News Trump\u2019s under-the-radar Alaska order has environmentalists on edge Stock markets slide around the world as Trump\u2019s new trade war rattles investor confidence Target was one of the most outspoken supporters of DEI. It\u2019s changed its tune Hot Stocks ----- is driving the US market Something isn't loading properly. Please check back later. Ad Feedback Ad Feedback Business / Economy What\u2019s really going on with bank stocks By Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024 Link Copied! The New York Community Bancorp shares plunged by nearly 50% over two days after reporting a surprise loss tied to deteriorating credit quality and a cut to its dividend. Bing Guan/Bloomberg/Getty Images New York CNN \u2014 On Wednesday, the Federal Reserve ditched a line it used in every meeting statement since three banks failed last spring, which said that the \u201cUS banking\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: discussed current conditions in the banking sector and noted that while some institutions have come under stress, the US banking system remains sound and resilient,\u201d the readout said. It\u2019s not clear which banks in particular were discussed but the meeting comes after days of turbulence in the share price of regional banks and a plunge for Germany\u2019s biggest bank, Deutsche Bank. Treasury said regulators discussed ongoing efforts at agencies to monitor financial developments and also heard a presentation from staff at the New York Federal Reserve Bank on \u201cmarket developments.\u201d Link Copied! Stocks end Friday and the week higher despite lingering banking fears From CNN's Krystal Hur People pass the front of the New York Stock Exchange on March 21. (Peter Morgan/AP) Stocks closed higher Friday, recovering from earlier losses brought about by a plunge in Deutsche Bank stock. Shares of the German bank fell 8.5% after a surge in its bond insurances prices spiked investors\u2019 fears about the\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: discussed current conditions in the banking sector and noted that while some institutions have come under stress, the US banking system remains sound and resilient,\u201d the readout said. It\u2019s not clear which banks in particular were discussed but the meeting comes after days of turbulence in the share price of regional banks and a plunge for Germany\u2019s biggest bank, Deutsche Bank. Treasury said regulators discussed ongoing efforts at agencies to monitor financial developments and also heard a presentation from staff at the New York Federal Reserve Bank on \u201cmarket developments.\u201d Link Copied! Stocks end Friday and the week higher despite lingering banking fears From CNN's Krystal Hur People pass the front of the New York Stock Exchange on March 21. (Peter Morgan/AP) Stocks closed higher Friday, recovering from earlier losses brought about by a plunge in Deutsche Bank stock. Shares of the German bank fell 8.5% after a surge in its bond insurances prices spiked investors\u2019 fears about the\n\nSource: https://www.ft.com/us-banks\nTitle: US banks\nContent: to the troubled $6tn commercial real estate market weighs heavily on regional and community banks Save Tuesday, 21 January, 2025 LexGoldman SachsBanks find new weapon in private markets fight: the org chart Premium contentGoldman Sachs is perhaps best placed to think more creatively about collaboration Save Tuesday, 21 January, 2025 JPMorgan Chase & CoUS banks in \u2018go-mode\u2019 under Trump, says JPMorgan executiveWall Street bets that new administration\u2019s lighter-touch regulatory regime will spur dealmaking Save Friday, 17 January, 2025 US equitiesUS stocks post best week since Donald Trump\u2019s election winEasing underlying US inflation pressures and strong bank earnings \u2018emboldened the bulls\u2019 on Wall Street Save Friday, 17 January, 2025 On Wall StreetCraig CobenHow the bonus season unfoldsThe past dramas of \u2018comp days\u2019 have given way to more sanitised procedures Save Friday, 17 January, 2025 Biggest US banks notch up $142bn in profits in blockbuster 2024Robust performance in trading and\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: Latest on global markets and banking crisis By Krystal Hur and Nicole Goodkind, CNN Business Updated 5:59 PM EDT, Fri March 24, 2023 Link Copied!\nLatest on global markets and banking crisis\nLatest on global markets and banking crisis\nBy Krystal Hur and Nicole Goodkind, CNN Business Updated 5:59 PM EDT, Fri March 24, 2023 Link Copied!\nBy Krystal Hur and Nicole Goodkind, CNN Business Updated 5:59 PM EDT, Fri March 24, 2023\nBy Krystal Hur and Nicole Goodkind, CNN Business Updated 5:59 PM EDT, Fri March 24, 2023\nBy Krystal Hur and Nicole Goodkind, CNN Business\nBy Krystal Hur and Nicole Goodkind, CNN Business\nUpdated 5:59 PM EDT, Fri March 24, 2023\nUpdated 5:59 PM EDT, Fri March 24, 2023\nVideo Ad Feedback Richard Quest explains what the Federal Reserve's latest rate hike decision means for consumers 01:21 - Source: CNN\nVideo Ad Feedback Richard Quest explains what the Federal Reserve's latest rate hike decision means for consumers 01:21 - Source: CNN\nVideo Ad Feedback\nVideo Ad Feedback\n\nSource: https://www.ft.com/us-banks\nTitle: US banks\nContent: US banks Add to myFT Friday, 31 January, 2025 Goldman SachsPutin allows Goldman Sachs to quit RussiaSale to Armenian investment fund comes almost three years after US investment bank pledged to leave Save Monday, 27 January, 2025 Citigroup IncCiti loses head of private banking unitIda Liu was one of few senior female executives below chief executive Jane Fraser Save Thursday, 23 January, 2025 JPMorgan Chase & CoJPMorgan lifts chief executive Dimon\u2019s pay by 8% to $39mnWall Street bank says increase reflects his \u2018stewardship of the firm\u2019 Save Thursday, 23 January, 2025 LexRegional US bank stocks are still in the doghouse Premium contentExposure to the troubled $6tn commercial real estate market weighs heavily on regional and community banks Save Tuesday, 21 January, 2025 LexGoldman SachsBanks find new weapon in private markets fight: the org chart Premium contentGoldman Sachs is perhaps best placed to think more creatively about collaboration Save Tuesday, 21 January, 2025 JPMorgan\n\nSource: https://www.ft.com/us-banks\nTitle: US banks\nContent: US banks Add to myFT Friday, 31 January, 2025 Goldman SachsPutin allows Goldman Sachs to quit RussiaSale to Armenian investment fund comes almost three years after US investment bank pledged to leave Save Monday, 27 January, 2025 Citigroup IncCiti loses head of private banking unitIda Liu was one of few senior female executives below chief executive Jane Fraser Save Thursday, 23 January, 2025 JPMorgan Chase & CoJPMorgan lifts chief executive Dimon\u2019s pay by 8% to $39mnWall Street bank says increase reflects his \u2018stewardship of the firm\u2019 Save Thursday, 23 January, 2025 LexRegional US bank stocks are still in the doghouse Premium contentExposure to the troubled $6tn commercial real estate market weighs heavily on regional and community banks Save Tuesday, 21 January, 2025 LexGoldman SachsBanks find new weapon in private markets fight: the org chart Premium contentGoldman Sachs is perhaps best placed to think more creatively about collaboration Save Tuesday, 21 January, 2025 JPMorgan\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: come under stress, the US banking system remains sound and resilient,\u201d the readout said. It\u2019s not clear which banks in particular were discussed but the meeting comes after days of turbulence in the share price of regional banks and a plunge for Germany\u2019s biggest bank, Deutsche Bank. Treasury said regulators discussed ongoing efforts at agencies to monitor financial developments and also heard a presentation from staff at the New York Federal Reserve Bank on \u201cmarket developments.\u201d Link Copied! Stocks end Friday and the week higher despite lingering banking fears From CNN's Krystal Hur People pass the front of the New York Stock Exchange on March 21. (Peter Morgan/AP) Stocks closed higher Friday, recovering from earlier losses brought about by a plunge in Deutsche Bank stock. Shares of the German bank fell 8.5% after a surge in its bond insurances prices spiked investors\u2019 fears about the state of the financial sector. All three major indexes rose to end the week. The Dow Jones\n\nSource: https://www.cnn.com/2024/02/02/economy/whats-going-on-with-bank-stocks/index.html\nTitle: What\u2019s really going on with bank stocks | CNN Business\nContent: Hot Stocks ----- is driving the US market\n----- is driving the US market\nSomething isn't loading properly. Please check back later.\nSomething isn't loading properly. Please check back later.\nBusiness / Economy\nWhat\u2019s really going on with bank stocks By Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024 Link Copied!\nWhat\u2019s really going on with bank stocks\nWhat\u2019s really going on with bank stocks\nBy Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024 Link Copied!\nBy Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024\nBy Elisabeth Buchwald, CNN 3 minute read Updated 8:15 AM EST, Fri February 2, 2024\nBy Elisabeth Buchwald, CNN\nBy Elisabeth Buchwald, CNN\n3 minute read Updated 8:15 AM EST, Fri February 2, 2024\n3 minute read\nUpdated 8:15 AM EST, Fri February 2, 2024\n\nSource: https://www.cnn.com/business/live-news/stock-market-bank-crisis-fed-rate-news-03-24-23/index.html\nTitle: Live updates: Latest on global markets and banking crisis | CNN Business\nContent: banks and a plunge for Germany\u2019s biggest bank, Deutsche Bank. Treasury said regulators discussed ongoing efforts at agencies to monitor financial developments and also heard a presentation from staff at the New York Federal Reserve Bank on \u201cmarket developments.\u201d Link Copied!\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:57.602427",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.marketbeat.com/stocks/NYSE/USB/earnings/\nTitle: \r\n\tU.S. Bancorp (USB) Earnings Date and Reports 2025\r\n\nContent: Conference Call Transcript\nEarnings Press Release\nEarnings Slide Deck\nUSB Upcoming EarningsU.S. Bancorp's next earnings date is estimated for Wednesday, April 16, 2025, based off prior year's reporting schedules.\nUSB Upcoming Earnings\nU.S. Bancorp's next earnings date is estimated for Wednesday, April 16, 2025, based off prior year's reporting schedules.\nGet U.S. Bancorp Earnings Alerts Want to stay updated on the latest earnings announcements and upcoming reports? Sign up for Earnings360's daily newsletter to receive timely earnings updates on U.S. Bancorp and other key companies, straight to your inbox. Enter your email to sign up for newsletter Sign Up\nGet U.S. Bancorp Earnings Alerts Want to stay updated on the latest earnings announcements and upcoming reports? Sign up for Earnings360's daily newsletter to receive timely earnings updates on U.S. Bancorp and other key companies, straight to your inbox. Enter your email to sign up for newsletter Sign Up\n\nSource: https://www.tipranks.com/stocks/usb/earnings\nTitle: US Bancorp (USB) Earnings Dates, Call Summary & Reports - TipRanks.com\nContent: a diluted earnings per share of $1.01, or $1.07 when adjusted for notable items, with net revenue reaching $7 billion for the quarter and $27.5 billion for the year. U.S. Bancorp highlighted a 190 basis point increase in positive operating leverage year-over-year on an adjusted basis, driven by effective balance sheet management and diversified fee business offerings. The CET1 capital ratio improved by 10 basis points to 10.6%, while the tangible book value per share increased by 10.4% to $24.63. For the first quarter of 2025, the company expects stable net interest income, excluding the impact of fewer days, and stable noninterest expenses around $4.2 billion. Full-year 2025 guidance anticipates total revenue growth of 3% to 5% on an adjusted basis, with positive operating leverage of greater than 200 basis points. The company remains focused on prudent expense management, asset repricing benefits, and modest capital distributions, including an initial $100 million in share\n\nSource: https://www.tipranks.com/stocks/usb/earnings\nTitle: US Bancorp (USB) Earnings Dates, Call Summary & Reports - TipRanks.com\nContent: a diluted earnings per share of $1.01, or $1.07 when adjusted for notable items, with net revenue reaching $7 billion for the quarter and $27.5 billion for the year. U.S. Bancorp highlighted a 190 basis point increase in positive operating leverage year-over-year on an adjusted basis, driven by effective balance sheet management and diversified fee business offerings. The CET1 capital ratio improved by 10 basis points to 10.6%, while the tangible book value per share increased by 10.4% to $24.63. For the first quarter of 2025, the company expects stable net interest income, excluding the impact of fewer days, and stable noninterest expenses around $4.2 billion. Full-year 2025 guidance anticipates total revenue growth of 3% to 5% on an adjusted basis, with positive operating leverage of greater than 200 basis points. The company remains focused on prudent expense management, asset repricing benefits, and modest capital distributions, including an initial $100 million in share\n\nSource: https://www.tipranks.com/stocks/usb/earnings\nTitle: US Bancorp (USB) Earnings Dates, Call Summary & Reports - TipRanks.com\nContent: a diluted earnings per share of $1.01, or $1.07 when adjusted for notable items, with net revenue reaching $7 billion for the quarter and $27.5 billion for the year. U.S. Bancorp highlighted a 190 basis point increase in positive operating leverage year-over-year on an adjusted basis, driven by effective balance sheet management and diversified fee business offerings. The CET1 capital ratio improved by 10 basis points to 10.6%, while the tangible book value per share increased by 10.4% to $24.63. For the first quarter of 2025, the company expects stable net interest income, excluding the impact of fewer days, and stable noninterest expenses around $4.2 billion. Full-year 2025 guidance anticipates total revenue growth of 3% to 5% on an adjusted basis, with positive operating leverage of greater than 200 basis points. The company remains focused on prudent expense management, asset repricing benefits, and modest capital distributions, including an initial $100 million in share\n\nSource: https://www.tipranks.com/stocks/usb/earnings\nTitle: US Bancorp (USB) Earnings Dates, Call Summary & Reports - TipRanks.com\nContent: Earnings Data Report DateApr 16, 2025TBA Not ConfirmedPeriod Ending2025 (Q1)Consensus EPS Forecast0.98Last Year\u00e2\u0080\u0099s EPS0.78Same Quarter Last YearAnalyst ConsensusModerate BuyBased on 19 Analysts Ratings---Earnings Call Summary Earnings Call Date:Oct 17, 2018|% Change Since: -6.13%|Next Earnings Date:Jul 18, 2018Earnings Call Sentiment|NeutralThe earnings call highlighted solid revenue growth, successful capital management, and improved operational efficiency. However, notable expense items, challenges in merchant acquiring, and uncertain loan growth present some concerns.Company GuidanceDuring the U.S. Bancorp Fourth Quarter 2024 Earnings Call, executives provided detailed guidance focusing on several key financial metrics. The company reported a diluted earnings per share of $1.01, or $1.07 when adjusted for notable items, with net revenue reaching $7 billion for the quarter and $27.5 billion for the year. U.S. Bancorp highlighted a 190 basis point increase in positive operating\n\nSource: https://www.marketbeat.com/stocks/NYSE/USB/earnings/\nTitle: \r\n\tU.S. Bancorp (USB) Earnings Date and Reports 2025\r\n\nContent: Earnings Stock AnalysisAnalyst ForecastsChartCompetitorsDividendEarningsFinancialsHeadlinesInsider TradesOptions ChainOwnershipSEC FilingsShort InterestSustainabilityTrends U.S. Bancorp Latest Earnings SummaryUpcoming Q1 Earnings DateApr. 16Before Market OpensEstimatedActual EPS (Jan. 16) $1.07 Beat By $0.01 Consensus EPS (Jan. 16) $1.06 U.S. Bancorp released Q4 2024 earnings on January 16, 2025, reporting an EPS of $1.07, which topped analysts' consensus estimates of $1.06 by $0.01. Quarterly revenue rose 3.7% year-over-year to $7.01 billion, above the consensus estimate of $7 billion. With a trailing EPS of $3.79 and a P/E Ratio of 12.62, U.S. Bancorp's earnings are expected to grow 9.38% next year, from $4.37 to $4.78 per share. Conference CallConference Call TranscriptEarnings Press ReleaseEarnings Slide DeckPowered by USB Upcoming EarningsU.S. Bancorp's next earnings date is estimated for Wednesday, April 16, 2025, based off prior year's reporting schedules. Get U.S. Bancorp\n\nSource: https://www.marketbeat.com/stocks/NYSE/USB/earnings/\nTitle: \r\n\tU.S. Bancorp (USB) Earnings Date and Reports 2025\r\n\nContent: Earnings Stock AnalysisAnalyst ForecastsChartCompetitorsDividendEarningsFinancialsHeadlinesInsider TradesOptions ChainOwnershipSEC FilingsShort InterestSustainabilityTrends U.S. Bancorp Latest Earnings SummaryUpcoming Q1 Earnings DateApr. 16Before Market OpensEstimatedActual EPS (Jan. 16) $1.07 Beat By $0.01 Consensus EPS (Jan. 16) $1.06 U.S. Bancorp released Q4 2024 earnings on January 16, 2025, reporting an EPS of $1.07, which topped analysts' consensus estimates of $1.06 by $0.01. Quarterly revenue rose 3.7% year-over-year to $7.01 billion, above the consensus estimate of $7 billion. With a trailing EPS of $3.79 and a P/E Ratio of 12.62, U.S. Bancorp's earnings are expected to grow 9.38% next year, from $4.37 to $4.78 per share. Conference CallConference Call TranscriptEarnings Press ReleaseEarnings Slide DeckPowered by USB Upcoming EarningsU.S. Bancorp's next earnings date is estimated for Wednesday, April 16, 2025, based off prior year's reporting schedules. Get U.S. Bancorp\n\nSource: https://www.marketbeat.com/stocks/NYSE/USB/earnings/\nTitle: \r\n\tU.S. Bancorp (USB) Earnings Date and Reports 2025\r\n\nContent: U.S. Bancorp issued an update on its FY 2025 earnings guidance on Thursday, January, 16th. The company issued revenue guidance of $28.4 billion-$29.0 billion, compared to the consensus revenue estimate of $28.7 billion.\nDid U.S. Bancorp beat their earnings estimates last quarter? In the previous quarter, U.S. Bancorp (NYSE:USB) reported $1.07 earnings per share (EPS) to beat the analysts' consensus estimate of $1.06 by $0.01. Learn more on analysts' earnings estimate vs. USB's actual earnings.\nDid U.S. Bancorp beat their earnings estimates last quarter?\nIn the previous quarter, U.S. Bancorp (NYSE:USB) reported $1.07 earnings per share (EPS) to beat the analysts' consensus estimate of $1.06 by $0.01. Learn more on analysts' earnings estimate vs. USB's actual earnings.\nIn the previous quarter, U.S. Bancorp (NYSE:USB) reported $1.07 earnings per share (EPS) to beat the analysts' consensus estimate of $1.06 by $0.01. Learn more on analysts' earnings estimate vs. USB's actual earnings.\n\nSource: https://www.marketbeat.com/stocks/NYSE/USB/earnings/\nTitle: \r\n\tU.S. Bancorp (USB) Earnings Date and Reports 2025\r\n\nContent: U.S. Bancorp Latest Earnings Summary\nU.S. Bancorp Latest Earnings Summary\nUpcoming Q1 Earnings DateApr. 16Before Market OpensEstimatedActual EPS (Jan. 16) $1.07 Beat By $0.01 Consensus EPS (Jan. 16) $1.06 U.S. Bancorp released Q4 2024 earnings on January 16, 2025, reporting an EPS of $1.07, which topped analysts' consensus estimates of $1.06 by $0.01. Quarterly revenue rose 3.7% year-over-year to $7.01 billion, above the consensus estimate of $7 billion. With a trailing EPS of $3.79 and a P/E Ratio of 12.62, U.S. Bancorp's earnings are expected to grow 9.38% next year, from $4.37 to $4.78 per share.\nUpcoming Q1 Earnings DateApr. 16Before Market OpensEstimatedActual EPS (Jan. 16) $1.07 Beat By $0.01 Consensus EPS (Jan. 16) $1.06\nUpcoming Q1 Earnings DateApr. 16Before Market OpensEstimatedActual EPS (Jan. 16) $1.07 Beat By $0.01 Consensus EPS (Jan. 16) $1.06\nUpcoming Q1 Earnings DateApr. 16Before Market OpensEstimated\nBefore Market OpensEstimated\nBefore Market Opens\n\nSource: https://www.marketbeat.com/stocks/NYSE/USB/earnings/\nTitle: \r\n\tU.S. Bancorp (USB) Earnings Date and Reports 2025\r\n\nContent: U.S. Bancorp Latest Earnings SummaryUpcoming Q1 Earnings DateApr. 16Before Market OpensEstimatedActual EPS (Jan. 16) $1.07 Beat By $0.01 Consensus EPS (Jan. 16) $1.06 U.S. Bancorp released Q4 2024 earnings on January 16, 2025, reporting an EPS of $1.07, which topped analysts' consensus estimates of $1.06 by $0.01. Quarterly revenue rose 3.7% year-over-year to $7.01 billion, above the consensus estimate of $7 billion. With a trailing EPS of $3.79 and a P/E Ratio of 12.62, U.S. Bancorp's earnings are expected to grow 9.38% next year, from $4.37 to $4.78 per share. Conference CallConference Call TranscriptEarnings Press ReleaseEarnings Slide DeckPowered by USB Upcoming EarningsU.S. Bancorp's next earnings date is estimated for Wednesday, April 16, 2025, based off prior year's reporting schedules. Get U.S. Bancorp Earnings Alerts Want to stay updated on the latest earnings announcements and upcoming reports? Sign up for Earnings360's daily newsletter to receive timely earnings updates on\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:57.747775",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\nTitle: What\u2019s coming for the banking industry in 2025 | Banking Dive\nContent: An article from What\u2019s coming for the banking industry in 2025 Banking Dive takes a look at trend areas \u2014 from M&A to regulation \u2014 to give its best guess on this year\u2019s developments. But as observers in ESG will tell it, change has been rapid and thorough. Published Jan. 31, 2025 By Banking Dive staff post share post print email President Donald Trump addresses the 2025 Republican Issues Conference at the Trump National Doral Miami on Jan. 27, 2025. Joe Raedle via Getty Images\nAn article from\nAn article from\nWhat\u2019s coming for the banking industry in 2025 Banking Dive takes a look at trend areas \u2014 from M&A to regulation \u2014 to give its best guess on this year\u2019s developments. But as observers in ESG will tell it, change has been rapid and thorough.\nWhat\u2019s coming for the banking industry in 2025\nBanking Dive takes a look at trend areas \u2014 from M&A to regulation \u2014 to give its best guess on this year\u2019s developments. But as observers in ESG will tell it, change has been rapid and thorough.\n\nSource: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\nTitle: What\u2019s coming for the banking industry in 2025 | Banking Dive\nContent: \u2013 from regulation to M&A to crypto. Read the full article \u2794 Banking sector girds for M&A uptick in 2025 By Rajashree Chakravarty \u2022 Jan. 31, 2025 Bank M&A rebounded in 2024, with six deals over $1 billion announced. The Fed approved the two largest before the Trump administration took office. But where to go from here? Read the full article \u2794 Ousting the CFPB\u2019s Chopra wasn\u2019t a \u2018day one\u2019 priority By Dan Ennis \u2022 Jan. 27, 2025 The Trump administration has issued dozens of executive orders, but change to banking regulations and agency leaders isn\u2019t going to be quick, analysts say. Read the full article \u2794 Filed Under: Commercial, Retail, Regulations & Policy, Technology, Risk, Fintech Banking Dive news delivered to your inbox Get the free daily newsletter read by industry experts Email: Select Newsletter: Daily Dive M-F Select Newsletter: Fintech Weekly Every Wednesday Select user consent: By signing up to receive our newsletter, you agree to our Terms of Use and Privacy Policy. You can\n\nSource: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\nTitle: What\u2019s coming for the banking industry in 2025 | Banking Dive\nContent: \u2013 from regulation to M&A to crypto. Read the full article \u2794 Banking sector girds for M&A uptick in 2025 By Rajashree Chakravarty \u2022 Jan. 31, 2025 Bank M&A rebounded in 2024, with six deals over $1 billion announced. The Fed approved the two largest before the Trump administration took office. But where to go from here? Read the full article \u2794 Ousting the CFPB\u2019s Chopra wasn\u2019t a \u2018day one\u2019 priority By Dan Ennis \u2022 Jan. 27, 2025 The Trump administration has issued dozens of executive orders, but change to banking regulations and agency leaders isn\u2019t going to be quick, analysts say. Read the full article \u2794 Filed Under: Commercial, Retail, Regulations & Policy, Technology, Risk, Fintech Banking Dive news delivered to your inbox Get the free daily newsletter read by industry experts Email: Select Newsletter: Daily Dive M-F Select Newsletter: Fintech Weekly Every Wednesday Select user consent: By signing up to receive our newsletter, you agree to our Terms of Use and Privacy Policy. You can\n\nSource: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\nTitle: What\u2019s coming for the banking industry in 2025 | Banking Dive\nContent: \u2013 from regulation to M&A to crypto. Read the full article \u2794 Banking sector girds for M&A uptick in 2025 By Rajashree Chakravarty \u2022 Jan. 31, 2025 Bank M&A rebounded in 2024, with six deals over $1 billion announced. The Fed approved the two largest before the Trump administration took office. But where to go from here? Read the full article \u2794 Ousting the CFPB\u2019s Chopra wasn\u2019t a \u2018day one\u2019 priority By Dan Ennis \u2022 Jan. 27, 2025 The Trump administration has issued dozens of executive orders, but change to banking regulations and agency leaders isn\u2019t going to be quick, analysts say. Read the full article \u2794 Filed Under: Commercial, Retail, Regulations & Policy, Technology, Risk, Fintech Banking Dive news delivered to your inbox Get the free daily newsletter read by industry experts Email: Select Newsletter: Daily Dive M-F Select Newsletter: Fintech Weekly Every Wednesday Select user consent: By signing up to receive our newsletter, you agree to our Terms of Use and Privacy Policy. You can\n\nSource: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\nTitle: What\u2019s coming for the banking industry in 2025 | Banking Dive\nContent: \u2013 from regulation to M&A to crypto. Read the full article \u2794 Banking sector girds for M&A uptick in 2025 By Rajashree Chakravarty \u2022 Jan. 31, 2025 Bank M&A rebounded in 2024, with six deals over $1 billion announced. The Fed approved the two largest before the Trump administration took office. But where to go from here? Read the full article \u2794 Ousting the CFPB\u2019s Chopra wasn\u2019t a \u2018day one\u2019 priority By Dan Ennis \u2022 Jan. 27, 2025 The Trump administration has issued dozens of executive orders, but change to banking regulations and agency leaders isn\u2019t going to be quick, analysts say. Read the full article \u2794 Filed Under: Commercial, Retail, Regulations & Policy, Technology, Risk, Fintech Banking Dive news delivered to your inbox Get the free daily newsletter read by industry experts Email: Select Newsletter: Daily Dive M-F Select Newsletter: Fintech Weekly Every Wednesday Select user consent: By signing up to receive our newsletter, you agree to our Terms of Use and Privacy Policy. You can\n\nSource: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\nTitle: What\u2019s coming for the banking industry in 2025 | Banking Dive\nContent: Deep Dive Opinion Library Events Press Releases Topics Sign up Search Sign up Search Commercial Retail Credit Unions Payments Regulations & Policy Technology Risk Fintech An article from What\u2019s coming for the banking industry in 2025 Banking Dive takes a look at trend areas \u2014 from M&A to regulation \u2014 to give its best guess on this year\u2019s developments. But as observers in ESG will tell it, change has been rapid and thorough. Published Jan. 31, 2025 By Banking Dive staff post share post print email President Donald Trump addresses the 2025 Republican Issues Conference at the Trump National Doral Miami on Jan. 27, 2025. Joe Raedle via Getty Images Donald Trump\u2019s return to the White House cannot be understated as a potential catalyst for change in banking. To say that capital requirements, the hot-button regulatory issue of 2024, will look vastly different by the end of 2025 than it did last year, would be a laughably easy prediction. Look at ESG. Few could have foreseen how swiftly the\n\nSource: https://www.bankingdive.com/news/2025-trump-cfpb-fed-fdic-merger-acquisition-capital-one-discover-crypto-occ-artificial-intelligence/736809/\nTitle: 6 banking trends to watch in 2025 | Banking Dive\nContent: Ever since November\u2019s presidential election cemented Donald Trump\u2019s return to the White House, speculation has been rife that the banking sector would see a downturn in regulation, an uptick in mergers and acquisitions, and a boost in the profile and credibility of cryptocurrencies. If these early days of 2025 seem like a wait-and-see period, there may be something to that. But observers have learned from Trump\u2019s first term that change can come quickly and furiously once the new administration takes hold.\nHere are a few areas where Banking Dive anticipates new developments in the upcoming year.\nHere are a few areas where Banking Dive anticipates new developments in the upcoming year.\nHere are a few areas where Banking Dive anticipates new developments in the upcoming year.\nHere are a few areas where Banking Dive anticipates new developments in the upcoming year.\nHere are a few areas where Banking Dive anticipates new developments in the upcoming year.\n\nSource: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\nTitle: What\u2019s coming for the banking industry in 2025 | Banking Dive\nContent: Caitlin Mullen and Gabrielle Saulsbery \u2022 Jan. 8, 2025 Donald Trump\u2019s return to the White House is expected to make an impression on a number of facets in banking \u2013 from regulation to M&A to crypto. Read the full article \u2794 Banking sector girds for M&A uptick in 2025 By Rajashree Chakravarty \u2022 Jan. 31, 2025 Bank M&A rebounded in 2024, with six deals over $1 billion announced. The Fed approved the two largest before the Trump administration took office. But where to go from here? Read the full article \u2794 Ousting the CFPB\u2019s Chopra wasn\u2019t a \u2018day one\u2019 priority By Dan Ennis \u2022 Jan. 27, 2025 The Trump administration has issued dozens of executive orders, but change to banking regulations and agency leaders isn\u2019t going to be quick, analysts say. Read the full article \u2794 Filed Under: Commercial, Retail, Regulations & Policy, Technology, Risk, Fintech Banking Dive news delivered to your inbox Get the free daily newsletter read by industry experts Email: Select Newsletter: Daily Dive M-F Select\n\nSource: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\nTitle: What\u2019s coming for the banking industry in 2025 | Banking Dive\nContent: An article from What\u2019s coming for the banking industry in 2025 Banking Dive takes a look at trend areas \u2014 from M&A to regulation \u2014 to give its best guess on this year\u2019s developments. But as observers in ESG will tell it, change has been rapid and thorough. Published Jan. 31, 2025 By Banking Dive staff post share post print email President Donald Trump addresses the 2025 Republican Issues Conference at the Trump National Doral Miami on Jan. 27, 2025. Joe Raedle via Getty Images Donald Trump\u2019s return to the White House cannot be understated as a potential catalyst for change in banking. To say that capital requirements, the hot-button regulatory issue of 2024, will look vastly different by the end of 2025 than it did last year, would be a laughably easy prediction. Look at ESG. Few could have foreseen how swiftly the blowback against environmental, social and governance issues in banking would take hold \u2013 especially if the U.S. presidential election had a different result. But between\n\nSource: https://www.bankingdive.com/news/trump-bank-regulation-cfpb-chopra-fdic-hill-mergers-acquisitions-capital-one-discover-ai-crypto-aml/738920/\nTitle: What\u2019s coming for the banking industry in 2025 | Banking Dive\nContent: An article from What\u2019s coming for the banking industry in 2025 Banking Dive takes a look at trend areas \u2014 from M&A to regulation \u2014 to give its best guess on this year\u2019s developments. But as observers in ESG will tell it, change has been rapid and thorough. Published Jan. 31, 2025 By Banking Dive staff post share post print email President Donald Trump addresses the 2025 Republican Issues Conference at the Trump National Doral Miami on Jan. 27, 2025. Joe Raedle via Getty Images Donald Trump\u2019s return to the White House cannot be understated as a potential catalyst for change in banking. To say that capital requirements, the hot-button regulatory issue of 2024, will look vastly different by the end of 2025 than it did last year, would be a laughably easy prediction. Look at ESG. Few could have foreseen how swiftly the blowback against environmental, social and governance issues in banking would take hold \u2013 especially if the U.S. presidential election had a different result. But between\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:58:58.328385",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/\nTitle: Here\u2019s The Fed\u2019s 2025 Meeting Schedule And What To Expect For Interest Rates\nContent: What To Expect From The Fed For 2025 The year 2025 is expected to see interest rate cuts from the FOMC, but at a relatively slow rate, with two cuts the most likely scenario. There\u2019s more uncertainty in the second half of the year when rising unemployment or disinflation could prompt Fed officials to reduce rates more than currently estimated.\n\nSource: https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/\nTitle: Here\u2019s The Fed\u2019s 2025 Meeting Schedule And What To Expect For Interest Rates\nContent: What To Expect From The Fed For 2025 The year 2025 is expected to see interest rate cuts from the FOMC, but at a relatively slow rate, with two cuts the most likely scenario. There\u2019s more uncertainty in the second half of the year when rising unemployment or disinflation could prompt Fed officials to reduce rates more than currently estimated. Follow me on Twitter or LinkedIn. Check out my website or some of my other work here. Simon MooreFollowingEditorial StandardsForbes Accolades\n\nSource: https://www.investopedia.com/what-s-ahead-for-the-fed-in-2025-8765271\nTitle: What's Ahead For The Federal Reserve In 2025?\nContent: Close Key Takeaways The Federal Reserve's plans for interest rate cuts in 2025 are up in the air as officials wait to see what policy President-elect Donald Trump will impose and what effect it has on the economy.In particular, Trump's plans to impose tariffs have raised eyebrows at the central bank because they could push up inflation.The year ahead could bring conflict between Trump and Fed Chair Jerome Powell, who has resisted Trump's suggestion that the president should have a say in monetary policy decisions. No one knows quite what to expect from the Federal Reserve in 2025, least of all officials at the Fed itself.After cutting interest rates three times in as many meetings, the Fed is entering a new phase in its fight against inflation as it goes into 2025. The Fed is waiting to see what will happen with inflation, whether incoming President Donald Trump will impose heavy tariffs, and what effect those tariffs will have on the economy. That's not to mention changes to tax\n\nSource: https://www.investopedia.com/what-s-ahead-for-the-fed-in-2025-8765271\nTitle: What's Ahead For The Federal Reserve In 2025?\nContent: Key Takeaways The Federal Reserve's plans for interest rate cuts in 2025 are up in the air as officials wait to see what policy President-elect Donald Trump will impose and what effect it has on the economy.In particular, Trump's plans to impose tariffs have raised eyebrows at the central bank because they could push up inflation.The year ahead could bring conflict between Trump and Fed Chair Jerome Powell, who has resisted Trump's suggestion that the president should have a say in monetary policy decisions. No one knows quite what to expect from the Federal Reserve in 2025, least of all officials at the Fed itself.After cutting interest rates three times in as many meetings, the Fed is entering a new phase in its fight against inflation as it goes into 2025. The Fed is waiting to see what will happen with inflation, whether incoming President Donald Trump will impose heavy tariffs, and what effect those tariffs will have on the economy. That's not to mention changes to tax policy,\n\nSource: https://www.investopedia.com/what-s-ahead-for-the-fed-in-2025-8765271\nTitle: What's Ahead For The Federal Reserve In 2025?\nContent: Key Takeaways The Federal Reserve's plans for interest rate cuts in 2025 are up in the air as officials wait to see what policy President-elect Donald Trump will impose and what effect it has on the economy.In particular, Trump's plans to impose tariffs have raised eyebrows at the central bank because they could push up inflation.The year ahead could bring conflict between Trump and Fed Chair Jerome Powell, who has resisted Trump's suggestion that the president should have a say in monetary policy decisions. No one knows quite what to expect from the Federal Reserve in 2025, least of all officials at the Fed itself.After cutting interest rates three times in as many meetings, the Fed is entering a new phase in its fight against inflation as it goes into 2025. The Fed is waiting to see what will happen with inflation, whether incoming President Donald Trump will impose heavy tariffs, and what effect those tariffs will have on the economy. That's not to mention changes to tax policy,\n\nSource: https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/\nTitle: Here\u2019s The Fed\u2019s 2025 Meeting Schedule And What To Expect For Interest Rates\nContent: However, perhaps the most closely watched economic variable in 2025 will be unemployment. As of November 2024, the unemployment rate stood at 4.2%. Policymakers don\u2019t expect it to move up much more in 2025, and they anticipate any increase in unemployment to be at a relatively measured pace. An abrupt increase in employment could lead to more aggressive interest rate cuts. Inflation Inflation will also be closely watched, with current expectations that it will remain above the FOMC\u2019s 2% annual inflation goal for 2025 but not by much. If inflation were to accelerate materially, then that would be a concern for the FOMC. However, that\u2019s not currently in most forecasts, with even the most hawkish projections showing interest rates being held steady in 2025 rather than increasing. However, if inflation did return to 2% or below that level, then we could see more than two interest rate cuts. What To Expect From The Fed For 2025 The year 2025 is expected to see interest rate cuts from the\n\nSource: https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/\nTitle: Here\u2019s The Fed\u2019s 2025 Meeting Schedule And What To Expect For Interest Rates\nContent: However, perhaps the most closely watched economic variable in 2025 will be unemployment. As of November 2024, the unemployment rate stood at 4.2%. Policymakers don\u2019t expect it to move up much more in 2025, and they anticipate any increase in unemployment to be at a relatively measured pace. An abrupt increase in employment could lead to more aggressive interest rate cuts. Inflation Inflation will also be closely watched, with current expectations that it will remain above the FOMC\u2019s 2% annual inflation goal for 2025 but not by much. If inflation were to accelerate materially, then that would be a concern for the FOMC. However, that\u2019s not currently in most forecasts, with even the most hawkish projections showing interest rates being held steady in 2025 rather than increasing. However, if inflation did return to 2% or below that level, then we could see more than two interest rate cuts. What To Expect From The Fed For 2025 The year 2025 is expected to see interest rate cuts from the\n\nSource: https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/\nTitle: Here\u2019s The Fed\u2019s 2025 Meeting Schedule And What To Expect For Interest Rates\nContent: Inflation will also be closely watched, with current expectations that it will remain above the FOMC\u2019s 2% annual inflation goal for 2025 but not by much. If inflation were to accelerate materially, then that would be a concern for the FOMC.\nHowever, that\u2019s not currently in most forecasts, with even the most hawkish projections showing interest rates being held steady in 2025 rather than increasing. However, if inflation did return to 2% or below that level, then we could see more than two interest rate cuts.\nWhat To Expect From The Fed For 2025\nThe year 2025 is expected to see interest rate cuts from the FOMC, but at a relatively slow rate, with two cuts the most likely scenario. There\u2019s more uncertainty in the second half of the year when rising unemployment or disinflation could prompt Fed officials to reduce rates more than currently estimated.\nFollow me on Twitter or LinkedIn. Check out my website or some of my other work here.\nFollow me on\nEditorial StandardsForbes Accolades\n\nSource: https://www.forbes.com/sites/simonmoore/2025/01/05/heres-the-feds-2025-meeting-schedule-and-what-to-expect-for-interest-rates/\nTitle: Here\u2019s The Fed\u2019s 2025 Meeting Schedule And What To Expect For Interest Rates\nContent: with even the most hawkish projections showing interest rates being held steady in 2025 rather than increasing. However, if inflation did return to 2% or below that level, then we could see more than two interest rate cuts. What To Expect From The Fed For 2025 The year 2025 is expected to see interest rate cuts from the FOMC, but at a relatively slow rate, with two cuts the most likely scenario. There\u2019s more uncertainty in the second half of the year when rising unemployment or disinflation could prompt Fed officials to reduce rates more than currently estimated. Follow me on Twitter or LinkedIn. Check out my website or some of my other work here. Simon MooreFollowingEditorial StandardsForbes AccoladesJoin The ConversationComments One Community. Many Voices. Create a free account to share your thoughts. Read our community guidelines here.Forbes Community GuidelinesOur community is about connecting people through open and thoughtful conversations. We want our readers to share their\n\nSource: https://apnews.com/article/federal-reserve-interest-rates-loans-consumers-borrowing-f2cc94978bb7909de9fe49a3280473dd\nTitle: What Federal Reserve rate cuts in 2025 could mean for you | AP News\nContent: Business The Fed expects to cut rates more slowly in 2025. What that could mean for mortgages, debt and more The Fed expects to cut rates more slowly in 2025. What that could mean for mortgages, debt and more 1 of 2 | The Federal Reserve cut its key interest rate Wednesday by a quarter-point \u2014 its third cut this year \u2014 but also signaled that it expects to reduce rates more slowly next year than it previously envisioned, largely because of still-elevated inflation. Read More 2 of 2 | The seal of the Federal Reserve Board is seen at the building in Washington, Dec. 8, 2024. (AP Photo/Jose Luis Magana, File) Read More By CORA LEWIS Share Share Copy Link copied Email Facebook X Reddit LinkedIn Pinterest Flipboard Print NEW YORK (AP) \u2014 The Federal Reserve\u2019s third interest rate cut of the year will likely have consequences for debt, savings, auto loans, mortgages and other forms of borrowing by consumers and businesses.But with inflation pressures still elevated and with concern that\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:59:47.344388",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "subquery_context_window",
+ "output": "\ud83d\udcc3 Source: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate,\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate,\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: industry outlooks Read more from the Deloitte Center for Financial Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: investments should keep expenses elevated.Credit quality is expected to normalize but could edge higher in 2025. Financial services industry outlooks Read more from the Deloitte Center for Financial Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully.\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: investments should keep expenses elevated.Credit quality is expected to normalize but could edge higher in 2025. Financial services industry outlooks Read more from the Deloitte Center for Financial Services' 2025 industry outlooks collection Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully.\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate, and weak business investment could dampen\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate, and weak business investment could dampen\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects. The US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2 However, in 2025, economic growth is expected to decelerate and interest rates to drop meaningfully. Deloitte\u2019s latest United States economic forecast anticipates a soft landing, with US GDP likely to grow at 1.5% in its baseline scenario.3 Moderating consumer spending, a rising unemployment rate, and weak business investment could dampen\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: Bank executives will be welcoming 2025 with mixed emotions, unsure how the year will unfold and reshape banks\u2019 fortunes. While inflationary pressures have subsided and interest rates are dropping, subpar economic growth, continuing geopolitical shocks, and regulatory uncertainty will likely give bank CEOs anxiety. Adapting to a low-growth, low-rate environment will be a challenge. But many will be happy to close the chapter on 2024, a year that was remarkable in many respects.\nThe US economy will likely have performed better than expected in 2024, with annual GDP growth estimated to end at 2.7%,1 higher than forecast at the beginning of the year.2\n\nSource: https://www2.deloitte.com/us/en/insights/industry/financial-services/financial-services-industry-outlooks/banking-industry-outlook.html\nTitle: 2025 banking industry outlook | Deloitte Insights\nContent: How\u2014and to what extent\u2014will macroeconomic shifts impact US banks in 2025? Key messages: Macroeconomic and geopolitical uncertainties should keep bank executives on their toes.Higher deposit costs will keep net interest income in check.Noninterest income could offer a bright spot for topline growth.Higher compensation expenses and technology investments should keep expenses elevated.Credit quality is expected to normalize but could edge higher in 2025.\nHow\u2014and to what extent\u2014will macroeconomic shifts impact US banks in 2025? Key messages: Macroeconomic and geopolitical uncertainties should keep bank executives on their toes.Higher deposit costs will keep net interest income in check.Noninterest income could offer a bright spot for topline growth.Higher compensation expenses and technology investments should keep expenses elevated.Credit quality is expected to normalize but could edge higher in 2025.\n",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:59:47.359899",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "research_step_finalized",
+ "output": "Finalized research step.\n\ud83d\udcb8 Total Research Costs: $0.024176760000000002",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T16:59:47.384452",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "writing_report",
+ "output": "\u270d\ufe0f Writing report for 'Whats the latest happening news in US economy and related to Banking sector '...",
+ "metadata": null
+ }
+ },
+ {
+ "timestamp": "2025-02-03T17:00:54.780760",
+ "type": "event",
+ "data": {
+ "type": "logs",
+ "content": "report_written",
+ "output": "\ud83d\udcdd Report written for 'Whats the latest happening news in US economy and related to Banking sector '",
+ "metadata": null
+ }
+ }
+ ],
+ "content": {
+ "query": "",
+ "sources": [],
+ "context": [],
+ "report": "",
+ "costs": 0.0,
+ "type": "report",
+ "content": "selected_images",
+ "output": "USB) earnings dates, call summary & reports*. [https://www.tipranks.com/stocks/usb/earnings](https://www.tipranks.com/stocks/usb/earnings)",
+ "metadata": [
+ "https://www.atlantafed.org/-/media/Images/news/conferences-and-events/conferences/2025/02/banking-outlook-conference/banner.png",
+ "https://experianacademy.com/wp-content/uploads/2025/01/GettyImages-485618716-e1738140763381.jpg?w=1050&h=340&crop=1",
+ "https://experianacademy.com/wp-content/uploads/2025/01/Cover-image.png?w=1024",
+ "https://experianacademy.com/wp-content/uploads/2025/01/Section-2-image.png?w=1024",
+ "https://experianacademy.com/wp-content/uploads/2025/01/section-4-image.png?w=1024",
+ "https://www.marketbeat.com/logos/articles/thumb_20241104115452_options-traders-bet-big-on-these-3-tech-stocks.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20241101152430_how-to-play-new-options-trading-with-bitcoin-etfs.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240718150215_how-to-execute-the-wheel-strategy-to-generate-opti.jpg",
+ "https://www.marketbeat.com/logos/articles/thumb_20240626075418_3-options-strategies-to-play-a-stocks-uptrend-if-b.jpg",
+ "https://media.cnn.com/api/v1/images/stellar/prod/gettyimages-1970581575.jpg?c=16x9&q=h_833,w_1480,c_fill"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/poetry.toml b/poetry.toml
new file mode 100644
index 0000000000000000000000000000000000000000..d8e0ae09cbe5eecc3c51303631b9703bb91212cb
--- /dev/null
+++ b/poetry.toml
@@ -0,0 +1,2 @@
+[virtualenvs]
+in-project = true
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..62d726bb545f1c815ba91da5ca8655075c185a71
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,55 @@
+[tool.poetry]
+name = "gpt-researcher"
+version = "0.8.5"
+description = "GPT Researcher is an autonomous agent designed for comprehensive online research on a variety of tasks."
+authors = ["Assaf Elovic "]
+license = "MIT"
+readme = "README.md"
+
+[tool.poetry.dependencies]
+python = ">=3.10,<3.12"
+beautifulsoup4 = ">=4.12.2"
+colorama = ">=0.4.6"
+duckduckgo_search = ">=4.1.1"
+md2pdf = ">=1.0.1"
+openai = ">=1.3.3"
+python-dotenv = ">=1.0.0"
+pyyaml = ">=6.0.1"
+uvicorn = ">=0.24.0.post1"
+pydantic = ">=2.5.1"
+fastapi = ">=0.104.1"
+python-multipart = ">=0.0.6"
+markdown = ">=3.5.1"
+langchain = "^0.2"
+langgraph = ">=0.0.29,<0.3"
+langchain_community = "^0.2"
+langchain-openai = "^0.1"
+tavily-python = ">=0.2.8"
+permchain = ">=0.0.6"
+arxiv = ">=2.0.0"
+PyMuPDF = ">=1.23.6"
+requests = ">=2.31.0"
+jinja2 = ">=3.1.2"
+aiofiles = ">=23.2.1"
+SQLAlchemy = ">=2.0.28"
+mistune = "^3.0.2"
+htmldocx = "^0.0.6"
+python-docx = "^1.1.0"
+lxml = { version = ">=4.9.2", extras = ["html_clean"] }
+unstructured = ">=0.13,<0.16"
+tiktoken = ">=0.7.0"
+json-repair = "^0.29.8"
+json5 = "^0.9.25"
+loguru = "^0.7.2"
+websockets = "^13.1"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.pytest.ini_options]
+asyncio_mode = "strict"
+addopts = "-v"
+testpaths = ["tests"]
+python_files = "test_*.py"
+asyncio_fixture_loop_scope = "function"
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c4eb0dc7f4b487b7c60f098c04f815158e0f7026
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,36 @@
+# dependencies
+beautifulsoup4
+colorama
+md2pdf
+python-dotenv
+pyyaml
+uvicorn
+pydantic
+fastapi
+python-multipart
+markdown
+langchain
+langchain_community
+langchain-openai
+langchain-ollama
+langgraph
+tiktoken
+gpt-researcher
+arxiv
+PyMuPDF
+requests
+jinja2
+aiofiles
+mistune
+python-docx
+htmldocx
+lxml_html_clean
+websockets
+unstructured
+json_repair
+json5
+loguru
+
+# uncomment for testing
+# pytest
+# pytest-asyncio
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcab36ed56560074ae8afb17a08f474ba99b54f9
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,46 @@
+from setuptools import find_packages, setup
+
+LATEST_VERSION = "0.10.10"
+
+exclude_packages = [
+ "selenium",
+ "webdriver",
+ "fastapi",
+ "fastapi.*",
+ "uvicorn",
+ "jinja2",
+ "gpt-researcher",
+ "langgraph"
+]
+
+with open(r"README.md", "r", encoding="utf-8") as f:
+ long_description = f.read()
+
+with open("requirements.txt", "r") as f:
+ reqs = [line.strip() for line in f if not any(pkg in line for pkg in exclude_packages)]
+
+setup(
+ name="gpt-researcher",
+ version=LATEST_VERSION,
+ description="GPT Researcher is an autonomous agent designed for comprehensive web research on any task",
+ package_dir={'gpt_researcher': 'gpt_researcher'},
+ packages=find_packages(exclude=exclude_packages),
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ url="https://github.com/assafelovic/gpt-researcher",
+ author="Assaf Elovic",
+ author_email="assaf.elovic@gmail.com",
+ license="MIT",
+ classifiers=[
+ "License :: OSI Approved :: MIT License",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Education",
+ "Intended Audience :: Science/Research",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ ],
+ install_requires=reqs,
+
+
+)
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/docs/doc.pdf b/tests/docs/doc.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..c448a903a90bdd82fd43f7bb036028e369191070
--- /dev/null
+++ b/tests/docs/doc.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bdfaa68d8984f0dc02beaca527b76f207d99b666d31d1da728ee0728182df697
+size 2215244
diff --git a/tests/documents-report-source.py b/tests/documents-report-source.py
new file mode 100644
index 0000000000000000000000000000000000000000..f29244857ed07170cd2400fde51877d1b038e957
--- /dev/null
+++ b/tests/documents-report-source.py
@@ -0,0 +1,54 @@
+import os
+import asyncio
+import pytest
+# Ensure this path is correct
+from gpt_researcher import GPTResearcher
+from dotenv import load_dotenv
+load_dotenv()
+
+# Define the report types to test
+report_types = [
+ "research_report",
+ "custom_report",
+ "subtopic_report",
+ "summary_report",
+ "detailed_report",
+ "quick_report"
+]
+
+# Define a common query and sources for testing
+query = "What can you tell me about myself based on my documents?"
+
+# Define the output directory
+output_dir = "./outputs"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("report_type", report_types)
+async def test_gpt_researcher(report_type):
+ # Ensure the output directory exists
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+
+ # Create an instance of GPTResearcher with report_source set to "documents"
+ researcher = GPTResearcher(
+ query=query, report_type=report_type, report_source="documents")
+
+ # Conduct research and write the report
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+
+ # Define the expected output filenames
+ pdf_filename = os.path.join(output_dir, f"{report_type}.pdf")
+ docx_filename = os.path.join(output_dir, f"{report_type}.docx")
+
+ # Check if the PDF and DOCX files are created
+ # assert os.path.exists(pdf_filename), f"PDF file not found for report type: {report_type}"
+ # assert os.path.exists(docx_filename), f"DOCX file not found for report type: {report_type}"
+
+ # Clean up the generated files (optional)
+ # os.remove(pdf_filename)
+ # os.remove(docx_filename)
+
+if __name__ == "__main__":
+ pytest.main()
diff --git a/tests/gptr-logs-handler.py b/tests/gptr-logs-handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad9351a47ce01df6f01d59dfddf09ed0d7f1f35d
--- /dev/null
+++ b/tests/gptr-logs-handler.py
@@ -0,0 +1,34 @@
+import logging
+from typing import List, Dict, Any
+import asyncio
+from gpt_researcher import GPTResearcher
+from src.logs_handler import CustomLogsHandler # Update import
+
+async def run() -> None:
+ """Run the research process and generate a report."""
+ query = "What happened in the latest burning man floods?"
+ report_type = "research_report"
+ report_source = "online"
+ tone = "informative"
+ config_path = None
+
+ custom_logs_handler = CustomLogsHandler(query=query) # Pass query parameter
+
+ researcher = GPTResearcher(
+ query=query,
+ report_type=report_type,
+ report_source=report_source,
+ tone=tone,
+ config_path=config_path,
+ websocket=custom_logs_handler
+ )
+
+ await researcher.conduct_research() # Conduct the research
+ report = await researcher.write_report() # Write the research report
+ logging.info("Report generated successfully.") # Log report generation
+
+ return report
+
+# Run the asynchronous function using asyncio
+if __name__ == "__main__":
+ asyncio.run(run())
diff --git a/tests/report-types.py b/tests/report-types.py
new file mode 100644
index 0000000000000000000000000000000000000000..05707ecd53bd307d217cd97bd1b282bf5fa1827d
--- /dev/null
+++ b/tests/report-types.py
@@ -0,0 +1,49 @@
+import os
+import asyncio
+import pytest
+from gpt_researcher.agent import GPTResearcher
+from src.logs_handler import CustomLogsHandler # Update import
+from typing import List, Dict, Any
+
+# Define the report types to test
+report_types = [
+ "research_report",
+ "subtopic_report"
+]
+
+# Define a common query and sources for testing
+query = "What are the latest advancements in AI?"
+# sources = ["https://en.wikipedia.org/wiki/Artificial_intelligence", "https://www.ibm.com/watson/ai"]
+
+# Define the output directory
+output_dir = "./outputs"
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("report_type", report_types)
+async def test_gpt_researcher(report_type):
+ # Ensure the output directory exists
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+
+ custom_logs_handler = CustomLogsHandler(query=query)
+ # Create an instance of GPTResearcher
+ researcher = GPTResearcher(query=query, report_type=report_type, websocket=custom_logs_handler)
+
+ # Conduct research and write the report
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+
+ # Define the expected output filenames
+ pdf_filename = os.path.join(output_dir, f"{report_type}.pdf")
+ docx_filename = os.path.join(output_dir, f"{report_type}.docx")
+
+ # Check if the PDF and DOCX files are created
+ # assert os.path.exists(pdf_filename), f"PDF file not found for report type: {report_type}"
+ # assert os.path.exists(docx_filename), f"DOCX file not found for report type: {report_type}"
+
+ # Clean up the generated files (optional)
+ # os.remove(pdf_filename)
+ # os.remove(docx_filename)
+
+if __name__ == "__main__":
+ pytest.main()
\ No newline at end of file
diff --git a/tests/research_test.py b/tests/research_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fd7de314ec54d2a519d30f4d43887a8042a0411
--- /dev/null
+++ b/tests/research_test.py
@@ -0,0 +1,110 @@
+"""
+Hi! The following test cases are for the new parameter `complement_source_urls` and fix on the functional error with `source_urls` in GPTResearcher class.
+
+The source_urls parameter was resetting each time in conduct_research function causing gptr to forget the given links. Now, that has been fixed and a new parameter is introduced.
+This parameter named will `complement_source_urls` allow GPTR to research on sources other than the provided sources via source_urls if set to True.
+Default is False, i.e., no additional research will be conducted on newer sources.
+"""
+
+## Notes:
+## Please uncomment the test case to run and comment the rest.
+## Thanks!
+
+
+
+#### Test case 1 (original test case as control from https://docs.gptr.dev/docs/gpt-researcher/tailored-research)
+
+from gpt_researcher.agent import GPTResearcher # Ensure this path is correct
+import asyncio
+import logging
+from typing import List, Dict, Any
+from src.logs_handler import CustomLogsHandler # Update import
+
+async def get_report(query: str, report_type: str, sources: list) -> str:
+ custom_logs_handler = CustomLogsHandler(query=query) # Pass query parameter
+ researcher = GPTResearcher(query=query,
+ report_type=report_type,
+ complement_source_urls=False,
+ websocket=custom_logs_handler)
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+ return report, researcher
+
+if __name__ == "__main__":
+ query = "Write an analysis on paul graham"
+ report_type = "research_report"
+ sources = ["https://www.paulgraham.com/when.html", "https://www.paulgraham.com/noob.html"] # query is related
+
+ report, researcher = asyncio.run(get_report(query, report_type, sources))
+ print(report)
+
+ print(f"\nLength of the context = {len(researcher.get_research_context())}") # Must say Non-zero value because the query is related to the contents of the page, so there will be relevant context present
+
+
+
+#### Test case 2 (Illustrating the problem, i.e., source_urls are not scoured. Hence, no relevant context)
+
+# from gpt_researcher.agent import GPTResearcher # Ensure this path is correct
+# import asyncio
+
+# async def get_report(query: str, report_type: str, sources: list) -> str:
+# researcher = GPTResearcher(query=query, report_type=report_type, source_urls=sources)
+# await researcher.conduct_research()
+# report = await researcher.write_report()
+# return report, researcher
+
+# if __name__ == "__main__":
+# query = "What is Microsoft's business model?"
+# report_type = "research_report"
+# sources = ["https://www.apple.com", "https://en.wikipedia.org/wiki/Olympic_Games"] # query is UNRELATED.
+
+# report, researcher = asyncio.run(get_report(query, report_type, sources))
+# print(report)
+
+# print(f"\nLength of the context = {len(researcher.get_research_context())}") # Must say 0 (zero) value because the query is UNRELATED to the contents of the pages, so there will be NO relevant context present
+
+
+
+#### Test case 3 (Suggested solution - complement_source_urls parameter allows GPTR to scour more of the web and not restrict to source_urls)
+
+# from gpt_researcher.agent import GPTResearcher # Ensure this path is correct
+# import asyncio
+
+# async def get_report(query: str, report_type: str, sources: list) -> str:
+# researcher = GPTResearcher(query=query, report_type=report_type, source_urls=sources, complement_source_urls=True)
+# await researcher.conduct_research()
+# report = await researcher.write_report()
+# return report, researcher
+
+# if __name__ == "__main__":
+# query = "What is Microsoft's business model?"
+# report_type = "research_report"
+# sources = ["https://www.apple.com", "https://en.wikipedia.org/wiki/Olympic_Games"] # query is UNRELATED
+
+# report, researcher = asyncio.run(get_report(query, report_type, sources))
+# print(report)
+
+# print(f"\nLength of the context = {len(researcher.get_research_context())}") # Must say Non-zero value because the query is UNRELATED to the contents of the page, but the complement_source_urls is set which should make gptr do default web search to gather contexts
+
+
+
+# #### Test case 4 (Furthermore, GPTR will create more context in addition to source_urls if the complement_source_urls parameter is set allowing for a larger research scope)
+
+# from gpt_researcher.agent import GPTResearcher # Ensure this path is correct
+# import asyncio
+
+# async def get_report(query: str, report_type: str, sources: list) -> str:
+# researcher = GPTResearcher(query=query, report_type=report_type, source_urls=sources, complement_source_urls=True)
+# await researcher.conduct_research()
+# report = await researcher.write_report()
+# return report, researcher
+
+# if __name__ == "__main__":
+# query = "What are the latest advancements in AI?"
+# report_type = "research_report"
+# sources = ["https://en.wikipedia.org/wiki/Artificial_intelligence", "https://www.ibm.com/watson/ai"] # query is related
+
+# report, researcher = asyncio.run(get_report(query, report_type, sources))
+# print(report)
+
+# print(f"\nLength of the context = {len(researcher.get_research_context())}") # Must say Non-zero value because the query is related to the contents of the page, and additionally the complement_source_urls is set which should make gptr do default web search to gather more contexts!
diff --git a/tests/test-loaders.py b/tests/test-loaders.py
new file mode 100644
index 0000000000000000000000000000000000000000..556550e3867c331ee7952ef259a51043f5751139
--- /dev/null
+++ b/tests/test-loaders.py
@@ -0,0 +1,17 @@
+from langchain_community.document_loaders import PyMuPDFLoader, UnstructuredCSVLoader
+
+# # Test PyMuPDFLoader
+pdf_loader = PyMuPDFLoader("my-docs/Elisha - Coding Career.pdf")
+try:
+ pdf_data = pdf_loader.load()
+ print("PDF Data:", pdf_data)
+except Exception as e:
+ print("Failed to load PDF:", e)
+
+# Test UnstructuredCSVLoader
+csv_loader = UnstructuredCSVLoader("my-docs/active_braze_protocols_from_bq.csv", mode="elements")
+try:
+ csv_data = csv_loader.load()
+ print("CSV Data:", csv_data)
+except Exception as e:
+ print("Failed to load CSV:", e)
\ No newline at end of file
diff --git a/tests/test-openai-llm.py b/tests/test-openai-llm.py
new file mode 100644
index 0000000000000000000000000000000000000000..667fb88ebc9eabc15fe8d0d4700f8260bdc2d0bc
--- /dev/null
+++ b/tests/test-openai-llm.py
@@ -0,0 +1,31 @@
+import asyncio
+from gpt_researcher.utils.llm import get_llm
+from gpt_researcher import GPTResearcher
+from dotenv import load_dotenv
+load_dotenv()
+
+async def main():
+
+ # Example usage of get_llm function
+ llm_provider = "openai"
+ model = "gpt-3.5-turbo"
+ temperature = 0.7
+ max_tokens = 1000
+
+ llm = get_llm(llm_provider, model=model, temperature=temperature, max_tokens=max_tokens)
+ print(f"LLM Provider: {llm_provider}, Model: {model}, Temperature: {temperature}, Max Tokens: {max_tokens}")
+ print('llm: ',llm)
+ await test_llm(llm=llm)
+
+
+async def test_llm(llm):
+ # Test the connection with a simple query
+ messages = [{"role": "user", "content": "sup?"}]
+ try:
+ response = await llm.get_chat_response(messages, stream=False)
+ print("LLM response:", response)
+ except Exception as e:
+ print(f"Error: {e}")
+
+# Run the async function
+asyncio.run(main())
\ No newline at end of file
diff --git a/tests/test-your-llm.py b/tests/test-your-llm.py
new file mode 100644
index 0000000000000000000000000000000000000000..d69f5507189ed97dbf4dbdc78473d32fa9801ccc
--- /dev/null
+++ b/tests/test-your-llm.py
@@ -0,0 +1,24 @@
+from gpt_researcher.config.config import Config
+from gpt_researcher.utils.llm import create_chat_completion
+import asyncio
+from dotenv import load_dotenv
+load_dotenv()
+
+async def main():
+ cfg = Config()
+
+ try:
+ report = await create_chat_completion(
+ model=cfg.smart_llm_model,
+ messages = [{"role": "user", "content": "sup?"}],
+ temperature=0.35,
+ llm_provider=cfg.smart_llm_provider,
+ stream=True,
+ max_tokens=cfg.smart_token_limit,
+ llm_kwargs=cfg.llm_kwargs
+ )
+ except Exception as e:
+ print(f"Error in calling LLM: {e}")
+
+# Run the async function
+asyncio.run(main())
\ No newline at end of file
diff --git a/tests/test-your-retriever.py b/tests/test-your-retriever.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea547f43c617316d95dc08441da30d1a9d312efd
--- /dev/null
+++ b/tests/test-your-retriever.py
@@ -0,0 +1,49 @@
+import asyncio
+from dotenv import load_dotenv
+from gpt_researcher.config.config import Config
+from gpt_researcher.actions.retriever import get_retrievers
+from gpt_researcher.skills.researcher import ResearchConductor
+import pprint
+# Load environment variables from .env file
+load_dotenv()
+
+async def test_scrape_data_by_query():
+ # Initialize the Config object
+ config = Config()
+
+ # Retrieve the retrievers based on the current configuration
+ retrievers = get_retrievers({}, config)
+ print("Retrievers:", retrievers)
+
+ # Create a mock researcher object with necessary attributes
+ class MockResearcher:
+ def init(self):
+ self.retrievers = retrievers
+ self.cfg = config
+ self.verbose = True
+ self.websocket = None
+ self.scraper_manager = None # Mock or implement scraper manager
+ self.vector_store = None # Mock or implement vector store
+
+ researcher = MockResearcher()
+ research_conductor = ResearchConductor(researcher)
+ # print('research_conductor',dir(research_conductor))
+ # print('MockResearcher',dir(researcher))
+ # Define a sub-query to test
+ sub_query = "design patterns for autonomous ai agents"
+
+ # Iterate through all retrievers
+ for retriever_class in retrievers:
+ # Instantiate the retriever with the sub-query
+ retriever = retriever_class(sub_query)
+
+ # Perform the search using the current retriever
+ search_results = await asyncio.to_thread(
+ retriever.search, max_results=10
+ )
+
+ print("\033[35mSearch results:\033[0m")
+ pprint.pprint(search_results, indent=4, width=80)
+
+if __name__ == "__main__":
+ asyncio.run(test_scrape_data_by_query())
\ No newline at end of file
diff --git a/tests/test_logging.py b/tests/test_logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f272d5bcfd67952f44e1401d1372c866b272847
--- /dev/null
+++ b/tests/test_logging.py
@@ -0,0 +1,61 @@
+import pytest
+from unittest.mock import AsyncMock
+from fastapi import WebSocket
+from src.logs_handler import CustomLogsHandler
+import os
+import json
+
+@pytest.mark.asyncio
+async def test_custom_logs_handler():
+ # Mock websocket
+ mock_websocket = AsyncMock()
+ mock_websocket.send_json = AsyncMock()
+
+ # Test initialization
+ handler = CustomLogsHandler(mock_websocket, "test_query")
+
+ # Verify log file creation
+ assert os.path.exists(handler.log_file)
+
+ # Test sending log data
+ test_data = {
+ "type": "logs",
+ "message": "Test log message"
+ }
+
+ await handler.send_json(test_data)
+
+ # Verify websocket was called with correct data
+ mock_websocket.send_json.assert_called_once_with(test_data)
+
+ # Verify log file contents
+ with open(handler.log_file, 'r') as f:
+ log_data = json.load(f)
+ assert len(log_data['events']) == 1
+ assert log_data['events'][0]['data'] == test_data
+
+@pytest.mark.asyncio
+async def test_content_update():
+ """Test handling of non-log type data that updates content"""
+ mock_websocket = AsyncMock()
+ mock_websocket.send_json = AsyncMock()
+
+ handler = CustomLogsHandler(mock_websocket, "test_query")
+
+ # Test content update
+ content_data = {
+ "query": "test query",
+ "sources": ["source1", "source2"],
+ "report": "test report"
+ }
+
+ await handler.send_json(content_data)
+
+ mock_websocket.send_json.assert_called_once_with(content_data)
+
+ # Verify log file contents
+ with open(handler.log_file, 'r') as f:
+ log_data = json.load(f)
+ assert log_data['content']['query'] == "test query"
+ assert log_data['content']['sources'] == ["source1", "source2"]
+ assert log_data['content']['report'] == "test report"
\ No newline at end of file
diff --git a/tests/test_logging_output.py b/tests/test_logging_output.py
new file mode 100644
index 0000000000000000000000000000000000000000..51c96c1a92bbb26f9969291cf0702d1bb02741ee
--- /dev/null
+++ b/tests/test_logging_output.py
@@ -0,0 +1,49 @@
+import pytest
+import asyncio
+from pathlib import Path
+import json
+import logging
+from fastapi import WebSocket
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+class TestWebSocket(WebSocket):
+ def __init__(self):
+ self.events = []
+
+ async def accept(self):
+ pass
+
+ async def send_json(self, event):
+ logger.info(f"WebSocket received event: {event}")
+ self.events.append(event)
+
+@pytest.mark.asyncio
+async def test_log_output_file():
+ """Test to verify logs are properly written to output file"""
+ from gpt_researcher.agent import GPTResearcher
+
+ # 1. Setup like the main app
+ websocket = TestWebSocket()
+ await websocket.accept()
+
+ # 2. Initialize researcher like main app
+ query = "What is the capital of France?"
+ researcher = GPTResearcher(query=query, websocket=websocket)
+
+ # 3. Run research
+ await researcher.conduct_research()
+
+ # 4. Verify events were captured
+ logger.info(f"Events captured: {len(websocket.events)}")
+ assert len(websocket.events) > 0, "No events were captured"
+
+ # 5. Check output file
+ output_dir = Path("outputs")
+ output_files = list(output_dir.glob(f"task_*_{query.replace(' ', '_')[:50]}.json"))
+ assert len(output_files) > 0, "No output file was created"
+
+ with open(output_files[-1]) as f:
+ data = json.load(f)
+ assert len(data.get('events', [])) > 0, "No events in output file"
\ No newline at end of file
diff --git a/tests/test_logs.py b/tests/test_logs.py
new file mode 100644
index 0000000000000000000000000000000000000000..3eb1045900a1be17a71ceeecbe588fabd9092ae9
--- /dev/null
+++ b/tests/test_logs.py
@@ -0,0 +1,48 @@
+import os
+from pathlib import Path
+import sys
+
+# Add the project root to Python path
+project_root = Path(__file__).parent.parent
+sys.path.append(str(project_root))
+
+from src.logs_handler import CustomLogsHandler
+
+def test_logs_creation():
+ # Print current working directory
+ print(f"Current working directory: {os.getcwd()}")
+
+ # Print project root
+ print(f"Project root: {project_root}")
+
+ # Try to create logs directory directly
+ logs_dir = project_root / "logs"
+ print(f"Attempting to create logs directory at: {logs_dir}")
+
+ try:
+ # Create directory with full permissions
+ os.makedirs(logs_dir, mode=0o777, exist_ok=True)
+ print(f"✓ Created directory: {logs_dir}")
+
+ # Test file creation
+ test_file = logs_dir / "test.txt"
+ with open(test_file, 'w') as f:
+ f.write("Test log entry")
+ print(f"✓ Created test file: {test_file}")
+
+ # Initialize the handler
+ handler = CustomLogsHandler()
+ print("✓ CustomLogsHandler initialized")
+
+ # Test JSON logging
+ handler.logs.append({"test": "message"})
+ print("✓ Added test log entry")
+
+ except Exception as e:
+ print(f"❌ Error: {str(e)}")
+ print(f"Error type: {type(e)}")
+ import traceback
+ print(f"Traceback: {traceback.format_exc()}")
+
+if __name__ == "__main__":
+ test_logs_creation()
\ No newline at end of file
diff --git a/tests/test_researcher_logging.py b/tests/test_researcher_logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fc45f39d02a46e2bcd9b4ba099a33e2379f5b53
--- /dev/null
+++ b/tests/test_researcher_logging.py
@@ -0,0 +1,71 @@
+import pytest
+import asyncio
+from pathlib import Path
+import sys
+import logging
+
+# Add the project root to Python path
+project_root = Path(__file__).parent.parent
+sys.path.append(str(project_root))
+
+# Configure basic logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+@pytest.mark.asyncio
+async def test_researcher_logging(): # Renamed function to be more specific
+ """
+ Test suite for verifying the researcher's logging infrastructure.
+ Ensures proper creation and formatting of log files.
+ """
+ try:
+ # Import here to catch any import errors
+ from src.researcher import Researcher
+ logger.info("Successfully imported Researcher class")
+
+ # Create a researcher instance with a logging-focused query
+ researcher = Researcher(
+ query="Test query for logging verification",
+ report_type="research_report"
+ )
+ logger.info("Created Researcher instance")
+
+ # Run the research
+ report = await researcher.research()
+ logger.info("Research completed successfully!")
+ logger.info(f"Report length: {len(report)}")
+
+ # Basic report assertions
+ assert report is not None
+ assert len(report) > 0
+
+ # Detailed log file verification
+ logs_dir = Path(project_root) / "logs"
+ log_files = list(logs_dir.glob("research_*.log"))
+ json_files = list(logs_dir.glob("research_*.json"))
+
+ # Verify log files exist
+ assert len(log_files) > 0, "No log files were created"
+ assert len(json_files) > 0, "No JSON files were created"
+
+ # Log the findings
+ logger.info(f"\nFound {len(log_files)} log files:")
+ for log_file in log_files:
+ logger.info(f"- {log_file.name}")
+ # Could add additional checks for log file format/content here
+
+ logger.info(f"\nFound {len(json_files)} JSON files:")
+ for json_file in json_files:
+ logger.info(f"- {json_file.name}")
+ # Could add additional checks for JSON file structure here
+
+ except ImportError as e:
+ logger.error(f"Import error: {e}")
+ logger.error("Make sure gpt_researcher is installed and in your PYTHONPATH")
+ raise
+ except Exception as e:
+ logger.error(f"Error during research: {e}")
+ raise
+
+if __name__ == "__main__":
+ pytest.main([__file__])
\ No newline at end of file
diff --git a/tests/vector-store.py b/tests/vector-store.py
new file mode 100644
index 0000000000000000000000000000000000000000..7847682b5c9d1788a526a0c5cb9f51d38f053015
--- /dev/null
+++ b/tests/vector-store.py
@@ -0,0 +1,236 @@
+import asyncio
+import pytest
+from typing import List
+from gpt_researcher import GPTResearcher
+
+from langchain.text_splitter import CharacterTextSplitter
+from langchain_openai import OpenAIEmbeddings
+from langchain_community.vectorstores import FAISS, InMemoryVectorStore
+from langchain_core.documents import Document
+
+
+# taken from https://paulgraham.com/persistence.html
+essay = """
+The right kind of Stubborn
+
+July 2024
+
+Successful people tend to be persistent. New ideas often don't work at first, but they're not deterred. They keep trying and eventually find something that does.
+
+Mere obstinacy, on the other hand, is a recipe for failure. Obstinate people are so annoying. They won't listen. They beat their heads against a wall and get nowhere.
+
+But is there any real difference between these two cases? Are persistent and obstinate people actually behaving differently? Or are they doing the same thing, and we just label them later as persistent or obstinate depending on whether they turned out to be right or not?
+
+If that's the only difference then there's nothing to be learned from the distinction. Telling someone to be persistent rather than obstinate would just be telling them to be right rather than wrong, and they already know that. Whereas if persistence and obstinacy are actually different kinds of behavior, it would be worthwhile to tease them apart. [1]
+
+I've talked to a lot of determined people, and it seems to me that they're different kinds of behavior. I've often walked away from a conversation thinking either "Wow, that guy is determined" or "Damn, that guy is stubborn," and I don't think I'm just talking about whether they seemed right or not. That's part of it, but not all of it.
+
+There's something annoying about the obstinate that's not simply due to being mistaken. They won't listen. And that's not true of all determined people. I can't think of anyone more determined than the Collison brothers, and when you point out a problem to them, they not only listen, but listen with an almost predatory intensity. Is there a hole in the bottom of their boat? Probably not, but if there is, they want to know about it.
+
+It's the same with most successful people. They're never more engaged than when you disagree with them. Whereas the obstinate don't want to hear you. When you point out problems, their eyes glaze over, and their replies sound like ideologues talking about matters of doctrine. [2]
+
+The reason the persistent and the obstinate seem similar is that they're both hard to stop. But they're hard to stop in different senses. The persistent are like boats whose engines can't be throttled back. The obstinate are like boats whose rudders can't be turned. [3]
+
+In the degenerate case they're indistinguishable: when there's only one way to solve a problem, your only choice is whether to give up or not, and persistence and obstinacy both say no. This is presumably why the two are so often conflated in popular culture. It assumes simple problems. But as problems get more complicated, we can see the difference between them. The persistent are much more attached to points high in the decision tree than to minor ones lower down, while the obstinate spray "don't give up" indiscriminately over the whole tree.
+
+The persistent are attached to the goal. The obstinate are attached to their ideas about how to reach it.
+
+Worse still, that means they'll tend to be attached to their first ideas about how to solve a problem, even though these are the least informed by the experience of working on it. So the obstinate aren't merely attached to details, but disproportionately likely to be attached to wrong ones.
+
+
+
+Why are they like this? Why are the obstinate obstinate? One possibility is that they're overwhelmed. They're not very capable. They take on a hard problem. They're immediately in over their head. So they grab onto ideas the way someone on the deck of a rolling ship might grab onto the nearest handhold.
+
+That was my initial theory, but on examination it doesn't hold up. If being obstinate were simply a consequence of being in over one's head, you could make persistent people become obstinate by making them solve harder problems. But that's not what happens. If you handed the Collisons an extremely hard problem to solve, they wouldn't become obstinate. If anything they'd become less obstinate. They'd know they had to be open to anything.
+
+Similarly, if obstinacy were caused by the situation, the obstinate would stop being obstinate when solving easier problems. But they don't. And if obstinacy isn't caused by the situation, it must come from within. It must be a feature of one's personality.
+
+Obstinacy is a reflexive resistance to changing one's ideas. This is not identical with stupidity, but they're closely related. A reflexive resistance to changing one's ideas becomes a sort of induced stupidity as contrary evidence mounts. And obstinacy is a form of not giving up that's easily practiced by the stupid. You don't have to consider complicated tradeoffs; you just dig in your heels. It even works, up to a point.
+
+The fact that obstinacy works for simple problems is an important clue. Persistence and obstinacy aren't opposites. The relationship between them is more like the relationship between the two kinds of respiration we can do: aerobic respiration, and the anaerobic respiration we inherited from our most distant ancestors. Anaerobic respiration is a more primitive process, but it has its uses. When you leap suddenly away from a threat, that's what you're using.
+
+The optimal amount of obstinacy is not zero. It can be good if your initial reaction to a setback is an unthinking "I won't give up," because this helps prevent panic. But unthinking only gets you so far. The further someone is toward the obstinate end of the continuum, the less likely they are to succeed in solving hard problems. [4]
+
+
+
+Obstinacy is a simple thing. Animals have it. But persistence turns out to have a fairly complicated internal structure.
+
+One thing that distinguishes the persistent is their energy. At the risk of putting too much weight on words, they persist rather than merely resisting. They keep trying things. Which means the persistent must also be imaginative. To keep trying things, you have to keep thinking of things to try.
+
+Energy and imagination make a wonderful combination. Each gets the best out of the other. Energy creates demand for the ideas produced by imagination, which thus produces more, and imagination gives energy somewhere to go. [5]
+
+Merely having energy and imagination is quite rare. But to solve hard problems you need three more qualities: resilience, good judgement, and a focus on some kind of goal.
+
+Resilience means not having one's morale destroyed by setbacks. Setbacks are inevitable once problems reach a certain size, so if you can't bounce back from them, you can only do good work on a small scale. But resilience is not the same as obstinacy. Resilience means setbacks can't change your morale, not that they can't change your mind.
+
+Indeed, persistence often requires that one change one's mind. That's where good judgement comes in. The persistent are quite rational. They focus on expected value. It's this, not recklessness, that lets them work on things that are unlikely to succeed.
+
+There is one point at which the persistent are often irrational though: at the very top of the decision tree. When they choose between two problems of roughly equal expected value, the choice usually comes down to personal preference. Indeed, they'll often classify projects into deliberately wide bands of expected value in order to ensure that the one they want to work on still qualifies.
+
+Empirically this doesn't seem to be a problem. It's ok to be irrational near the top of the decision tree. One reason is that we humans will work harder on a problem we love. But there's another more subtle factor involved as well: our preferences among problems aren't random. When we love a problem that other people don't, it's often because we've unconsciously noticed that it's more important than they realize.
+
+Which leads to our fifth quality: there needs to be some overall goal. If you're like me you began, as a kid, merely with the desire to do something great. In theory that should be the most powerful motivator of all, since it includes everything that could possibly be done. But in practice it's not much use, precisely because it includes too much. It doesn't tell you what to do at this moment.
+
+So in practice your energy and imagination and resilience and good judgement have to be directed toward some fairly specific goal. Not too specific, or you might miss a great discovery adjacent to what you're searching for, but not too general, or it won't work to motivate you. [6]
+
+When you look at the internal structure of persistence, it doesn't resemble obstinacy at all. It's so much more complex. Five distinct qualities — energy, imagination, resilience, good judgement, and focus on a goal — combine to produce a phenomenon that seems a bit like obstinacy in the sense that it causes you not to give up. But the way you don't give up is completely different. Instead of merely resisting change, you're driven toward a goal by energy and resilience, through paths discovered by imagination and optimized by judgement. You'll give way on any point low down in the decision tree, if its expected value drops sufficiently, but energy and resilience keep pushing you toward whatever you choose higher up.
+
+Considering what it's made of, it's not surprising that the right kind of stubbornness is so much rarer than the wrong kind, or that it gets so much better results. Anyone can do obstinacy. Indeed, kids and drunks and fools are best at it. Whereas very few people have enough of all five of the qualities that produce the right kind of stubbornness, but when they do the results are magical.
+
+
+
+
+
+
+
+Notes
+
+[1] I'm going to use "persistent" for the good kind of stubborn and "obstinate" for the bad kind, but I can't claim I'm simply following current usage. Conventional opinion barely distinguishes between good and bad kinds of stubbornness, and usage is correspondingly promiscuous. I could have invented a new word for the good kind, but it seemed better just to stretch "persistent."
+
+[2] There are some domains where one can succeed by being obstinate. Some political leaders have been notorious for it. But it won't work in situations where you have to pass external tests. And indeed the political leaders who are famous for being obstinate are famous for getting power, not for using it well.
+
+[3] There will be some resistance to turning the rudder of a persistent person, because there's some cost to changing direction.
+
+[4] The obstinate do sometimes succeed in solving hard problems. One way is through luck: like the stopped clock that's right twice a day, they seize onto some arbitrary idea, and it turns out to be right. Another is when their obstinacy cancels out some other form of error. For example, if a leader has overcautious subordinates, their estimates of the probability of success will always be off in the same direction. So if he mindlessly says "push ahead regardless" in every borderline case, he'll usually turn out to be right.
+
+[5] If you stop there, at just energy and imagination, you get the conventional caricature of an artist or poet.
+
+[6] Start by erring on the small side. If you're inexperienced you'll inevitably err on one side or the other, and if you err on the side of making the goal too broad, you won't get anywhere. Whereas if you err on the small side you'll at least be moving forward. Then, once you're moving, you expand the goal.
+"""
+
+
+def load_document():
+ document = [Document(page_content=essay)]
+ text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=30, separator="\n")
+ return text_splitter.split_documents(documents=document)
+
+
+def create_vectorstore(documents: List[Document]):
+ embeddings = OpenAIEmbeddings()
+ return FAISS.from_documents(documents, embeddings)
+
+@pytest.mark.asyncio
+async def test_gpt_researcher_with_vector_store():
+ docs = load_document()
+ vectorstore = create_vectorstore(docs)
+
+ query = """
+ Summarize the essay into 3 or 4 succint sections.
+ Make sure to include key points regarding the differences between
+ persistance vs obstinate.
+
+ Include some recommendations for entrepeneurs in the conclusion.
+ Recommend some ways to increase persistance in a healthy way.
+ """
+
+
+ # Create an instance of GPTResearcher
+ researcher = GPTResearcher(
+ query=query,
+ report_type="research_report",
+ report_source="langchain_vectorstore",
+ vector_store=vectorstore,
+ )
+
+ # Conduct research and write the report
+ await researcher.conduct_research()
+ report = await researcher.write_report()
+
+ assert report is not None
+
+@pytest.mark.asyncio
+async def test_store_in_vector_store_web():
+ vector_store = InMemoryVectorStore(embedding=OpenAIEmbeddings())
+ query = "Which one is the best LLM"
+
+ researcher = GPTResearcher(
+ query=query,
+ report_type="research_report",
+ report_source="web",
+ vector_store=vector_store,
+ )
+
+ await researcher.conduct_research()
+
+ related_contexts = await vector_store.asimilarity_search("GPT-4", k=2)
+
+ assert len(related_contexts) == 2
+ # Add more assertions as needed to verify the results
+
+
+@pytest.mark.asyncio
+async def test_store_in_vector_store_urls():
+ vector_store = InMemoryVectorStore(embedding=OpenAIEmbeddings())
+ query = "Who won the world cup in 2022"
+
+ researcher = GPTResearcher(
+ query=query,
+ report_type="research_report",
+ vector_store=vector_store,
+ source_urls=["https://en.wikipedia.org/wiki/FIFA_World_Cup"]
+ )
+
+ await researcher.conduct_research()
+
+ related_contexts = await vector_store.asimilarity_search("GPT-4", k=2)
+
+ assert len(related_contexts) == 2
+
+
+@pytest.mark.asyncio
+async def test_store_in_vector_store_langchain_docs():
+ vector_store = InMemoryVectorStore(embedding=OpenAIEmbeddings())
+ docs = load_document()
+ query = "What does successful people tend to do?"
+
+ researcher = GPTResearcher(
+ query=query,
+ report_type="research_report",
+ vector_store=vector_store,
+ report_source="langchain_documents",
+ documents=docs
+ )
+
+ await researcher.conduct_research()
+
+ related_contexts = await vector_store.asimilarity_search("GPT-4", k=2)
+
+ assert len(related_contexts) == 2
+
+@pytest.mark.asyncio
+async def test_store_in_vector_store_locals():
+ vector_store = InMemoryVectorStore(embedding=OpenAIEmbeddings())
+ query = "What is transformer?"
+
+ researcher = GPTResearcher(
+ query=query,
+ report_type="research_report",
+ vector_store=vector_store,
+ report_source="local",
+ config_path= "test_local"
+ )
+
+ await researcher.conduct_research()
+
+ related_contexts = await vector_store.asimilarity_search("GPT-4", k=2)
+
+ assert len(related_contexts) == 2
+
+@pytest.mark.asyncio
+async def test_store_in_vector_store_hybrids():
+ vector_store = InMemoryVectorStore(embedding=OpenAIEmbeddings())
+ query = "What is transformer?"
+
+ researcher = GPTResearcher(
+ query=query,
+ report_type="research_report",
+ vector_store=vector_store,
+ report_source="hybrid",
+ config_path= "test_local"
+ )
+
+ await researcher.conduct_research()
+
+ related_contexts = await vector_store.asimilarity_search("GPT-4", k=2)
+
+ assert len(related_contexts) == 2