Datasets:

Modalities:
Tabular
Text
Formats:
parquet
Size:
< 1K
ArXiv:
Libraries:
Datasets
pandas
License:
Dataset Viewer
Auto-converted to Parquet
repo
stringlengths
5
51
instance_id
stringlengths
11
56
base_commit
stringlengths
40
40
patch
stringlengths
400
333k
test_patch
stringlengths
0
895k
problem_statement
stringlengths
27
55.6k
hints_text
stringlengths
0
72k
created_at
int64
1,447B
1,739B
labels
sequencelengths
0
7
βŒ€
category
stringclasses
4 values
edit_functions
sequencelengths
1
10
added_functions
sequencelengths
0
20
edit_functions_length
int64
1
10
joselc/life-sim-first-try
joselc__life-sim-first-try-2
77d5c01d3090655b776faf9a0c14b352a36b9695
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..56b8df8 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,25 @@ +# Description + +Please include a summary of the changes and the related issue. + +Fixes # (issue) + +## Type of change + +Please delete options that are not relevant. + +- [ ] Bug fix (non-breaking change which fixes an issue) +- [ ] New feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) +- [ ] This change requires a documentation update + +## Checklist: + +- [ ] My code follows the style guidelines of this project +- [ ] I have performed a self-review of my code +- [ ] I have commented my code, particularly in hard-to-understand areas +- [ ] I have made corresponding changes to the documentation +- [ ] My changes generate no new warnings +- [ ] I have added tests that prove my fix is effective or that my feature works +- [ ] New and existing unit tests pass locally with my changes +- [ ] Any dependent changes have been merged and published in downstream modules \ No newline at end of file diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000..1afba64 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,39 @@ +codecov: + require_ci_to_pass: true + notify: + wait_for_ci: true + +coverage: + precision: 2 + round: down + range: "70...100" + + status: + project: + default: + target: 90% + threshold: 1% + patch: + default: + target: 90% + threshold: 1% + changes: false + +parsers: + gcov: + branch_detection: + conditional: yes + loop: yes + method: no + macro: no + +comment: + layout: "reach,diff,flags,tree" + behavior: default + require_changes: false + require_base: false + require_head: true + show_carryforward_flags: true + +github_checks: + annotations: true \ No newline at end of file diff --git a/src/config.py b/src/config.py index 129b79d..9951070 
100644 --- a/src/config.py +++ b/src/config.py @@ -12,6 +12,7 @@ GRID_COLUMNS (int): Number of columns in the hexagonal grid GRID_ROWS (int): Number of rows in the hexagonal grid PLANT_SPAWN_PROBABILITY (float): Probability (0-1) of spawning a plant cell + SEED_SURVIVAL_THRESHOLD (float): Default probability (0-1) of a seed surviving COLORS (Dict[str, Tuple[int, int, int]]): Dictionary of RGB color definitions for various elements in the simulation @@ -27,6 +28,7 @@ GRID_COLUMNS = 16 GRID_ROWS = 12 PLANT_SPAWN_PROBABILITY = 0.5 +SEED_SURVIVAL_THRESHOLD = 0.7 # 70% chance of survival by default # Colors COLORS = { diff --git a/src/hexagons/plant_states.py b/src/hexagons/plant_states.py index 2862f39..d7a918b 100644 --- a/src/hexagons/plant_states.py +++ b/src/hexagons/plant_states.py @@ -1,6 +1,8 @@ """Plant states and state management for the life simulation.""" from enum import Enum, auto +import random +from ..config import SEED_SURVIVAL_THRESHOLD class PlantState(Enum): @@ -31,6 +33,7 @@ class PlantStateManager: time_in_state (float): How long the plant has been in current state health (float): Plant's health from 0.0 to 1.0 growth (float): Plant's growth progress from 0.0 to 1.0 + seed_survival_threshold (float): Probability (0-1) of a seed surviving to growing phase """ # State transition thresholds @@ -45,6 +48,38 @@ def __init__(self): self.time_in_state = 0.0 self.health = 1.0 self.growth = 0.0 + self.seed_survival_threshold = SEED_SURVIVAL_THRESHOLD + + @property + def seed_survival_threshold(self) -> float: + """Get the probability threshold for seed survival. + + Returns: + float: The probability (0-1) that a seed will survive to growing phase + """ + return self._seed_survival_threshold + + @seed_survival_threshold.setter + def seed_survival_threshold(self, value: float) -> None: + """Set the seed survival threshold. 
+ + Args: + value (float): The new threshold value (0-1) + + Raises: + ValueError: If value is not between 0 and 1 + """ + if not 0 <= value <= 1: + raise ValueError("Seed survival threshold must be between 0 and 1") + self._seed_survival_threshold = value + + def _check_seed_survival(self) -> bool: + """Check if the seed survives to growing phase. + + Returns: + bool: True if the seed survives, False if it dies + """ + return random.random() < self.seed_survival_threshold def update(self, dt: float) -> None: """Update the plant's state based on time passed. @@ -55,9 +90,13 @@ def update(self, dt: float) -> None: self.time_in_state += dt if self.state == PlantState.SEED: - # Stay in seed state for SEED_DURATION + # Check for state transition at the end of seed phase if self.time_in_state >= self.SEED_DURATION: - self.state = PlantState.GROWING + # Determine if seed survives + if self._check_seed_survival(): + self.state = PlantState.GROWING + else: + self.state = PlantState.DYING self.time_in_state = 0.0 elif self.state == PlantState.GROWING: diff --git a/src/mesh/hex_mesh.py b/src/mesh/hex_mesh.py index 5f98cb7..9871aa2 100644 --- a/src/mesh/hex_mesh.py +++ b/src/mesh/hex_mesh.py @@ -2,10 +2,11 @@ import math import random -from typing import List, Union +from typing import List, Union, Tuple from ..hexagons.plant import PlantHexagon from ..hexagons.ground import GroundHexagon from ..config import PLANT_SPAWN_PROBABILITY +from ..hexagons.plant_states import PlantState class HexMesh: @@ -17,6 +18,10 @@ class HexMesh: Attributes: hexagons (List[Union[PlantHexagon, GroundHexagon]]): List of all hexagonal cells in the grid + num_columns (int): Number of columns in the grid + num_rows (int): Number of rows in the grid + grid_bounds (Tuple[float, float, float, float]): Grid boundaries (left, right, top, bottom) + cell_size (float): Size of hexagon cells (side length) """ def __init__(self, num_columns: int, num_rows: int, display_width: int, display_height: int) -> 
None: @@ -31,36 +36,95 @@ def __init__(self, num_columns: int, num_rows: int, display_width: int, display_ display_width (int): Width of the display surface in pixels display_height (int): Height of the display surface in pixels """ + self.num_columns = num_columns + self.num_rows = num_rows + # Calculate side length 'a' so that vertical span fits display_height exactly - a = display_height / (num_rows * math.sqrt(3)) # side length + self.cell_size = display_height / (num_rows * math.sqrt(3)) # side length # Total grid width = 2*a + (num_columns - 1)*1.5*a - grid_width = 2 * a + (num_columns - 1) * (1.5 * a) - offset_x = (display_width - grid_width) / 2 - offset_y = 0 # even columns will touch top + grid_width = 2 * self.cell_size + (num_columns - 1) * (1.5 * self.cell_size) + self.offset_x = (display_width - grid_width) / 2 + self.offset_y = 0 # even columns will touch top + + # Calculate grid boundaries + self.grid_bounds = ( + self.offset_x, # left + self.offset_x + grid_width, # right + self.offset_y, # top + display_height # bottom + ) self.hexagons: List[Union[PlantHexagon, GroundHexagon]] = [] for i in range(num_columns): for j in range(num_rows): # Compute center of hexagon for column i, row j - cx = offset_x + a + i * (1.5 * a) + cx = self.offset_x + self.cell_size + i * (1.5 * self.cell_size) if i % 2 == 0: - cy = offset_y + (a * math.sqrt(3) / 2) + j * (a * math.sqrt(3)) + cy = self.offset_y + (self.cell_size * math.sqrt(3) / 2) + j * (self.cell_size * math.sqrt(3)) else: - cy = offset_y + a * math.sqrt(3) + j * (a * math.sqrt(3)) + cy = self.offset_y + self.cell_size * math.sqrt(3) + j * (self.cell_size * math.sqrt(3)) # Randomly choose between a plant hexagon and ground hexagon if random.random() < PLANT_SPAWN_PROBABILITY: - hexagon = PlantHexagon(cx, cy, a) + hexagon = PlantHexagon(cx, cy, self.cell_size) else: - hexagon = GroundHexagon(cx, cy, a) + hexagon = GroundHexagon(cx, cy, self.cell_size) self.hexagons.append(hexagon) + def 
_is_position_valid(self, cx: float, cy: float) -> bool: + """Check if a position is within the grid bounds. + + Args: + cx (float): X-coordinate of the position + cy (float): Y-coordinate of the position + + Returns: + bool: True if the position is within bounds, False otherwise + """ + left, right, top, bottom = self.grid_bounds + # Add a small margin (half a cell) to account for hexagon extent + margin = self.cell_size / 2 + return (left - margin <= cx <= right + margin and + top - margin <= cy <= bottom + margin) + + def _convert_plants_to_ground(self, dead_plants: List[Tuple[int, PlantHexagon]]) -> None: + """Convert multiple dead plants to ground hexagons in bulk. + + Args: + dead_plants (List[Tuple[int, PlantHexagon]]): List of (index, plant) pairs to convert + + Raises: + ValueError: If any plant's position is outside the grid bounds + """ + # First validate all positions + for index, plant in dead_plants: + if not self._is_position_valid(plant.cx, plant.cy): + raise ValueError( + f"Plant position ({plant.cx}, {plant.cy}) is outside grid bounds {self.grid_bounds}" + ) + + # Then perform all conversions + for index, plant in dead_plants: + self.hexagons[index] = GroundHexagon(plant.cx, plant.cy, plant.a) + def update(self, t: float) -> None: """Update all cells in the grid. + Updates each cell and converts dead plants to ground in bulk. + Args: t (float): Current simulation time in seconds """ + # First update all hexagons for hexagon in self.hexagons: - hexagon.update(t) \ No newline at end of file + hexagon.update(t) + + # Then identify and convert dead plants in bulk + dead_plants = [ + (i, hexagon) for i, hexagon in enumerate(self.hexagons) + if isinstance(hexagon, PlantHexagon) and hexagon.state_manager.state == PlantState.DEAD + ] + + if dead_plants: + self._convert_plants_to_ground(dead_plants) \ No newline at end of file
diff --git a/tests/hexagons/test_plant.py b/tests/hexagons/test_plant.py index cd752d1..fbaa086 100644 --- a/tests/hexagons/test_plant.py +++ b/tests/hexagons/test_plant.py @@ -1,6 +1,7 @@ """Unit tests for the PlantHexagon class.""" import unittest +from unittest.mock import Mock, patch from src.hexagons.plant import PlantHexagon from src.hexagons.plant_states import PlantState from tests.test_config import MOCK_CELL_SIZE, MOCK_COLORS @@ -16,76 +17,77 @@ def test_initialization(self): self.assertEqual(self.plant.cx, 50) self.assertEqual(self.plant.cy, 50) self.assertEqual(self.plant.a, MOCK_CELL_SIZE) - self.assertEqual(self.plant.state_manager.state, PlantState.SEED) - self.assertEqual(self.plant.state_manager.time_in_state, 0.0) - self.assertEqual(self.plant.state_manager.health, 1.0) - self.assertEqual(self.plant.state_manager.growth, 0.0) - - def test_lifecycle(self): - """Test that plant goes through its lifecycle states correctly.""" - # Start as seed - self.assertEqual(self.plant.state_manager.state, PlantState.SEED) - - # Update should stay in seed state initially + # Only verify that a state manager exists, not its internal state + self.assertIsNotNone(self.plant.state_manager) + + def test_update_delegates_to_state_manager(self): + """Test that update properly delegates to the state manager.""" + # Create a mock state manager + mock_state_manager = Mock() + self.plant.state_manager = mock_state_manager + + # Call update self.plant.update(0.1) - self.assertEqual(self.plant.state_manager.state, PlantState.SEED) - - # After SEED_DURATION, should transition to growing - self.plant.update(self.plant.state_manager.SEED_DURATION) - self.assertEqual(self.plant.state_manager.state, PlantState.GROWING) - - # Should be growing for a while - self.plant.update(self.plant.state_manager.GROWTH_THRESHOLD / 2) - self.assertEqual(self.plant.state_manager.state, PlantState.GROWING) - self.assertGreater(self.plant.state_manager.growth, 0.0) - - # After growth threshold, 
should be mature - self.plant.update(self.plant.state_manager.GROWTH_THRESHOLD) # Complete growth - self.assertEqual(self.plant.state_manager.state, PlantState.MATURE) - - # Should stay mature for a while - self.plant.update(self.plant.state_manager.MATURE_MAX_TIME - 0.1) - self.assertEqual(self.plant.state_manager.state, PlantState.MATURE) - - # Eventually should start dying - self.plant.update(0.2) # Push over max time - self.assertEqual(self.plant.state_manager.state, PlantState.DYING) - - # Finally should die - self.plant.update(self.plant.state_manager.DYING_DURATION) - self.assertEqual(self.plant.state_manager.state, PlantState.DEAD) - - def test_color_transitions(self): - """Test that color changes appropriately with state transitions.""" - # Start as seed (should be brown with yellow dot) - self.assertEqual(self.plant.state_manager.state, PlantState.SEED) - self.assertEqual(self.plant.base_color, MOCK_COLORS['BROWN']) - - # Growing should be brown with green dot - self.plant.update(self.plant.state_manager.SEED_DURATION + 0.1) - self.assertEqual(self.plant.state_manager.state, PlantState.GROWING) - self.assertEqual(self.plant.base_color, MOCK_COLORS['BROWN']) - - # Progress growth - self.plant.update(self.plant.state_manager.GROWTH_THRESHOLD / 2) - self.assertGreater(self.plant.state_manager.growth, 0.0) - self.assertLess(self.plant.state_manager.growth, 1.0) - - # Mature should be solid green - self.plant.update(self.plant.state_manager.GROWTH_THRESHOLD) # Complete growth - self.assertEqual(self.plant.state_manager.state, PlantState.MATURE) - self.assertEqual(self.plant.base_color, MOCK_COLORS['MATURE']) - - # Start dying - self.plant.update(self.plant.state_manager.MATURE_MAX_TIME + 0.1) - self.assertEqual(self.plant.state_manager.state, PlantState.DYING) - self.assertEqual(self.plant.base_color, MOCK_COLORS['DYING']) - - def assertColorCloserTo(self, color, target, other): - """Assert that color is closer to target than to other color.""" - target_dist = 
sum((c1 - c2) ** 2 for c1, c2 in zip(color, target)) - other_dist = sum((c1 - c2) ** 2 for c1, c2 in zip(color, other)) - self.assertLess(target_dist, other_dist) + + # Verify the state manager's update was called with correct time + mock_state_manager.update.assert_called_once_with(0.1) + + def test_base_color_mapping(self): + """Test that base colors are correctly mapped to states.""" + # Create a mock state manager + mock_state_manager = Mock() + self.plant.state_manager = mock_state_manager + + # Test each state + state_color_map = { + PlantState.SEED: MOCK_COLORS['BROWN'], + PlantState.GROWING: MOCK_COLORS['BROWN'], + PlantState.MATURE: MOCK_COLORS['MATURE'], + PlantState.DYING: MOCK_COLORS['DYING'], + PlantState.DEAD: MOCK_COLORS['DEAD'] + } + + for state, expected_color in state_color_map.items(): + mock_state_manager.state = state + self.assertEqual(self.plant.base_color, expected_color) + + def test_detail_color_mapping(self): + """Test that detail colors are correctly mapped to states.""" + # Create a mock state manager + mock_state_manager = Mock() + self.plant.state_manager = mock_state_manager + + # Test each state + state_color_map = { + PlantState.SEED: MOCK_COLORS['YELLOW'], + PlantState.GROWING: MOCK_COLORS['GROWING'], + PlantState.MATURE: (0, 0, 0), # Black (invisible) + PlantState.DYING: (0, 0, 0), # Black (invisible) + PlantState.DEAD: (0, 0, 0) # Black (invisible) + } + + for state, expected_color in state_color_map.items(): + mock_state_manager.state = state + self.assertEqual(self.plant.detail_color, expected_color) + + def test_detail_radius_mapping(self): + """Test that detail radius is correctly mapped to states.""" + # Create a mock state manager + mock_state_manager = Mock() + self.plant.state_manager = mock_state_manager + + # Test each state + state_radius_map = { + PlantState.SEED: self.plant.SEED_DOT_RADIUS, + PlantState.GROWING: self.plant.GROWING_DOT_RADIUS, + PlantState.MATURE: 0.0, + PlantState.DYING: 0.0, + PlantState.DEAD: 0.0 
+ } + + for state, expected_radius in state_radius_map.items(): + mock_state_manager.state = state + self.assertEqual(self.plant.detail_radius, expected_radius) if __name__ == '__main__': diff --git a/tests/hexagons/test_plant_states.py b/tests/hexagons/test_plant_states.py index 13e0266..e84c73d 100644 --- a/tests/hexagons/test_plant_states.py +++ b/tests/hexagons/test_plant_states.py @@ -1,7 +1,9 @@ """Unit tests for the plant state management system.""" import unittest +from unittest.mock import patch from src.hexagons.plant_states import PlantState, PlantStateManager +from src.config import SEED_SURVIVAL_THRESHOLD class TestPlantStateManager(unittest.TestCase): @@ -15,89 +17,191 @@ def test_initialization(self): self.assertEqual(self.manager.time_in_state, 0.0) self.assertEqual(self.manager.health, 1.0) self.assertEqual(self.manager.growth, 0.0) + self.assertEqual(self.manager.seed_survival_threshold, SEED_SURVIVAL_THRESHOLD) + + def test_seed_survival_check(self): + """Test that seed survival check works correctly.""" + # Test with random value below threshold (survives) + with patch('random.random', return_value=0.5): + self.manager.seed_survival_threshold = 0.7 + self.assertTrue(self.manager._check_seed_survival()) + + # Test with random value above threshold (dies) + with patch('random.random', return_value=0.8): + self.manager.seed_survival_threshold = 0.7 + self.assertFalse(self.manager._check_seed_survival()) + + # Test edge cases + with patch('random.random', return_value=0.0): + self.manager.seed_survival_threshold = 0.1 + self.assertTrue(self.manager._check_seed_survival()) # Just survives + + with patch('random.random', return_value=1.0): + self.manager.seed_survival_threshold = 0.9 + self.assertFalse(self.manager._check_seed_survival()) # Just dies + + def test_seed_survival_transition(self): + """Test state transitions based on seed survival.""" + # Test survival case + with patch('random.random', return_value=0.5): + 
self.manager.seed_survival_threshold = 0.7 # Will survive + self.assertEqual(self.manager.state, PlantState.SEED) + self.manager.update(self.manager.SEED_DURATION + 0.1) # Trigger check + self.assertEqual(self.manager.state, PlantState.GROWING) + self.assertEqual(self.manager.time_in_state, 0.0) + + # Test death case + manager2 = PlantStateManager() # Fresh instance for death test + with patch('random.random', return_value=0.9): + manager2.seed_survival_threshold = 0.7 # Will die + self.assertEqual(manager2.state, PlantState.SEED) + manager2.update(manager2.SEED_DURATION + 0.1) # Trigger check + self.assertEqual(manager2.state, PlantState.DYING) + self.assertEqual(manager2.time_in_state, 0.0) + + def test_seed_death_progression(self): + """Test that a dying seed progresses through states correctly.""" + with patch('random.random', return_value=0.9): # Will die + self.manager.seed_survival_threshold = 0.7 + + # Progress to dying state + self.manager.update(self.manager.SEED_DURATION + 0.1) + self.assertEqual(self.manager.state, PlantState.DYING) + self.assertEqual(self.manager.health, 1.0) # Starts with full health + + # Half-way through dying + self.manager.update(self.manager.DYING_DURATION / 2) + self.assertEqual(self.manager.state, PlantState.DYING) + self.assertAlmostEqual(self.manager.health, 0.5, places=2) + + # Complete death + self.manager.update(self.manager.DYING_DURATION / 2) + self.assertEqual(self.manager.state, PlantState.DEAD) + self.assertEqual(self.manager.health, 0.0) + + def test_seed_survival_threshold_validation(self): + """Test that seed survival threshold validates input correctly.""" + # Test valid values + valid_values = [0.0, 0.5, 1.0] + for value in valid_values: + self.manager.seed_survival_threshold = value + self.assertEqual(self.manager.seed_survival_threshold, value) + + # Test invalid values + invalid_values = [-0.1, 1.1, -1, 2] + for value in invalid_values: + with self.assertRaises(ValueError): + 
self.manager.seed_survival_threshold = value + + def test_seed_survival_threshold_persistence(self): + """Test that seed survival threshold persists through state changes.""" + # Change threshold + new_threshold = 0.8 + self.manager.seed_survival_threshold = new_threshold + + # Progress through states + self.manager.update(self.manager.SEED_DURATION + 0.1) # To GROWING + self.assertEqual(self.manager.seed_survival_threshold, new_threshold) + + self.manager.update(self.manager.GROWTH_THRESHOLD) # To MATURE + self.assertEqual(self.manager.seed_survival_threshold, new_threshold) + + self.manager.update(self.manager.MATURE_MAX_TIME + 0.1) # To DYING + self.assertEqual(self.manager.seed_survival_threshold, new_threshold) + + self.manager.update(self.manager.DYING_DURATION) # To DEAD + self.assertEqual(self.manager.seed_survival_threshold, new_threshold) def test_seed_to_growing_transition(self): """Test transition from seed to growing state after SEED_DURATION.""" - self.assertEqual(self.manager.state, PlantState.SEED) - # Update with less than SEED_DURATION - self.manager.update(0.1) - self.assertEqual(self.manager.state, PlantState.SEED) - # Update past SEED_DURATION - self.manager.update(self.manager.SEED_DURATION) - self.assertEqual(self.manager.state, PlantState.GROWING) - self.assertEqual(self.manager.time_in_state, 0.0) + with patch('random.random', return_value=0.5): # Will survive with default threshold of 0.7 + self.assertEqual(self.manager.state, PlantState.SEED) + # Update with less than SEED_DURATION + self.manager.update(0.1) + self.assertEqual(self.manager.state, PlantState.SEED) + # Update past SEED_DURATION + self.manager.update(self.manager.SEED_DURATION) + self.assertEqual(self.manager.state, PlantState.GROWING) + self.assertEqual(self.manager.time_in_state, 0.0) def test_growth_progression(self): """Test that growth progresses correctly over time.""" - # Get to growing state - self.manager.update(self.manager.SEED_DURATION + 0.1) - 
self.assertEqual(self.manager.state, PlantState.GROWING) + with patch('random.random', return_value=0.5): # Will survive with default threshold + # Get to growing state + self.manager.update(self.manager.SEED_DURATION + 0.1) + self.assertEqual(self.manager.state, PlantState.GROWING) - # Partial growth - self.manager.update(self.manager.GROWTH_THRESHOLD / 2) - self.assertEqual(self.manager.state, PlantState.GROWING) - self.assertAlmostEqual(self.manager.growth, 0.5, places=2) + # Partial growth + self.manager.update(self.manager.GROWTH_THRESHOLD / 2) + self.assertEqual(self.manager.state, PlantState.GROWING) + self.assertAlmostEqual(self.manager.growth, 0.5, places=2) - # Complete growth - self.manager.update(self.manager.GROWTH_THRESHOLD / 2) - self.assertEqual(self.manager.state, PlantState.MATURE) - self.assertEqual(self.manager.growth, 1.0) + # Complete growth + self.manager.update(self.manager.GROWTH_THRESHOLD / 2) + self.assertEqual(self.manager.state, PlantState.MATURE) + self.assertEqual(self.manager.growth, 1.0) def test_mature_to_dying_transition(self): """Test transition from mature to dying state.""" - # Get to mature state - self.manager.update(self.manager.SEED_DURATION + 0.1) # Seed -> Growing - self.manager.update(self.manager.GROWTH_THRESHOLD) # Complete growth - self.assertEqual(self.manager.state, PlantState.MATURE) - - # Not enough time passed for dying - self.manager.update(self.manager.MATURE_MAX_TIME - 0.1) - self.assertEqual(self.manager.state, PlantState.MATURE) - - # Enough time passed, should start dying - self.manager.update(0.2) # Push over the threshold - self.assertEqual(self.manager.state, PlantState.DYING) + with patch('random.random', return_value=0.5): # Will survive with default threshold + # Get to mature state + self.manager.update(self.manager.SEED_DURATION + 0.1) # Seed -> Growing + self.manager.update(self.manager.GROWTH_THRESHOLD) # Complete growth + self.assertEqual(self.manager.state, PlantState.MATURE) + 
self.assertEqual(self.manager.time_in_state, 0.0) + + # Update with less than MATURE_MAX_TIME + self.manager.update(self.manager.MATURE_MAX_TIME / 2) + self.assertEqual(self.manager.state, PlantState.MATURE) + + # Update past MATURE_MAX_TIME + self.manager.update(self.manager.MATURE_MAX_TIME / 2 + 0.1) + self.assertEqual(self.manager.state, PlantState.DYING) + self.assertEqual(self.manager.time_in_state, 0.0) def test_dying_progression(self): """Test that health decreases correctly while dying.""" - # Get to dying state - self.manager.update(self.manager.SEED_DURATION + 0.1) # Seed -> Growing - self.manager.update(self.manager.GROWTH_THRESHOLD) # Complete growth - self.manager.update(self.manager.MATURE_MAX_TIME + 0.1) # Start dying - self.assertEqual(self.manager.state, PlantState.DYING) - self.assertEqual(self.manager.health, 1.0) - - # Half-way through dying - self.manager.update(self.manager.DYING_DURATION / 2) - self.assertAlmostEqual(self.manager.health, 0.5, places=2) - - # Complete death - self.manager.update(self.manager.DYING_DURATION / 2) - self.assertEqual(self.manager.state, PlantState.DEAD) - self.assertEqual(self.manager.health, 0.0) + with patch('random.random', return_value=0.5): # Will survive with default threshold + # Get to dying state + self.manager.update(self.manager.SEED_DURATION + 0.1) # Seed -> Growing + self.manager.update(self.manager.GROWTH_THRESHOLD) # Complete growth + self.manager.update(self.manager.MATURE_MAX_TIME + 0.1) # Start dying + self.assertEqual(self.manager.state, PlantState.DYING) + self.assertEqual(self.manager.health, 1.0) # Full health at start of dying + + # Update halfway through dying duration + self.manager.update(self.manager.DYING_DURATION / 2) + self.assertEqual(self.manager.state, PlantState.DYING) + self.assertAlmostEqual(self.manager.health, 0.5, places=2) + + # Complete death + self.manager.update(self.manager.DYING_DURATION / 2) + self.assertEqual(self.manager.state, PlantState.DEAD) + 
self.assertEqual(self.manager.health, 0.0) def test_color_factor(self): """Test that color factor is appropriate for each state.""" - # Seed state - self.assertEqual(self.manager.color_factor, 0.0) + with patch('random.random', return_value=0.5): # Will survive with default threshold + # Seed state + self.assertEqual(self.manager.color_factor, 0.0) - # Growing state - self.manager.update(self.manager.SEED_DURATION + 0.1) # Start growing - self.manager.update(self.manager.GROWTH_THRESHOLD / 2) # Half growth - self.assertAlmostEqual(self.manager.color_factor, 0.5, places=2) + # Growing state + self.manager.update(self.manager.SEED_DURATION + 0.1) # Start growing + self.manager.update(self.manager.GROWTH_THRESHOLD / 2) # Half growth + self.assertAlmostEqual(self.manager.color_factor, 0.5, places=2) - # Mature state - self.manager.update(self.manager.GROWTH_THRESHOLD / 2) # Complete growth - self.assertEqual(self.manager.color_factor, 1.0) + # Mature state + self.manager.update(self.manager.GROWTH_THRESHOLD / 2) # Complete growth + self.assertEqual(self.manager.color_factor, 1.0) - # Dying state - self.manager.update(self.manager.MATURE_MAX_TIME + 0.1) # Start dying - self.manager.update(self.manager.DYING_DURATION / 2) # Half dead - self.assertAlmostEqual(self.manager.color_factor, 0.5, places=2) + # Dying state + self.manager.update(self.manager.MATURE_MAX_TIME + 0.1) # Start dying + self.manager.update(self.manager.DYING_DURATION / 2) # Half dead + self.assertAlmostEqual(self.manager.color_factor, 0.5, places=2) - # Dead state - self.manager.update(self.manager.DYING_DURATION / 2) # Complete death - self.assertEqual(self.manager.color_factor, 0.0) + # Dead state + self.manager.update(self.manager.DYING_DURATION / 2) # Complete death + self.assertEqual(self.manager.color_factor, 0.0) def test_time_tracking(self): """Test that time in state is tracked correctly.""" @@ -117,32 +221,33 @@ def test_time_tracking(self): def test_state_sequence(self): """Test that states 
always follow the correct sequence.""" - states_seen = [] - last_state = None - - # Run through entire lifecycle with smaller time steps - while self.manager.state != PlantState.DEAD: - current_state = self.manager.state - # Only record state when it changes - if current_state != last_state: - states_seen.append(current_state) - last_state = current_state - self.manager.update(0.5) # Larger time step for quicker transitions - - # Add final state - if self.manager.state not in states_seen: - states_seen.append(PlantState.DEAD) - - # Verify sequence - expected_sequence = [ - PlantState.SEED, - PlantState.GROWING, - PlantState.MATURE, - PlantState.DYING, - PlantState.DEAD - ] - - self.assertEqual(states_seen, expected_sequence) + with patch('random.random', return_value=0.5): # Will survive with default threshold + states_seen = [] + last_state = None + + # Run through entire lifecycle with smaller time steps + while self.manager.state != PlantState.DEAD: + current_state = self.manager.state + # Only record state when it changes + if current_state != last_state: + states_seen.append(current_state) + last_state = current_state + self.manager.update(0.5) # Larger time step for quicker transitions + + # Add final state + if self.manager.state not in states_seen: + states_seen.append(PlantState.DEAD) + + # Verify sequence + expected_sequence = [ + PlantState.SEED, + PlantState.GROWING, + PlantState.MATURE, + PlantState.DYING, + PlantState.DEAD + ] + + self.assertEqual(states_seen, expected_sequence) if __name__ == '__main__': diff --git a/tests/mesh/test_hex_mesh.py b/tests/mesh/test_hex_mesh.py index 989aa5f..4f59a1f 100644 --- a/tests/mesh/test_hex_mesh.py +++ b/tests/mesh/test_hex_mesh.py @@ -13,6 +13,7 @@ MOCK_COLUMNS, MOCK_ROWS ) +from unittest.mock import patch, Mock class TestHexMesh(unittest.TestCase): @@ -39,11 +40,7 @@ def test_hexagon_types(self): self.assertTrue(isinstance(hexagon, (PlantHexagon, GroundHexagon))) def test_grid_dimensions(self): - """Test that 
the grid has reasonable dimensions. - - The grid should roughly fit within the screen bounds, allowing for some - overlap at the edges due to hexagon geometry and offset calculations. - """ + """Test that the grid has reasonable dimensions.""" # Find the leftmost, rightmost, topmost, and bottommost points left = min(h.cx - h.a for h in self.mesh.hexagons) right = max(h.cx + h.a for h in self.mesh.hexagons) @@ -66,27 +63,23 @@ def test_grid_dimensions(self): def test_update_propagation(self): """Test that update calls are propagated to all hexagons.""" - # First update - should stay in seed state - self.mesh.update(0.1) - - # Second update - transition to growing - self.mesh.update(2.0) # SEED_DURATION - - # Third update - allow for growth - self.mesh.update(1.5) # Half of GROWTH_THRESHOLD - - # Check that all plant hexagons have been updated - plant_count = 0 + # Replace all hexagons with mocks + mock_hexagons = [] for hexagon in self.mesh.hexagons: - if isinstance(hexagon, PlantHexagon): - plant_count += 1 - # Plants should have moved past SEED state and started growing - self.assertEqual(hexagon.state_manager.state, PlantState.GROWING) - self.assertGreater(hexagon.state_manager.growth, 0.0) - self.assertLess(hexagon.state_manager.growth, 1.0) - - # Ensure we actually tested some plants - self.assertGreater(plant_count, 0) + mock = Mock() + mock.cx = hexagon.cx + mock.cy = hexagon.cy + mock.a = hexagon.a + mock_hexagons.append(mock) + self.mesh.hexagons = mock_hexagons + + # Update the mesh + update_time = 0.1 + self.mesh.update(update_time) + + # Verify that update was called on each hexagon + for mock in mock_hexagons: + mock.update.assert_called_once_with(update_time) def test_rendering(self): """Test that all hexagons can be rendered.""" @@ -102,6 +95,219 @@ def test_rendering(self): self.assertTrue(show_grid) self.assertTrue(isinstance(drawn_hexagon, (PlantHexagon, GroundHexagon))) + def test_dead_plant_conversion(self): + """Test that dead plants are 
converted to ground.""" + # Find a plant hexagon + plant_index = None + for i, hexagon in enumerate(self.mesh.hexagons): + if isinstance(hexagon, PlantHexagon): + plant_index = i + break + + if plant_index is None: + # Create a plant if none exists + plant = PlantHexagon(50, 50, 10) + self.mesh.hexagons[0] = plant + plant_index = 0 + + plant = self.mesh.hexagons[plant_index] + original_position = (plant.cx, plant.cy, plant.a) + + # Set plant to dead state + plant.state_manager.state = PlantState.DEAD + + # Update should trigger conversion + self.mesh.update(0.1) + + # Verify conversion + converted = self.mesh.hexagons[plant_index] + self.assertIsInstance(converted, GroundHexagon) + self.assertEqual((converted.cx, converted.cy, converted.a), original_position) + + def test_multiple_dead_plant_conversions(self): + """Test that multiple dead plants are converted to ground correctly.""" + # Create a mesh with only plants + with patch('random.random', return_value=0.0): # Ensure all cells are plants + test_mesh = HexMesh(2, 2, MOCK_SCREEN_WIDTH, MOCK_SCREEN_HEIGHT) + + # Set some plants to dead state + dead_indices = [0, 2] # First and third plants + for i in dead_indices: + plant = test_mesh.hexagons[i] + plant.state_manager.state = PlantState.DEAD + + # Update should convert dead plants + test_mesh.update(0.1) + + # Verify conversions + for i in dead_indices: + self.assertIsInstance(test_mesh.hexagons[i], GroundHexagon) + + # Verify other plants remain unchanged + for i in range(len(test_mesh.hexagons)): + if i not in dead_indices: + self.assertIsInstance(test_mesh.hexagons[i], PlantHexagon) + + def test_position_preservation_after_conversion(self): + """Test that converted ground hexagons maintain the same position as the original plant.""" + # Create a plant and record its position + plant = PlantHexagon(50, 50, 10) + self.mesh.hexagons[0] = plant + original_points = plant.points.copy() + + # Set to dead state and update + plant.state_manager.state = PlantState.DEAD 
+ self.mesh.update(0.1) + + # Verify position is maintained + ground = self.mesh.hexagons[0] + self.assertIsInstance(ground, GroundHexagon) + self.assertEqual(ground.points, original_points) + + def test_grid_bounds_initialization(self): + """Test that grid boundaries are correctly initialized.""" + # Check that bounds are properly set + left, right, top, bottom = self.mesh.grid_bounds + + # Verify basic bounds properties + self.assertLess(left, right) # Left bound should be less than right + self.assertLess(top, bottom) # Top bound should be less than bottom + self.assertEqual(bottom, MOCK_SCREEN_HEIGHT) # Bottom should match screen height + self.assertEqual(top, 0) # Top should be 0 + + # Verify grid is centered horizontally + grid_width = right - left + expected_left = (MOCK_SCREEN_WIDTH - grid_width) / 2 + self.assertAlmostEqual(left, expected_left) + + def test_position_validation_within_bounds(self): + """Test that positions within grid bounds are validated correctly.""" + # Get a known valid position (center of first hexagon) + valid_hexagon = self.mesh.hexagons[0] + + # Test center position + self.assertTrue( + self.mesh._is_position_valid(valid_hexagon.cx, valid_hexagon.cy), + "Center position should be valid" + ) + + # Test positions near the edges but within bounds + left, right, top, bottom = self.mesh.grid_bounds + margin = self.mesh.cell_size / 2 + + # Test various valid positions + valid_positions = [ + (left + margin, top + margin), # Top-left with margin + (right - margin, top + margin), # Top-right with margin + (left + margin, bottom - margin), # Bottom-left with margin + (right - margin, bottom - margin), # Bottom-right with margin + ((left + right) / 2, (top + bottom) / 2), # Center of grid + ] + + for x, y in valid_positions: + self.assertTrue( + self.mesh._is_position_valid(x, y), + f"Position ({x}, {y}) should be valid" + ) + + def test_position_validation_outside_bounds(self): + """Test that positions outside grid bounds are invalidated 
correctly.""" + left, right, top, bottom = self.mesh.grid_bounds + margin = self.mesh.cell_size / 2 + + # Test various invalid positions + invalid_positions = [ + (left - margin * 2, top), # Too far left + (right + margin * 2, top), # Too far right + (left, top - margin * 2), # Too far up + (right, bottom + margin * 2), # Too far down + (float('-inf'), 0), # Extreme left + (float('inf'), 0), # Extreme right + (0, float('-inf')), # Extreme top + (0, float('inf')), # Extreme bottom + ] + + for x, y in invalid_positions: + self.assertFalse( + self.mesh._is_position_valid(x, y), + f"Position ({x}, {y}) should be invalid" + ) + + def test_convert_invalid_position(self): + """Test that converting a plant with invalid position raises ValueError.""" + # Create a plant with invalid position + left, right, top, bottom = self.mesh.grid_bounds + invalid_plant = PlantHexagon( + left - self.mesh.cell_size * 2, # Way outside left bound + top, + self.mesh.cell_size + ) + invalid_plant.state_manager.state = PlantState.DEAD + + # Try to convert the invalid plant + with self.assertRaises(ValueError) as context: + self.mesh._convert_plants_to_ground([(0, invalid_plant)]) + + # Verify error message contains position and bounds information + error_msg = str(context.exception) + self.assertIn("outside grid bounds", error_msg) + self.assertIn(str(self.mesh.grid_bounds), error_msg) + + def test_convert_edge_case_positions(self): + """Test conversion of plants at edge positions within the margin.""" + left, right, top, bottom = self.mesh.grid_bounds + margin = self.mesh.cell_size / 2 + + # Create plants at edge positions but within margin + edge_positions = [ + (left - margin + 0.1, (top + bottom) / 2), # Just inside left margin + (right + margin - 0.1, (top + bottom) / 2), # Just inside right margin + ((left + right) / 2, top - margin + 0.1), # Just inside top margin + ((left + right) / 2, bottom + margin - 0.1), # Just inside bottom margin + ] + + for i, (x, y) in 
enumerate(edge_positions): + # Create a plant at the edge position + plant = PlantHexagon(x, y, self.mesh.cell_size) + plant.state_manager.state = PlantState.DEAD + + # Verify conversion succeeds + try: + self.mesh._convert_plants_to_ground([(i, plant)]) + success = True + except ValueError: + success = False + + self.assertTrue( + success, + f"Conversion should succeed for position ({x}, {y})" + ) + + def test_bulk_conversion_validation(self): + """Test that bulk conversion validates all positions before converting any.""" + # Create a mix of valid and invalid plants + valid_plant = self.mesh.hexagons[0] + if not isinstance(valid_plant, PlantHexagon): + valid_plant = PlantHexagon(valid_plant.cx, valid_plant.cy, valid_plant.a) + + left, right, top, bottom = self.mesh.grid_bounds + invalid_plant = PlantHexagon( + right + self.mesh.cell_size * 2, # Way outside right bound + top, + self.mesh.cell_size + ) + + # Set both plants as dead + valid_plant.state_manager.state = PlantState.DEAD + invalid_plant.state_manager.state = PlantState.DEAD + + # Try to convert both plants + with self.assertRaises(ValueError): + self.mesh._convert_plants_to_ground([(0, valid_plant), (1, invalid_plant)]) + + # Verify that no conversions took place (the valid plant should not have been converted) + self.assertIsInstance(self.mesh.hexagons[0], PlantHexagon) + if __name__ == '__main__': unittest.main() \ No newline at end of file diff --git a/tests/test_integration.py b/tests/test_integration.py index 209951d..1fb8153 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -3,6 +3,7 @@ import unittest import pygame import time +import random from unittest.mock import patch from src.mesh.hex_mesh import HexMesh from src.game_state import GameStateManager, GameState @@ -19,6 +20,8 @@ class TestGameIntegration(unittest.TestCase): def setUp(self): """Set up test fixtures before each test method.""" + # Set fixed seed for consistent plant generation + random.seed(12345) 
pygame.init() self.renderer = PygameRenderer() self.renderer.setup(MOCK_SCREEN_WIDTH, MOCK_SCREEN_HEIGHT) @@ -203,6 +206,8 @@ def test_state_transitions_integration(self): def tearDown(self): """Clean up after each test method.""" self.renderer.cleanup() + # Reset random seed + random.seed() if __name__ == '__main__':
Plants seeds can die before growing A plant seed can die before evolving to a growing plant. This behavior will be decided randomly during the seed phase of the plant. A threshold will be established to decide if the seed evolves or not. The seed will be able to evolve at the end of its seed phase, where it will be decided randomly. A seed that succesfully evolves will go to growing phase. A seed that dies, will evolve directly to ground (last plant status). Plants seeds can die before growing A plant seed can die before evolving to a growing plant. This behavior will be decided randomly during the seed phase of the plant. A threshold will be established to decide if the seed evolves or not. The seed will be able to evolve at the end of its seed phase, where it will be decided randomly. A seed that succesfully evolves will go to growing phase. A seed that dies, will evolve directly to ground (last plant status).
1,739,100,868,000
[]
Feature Request
[ "src/hexagons/plant_states.py:PlantStateManager.__init__", "src/hexagons/plant_states.py:PlantStateManager.update", "src/mesh/hex_mesh.py:HexMesh.__init__", "src/mesh/hex_mesh.py:HexMesh.update" ]
[ "src/hexagons/plant_states.py:PlantStateManager.seed_survival_threshold", "src/hexagons/plant_states.py:PlantStateManager._check_seed_survival", "src/mesh/hex_mesh.py:HexMesh._is_position_valid", "src/mesh/hex_mesh.py:HexMesh._convert_plants_to_ground" ]
4
UXARRAY/uxarray
UXARRAY__uxarray-1117
fe4cae1311db7fb21187b505e06018334a015c48
diff --git a/uxarray/grid/connectivity.py b/uxarray/grid/connectivity.py index 78e936117..54bd1017e 100644 --- a/uxarray/grid/connectivity.py +++ b/uxarray/grid/connectivity.py @@ -146,13 +146,14 @@ def _build_n_nodes_per_face(face_nodes, n_face, n_max_face_nodes): """Constructs ``n_nodes_per_face``, which contains the number of non-fill- value nodes for each face in ``face_node_connectivity``""" - # padding to shape [n_face, n_max_face_nodes + 1] - closed = np.ones((n_face, n_max_face_nodes + 1), dtype=INT_DTYPE) * INT_FILL_VALUE - - closed[:, :-1] = face_nodes.copy() - - n_nodes_per_face = np.argmax(closed == INT_FILL_VALUE, axis=1) - + n_face, n_max_face_nodes = face_nodes.shape + n_nodes_per_face = np.empty(n_face, dtype=INT_DTYPE) + for i in range(n_face): + c = 0 + for j in range(n_max_face_nodes): + if face_nodes[i, j] != INT_FILL_VALUE: + c += 1 + n_nodes_per_face[i] = c return n_nodes_per_face diff --git a/uxarray/grid/coordinates.py b/uxarray/grid/coordinates.py index 45e00ba42..2d78b978a 100644 --- a/uxarray/grid/coordinates.py +++ b/uxarray/grid/coordinates.py @@ -328,23 +328,25 @@ def _construct_face_centroids(node_x, node_y, node_z, face_nodes, n_nodes_per_fa tuple The x, y, and z coordinates of the centroids. 
""" + centroid_x = np.zeros((face_nodes.shape[0]), dtype=np.float64) centroid_y = np.zeros((face_nodes.shape[0]), dtype=np.float64) centroid_z = np.zeros((face_nodes.shape[0]), dtype=np.float64) - n_face = n_nodes_per_face.shape[0] - - for i_face in prange(n_face): - n_max_nodes = n_nodes_per_face[i_face] - x = np.mean(node_x[face_nodes[i_face, 0:n_max_nodes]]) - y = np.mean(node_y[face_nodes[i_face, 0:n_max_nodes]]) - z = np.mean(node_z[face_nodes[i_face, 0:n_max_nodes]]) + for face_idx in prange(face_nodes.shape[0]): + n_max_nodes = n_nodes_per_face[face_idx] + # Compute Cartesian Average + x = np.mean(node_x[face_nodes[face_idx, 0:n_max_nodes]]) + y = np.mean(node_y[face_nodes[face_idx, 0:n_max_nodes]]) + z = np.mean(node_z[face_nodes[face_idx, 0:n_max_nodes]]) + # Normalize coordinates x, y, z = _normalize_xyz_scalar(x, y, z) + # Store coordinates + centroid_x[face_idx] = x + centroid_y[face_idx] = y + centroid_z[face_idx] = z - centroid_x[i_face] = x - centroid_y[i_face] = y - centroid_z[i_face] = z return centroid_x, centroid_y, centroid_z
Optimize Face Centroid Calculations If `Grid.face_lon` does not exist, `_populate_face_centroids()`, actually `_construct_face_centroids()` in it, takes extremely long for large datasets. For instance, the benchmark/profiling below is for a ~4GB SCREAM dataset, around 5 mins: @rajeeja FYI: I'm already working on this and have gotten optimized results, which will be good for "cartesian" parts of the face center calculations, but you may want to look into the `Welzl` parts as well, i.e. `_populate_face_centerpoints()`. <img width="1065" alt="Image" src="https://github.com/user-attachments/assets/9aba545f-0fdb-4a4c-b2be-b8fb9ffe087e" />
1,734,798,627,000
[ "run-benchmark" ]
Performance Issue
[ "uxarray/grid/connectivity.py:_build_n_nodes_per_face", "uxarray/grid/coordinates.py:_construct_face_centroids" ]
[]
2
nautobot/nautobot
nautobot__nautobot-6832
903171e8d6ba05d4239b374dfc965ff7be09041c
diff --git a/changes/6808.changed b/changes/6808.changed new file mode 100644 index 0000000000..a363f1c45c --- /dev/null +++ b/changes/6808.changed @@ -0,0 +1,1 @@ +Improved returned data when syncing a Git repository via the REST API. diff --git a/nautobot/docs/user-guide/platform-functionality/gitrepository.md b/nautobot/docs/user-guide/platform-functionality/gitrepository.md index 2435dbc5a0..3ce939605d 100644 --- a/nautobot/docs/user-guide/platform-functionality/gitrepository.md +++ b/nautobot/docs/user-guide/platform-functionality/gitrepository.md @@ -437,14 +437,50 @@ curl -s -X POST \ -H "Authorization: Token $TOKEN" \ -H "Content-Type: application/json" \ -H "Accept: application/json; version=2.4" \ -http://nautobot/api/extras/git-repositories/2ecb8556-db58-466d-8278-860b8fd74627/sync/ +http://nautobot/api/extras/git-repositories/2ecb8556-db58-466d-8278-860b8fd74627/sync/ | jq '.' ``` Which returns, for example: ```no-highlight { - "message": "Repository demo-git-datasource sync job added to queue." 
+ "message": "Repository demo-git-datasource sync job added to queue.", + "job_result": { + "id": "68500ca5-27c3-488c-a24a-e98858bf52a1", + "object_type": "extras.jobresult", + "display": "Git Repository: Sync started at 2025-01-30 19:35:14.120625+00:00 (PENDING)", + "url": "http://nautobot/api/extras/job-results/68500ca5-27c3-488c-a24a-e98858bf52a1/", + "natural_slug": "68500ca5-27c3-488c-a24a-e98858bf52a1_6850", + "status": { + "value": "PENDING", + "label": "PENDING" + }, + "name": "Git Repository: Sync", + "task_name": null, + "date_created": "2025-01-30T19:35:14.120625Z", + "date_done": null, + "result": null, + "worker": null, + "task_args": [], + "task_kwargs": {}, + "celery_kwargs": {}, + "traceback": null, + "meta": null, + "job_model": { + "id": "ad2b27c8-adf0-4e23-a4d3-a37fe3c42abd", + "object_type": "extras.job", + "url": "http://nautobot/api/extras/jobs/ad2b27c8-adf0-4e23-a4d3-a37fe3c42abd/" + }, + "user": { + "id": "569138fe-f0b9-4abf-9812-c85a7ec73bbd", + "object_type": "users.user", + "url": "http://nautobot/api/users/users/569138fe-f0b9-4abf-9812-c85a7ec73bbd/" + }, + "scheduled_job": null, + "custom_fields": {}, + "computed_fields": {}, + "files": [] + } } ``` @@ -457,7 +493,7 @@ curl -s -X GET \ -H "Authorization: Token $TOKEN" \ -H "Content-Type: application/json" \ -H "Accept: application/json; version=2.4" \ -http://nautobot/api/extras/jobs/?module_name__isw=demo_git_datasource. | jq . +http://nautobot/api/extras/jobs/?module_name__isw=demo_git_datasource. | jq '.' ``` Here are the first 20 lines of JSON returned: @@ -527,7 +563,7 @@ curl -s -X GET \ -H "Authorization: Token $TOKEN" \ -H "Content-Type: application/json" \ -H "Accept: application/json; version=2.4" \ -http://nautobot/api/extras/config-contexts/?owner_object_id=2ecb8556-db58-466d-8278-860b8fd74627 | jq . +http://nautobot/api/extras/config-contexts/?owner_object_id=2ecb8556-db58-466d-8278-860b8fd74627 | jq '.' 
``` Here's the first part of the output: diff --git a/nautobot/extras/api/serializers.py b/nautobot/extras/api/serializers.py index a284604292..96ef7ebd71 100644 --- a/nautobot/extras/api/serializers.py +++ b/nautobot/extras/api/serializers.py @@ -1169,3 +1169,15 @@ def validate(self, attrs): raise serializers.ValidationError(conflicts) return validated_attrs + + +# +# More Git repositories +# + + +class GitRepositorySyncResponseSerializer(serializers.Serializer): + """Serializer representing responses from the GitRepository.sync() POST endpoint.""" + + message = serializers.CharField(read_only=True) + job_result = JobResultSerializer(read_only=True) diff --git a/nautobot/extras/api/views.py b/nautobot/extras/api/views.py index 37e7330429..ec2544e66a 100644 --- a/nautobot/extras/api/views.py +++ b/nautobot/extras/api/views.py @@ -425,7 +425,7 @@ class GitRepositoryViewSet(NautobotModelViewSet): serializer_class = serializers.GitRepositorySerializer filterset_class = filters.GitRepositoryFilterSet - @extend_schema(methods=["post"], request=serializers.GitRepositorySerializer) + @extend_schema(methods=["post"], responses={"200": serializers.GitRepositorySyncResponseSerializer}, request=None) # Since we are explicitly checking for `extras:change_gitrepository` in the API sync() method # We explicitly set the permission_classes to IsAuthenticated in the @action decorator # bypassing the default DRF permission check for `extras:add_gitrepository` and the permission check fall through to the function itself. 
@@ -441,8 +441,16 @@ def sync(self, request, pk): raise CeleryWorkerNotRunningException() repository = get_object_or_404(GitRepository, id=pk) - repository.sync(user=request.user) - return Response({"message": f"Repository {repository} sync job added to queue."}) + job_result = repository.sync(user=request.user) + + data = { + # Kept message for backward compatibility for now + "message": f"Repository {repository} sync job added to queue.", + "job_result": job_result, + } + + serializer = serializers.GitRepositorySyncResponseSerializer(data, context={"request": request}) + return Response(serializer.data, status=status.HTTP_200_OK) #
diff --git a/nautobot/extras/tests/test_api.py b/nautobot/extras/tests/test_api.py index af6efc332e..bb4f8ba759 100644 --- a/nautobot/extras/tests/test_api.py +++ b/nautobot/extras/tests/test_api.py @@ -1207,6 +1207,10 @@ def test_run_git_sync_with_permissions(self, _): url = reverse("extras-api:gitrepository-sync", kwargs={"pk": self.repos[0].id}) response = self.client.post(url, format="json", **self.header) self.assertHttpStatus(response, status.HTTP_200_OK) + self.assertIn("message", response.data) + self.assertIn("job_result", response.data) + self.assertEqual(response.data["message"], f"Repository {self.repos[0].name} sync job added to queue.") + self.assertIsInstance(response.data["job_result"], dict) def test_create_with_app_provided_contents(self): """Test that `provided_contents` published by an App works."""
Improve returned data when syncing a Git repository via the REST API ### As ... Austin - Network Automation Engineer ### I want ... to be able to see more useful return data when syncing a Git repository via the REST API. Today, e.g.: ``` curl -s -X POST \ -H "Authorization: Token $TOKEN" \ -H "Content-Type: application/json" \ -H "Accept: application/json; version=2.4" \ http://nautobot/api/extras/git-repositories/2ecb8556-db58-466d-8278-860b8fd74627/sync/ ``` returns ``` { "message": "Repository demo-git-datasource sync job added to queue." } ``` I would want to see more data in that return data, including, for example: - job-result ID - selected job queue - worker ID ### So that ... I can make better programmatic decisions based on the return data. ### I know this is done when... A more detailed JSON structure is returned, perhaps e.g.: ``` { "message": "Repository demo-git-datasource sync job added to queue.", "job-id": "XXXXXX", "job-queue": "XXXXXX" etc. } ``` ### Optional - Feature groups this request pertains to. - [ ] Automation - [ ] Circuits - [ ] DCIM - [ ] IPAM - [x] Misc (including Data Sources) - [ ] Organization - [x] Apps (and other Extensibility) - [ ] Security (Secrets, etc) - [ ] Image Management - [ ] UI/UX - [ ] Documentation - [ ] Other (not directly a platform feature) ### Database Changes Already exists. ### External Dependencies No Improve returned data when syncing a Git repository via the REST API ### As ... Austin - Network Automation Engineer ### I want ... to be able to see more useful return data when syncing a Git repository via the REST API. Today, e.g.: ``` curl -s -X POST \ -H "Authorization: Token $TOKEN" \ -H "Content-Type: application/json" \ -H "Accept: application/json; version=2.4" \ http://nautobot/api/extras/git-repositories/2ecb8556-db58-466d-8278-860b8fd74627/sync/ ``` returns ``` { "message": "Repository demo-git-datasource sync job added to queue." 
} ``` I would want to see more data in that return data, including, for example: - job-result ID - selected job queue - worker ID ### So that ... I can make better programmatic decisions based on the return data. ### I know this is done when... A more detailed JSON structure is returned, perhaps e.g.: ``` { "message": "Repository demo-git-datasource sync job added to queue.", "job-id": "XXXXXX", "job-queue": "XXXXXX" etc. } ``` ### Optional - Feature groups this request pertains to. - [ ] Automation - [ ] Circuits - [ ] DCIM - [ ] IPAM - [x] Misc (including Data Sources) - [ ] Organization - [x] Apps (and other Extensibility) - [ ] Security (Secrets, etc) - [ ] Image Management - [ ] UI/UX - [ ] Documentation - [ ] Other (not directly a platform feature) ### Database Changes Already exists. ### External Dependencies No
1,738,165,750,000
[]
Feature Request
[ "nautobot/extras/api/views.py:GitRepositoryViewSet.sync" ]
[]
1
gitlabform/gitlabform
gitlabform__gitlabform-929
ad13d613f5b41ac4fea8f44b6fbcca7c67274150
diff --git a/gitlabform/gitlab/projects.py b/gitlabform/gitlab/projects.py index 4cf21b23..649121ba 100644 --- a/gitlabform/gitlab/projects.py +++ b/gitlabform/gitlab/projects.py @@ -126,21 +126,6 @@ def get_project_settings(self, project_and_group_name): except NotFoundException: return dict() - def put_project_settings(self, project_and_group_name, project_settings): - # project_settings has to be like this: - # { - # 'setting1': value1, - # 'setting2': value2, - # } - # ..as documented at: https://docs.gitlab.com/ce/api/projects.html#edit-project - return self._make_requests_to_api( - "projects/%s", - project_and_group_name, - "PUT", - data=None, - json=project_settings, - ) - def get_groups_from_project(self, project_and_group_name): # couldn't find an API call that was giving me directly # the shared groups, so I'm using directly the GET /projects/:id call diff --git a/gitlabform/processors/project/project_settings_processor.py b/gitlabform/processors/project/project_settings_processor.py index 57364dee..9407e05c 100644 --- a/gitlabform/processors/project/project_settings_processor.py +++ b/gitlabform/processors/project/project_settings_processor.py @@ -1,12 +1,50 @@ +from logging import debug +from typing import Callable from gitlabform.gitlab import GitLab -from gitlabform.processors.single_entity_processor import SingleEntityProcessor +from gitlabform.processors.abstract_processor import AbstractProcessor +from gitlabform.processors.util.difference_logger import DifferenceLogger +from gitlab.v4.objects import Project -class ProjectSettingsProcessor(SingleEntityProcessor): + +class ProjectSettingsProcessor(AbstractProcessor): def __init__(self, gitlab: GitLab): - super().__init__( - "project_settings", - gitlab, - get_method_name="get_project_settings", - edit_method_name="put_project_settings", + super().__init__("project_settings", gitlab) + self.get_entity_in_gitlab: Callable = getattr(self, "get_project_settings") + + def _process_configuration(self, 
project_path: str, configuration: dict): + debug("Processing project settings...") + project: Project = self.gl.get_project_by_path_cached(project_path) + + project_settings_in_config = configuration.get("project_settings", {}) + project_settings_in_gitlab = project.asdict() + debug(project_settings_in_gitlab) + debug(f"project_settings BEFORE: ^^^") + + if self._needs_update(project_settings_in_gitlab, project_settings_in_config): + debug("Updating project settings") + for key, value in project_settings_in_config.items(): + debug(f"Updating setting {key} to value {value}") + setattr(project, key, value) + project.save() + + debug(project.asdict()) + debug(f"project_settings AFTER: ^^^") + + else: + debug("No update needed for project settings") + + def get_project_settings(self, project_path: str): + return self.gl.get_project_by_path_cached(project_path).asdict() + + def _print_diff( + self, project_or_project_and_group: str, entity_config, diff_only_changed: bool + ): + entity_in_gitlab = self.get_project_settings(project_or_project_and_group) + + DifferenceLogger.log_diff( + f"{self.configuration_name} changes", + entity_in_gitlab, + entity_config, + only_changed=diff_only_changed, )
Migrate `project_settings` config to use python-gitlab library Goal of this issue is to migrate the `project_settings` configuration section to use [python-gitlab library](https://python-gitlab.readthedocs.io/en/stable/) instead of using homegrown API in this project. High level Tasks: - Update `gitlabform/processors/project/project_settings_processor.py` to use the python-gitlab library - To use python-gitlab use `self.gl` instead of `self.gitlab`. For reference, see some of the other processors that are already using python-gitlab library (i.e. `branches_processor.py`) - Any cleanup necessary from switching to python-gitlab related to this processor
1,737,329,350,000
[]
Feature Request
[ "gitlabform/gitlab/projects.py:GitLabProjects.put_project_settings", "gitlabform/processors/project/project_settings_processor.py:ProjectSettingsProcessor.__init__" ]
[ "gitlabform/processors/project/project_settings_processor.py:ProjectSettingsProcessor._process_configuration", "gitlabform/processors/project/project_settings_processor.py:ProjectSettingsProcessor.get_project_settings", "gitlabform/processors/project/project_settings_processor.py:ProjectSettingsProcessor._print_diff" ]
2
ultralytics/ultralytics
ultralytics__ultralytics-17810
d8c43874ae830a36d2adeac4a44a8ce5697e972c
diff --git a/ultralytics/utils/ops.py b/ultralytics/utils/ops.py index 25e83c61c3a..ac53546ed1b 100644 --- a/ultralytics/utils/ops.py +++ b/ultralytics/utils/ops.py @@ -75,9 +75,8 @@ def segment2box(segment, width=640, height=640): (np.ndarray): the minimum and maximum x and y values of the segment. """ x, y = segment.T # segment xy - inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) - x = x[inside] - y = y[inside] + x = x.clip(0, width) + y = y.clip(0, height) return ( np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype) if any(x)
Training labels not applied properly to training data ### Search before asking - [X] I have searched the Ultralytics YOLO [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar bug report. ### Ultralytics YOLO Component Train ### Bug # Bug Labels are not included in the generated train_batch**X**.jpg images during training of a segmentation model. Code to reproduce at bottom of section including the example training data. ## Likely cause of bug I am not familiar with how the training label images are generated, however I highly suspect the issue is that if there are no points that define the polygon (label) in the image. This is caused when Yolo performs augmentation such as crop, resize, stretch, etc as it can morph the label such that all points defining the label are outside the image. This causes the mask to encompress up to the entire image but still not be included ### I do not know if this affects anything other than segmentation! ### This may actually affect the training data itself and not just the generated image examples, but I am not sure! 
## Examples - All white parts of the images are included in the label, thus if they are unlabelled the bug has occured ![train_batch41](https://github.com/user-attachments/assets/ff8243c4-badb-4ea9-a5c0-64b9c28fbef6) ![train_batch42](https://github.com/user-attachments/assets/17895e1b-a967-4c6d-8a18-39b59962893d) ### Code to reproduce, instuctions in other section [GitIssues.zip](https://github.com/user-attachments/files/17916419/GitIssues.zip) ### Environment ``` Ultralytics 8.3.29 πŸš€ Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (NVIDIA GeForce RTX 4090, 24564MiB) Setup complete βœ… (32 CPUs, 15.5 GB RAM, 23.5/251.0 GB disk) OS Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.35 Environment Linux Python 3.10.12 Install pip RAM 15.47 GB Disk 23.5/251.0 GB CPU 13th Gen Intel Core(TM) i9-13900 CPU count 32 GPU NVIDIA GeForce RTX 4090, 24564MiB GPU count 1 CUDA 12.1 numpy βœ… 2.1.2>=1.23.0 matplotlib βœ… 3.9.2>=3.3.0 opencv-python βœ… 4.10.0.84>=4.6.0 pillow βœ… 10.4.0>=7.1.2 pyyaml βœ… 5.4.1>=5.3.1 requests βœ… 2.32.3>=2.23.0 scipy βœ… 1.14.1>=1.4.1 torch βœ… 2.4.1>=1.8.0 torchvision βœ… 0.19.1>=0.9.0 tqdm βœ… 4.66.5>=4.64.0 psutil βœ… 6.0.0 py-cpuinfo βœ… 9.0.0 pandas βœ… 2.2.3>=1.1.4 seaborn βœ… 0.13.2>=0.11.0 ultralytics-thop βœ… 2.0.11>=2.0.0 numpy βœ… 2.1.2<2.0.0; sys_platform == "darwin" torch βœ… 2.4.1!=2.4.0,>=1.8.0; sys_platform == "win32" ``` ### Minimal Reproducible Example # How to reproduce 1. Download & Extract provided training images, config (.yaml) and test_yolo.py file 2. Edit .yaml file such that the folder path is correct 3. Run test_yolo.py 4. 
Examine the generated train_batch**X**.jpg images to see if the bug occured (You may need to train more than once) ## What to look for - Any part that is white is labelled, so if any white pixels are unlabelled this bug has occured ### Examples ![train_batch0](https://github.com/user-attachments/assets/fe7f5b3f-1b00-4004-beb1-a50b5d5413b0) - In this case the bottom left image is clearly white, but unlabelled ![train_batch2](https://github.com/user-attachments/assets/25cd0a90-8e46-48e8-ba99-0d15cf620719) - Top right image does has white, but it isn't labelled ### Additional _No response_ ### Are you willing to submit a PR? - [ ] Yes I'd like to help by submitting a PR!
πŸ‘‹ Hello @TheOfficialOzone, thank you for bringing this to our attention πŸš€! We understand that you're encountering an issue with labels not being applied correctly during the training of a segmentation model on the Ultralytics repository. For us to assist you effectively, please ensure that you've provided a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) if it's not already included in your report. This will help us understand and address the issue more efficiently. It seems like you've already attached a code example and some test images, which is great! πŸ“„ In the meantime, we suggest ensuring all your dependencies are up-to-date. Upgrade to the latest `ultralytics` package, including all requirements, within a Python >=3.8 environment using PyTorch >=1.8 to see if the issue persists: ```bash pip install -U ultralytics ``` For further tips and tricks regarding custom training, please refer to our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/). Join our Ultralytics community for real-time support or discussions: - Head over to [Discord](https://ultralytics.com/discord) for chat support 🎧 - Visit [Discourse](https://community.ultralytics.com) for deeper discussions - Share experiences or get insightful feedback on our [Subreddit](https://reddit.com/r/ultralytics) Finally, an Ultralytics engineer will review the details of your issue soon and follow up with you for additional help. Thank you for your patience and cooperation! 
## Environments In case you wish to shift your work to a more verified environment, you might consider: - **Notebooks** with free GPU access: <a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"/></a> <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/models/ultralytics/yolo11"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a> For more details on different environments, please refer to the [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/), [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/), or the [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/). We appreciate your engagement with the Ultralytics repository and hope to resolve your issue soon! 🌟 This issue persists after running `pip install -U ultralytics`. The version that was upgraded to was ultralytics 8.3.37. @TheOfficialOzone Thanks for reporting! I'm able to reproduce this with our dataset. I'll look into it!
1,732,632,265,000
[ "enhancement", "segment" ]
Bug Report
[ "ultralytics/utils/ops.py:segment2box" ]
[]
1
Chainlit/chainlit
Chainlit__chainlit-1575
8b2d4bacfd4fa2c8af72e2d140d527d20125b07b
diff --git a/backend/chainlit/config.py b/backend/chainlit/config.py index b90f162f07..18ee6be8db 100644 --- a/backend/chainlit/config.py +++ b/backend/chainlit/config.py @@ -311,6 +311,8 @@ class CodeSettings: @dataclass() class ProjectSettings(DataClassJsonMixin): allow_origins: List[str] = Field(default_factory=lambda: ["*"]) + # Socket.io client transports option + transports: Optional[List[str]] = None enable_telemetry: bool = True # List of environment variables to be provided by each user to use the app. If empty, no environment variables will be asked to the user. user_env: Optional[List[str]] = None diff --git a/backend/chainlit/server.py b/backend/chainlit/server.py index 5118f544a7..7aeabe5329 100644 --- a/backend/chainlit/server.py +++ b/backend/chainlit/server.py @@ -301,7 +301,10 @@ def get_html_template(): <meta property="og:url" content="{url}"> <meta property="og:root_path" content="{ROOT_PATH}">""" - js = f"""<script>{f"window.theme = {json.dumps(config.ui.theme.to_dict())}; " if config.ui.theme else ""}</script>""" + js = f"""<script> +{f"window.theme = {json.dumps(config.ui.theme.to_dict())}; " if config.ui.theme else ""} +{f"window.transports = {json.dumps(config.project.transports)}; " if config.project.transports else "undefined"} +</script>""" css = None if config.ui.custom_css: diff --git a/backend/chainlit/socket.py b/backend/chainlit/socket.py index d79c76c16e..5053262e2f 100644 --- a/backend/chainlit/socket.py +++ b/backend/chainlit/socket.py @@ -1,7 +1,6 @@ import asyncio import json import time -import uuid from typing import Any, Dict, Literal from urllib.parse import unquote @@ -77,24 +76,8 @@ def load_user_env(user_env): return user_env -def build_anon_user_identifier(environ): - scope = environ.get("asgi.scope", {}) - client_ip, _ = scope.get("client") - ip = environ.get("HTTP_X_FORWARDED_FOR", client_ip) - - try: - headers = scope.get("headers", {}) - user_agent = next( - (v.decode("utf-8") for k, v in headers if k.decode("utf-8") 
== "user-agent") - ) - return str(uuid.uuid5(uuid.NAMESPACE_DNS, user_agent + ip)) - - except StopIteration: - return str(uuid.uuid5(uuid.NAMESPACE_DNS, ip)) - - @sio.on("connect") -async def connect(sid, environ): +async def connect(sid, environ, auth): if ( not config.code.on_chat_start and not config.code.on_message @@ -110,8 +93,8 @@ async def connect(sid, environ): try: # Check if the authentication is required if login_required: - authorization_header = environ.get("HTTP_AUTHORIZATION") - token = authorization_header.split(" ")[1] if authorization_header else None + token = auth.get("token") + token = token.split(" ")[1] if token else None user = await get_current_user(token=token) except Exception: logger.info("Authentication failed") @@ -125,16 +108,16 @@ def emit_fn(event, data): def emit_call_fn(event: Literal["ask", "call_fn"], data, timeout): return sio.call(event, data, timeout=timeout, to=sid) - session_id = environ.get("HTTP_X_CHAINLIT_SESSION_ID") + session_id = auth.get("sessionId") if restore_existing_session(sid, session_id, emit_fn, emit_call_fn): return True - user_env_string = environ.get("HTTP_USER_ENV") + user_env_string = auth.get("userEnv") user_env = load_user_env(user_env_string) - client_type = environ.get("HTTP_X_CHAINLIT_CLIENT_TYPE") + client_type = auth.get("clientType") http_referer = environ.get("HTTP_REFERER") - url_encoded_chat_profile = environ.get("HTTP_X_CHAINLIT_CHAT_PROFILE") + url_encoded_chat_profile = auth.get("chatProfile") chat_profile = ( unquote(url_encoded_chat_profile) if url_encoded_chat_profile else None ) @@ -149,7 +132,7 @@ def emit_call_fn(event: Literal["ask", "call_fn"], data, timeout): user=user, token=token, chat_profile=chat_profile, - thread_id=environ.get("HTTP_X_CHAINLIT_THREAD_ID"), + thread_id=auth.get("threadId"), languages=environ.get("HTTP_ACCEPT_LANGUAGE"), http_referer=http_referer, ) @@ -162,13 +145,13 @@ def emit_call_fn(event: Literal["ask", "call_fn"], data, timeout): async def 
connection_successful(sid): context = init_ws_context(sid) - if context.session.restored: - return - await context.emitter.task_end() await context.emitter.clear("clear_ask") await context.emitter.clear("clear_call_fn") + if context.session.restored: + return + if context.session.thread_id_to_resume and config.code.on_chat_resume: thread = await resume_thread(context.session) if thread: @@ -312,17 +295,13 @@ async def message(sid, payload: MessagePayload): async def window_message(sid, data): """Handle a message send by the host window.""" session = WebsocketSession.require(sid) - context = init_ws_context(session) - - await context.emitter.task_start() + init_ws_context(session) if config.code.on_window_message: try: await config.code.on_window_message(data) except asyncio.CancelledError: pass - finally: - await context.emitter.task_end() @sio.on("audio_start") diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index cc80e03ac9..9238ca2519 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -42,6 +42,7 @@ declare global { light?: ThemOverride; dark?: ThemOverride; }; + transports?: string[] } } @@ -99,6 +100,7 @@ function App() { return; } else { connect({ + transports: window.transports, userEnv, accessToken }); diff --git a/libs/copilot/src/chat/index.tsx b/libs/copilot/src/chat/index.tsx index 5f0a0779e7..3cc4bd3289 100644 --- a/libs/copilot/src/chat/index.tsx +++ b/libs/copilot/src/chat/index.tsx @@ -12,6 +12,7 @@ export default function ChatWrapper() { useEffect(() => { if (session?.socket?.connected) return; connect({ + transports: window.transports, userEnv: {}, accessToken: `Bearer ${accessToken}` }); diff --git a/libs/react-client/src/useChatSession.ts b/libs/react-client/src/useChatSession.ts index 441e66d665..b1079179f0 100644 --- a/libs/react-client/src/useChatSession.ts +++ b/libs/react-client/src/useChatSession.ts @@ -78,16 +78,18 @@ const useChatSession = () => { // Use currentThreadId as thread id in websocket header useEffect(() 
=> { if (session?.socket) { - session.socket.io.opts.extraHeaders!['X-Chainlit-Thread-Id'] = + session.socket.auth["threadId"] = currentThreadId || ''; } }, [currentThreadId]); const _connect = useCallback( ({ + transports, userEnv, accessToken }: { + transports?: string[] userEnv: Record<string, string>; accessToken?: string; }) => { @@ -100,16 +102,17 @@ const useChatSession = () => { const socket = io(uri, { path, - extraHeaders: { - Authorization: accessToken || '', - 'X-Chainlit-Client-Type': client.type, - 'X-Chainlit-Session-Id': sessionId, - 'X-Chainlit-Thread-Id': idToResume || '', - 'user-env': JSON.stringify(userEnv), - 'X-Chainlit-Chat-Profile': chatProfile - ? encodeURIComponent(chatProfile) - : '' - } + withCredentials: true, + transports, + auth: { + token: accessToken, + clientType: client.type, + sessionId, + threadId: idToResume || '', + userEnv: JSON.stringify(userEnv), + chatProfile: chatProfile ? encodeURIComponent(chatProfile) : '' + } + }); setSession((old) => { old?.socket?.removeAllListeners();
diff --git a/cypress/e2e/copilot/.chainlit/config.toml b/cypress/e2e/copilot/.chainlit/config.toml index e2a93af08f..9c42755715 100644 --- a/cypress/e2e/copilot/.chainlit/config.toml +++ b/cypress/e2e/copilot/.chainlit/config.toml @@ -13,7 +13,7 @@ session_timeout = 3600 cache = false # Authorized origins -allow_origins = ["*"] +allow_origins = ["http://127.0.0.1:8000"] # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317) # follow_symlink = false
Security: allowed origins should not be * by default CORS headers should be restricted to the current domain at least, by default.
@dosu Where do we have to look in the settings/code to set this to a sensible/safe default value? <!-- Answer --> To set the allowed origins for CORS headers to a sensible/safe default value, you need to look at the `allow_origins` setting in the `config.toml` file. ```toml # Authorized origins allow_origins = ["*"] ``` You should replace `["*"]` with a list of specific origins that you want to allow. For example: ```toml # Authorized origins allow_origins = ["https://example.com", "https://anotherdomain.com"] ``` This setting can be found in multiple `config.toml` files within the repository, such as: - `cypress/e2e/copilot/.chainlit/config.toml` <sup>[[1]](https://github.com/Chainlit/chainlit/blob/main/cypress/e2e/copilot/.chainlit/config.toml)</sup> - `cypress/e2e/tasklist/.chainlit/config.toml` <sup>[[2]](https://github.com/Chainlit/chainlit/blob/main/cypress/e2e/tasklist/.chainlit/config.toml)</sup> - `cypress/e2e/elements/.chainlit/config.toml` <sup>[[3]](https://github.com/Chainlit/chainlit/blob/main/cypress/e2e/elements/.chainlit/config.toml)</sup> - `cypress/e2e/custom_build/.chainlit/config.toml` <sup>[[4]](https://github.com/Chainlit/chainlit/blob/main/cypress/e2e/custom_build/.chainlit/config.toml)</sup> - `copilot/.chainlit/config.toml` in the Chainlit's cookbook repo <sup>[[5]](https://github.com/Chainlit/cookbook/blob/main/copilot/.chainlit/config.toml)</sup>. Additionally, the `allow_origins` setting can be overridden by the `CHAINLIT_ALLOW_ORIGINS` environment variable <sup>[[6]](https://github.com/Chainlit/chainlit/blob/main/backend/chainlit/config.py)</sup>. <!-- Dosu Comment Footer --> *To continue talking to [Dosu](https://dosu.dev), mention @dosu.* @dosu Maybe we could make the default in the settings file 127.0.0.1 and localhost, then document users to change it for production to whatever website/domain they use? 
<!-- Answer --> Yes, you can set the default allowed origins in the settings file to `127.0.0.1` and `localhost`, and then document for users to change it for production to their specific domains. You can modify the `config.toml` file as follows: ```toml [project] # Authorized origins allow_origins = ["127.0.0.1", "localhost"] ``` This change can be made in the `config.toml` files located in various directories, such as: - `cypress/e2e/copilot/.chainlit/config.toml` - `cypress/e2e/tasklist/.chainlit/config.toml` - `cypress/e2e/elements/.chainlit/config.toml` - `cypress/e2e/custom_build/.chainlit/config.toml` - `copilot/.chainlit/config.toml` in the Chainlit's cookbook repo <sup>[[1]](https://github.com/Chainlit/chainlit/blob/main/backend/chainlit/config.py)</sup><sup>[[2]](https://github.com/Chainlit/cookbook/blob/main/copilot/.chainlit/config.toml)</sup>. Additionally, you should document this change and instruct users to update the `allow_origins` setting for their production environments to their specific domains. <!-- Dosu Comment Footer --> *To continue talking to [Dosu](https://dosu.dev), mention @dosu.*
1,733,733,602,000
[ "size:M" ]
Security Vulnerability
[ "backend/chainlit/server.py:get_html_template", "backend/chainlit/socket.py:build_anon_user_identifier", "backend/chainlit/socket.py:connect", "backend/chainlit/socket.py:connection_successful", "backend/chainlit/socket.py:window_message" ]
[]
5
strawberry-graphql/strawberry
strawberry-graphql__strawberry-3720
882e98940a7b5df01ea50180af27085bd1e443f5
diff --git a/.alexrc b/.alexrc index ea3756ce5a..0d9c4005d9 100644 --- a/.alexrc +++ b/.alexrc @@ -9,6 +9,7 @@ "executed", "executes", "execution", + "reject", "special", "primitive", "invalid", diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000..2a0c1a0554 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,32 @@ +Release type: minor + +This release adds a new `on_ws_connect` method to all HTTP view integrations. +The method is called when a `graphql-transport-ws` or `graphql-ws` connection is +established and can be used to customize the connection acknowledgment behavior. + +This is particularly useful for authentication, authorization, and sending a +custom acknowledgment payload to clients when a connection is accepted. For +example: + +```python +class MyGraphQLView(GraphQLView): + async def on_ws_connect(self, context: Dict[str, object]): + connection_params = context["connection_params"] + + if not isinstance(connection_params, dict): + # Reject without a custom graphql-ws error payload + raise ConnectionRejectionError() + + if connection_params.get("password") != "secret: + # Reject with a custom graphql-ws error payload + raise ConnectionRejectionError({"reason": "Invalid password"}) + + if username := connection_params.get("username"): + # Accept with a custom acknowledgement payload + return {"message": f"Hello, {username}!"} + + # Accept without a acknowledgement payload + return await super().on_ws_connect(context) +``` + +Take a look at our documentation to learn more. 
diff --git a/docs/integrations/aiohttp.md b/docs/integrations/aiohttp.md index 64655d5fbf..2c7be1e725 100644 --- a/docs/integrations/aiohttp.md +++ b/docs/integrations/aiohttp.md @@ -53,6 +53,7 @@ methods: - `def decode_json(self, data: Union[str, bytes]) -> object` - `def encode_json(self, data: object) -> str` - `async def render_graphql_ide(self, request: Request) -> Response` +- `async def on_ws_connect(self, context: Context) -> Union[UnsetType, None, Dict[str, object]]` ### get_context @@ -199,3 +200,47 @@ class MyGraphQLView(GraphQLView): return Response(text=custom_html, content_type="text/html") ``` + +### on_ws_connect + +By overriding `on_ws_connect` you can customize the behavior when a `graphql-ws` +or `graphql-transport-ws` connection is established. This is particularly useful +for authentication and authorization. By default, all connections are accepted. + +To manually accept a connection, return `strawberry.UNSET` or a connection +acknowledgment payload. The acknowledgment payload will be sent to the client. + +Note that the legacy protocol does not support `None`/`null` acknowledgment +payloads, while the new protocol does. Our implementation will treat +`None`/`null` payloads the same as `strawberry.UNSET` in the context of the +legacy protocol. + +To reject a connection, raise a `ConnectionRejectionError`. You can optionally +provide a custom error payload that will be sent to the client when the legacy +GraphQL over WebSocket protocol is used. 
+ +```python +from typing import Dict +from strawberry.exceptions import ConnectionRejectionError +from strawberry.aiohttp.views import GraphQLView + + +class MyGraphQLView(GraphQLView): + async def on_ws_connect(self, context: Dict[str, object]): + connection_params = context["connection_params"] + + if not isinstance(connection_params, dict): + # Reject without a custom graphql-ws error payload + raise ConnectionRejectionError() + + if connection_params.get("password") != "secret": + # Reject with a custom graphql-ws error payload + raise ConnectionRejectionError({"reason": "Invalid password"}) + + if username := connection_params.get("username"): + # Accept with a custom acknowledgment payload + return {"message": f"Hello, {username}!"} + + # Accept without a acknowledgment payload + return await super().on_ws_connect(context) +``` diff --git a/docs/integrations/asgi.md b/docs/integrations/asgi.md index 3cad706772..81b90b393c 100644 --- a/docs/integrations/asgi.md +++ b/docs/integrations/asgi.md @@ -53,6 +53,7 @@ methods: - `def decode_json(self, data: Union[str, bytes]) -> object` - `def encode_json(self, data: object) -> str` - `async def render_graphql_ide(self, request: Request) -> Response` +- `async def on_ws_connect(self, context: Context) -> Union[UnsetType, None, Dict[str, object]]` ### get_context @@ -241,3 +242,47 @@ class MyGraphQL(GraphQL): return HTMLResponse(custom_html) ``` + +### on_ws_connect + +By overriding `on_ws_connect` you can customize the behavior when a `graphql-ws` +or `graphql-transport-ws` connection is established. This is particularly useful +for authentication and authorization. By default, all connections are accepted. + +To manually accept a connection, return `strawberry.UNSET` or a connection +acknowledgment payload. The acknowledgment payload will be sent to the client. + +Note that the legacy protocol does not support `None`/`null` acknowledgment +payloads, while the new protocol does. 
Our implementation will treat +`None`/`null` payloads the same as `strawberry.UNSET` in the context of the +legacy protocol. + +To reject a connection, raise a `ConnectionRejectionError`. You can optionally +provide a custom error payload that will be sent to the client when the legacy +GraphQL over WebSocket protocol is used. + +```python +from typing import Dict +from strawberry.exceptions import ConnectionRejectionError +from strawberry.asgi import GraphQL + + +class MyGraphQL(GraphQL): + async def on_ws_connect(self, context: Dict[str, object]): + connection_params = context["connection_params"] + + if not isinstance(connection_params, dict): + # Reject without a custom graphql-ws error payload + raise ConnectionRejectionError() + + if connection_params.get("password") != "secret": + # Reject with a custom graphql-ws error payload + raise ConnectionRejectionError({"reason": "Invalid password"}) + + if username := connection_params.get("username"): + # Accept with a custom acknowledgment payload + return {"message": f"Hello, {username}!"} + + # Accept without a acknowledgment payload + return await super().on_ws_connect(context) +``` diff --git a/docs/integrations/channels.md b/docs/integrations/channels.md index 86c3041d10..6921fa7a5f 100644 --- a/docs/integrations/channels.md +++ b/docs/integrations/channels.md @@ -592,6 +592,51 @@ following methods: - `async def get_root_value(self, request: GraphQLWSConsumer) -> Optional[RootValue]` - `def decode_json(self, data: Union[str, bytes]) -> object` - `def encode_json(self, data: object) -> str` +- `async def on_ws_connect(self, context: Context) -> Union[UnsetType, None, Dict[str, object]]` + +### on_ws_connect + +By overriding `on_ws_connect` you can customize the behavior when a `graphql-ws` +or `graphql-transport-ws` connection is established. This is particularly useful +for authentication and authorization. By default, all connections are accepted. 
+ +To manually accept a connection, return `strawberry.UNSET` or a connection +acknowledgment payload. The acknowledgment payload will be sent to the client. + +Note that the legacy protocol does not support `None`/`null` acknowledgment +payloads, while the new protocol does. Our implementation will treat +`None`/`null` payloads the same as `strawberry.UNSET` in the context of the +legacy protocol. + +To reject a connection, raise a `ConnectionRejectionError`. You can optionally +provide a custom error payload that will be sent to the client when the legacy +GraphQL over WebSocket protocol is used. + +```python +from typing import Dict +from strawberry.exceptions import ConnectionRejectionError +from strawberry.channels import GraphQLWSConsumer + + +class MyGraphQLWSConsumer(GraphQLWSConsumer): + async def on_ws_connect(self, context: Dict[str, object]): + connection_params = context["connection_params"] + + if not isinstance(connection_params, dict): + # Reject without a custom graphql-ws error payload + raise ConnectionRejectionError() + + if connection_params.get("password") != "secret": + # Reject with a custom graphql-ws error payload + raise ConnectionRejectionError({"reason": "Invalid password"}) + + if username := connection_params.get("username"): + # Accept with a custom acknowledgment payload + return {"message": f"Hello, {username}!"} + + # Accept without a acknowledgment payload + return await super().on_ws_connect(context) +``` ### Context diff --git a/docs/integrations/fastapi.md b/docs/integrations/fastapi.md index 1095dfca9f..6fbb7dbc2a 100644 --- a/docs/integrations/fastapi.md +++ b/docs/integrations/fastapi.md @@ -268,6 +268,7 @@ following methods: - `def decode_json(self, data: Union[str, bytes]) -> object` - `def encode_json(self, data: object) -> str` - `async def render_graphql_ide(self, request: Request) -> HTMLResponse` +- `async def on_ws_connect(self, context: Context) -> Union[UnsetType, None, Dict[str, object]]` ### process_result @@ 
-354,3 +355,47 @@ class MyGraphQLRouter(GraphQLRouter): return HTMLResponse(custom_html) ``` + +### on_ws_connect + +By overriding `on_ws_connect` you can customize the behavior when a `graphql-ws` +or `graphql-transport-ws` connection is established. This is particularly useful +for authentication and authorization. By default, all connections are accepted. + +To manually accept a connection, return `strawberry.UNSET` or a connection +acknowledgment payload. The acknowledgment payload will be sent to the client. + +Note that the legacy protocol does not support `None`/`null` acknowledgment +payloads, while the new protocol does. Our implementation will treat +`None`/`null` payloads the same as `strawberry.UNSET` in the context of the +legacy protocol. + +To reject a connection, raise a `ConnectionRejectionError`. You can optionally +provide a custom error payload that will be sent to the client when the legacy +GraphQL over WebSocket protocol is used. + +```python +from typing import Dict +from strawberry.exceptions import ConnectionRejectionError +from strawberry.fastapi import GraphQLRouter + + +class MyGraphQLRouter(GraphQLRouter): + async def on_ws_connect(self, context: Dict[str, object]): + connection_params = context["connection_params"] + + if not isinstance(connection_params, dict): + # Reject without a custom graphql-ws error payload + raise ConnectionRejectionError() + + if connection_params.get("password") != "secret": + # Reject with a custom graphql-ws error payload + raise ConnectionRejectionError({"reason": "Invalid password"}) + + if username := connection_params.get("username"): + # Accept with a custom acknowledgment payload + return {"message": f"Hello, {username}!"} + + # Accept without a acknowledgment payload + return await super().on_ws_connect(context) +``` diff --git a/strawberry/exceptions/__init__.py b/strawberry/exceptions/__init__.py index ee331af721..d492c7e9bb 100644 --- a/strawberry/exceptions/__init__.py +++ 
b/strawberry/exceptions/__init__.py @@ -1,7 +1,7 @@ from __future__ import annotations from functools import cached_property -from typing import TYPE_CHECKING, Optional, Set, Union +from typing import TYPE_CHECKING, Dict, Optional, Set, Union from graphql import GraphQLError @@ -157,6 +157,13 @@ class StrawberryGraphQLError(GraphQLError): """Use it when you want to override the graphql.GraphQLError in custom extensions.""" +class ConnectionRejectionError(Exception): + """Use it when you want to reject a WebSocket connection.""" + + def __init__(self, payload: Dict[str, object] = {}) -> None: + self.payload = payload + + __all__ = [ "StrawberryException", "UnableToFindExceptionSource", diff --git a/strawberry/http/async_base_view.py b/strawberry/http/async_base_view.py index 57a307b2e8..3363851509 100644 --- a/strawberry/http/async_base_view.py +++ b/strawberry/http/async_base_view.py @@ -21,7 +21,6 @@ from graphql import GraphQLError -from strawberry import UNSET from strawberry.exceptions import MissingQueryError from strawberry.file_uploads.utils import replace_placeholders_with_files from strawberry.http import ( @@ -39,6 +38,7 @@ from strawberry.subscriptions.protocols.graphql_ws.handlers import BaseGraphQLWSHandler from strawberry.types import ExecutionResult, SubscriptionExecutionResult from strawberry.types.graphql import OperationType +from strawberry.types.unset import UNSET, UnsetType from .base import BaseView from .exceptions import HTTPException @@ -279,6 +279,7 @@ async def run( if websocket_subprotocol == GRAPHQL_TRANSPORT_WS_PROTOCOL: await self.graphql_transport_ws_handler_class( + view=self, websocket=websocket, context=context, root_value=root_value, @@ -288,6 +289,7 @@ async def run( ).handle() elif websocket_subprotocol == GRAPHQL_WS_PROTOCOL: await self.graphql_ws_handler_class( + view=self, websocket=websocket, context=context, root_value=root_value, @@ -476,5 +478,10 @@ async def process_result( ) -> GraphQLHTTPResponse: return 
process_result(result) + async def on_ws_connect( + self, context: Context + ) -> Union[UnsetType, None, Dict[str, object]]: + return UNSET + __all__ = ["AsyncBaseHTTPView"] diff --git a/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py b/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py index cdda39595a..647ab7ab3c 100644 --- a/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py +++ b/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py @@ -14,6 +14,7 @@ from graphql import GraphQLError, GraphQLSyntaxError, parse +from strawberry.exceptions import ConnectionRejectionError from strawberry.http.exceptions import ( NonJsonMessageReceived, NonTextMessageReceived, @@ -31,13 +32,14 @@ from strawberry.types import ExecutionResult from strawberry.types.execution import PreExecutionError from strawberry.types.graphql import OperationType +from strawberry.types.unset import UnsetType from strawberry.utils.debug import pretty_print_graphql_operation from strawberry.utils.operation import get_operation_type if TYPE_CHECKING: from datetime import timedelta - from strawberry.http.async_base_view import AsyncWebSocketAdapter + from strawberry.http.async_base_view import AsyncBaseHTTPView, AsyncWebSocketAdapter from strawberry.schema import BaseSchema from strawberry.schema.subscribe import SubscriptionResult @@ -47,6 +49,7 @@ class BaseGraphQLTransportWSHandler: def __init__( self, + view: AsyncBaseHTTPView, websocket: AsyncWebSocketAdapter, context: object, root_value: object, @@ -54,6 +57,7 @@ def __init__( debug: bool, connection_init_wait_timeout: timedelta, ) -> None: + self.view = view self.websocket = websocket self.context = context self.root_value = root_value @@ -66,7 +70,6 @@ def __init__( self.connection_timed_out = False self.operations: Dict[str, Operation] = {} self.completed_tasks: List[asyncio.Task] = [] - self.connection_params: Optional[Dict[str, object]] = None async def handle(self) -> None: 
self.on_request_accepted() @@ -169,15 +172,31 @@ async def handle_connection_init(self, message: ConnectionInitMessage) -> None: ) return - self.connection_params = payload - if self.connection_init_received: reason = "Too many initialisation requests" await self.websocket.close(code=4429, reason=reason) return self.connection_init_received = True - await self.send_message({"type": "connection_ack"}) + + if isinstance(self.context, dict): + self.context["connection_params"] = payload + elif hasattr(self.context, "connection_params"): + self.context.connection_params = payload + + try: + connection_ack_payload = await self.view.on_ws_connect(self.context) + except ConnectionRejectionError: + await self.websocket.close(code=4403, reason="Forbidden") + return + + if isinstance(connection_ack_payload, UnsetType): + await self.send_message({"type": "connection_ack"}) + else: + await self.send_message( + {"type": "connection_ack", "payload": connection_ack_payload} + ) + self.connection_acknowledged = True async def handle_ping(self, message: PingMessage) -> None: @@ -219,11 +238,6 @@ async def handle_subscribe(self, message: SubscribeMessage) -> None: message["payload"].get("variables"), ) - if isinstance(self.context, dict): - self.context["connection_params"] = self.connection_params - elif hasattr(self.context, "connection_params"): - self.context.connection_params = self.connection_params - operation = Operation( self, message["id"], diff --git a/strawberry/subscriptions/protocols/graphql_ws/handlers.py b/strawberry/subscriptions/protocols/graphql_ws/handlers.py index 03cf11b71f..95536bc4fd 100644 --- a/strawberry/subscriptions/protocols/graphql_ws/handlers.py +++ b/strawberry/subscriptions/protocols/graphql_ws/handlers.py @@ -10,6 +10,7 @@ cast, ) +from strawberry.exceptions import ConnectionRejectionError from strawberry.http.exceptions import NonTextMessageReceived, WebSocketDisconnected from strawberry.subscriptions.protocols.graphql_ws.types import ( 
ConnectionInitMessage, @@ -20,16 +21,18 @@ StopMessage, ) from strawberry.types.execution import ExecutionResult, PreExecutionError +from strawberry.types.unset import UnsetType from strawberry.utils.debug import pretty_print_graphql_operation if TYPE_CHECKING: - from strawberry.http.async_base_view import AsyncWebSocketAdapter + from strawberry.http.async_base_view import AsyncBaseHTTPView, AsyncWebSocketAdapter from strawberry.schema import BaseSchema class BaseGraphQLWSHandler: def __init__( self, + view: AsyncBaseHTTPView, websocket: AsyncWebSocketAdapter, context: object, root_value: object, @@ -38,6 +41,7 @@ def __init__( keep_alive: bool, keep_alive_interval: Optional[float], ) -> None: + self.view = view self.websocket = websocket self.context = context self.root_value = root_value @@ -48,7 +52,6 @@ def __init__( self.keep_alive_task: Optional[asyncio.Task] = None self.subscriptions: Dict[str, AsyncGenerator] = {} self.tasks: Dict[str, asyncio.Task] = {} - self.connection_params: Optional[Dict[str, object]] = None async def handle(self) -> None: try: @@ -92,9 +95,27 @@ async def handle_connection_init(self, message: ConnectionInitMessage) -> None: await self.websocket.close(code=1000, reason="") return - self.connection_params = payload + if isinstance(self.context, dict): + self.context["connection_params"] = payload + elif hasattr(self.context, "connection_params"): + self.context.connection_params = payload - await self.send_message({"type": "connection_ack"}) + try: + connection_ack_payload = await self.view.on_ws_connect(self.context) + except ConnectionRejectionError as e: + await self.send_message({"type": "connection_error", "payload": e.payload}) + await self.websocket.close(code=1011, reason="") + return + + if ( + isinstance(connection_ack_payload, UnsetType) + or connection_ack_payload is None + ): + await self.send_message({"type": "connection_ack"}) + else: + await self.send_message( + {"type": "connection_ack", "payload": 
connection_ack_payload} + ) if self.keep_alive: keep_alive_handler = self.handle_keep_alive() @@ -112,11 +133,6 @@ async def handle_start(self, message: StartMessage) -> None: operation_name = payload.get("operationName") variables = payload.get("variables") - if isinstance(self.context, dict): - self.context["connection_params"] = self.connection_params - elif hasattr(self.context, "connection_params"): - self.context.connection_params = self.connection_params - if self.debug: pretty_print_graphql_operation(operation_name, query, variables) diff --git a/strawberry/subscriptions/protocols/graphql_ws/types.py b/strawberry/subscriptions/protocols/graphql_ws/types.py index 56aa81ab1b..d29a6209fb 100644 --- a/strawberry/subscriptions/protocols/graphql_ws/types.py +++ b/strawberry/subscriptions/protocols/graphql_ws/types.py @@ -37,6 +37,7 @@ class ConnectionErrorMessage(TypedDict): class ConnectionAckMessage(TypedDict): type: Literal["connection_ack"] + payload: NotRequired[Dict[str, object]] class DataMessagePayload(TypedDict):
diff --git a/docs/integrations/litestar.md b/docs/integrations/litestar.md index 002fb5cbee..77af89472a 100644 --- a/docs/integrations/litestar.md +++ b/docs/integrations/litestar.md @@ -327,6 +327,7 @@ extended by overriding any of the following methods: - `def decode_json(self, data: Union[str, bytes]) -> object` - `def encode_json(self, data: object) -> str` - `async def render_graphql_ide(self, request: Request) -> Response` +- `async def on_ws_connect(self, context: Context) -> Union[UnsetType, None, Dict[str, object]]` ### process_result @@ -476,3 +477,63 @@ class MyGraphQLController(GraphQLController): return Response(custom_html, media_type=MediaType.HTML) ``` + +### on_ws_connect + +By overriding `on_ws_connect` you can customize the behavior when a `graphql-ws` +or `graphql-transport-ws` connection is established. This is particularly useful +for authentication and authorization. By default, all connections are accepted. + +To manually accept a connection, return `strawberry.UNSET` or a connection +acknowledgment payload. The acknowledgment payload will be sent to the client. + +Note that the legacy protocol does not support `None`/`null` acknowledgment +payloads, while the new protocol does. Our implementation will treat +`None`/`null` payloads the same as `strawberry.UNSET` in the context of the +legacy protocol. + +To reject a connection, raise a `ConnectionRejectionError`. You can optionally +provide a custom error payload that will be sent to the client when the legacy +GraphQL over WebSocket protocol is used. 
+ +```python +import strawberry +from typing import Dict +from strawberry.exceptions import ConnectionRejectionError +from strawberry.litestar import make_graphql_controller + + +@strawberry.type +class Query: + @strawberry.field + def hello(self) -> str: + return "world" + + +schema = strawberry.Schema(Query) + +GraphQLController = make_graphql_controller( + schema, + path="/graphql", +) + + +class MyGraphQLController(GraphQLController): + async def on_ws_connect(self, context: Dict[str, object]): + connection_params = context["connection_params"] + + if not isinstance(connection_params, dict): + # Reject without a custom graphql-ws error payload + raise ConnectionRejectionError() + + if connection_params.get("password") != "secret": + # Reject with a custom graphql-ws error payload + raise ConnectionRejectionError({"reason": "Invalid password"}) + + if username := connection_params.get("username"): + # Accept with a custom acknowledgment payload + return {"message": f"Hello, {username}!"} + + # Accept without a acknowledgment payload + return await super().on_ws_connect(context) +``` diff --git a/tests/channels/test_testing.py b/tests/channels/test_testing.py index 99aa9dd6c8..f45f535214 100644 --- a/tests/channels/test_testing.py +++ b/tests/channels/test_testing.py @@ -52,4 +52,4 @@ async def test_graphql_error(communicator): async def test_simple_connection_params(communicator): async for res in communicator.subscribe(query="subscription { connectionParams }"): - assert res.data["connectionParams"] == "Hi" + assert res.data["connectionParams"]["strawberry"] == "Hi" diff --git a/tests/http/clients/aiohttp.py b/tests/http/clients/aiohttp.py index 89b0c718e8..12cb035937 100644 --- a/tests/http/clients/aiohttp.py +++ b/tests/http/clients/aiohttp.py @@ -15,6 +15,7 @@ from strawberry.http.ides import GraphQL_IDE from strawberry.types import ExecutionResult from tests.views.schema import Query, schema +from tests.websockets.views import OnWSConnectMixin from 
..context import get_context from .base import ( @@ -29,14 +30,14 @@ ) -class GraphQLView(BaseGraphQLView): +class GraphQLView(OnWSConnectMixin, BaseGraphQLView[Dict[str, object], object]): result_override: ResultOverrideFunction = None graphql_transport_ws_handler_class = DebuggableGraphQLTransportWSHandler graphql_ws_handler_class = DebuggableGraphQLWSHandler async def get_context( self, request: web.Request, response: web.StreamResponse - ) -> object: + ) -> Dict[str, object]: context = await super().get_context(request, response) return get_context(context) diff --git a/tests/http/clients/asgi.py b/tests/http/clients/asgi.py index 7910e02f73..7d9b86ea8e 100644 --- a/tests/http/clients/asgi.py +++ b/tests/http/clients/asgi.py @@ -16,6 +16,7 @@ from strawberry.http.ides import GraphQL_IDE from strawberry.types import ExecutionResult from tests.views.schema import Query, schema +from tests.websockets.views import OnWSConnectMixin from ..context import get_context from .base import ( @@ -30,7 +31,7 @@ ) -class GraphQLView(BaseGraphQLView): +class GraphQLView(OnWSConnectMixin, BaseGraphQLView[Dict[str, object], object]): result_override: ResultOverrideFunction = None graphql_transport_ws_handler_class = DebuggableGraphQLTransportWSHandler graphql_ws_handler_class = DebuggableGraphQLWSHandler @@ -41,8 +42,8 @@ async def get_root_value(self, request: Union[WebSocket, Request]) -> Query: async def get_context( self, request: Union[Request, WebSocket], - response: Optional[StarletteResponse] = None, - ) -> object: + response: Union[StarletteResponse, WebSocket], + ) -> Dict[str, object]: context = await super().get_context(request, response) return get_context(context) diff --git a/tests/http/clients/channels.py b/tests/http/clients/channels.py index bde2364128..802fc263cc 100644 --- a/tests/http/clients/channels.py +++ b/tests/http/clients/channels.py @@ -19,6 +19,7 @@ from strawberry.http.ides import GraphQL_IDE from strawberry.http.typevars import Context, RootValue 
from tests.views.schema import Query, schema +from tests.websockets.views import OnWSConnectMixin from ..context import get_context from .base import ( @@ -113,7 +114,7 @@ def process_result( return super().process_result(request, result) -class DebuggableGraphQLWSConsumer(GraphQLWSConsumer): +class DebuggableGraphQLWSConsumer(OnWSConnectMixin, GraphQLWSConsumer): graphql_transport_ws_handler_class = DebuggableGraphQLTransportWSHandler graphql_ws_handler_class = DebuggableGraphQLWSHandler diff --git a/tests/http/clients/fastapi.py b/tests/http/clients/fastapi.py index b1b80625fa..70eded4049 100644 --- a/tests/http/clients/fastapi.py +++ b/tests/http/clients/fastapi.py @@ -15,6 +15,7 @@ from strawberry.http.ides import GraphQL_IDE from strawberry.types import ExecutionResult from tests.views.schema import Query, schema +from tests.websockets.views import OnWSConnectMixin from ..context import get_context from .asgi import AsgiWebSocketClient @@ -54,7 +55,7 @@ async def get_root_value( return Query() -class GraphQLRouter(BaseGraphQLRouter[Any, Any]): +class GraphQLRouter(OnWSConnectMixin, BaseGraphQLRouter[Any, Any]): result_override: ResultOverrideFunction = None graphql_transport_ws_handler_class = DebuggableGraphQLTransportWSHandler graphql_ws_handler_class = DebuggableGraphQLWSHandler diff --git a/tests/http/clients/litestar.py b/tests/http/clients/litestar.py index 2548dc563c..dc948e9868 100644 --- a/tests/http/clients/litestar.py +++ b/tests/http/clients/litestar.py @@ -15,6 +15,7 @@ from strawberry.litestar import make_graphql_controller from strawberry.types import ExecutionResult from tests.views.schema import Query, schema +from tests.websockets.views import OnWSConnectMixin from ..context import get_context from .base import ( @@ -67,7 +68,7 @@ def create_app(self, result_override: ResultOverrideFunction = None, **kwargs: A **kwargs, ) - class GraphQLController(BaseGraphQLController): + class GraphQLController(OnWSConnectMixin, BaseGraphQLController): 
graphql_transport_ws_handler_class = DebuggableGraphQLTransportWSHandler graphql_ws_handler_class = DebuggableGraphQLWSHandler diff --git a/tests/views/schema.py b/tests/views/schema.py index ab959fbe01..cb5d9959ef 100644 --- a/tests/views/schema.py +++ b/tests/views/schema.py @@ -248,8 +248,8 @@ async def listener_with_confirmation( @strawberry.subscription async def connection_params( self, info: strawberry.Info - ) -> AsyncGenerator[str, None]: - yield info.context["connection_params"]["strawberry"] + ) -> AsyncGenerator[strawberry.scalars.JSON, None]: + yield info.context["connection_params"] @strawberry.subscription async def long_finalizer( diff --git a/tests/websockets/test_graphql_transport_ws.py b/tests/websockets/test_graphql_transport_ws.py index 09e271f681..3a7b5849a3 100644 --- a/tests/websockets/test_graphql_transport_ws.py +++ b/tests/websockets/test_graphql_transport_ws.py @@ -247,6 +247,94 @@ async def test_too_many_initialisation_requests(ws: WebSocketClient): assert ws.close_reason == "Too many initialisation requests" +async def test_connections_are_accepted_by_default(ws_raw: WebSocketClient): + await ws_raw.send_message({"type": "connection_init"}) + connection_ack_message: ConnectionAckMessage = await ws_raw.receive_json() + assert connection_ack_message == {"type": "connection_ack"} + + await ws_raw.close() + assert ws_raw.closed + + +@pytest.mark.parametrize("payload", [None, {"token": "secret"}]) +async def test_setting_a_connection_ack_payload(ws_raw: WebSocketClient, payload): + await ws_raw.send_message( + { + "type": "connection_init", + "payload": {"test-accept": True, "ack-payload": payload}, + } + ) + + connection_ack_message: ConnectionAckMessage = await ws_raw.receive_json() + assert connection_ack_message == {"type": "connection_ack", "payload": payload} + + await ws_raw.close() + assert ws_raw.closed + + +async def test_connection_ack_payload_may_be_unset(ws_raw: WebSocketClient): + await ws_raw.send_message( + { + "type": 
"connection_init", + "payload": {"test-accept": True}, + } + ) + + connection_ack_message: ConnectionAckMessage = await ws_raw.receive_json() + assert connection_ack_message == {"type": "connection_ack"} + + await ws_raw.close() + assert ws_raw.closed + + +async def test_rejecting_connection_closes_socket_with_expected_code_and_message( + ws_raw: WebSocketClient, +): + await ws_raw.send_message( + {"type": "connection_init", "payload": {"test-reject": True}} + ) + + await ws_raw.receive(timeout=2) + assert ws_raw.closed + assert ws_raw.close_code == 4403 + assert ws_raw.close_reason == "Forbidden" + + +async def test_context_can_be_modified_from_within_on_ws_connect( + ws_raw: WebSocketClient, +): + await ws_raw.send_message( + { + "type": "connection_init", + "payload": {"test-modify": True}, + } + ) + + connection_ack_message: ConnectionAckMessage = await ws_raw.receive_json() + assert connection_ack_message == {"type": "connection_ack"} + + await ws_raw.send_message( + { + "type": "subscribe", + "id": "demo", + "payload": { + "query": "subscription { connectionParams }", + }, + } + ) + + next_message: NextMessage = await ws_raw.receive_json() + assert next_message["type"] == "next" + assert next_message["id"] == "demo" + assert "data" in next_message["payload"] + assert next_message["payload"]["data"] == { + "connectionParams": {"test-modify": True, "modified": True} + } + + await ws_raw.close() + assert ws_raw.closed + + async def test_ping_pong(ws: WebSocketClient): await ws.send_message({"type": "ping"}) pong_message: PongMessage = await ws.receive_json() @@ -823,7 +911,7 @@ async def test_injects_connection_params(ws_raw: WebSocketClient): ) next_message: NextMessage = await ws.receive_json() - assert_next(next_message, "sub1", {"connectionParams": "rocks"}) + assert_next(next_message, "sub1", {"connectionParams": {"strawberry": "rocks"}}) await ws.send_message({"id": "sub1", "type": "complete"}) diff --git a/tests/websockets/test_graphql_ws.py 
b/tests/websockets/test_graphql_ws.py index 5564238ca3..26488c9911 100644 --- a/tests/websockets/test_graphql_ws.py +++ b/tests/websockets/test_graphql_ws.py @@ -107,6 +107,137 @@ async def test_operation_selection(ws: WebSocketClient): assert complete_message["id"] == "demo" +async def test_connections_are_accepted_by_default(ws_raw: WebSocketClient): + await ws_raw.send_legacy_message({"type": "connection_init"}) + connection_ack_message: ConnectionAckMessage = await ws_raw.receive_json() + assert connection_ack_message == {"type": "connection_ack"} + + await ws_raw.close() + assert ws_raw.closed + + +async def test_setting_a_connection_ack_payload(ws_raw: WebSocketClient): + await ws_raw.send_legacy_message( + { + "type": "connection_init", + "payload": {"test-accept": True, "ack-payload": {"token": "secret"}}, + } + ) + + connection_ack_message: ConnectionAckMessage = await ws_raw.receive_json() + assert connection_ack_message == { + "type": "connection_ack", + "payload": {"token": "secret"}, + } + + await ws_raw.close() + assert ws_raw.closed + + +async def test_connection_ack_payload_may_be_unset(ws_raw: WebSocketClient): + await ws_raw.send_legacy_message( + { + "type": "connection_init", + "payload": {"test-accept": True}, + } + ) + + connection_ack_message: ConnectionAckMessage = await ws_raw.receive_json() + assert connection_ack_message == {"type": "connection_ack"} + + await ws_raw.close() + assert ws_raw.closed + + +async def test_a_connection_ack_payload_of_none_is_treated_as_unset( + ws_raw: WebSocketClient, +): + await ws_raw.send_legacy_message( + { + "type": "connection_init", + "payload": {"test-accept": True, "ack-payload": None}, + } + ) + + connection_ack_message: ConnectionAckMessage = await ws_raw.receive_json() + assert connection_ack_message == {"type": "connection_ack"} + + await ws_raw.close() + assert ws_raw.closed + + +async def test_rejecting_connection_results_in_error_message_and_socket_closure( + ws_raw: WebSocketClient, +): + 
await ws_raw.send_legacy_message( + {"type": "connection_init", "payload": {"test-reject": True}} + ) + + connection_error_message: ConnectionErrorMessage = await ws_raw.receive_json() + assert connection_error_message == {"type": "connection_error", "payload": {}} + + await ws_raw.receive(timeout=2) + assert ws_raw.closed + assert ws_raw.close_code == 1011 + assert not ws_raw.close_reason + + +async def test_rejecting_connection_with_custom_connection_error_payload( + ws_raw: WebSocketClient, +): + await ws_raw.send_legacy_message( + { + "type": "connection_init", + "payload": {"test-reject": True, "err-payload": {"custom": "error"}}, + } + ) + + connection_error_message: ConnectionErrorMessage = await ws_raw.receive_json() + assert connection_error_message == { + "type": "connection_error", + "payload": {"custom": "error"}, + } + + await ws_raw.receive(timeout=2) + assert ws_raw.closed + assert ws_raw.close_code == 1011 + assert not ws_raw.close_reason + + +async def test_context_can_be_modified_from_within_on_ws_connect( + ws_raw: WebSocketClient, +): + await ws_raw.send_legacy_message( + { + "type": "connection_init", + "payload": {"test-modify": True}, + } + ) + + connection_ack_message: ConnectionAckMessage = await ws_raw.receive_json() + assert connection_ack_message == {"type": "connection_ack"} + + await ws_raw.send_legacy_message( + { + "type": "start", + "id": "demo", + "payload": { + "query": "subscription { connectionParams }", + }, + } + ) + + data_message: DataMessage = await ws_raw.receive_json() + assert data_message["type"] == "data" + assert data_message["id"] == "demo" + assert data_message["payload"]["data"] == { + "connectionParams": {"test-modify": True, "modified": True} + } + + await ws_raw.close() + assert ws_raw.closed + + async def test_sends_keep_alive(aiohttp_app_client: HttpClient): aiohttp_app_client.create_app(keep_alive=True, keep_alive_interval=0.1) async with aiohttp_app_client.ws_connect( @@ -589,7 +720,9 @@ async def 
test_injects_connection_params(aiohttp_app_client: HttpClient): data_message: DataMessage = await ws.receive_json() assert data_message["type"] == "data" assert data_message["id"] == "demo" - assert data_message["payload"]["data"] == {"connectionParams": "rocks"} + assert data_message["payload"]["data"] == { + "connectionParams": {"strawberry": "rocks"} + } await ws.send_legacy_message({"type": "stop", "id": "demo"}) diff --git a/tests/websockets/views.py b/tests/websockets/views.py new file mode 100644 index 0000000000..eec511131e --- /dev/null +++ b/tests/websockets/views.py @@ -0,0 +1,30 @@ +from typing import Dict, Union + +from strawberry import UNSET +from strawberry.exceptions import ConnectionRejectionError +from strawberry.http.async_base_view import AsyncBaseHTTPView +from strawberry.types.unset import UnsetType + + +class OnWSConnectMixin(AsyncBaseHTTPView): + async def on_ws_connect( + self, context: Dict[str, object] + ) -> Union[UnsetType, None, Dict[str, object]]: + connection_params = context["connection_params"] + + if isinstance(connection_params, dict): + if connection_params.get("test-reject"): + if "err-payload" in connection_params: + raise ConnectionRejectionError(connection_params["err-payload"]) + raise ConnectionRejectionError() + + if connection_params.get("test-accept"): + if "ack-payload" in connection_params: + return connection_params["ack-payload"] + return UNSET + + if connection_params.get("test-modify"): + connection_params["modified"] = True + return UNSET + + return await super().on_ws_connect(context)
Add handler method to process websocket connections <!--- Provide a general summary of your changes in the title above. --> Allow an application to process the connection request for `graphql_transport_ws` and `graphql_ws` protocols, perform authentication, reject a connection and return custom response payload <!--- This template is entirely optional and can be removed, but is here to help both you and us. --> <!--- Anything on lines wrapped in comments like these will not show up in the final text. --> ## Description A previous PR, #2380 added support for `connection_params`. However, they were simply taken as provided and injected into the _context_ object. This PR adds an overridable hook in the `AsyncBaseHTTPView`, `on_ws_connect()` which allows an application to examine the connection parameters when a connection is made. The method receives an `WSConnectionParams` object, `params`. The application can then - choose to reject the connection (by calling `params.reject()`) - add additional information into the `params.connection_params` or modify it. - set `params.rsponse_params` to return a _payload_ to the client. This is based on similar behaviour in the reference implementation for `graphql_transport_ws`, namely this code here: https://github.com/enisdenjo/graphql-ws/blob/972ed07ea263624bb556970cfd85bb05ea2c6eac/src/server.ts#L614 except that we use a parameter object to operate on. This approach allows us to split **authentication** and **authorization** into two steps. Authentication can be done when establishing connection and authorization can be performed for each resolver. Documentation is modified and also a piece added about how to perform authentication for _single result operations_ (queries and mutations) since they can be invoked both via http and websockets. <!--- Describe your changes in detail here. --> ## Types of Changes <!--- What types of changes does your pull request introduce? Put an `x` in all the boxes that apply. 
--> - [x] Core - [ ] Bugfix - [ ] New feature - [x] Enhancement/optimization - [x] Documentation ## Issues Fixed or Closed by This PR * ## Checklist <!--- Go over all the following points, and put an `x` in all the boxes that apply. --> <!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! --> - [x] My code follows the code style of this project. - [x] My change requires a change to the documentation. - [x] I have updated the documentation accordingly. - [x] I have read the CONTRIBUTING document. - [x] I have added tests to cover my changes. - [x] I have tested the changes and verified that they work and don't break anything (as well as I can manage). No standard way to intercept the "connect" websockets message ### Discussed in https://github.com/strawberry-graphql/strawberry/discussions/1640 <div type='discussions-op-text'> <sup>Originally posted by **kristjanvalur** February 13, 2022</sup> ... Alternatively, is there a way to intercept and handler the `connection_init` message and either accept or reject the conneciton based on the request headers (from the original connection) or the optional `payload` and attach custom data to the resulting `connection` object? Basically, I'm searching for the canonical way to do request authentication and authorization when working with requests that arrive via the `graphql-transport-ws` transport, be they subscriptions, mutations or queries. Cheers!</div> <!-- POLAR PLEDGE BADGE START --> ## Upvote & Fund - We're using [Polar.sh](https://polar.sh/strawberry-graphql) so you can upvote and help fund this issue. - We receive the funding once the issue is completed & confirmed by you. - Thank you in advance for helping prioritize & fund our backlog. 
<a href="https://polar.sh/strawberry-graphql/strawberry/issues/1703"> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/strawberry-graphql/strawberry/issues/1703/pledge.svg?darkmode=1"> <img alt="Fund with Polar" src="https://polar.sh/api/github/strawberry-graphql/strawberry/issues/1703/pledge.svg"> </picture> </a> <!-- POLAR PLEDGE BADGE END --> Add hook functions, process_connection_payload() This PR adds a hook funciton in to the websocket handlers, `process_connection_payload()` which can be customized to validate/proecss the payload sent with the `connection_init` message and provide a payload to return with the `connection_ack` response. This allows validation of conneciton payload, as well as to pro ## Types of Changes <!--- What types of changes does your pull request introduce? Put an `x` in all the boxes that apply. --> - [ ] Core - [ ] Bugfix - [x] New feature - [ ] Enhancement/optimization - [ ] Documentation ## Issues Fixed or Closed by This PR See discussion #1640 and issue #1702 ## Checklist <!--- Go over all the following points, and put an `x` in all the boxes that apply. --> <!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! --> - [x] My code follows the code style of this project. - [ ] My change requires a change to the documentation. - [ ] I have updated the documentation accordingly. - [x] I have read the CONTRIBUTING document. - [ ] I have added tests to cover my changes. - [x] I have tested the changes and verified that they work and don't break anything (as well as I can manage).
see #1652 Thanks for adding the `RELEASE.md` file! ![](https://media.giphy.com/media/xq1FxHkABwW7m/giphy.gif) Here's a preview of the changelog: --- Adding the possiblity to process `payload` sent with a websockets `connection_init` message, and provide a response payload included with the `connection_ack` message. This can be useful for customized authentication and to provide general information to the client. ## Example Here is how one might subclass the Router to provide a custom handler class. This example uses the `GraphQLWSHandler`, but the `GraphQLTransportWSHandler` case is identical. ```python from strawberry.fastapi import GraphQLRouter from strawberry.fastapi.handlers import GraphQLTransportWSHandler, GraphQLWSHandler class MyGraphQLWSHandler(GraphQLWSHandler): async def process_connection_payload(self, payload): if payload.get("name") == "bob": await self.close(4400, "Bob is banned") return return {"hello": "Welcome to our server!"} class MyRouter(GraphQLRouter): graphql_ws_handler_class = MyGraphQLWSHandler graphql_app = MyRouter(schema) ``` --- Here's the preview release card for twitter: ![](https://i.imgur.com/ipEvXbj.png) Here's the tweet text: ``` πŸ†• Release (next) is out! Thanks to @kristjanvalur for the PR πŸ‘ Get it here πŸ‘‰ https://github.com/strawberry-graphql/strawberry/releases/tag/(next) ``` <!-- action-check: release-file -->
1,733,764,416,000
[ "bot:has-release-file", "bot:release-type-minor" ]
Feature Request
[ "strawberry/http/async_base_view.py:AsyncBaseHTTPView.run", "strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py:BaseGraphQLTransportWSHandler.__init__", "strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py:BaseGraphQLTransportWSHandler.handle_connection_init", "strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py:BaseGraphQLTransportWSHandler.handle_subscribe", "strawberry/subscriptions/protocols/graphql_ws/handlers.py:BaseGraphQLWSHandler.__init__", "strawberry/subscriptions/protocols/graphql_ws/handlers.py:BaseGraphQLWSHandler.handle_connection_init", "strawberry/subscriptions/protocols/graphql_ws/handlers.py:BaseGraphQLWSHandler.handle_start" ]
[ "strawberry/exceptions/__init__.py:ConnectionRejectionError.__init__", "strawberry/http/async_base_view.py:AsyncBaseHTTPView.on_ws_connect" ]
7
WebOfTrust/keria
WebOfTrust__keria-334
912e68144ee0f31536c77d67f732ee82469d210c
diff --git a/src/keria/app/agenting.py b/src/keria/app/agenting.py index 3d6f96a4..1fc8a210 100644 --- a/src/keria/app/agenting.py +++ b/src/keria/app/agenting.py @@ -4,6 +4,7 @@ keria.app.agenting module """ +from base64 import b64decode import json import os import datetime @@ -53,7 +54,8 @@ def setup(name, bran, adminPort, bootPort, base='', httpPort=None, configFile=None, configDir=None, - keypath=None, certpath=None, cafilepath=None, cors=False, releaseTimeout=None, curls=None, iurls=None, durls=None): + keypath=None, certpath=None, cafilepath=None, cors=False, releaseTimeout=None, curls=None, + iurls=None, durls=None, bootUsername=None, bootPassword=None): """ Set up an ahab in Signify mode """ agency = Agency(name=name, base=base, bran=bran, configFile=configFile, configDir=configDir, releaseTimeout=releaseTimeout, curls=curls, iurls=iurls, durls=durls) @@ -66,7 +68,7 @@ def setup(name, bran, adminPort, bootPort, base='', httpPort=None, configFile=No if not bootServer.reopen(): raise RuntimeError(f"cannot create boot http server on port {bootPort}") bootServerDoer = http.ServerDoer(server=bootServer) - bootEnd = BootEnd(agency) + bootEnd = BootEnd(agency, username=bootUsername, password=bootPassword) bootApp.add_route("/boot", bootEnd) bootApp.add_route("/health", HealthEnd()) @@ -871,17 +873,44 @@ def loadEnds(app): class BootEnd: """ Resource class for creating datastore in cloud ahab """ - def __init__(self, agency): + def __init__(self, agency: Agency, username: str | None = None, password: str | None = None): """ Provides endpoints for initializing and unlocking an agent - Parameters: agency (Agency): Agency for managing agents - + username (str): username for boot request + password (str): password for boot request """ - self.authn = authing.Authenticater(agency=agency) + self.username = username + self.password = password self.agency = agency - def on_post(self, req, rep): + def authenticate(self, req: falcon.Request): + # Username AND Password is not 
set, so no need to authenticate + if self.username is None and self.password is None: + return + + if req.auth is None: + raise falcon.HTTPUnauthorized(title="Unauthorized") + + scheme, token = req.auth.split(' ') + if scheme != 'Basic': + raise falcon.HTTPUnauthorized(title="Unauthorized") + + try: + username, password = b64decode(token).decode('utf-8').split(':') + + if username is None or password is None: + raise falcon.HTTPUnauthorized(title="Unauthorized") + + if username == self.username and password == self.password: + return + + except Exception: + raise falcon.HTTPUnauthorized(title="Unauthorized") + + raise falcon.HTTPUnauthorized(title="Unauthorized") + + def on_post(self, req: falcon.Request, rep: falcon.Response): """ Inception event POST endpoint Give me a new Agent. Create Habery using ctrlPRE as database name, agentHab that anchors the caid and @@ -893,6 +922,8 @@ def on_post(self, req, rep): """ + self.authenticate(req) + body = req.get_media() if "icp" not in body: raise falcon.HTTPBadRequest(title="invalid inception", diff --git a/src/keria/app/cli/commands/start.py b/src/keria/app/cli/commands/start.py index 21272d48..c7f21661 100644 --- a/src/keria/app/cli/commands/start.py +++ b/src/keria/app/cli/commands/start.py @@ -68,6 +68,14 @@ help="Set log level to DEBUG | INFO | WARNING | ERROR | CRITICAL. Default is CRITICAL") parser.add_argument("--logfile", action="store", required=False, default=None, help="path of the log file. If not defined, logs will not be written to the file.") +parser.add_argument("--experimental-boot-password", + help="Experimental password for boot endpoint. Enables HTTP Basic Authentication for the boot endpoint. Only meant to be used for testing purposes.", + dest="bootPassword", + default=os.getenv("KERIA_EXPERIMENTAL_BOOT_PASSWORD")) +parser.add_argument("--experimental-boot-username", + help="Experimental username for boot endpoint. Enables HTTP Basic Authentication for the boot endpoint. 
Only meant to be used for testing purposes.", + dest="bootUsername", + default=os.getenv("KERIA_EXPERIMENTAL_BOOT_USERNAME")) def getListVariable(name): value = os.getenv(name) @@ -99,7 +107,9 @@ def launch(args): releaseTimeout=int(os.getenv("KERIA_RELEASER_TIMEOUT", "86400")), curls=getListVariable("KERIA_CURLS"), iurls=getListVariable("KERIA_IURLS"), - durls=getListVariable("KERIA_DURLS")) + durls=getListVariable("KERIA_DURLS"), + bootPassword=args.bootPassword, + bootUsername=args.bootUsername) directing.runController(doers=agency, expire=0.0)
diff --git a/tests/app/test_agenting.py b/tests/app/test_agenting.py index 6f4cf910..e0338f38 100644 --- a/tests/app/test_agenting.py +++ b/tests/app/test_agenting.py @@ -5,6 +5,7 @@ Testing the Mark II Agent """ +from base64 import b64encode import json import os import shutil @@ -248,7 +249,7 @@ def test_agency_with_urls_from_arguments(): assert agent.hby.cf.get()["iurls"] == iurls assert agent.hby.cf.get()["durls"] == durls -def test_boot_ends(helpers): +def test_unprotected_boot_ends(helpers): agency = agenting.Agency(name="agency", bran=None, temp=True) doist = doing.Doist(limit=1.0, tock=0.03125, real=True) doist.enter(doers=[agency]) @@ -281,6 +282,85 @@ def test_boot_ends(helpers): 'description': 'agent for controller EK35JRNdfVkO4JwhXaSTdV4qzB_ibk_tGJmSVcY4pZqx already exists' } +def test_protected_boot_ends(helpers): + agency = agenting.Agency(name="agency", bran=None, temp=True) + doist = doing.Doist(limit=1.0, tock=0.03125, real=True) + doist.enter(doers=[agency]) + + serder, sigers = helpers.controller() + assert serder.pre == helpers.controllerAID + + app = falcon.App() + client = testing.TestClient(app) + + username = "user" + password = "secret" + + bootEnd = agenting.BootEnd(agency, username=username, password=password) + app.add_route("/boot", bootEnd) + + body = dict( + icp=serder.ked, + sig=sigers[0].qb64, + salty=dict( + stem='signify:aid', pidx=0, tier='low', sxlt='OBXYZ', + icodes=[MtrDex.Ed25519_Seed], ncodes=[MtrDex.Ed25519_Seed] + ) + ) + + rep = client.simulate_post("/boot", body=json.dumps(body).encode("utf-8")) + assert rep.status_code == 401 + + rep = client.simulate_post("/boot", body=json.dumps(body).encode("utf-8"), headers={"Authorization": "Something test"}) + assert rep.status_code == 401 + + rep = client.simulate_post("/boot", body=json.dumps(body).encode("utf-8"), headers={"Authorization": "Basic user:secret"}) + assert rep.status_code == 401 + + rep = client.simulate_post("/boot", body=json.dumps(body).encode("utf-8"), 
headers={"Authorization": f"Basic {b64encode(b'test:secret').decode('utf-8')}"} ) + assert rep.status_code == 401 + + rep = client.simulate_post("/boot", body=json.dumps(body).encode("utf-8"), headers={"Authorization": f"Basic {b64encode(b'user').decode('utf-8')}"} ) + assert rep.status_code == 401 + + rep = client.simulate_post("/boot", body=json.dumps(body).encode("utf-8"), headers={"Authorization": f"Basic {b64encode(b'user:test').decode('utf-8')}"} ) + assert rep.status_code == 401 + + authorization = f"Basic {b64encode(b'user:secret').decode('utf-8')}" + rep = client.simulate_post("/boot", body=json.dumps(body).encode("utf-8"), headers={"Authorization": authorization}) + assert rep.status_code == 202 + +def test_misconfigured_protected_boot_ends(helpers): + agency = agenting.Agency(name="agency", bran=None, temp=True) + doist = doing.Doist(limit=1.0, tock=0.03125, real=True) + doist.enter(doers=[agency]) + + serder, sigers = helpers.controller() + assert serder.pre == helpers.controllerAID + + app = falcon.App() + client = testing.TestClient(app) + + # No password set, should return 401 + bootEnd = agenting.BootEnd(agency, username="user", password=None) + app.add_route("/boot", bootEnd) + + body = dict( + icp=serder.ked, + sig=sigers[0].qb64, + salty=dict( + stem='signify:aid', pidx=0, tier='low', sxlt='OBXYZ', + icodes=[MtrDex.Ed25519_Seed], ncodes=[MtrDex.Ed25519_Seed] + ) + ) + + authorization = f"Basic {b64encode(b'user').decode('utf-8')}" + rep = client.simulate_post("/boot", body=json.dumps(body).encode("utf-8"), headers={"Authorization": authorization}) + assert rep.status_code == 401 + + authorization = f"Basic {b64encode(b'user:secret').decode('utf-8')}" + rep = client.simulate_post("/boot", body=json.dumps(body).encode("utf-8"), headers={"Authorization": authorization}) + assert rep.status_code == 401 def test_witnesser(helpers): salt = b'0123456789abcdef'
Provide a way to protect the boot endpoint ### Feature request description/rationale I suggest there should be a way to protect the boot endpoint with some simple authentication. In my opinion, this is convenient even if the boot port is not exposed publicly on the internet. It would also make it easier for people to spin up a KERIA instance publicly without having to worry about unauthorized people booting agents. # Suggestion Implement a [Basic authentication](https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication) check by configuring the keria boot endpoint with a `username` and a `password`. Like this: ``` keria start --boot-username username --boot-password password --boot 3903 ``` Or, using environment variables ``` export KERIA_BOOT_USERNAME=username export KERIA_BOOT_PASSWORD=password keria start --boot 3903 ``` To boot an agent, you can then either: - Use basic auth in the Boot URL: `http://username:password@localhost:3903/boot`, most http clients will set the authorization header for you. - Set the Authorization header in the boot request manually to `Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=` We can also add a flag to completely disable the boot endpoint (https://github.com/WebOfTrust/keria/issues/203). E.g. ``` keria start --no-boot-endpoint ``` Or, ``` export KERIA_DISABLE_BOOT_ENDPOINT=1 ``` # Considerations There are probably much more sophisticated ways to protect the boot endpoint. Please share your thoughts. In my opinion, this could be sufficient and will allow users to more easily deploy keria instances on the internet for testing purposes. I have code and unit tests for this, so I can open a PR if we can agree this is a good idea.
This makes sense to me for test servers deployed publicly. Would just want to make sure a simple username/pass doesn't end up becoming the norm. I wrote a simple Spring Cloud Gateway service in Java that provisions one-time use boot URLs and emails them to clients. Used it to protect our infrastructure in an alpha program of our wallet, and it worked quite well. I can ask for it to be open sourced. There are probably even safer ways to do it but it's a start. > Would just want to make sure a simple username/pass doesn't end up becoming the norm. True. I think that can be handled by some clever naming of the configuration option. For example, a flag `--enable-test-boot-authentication` that must be set when `--boot-password` is set. Or, `--dangerously-set-boot-password <password>` or something like that. :) Another note. It is quite common to password protect resources within a private network anyway. You would normally have password protection on your database servers for example. So perhaps it shouldn't be too inconvenient to protect it like this.
1,733,756,077,000
[]
Feature Request
[ "src/keria/app/agenting.py:setup", "src/keria/app/agenting.py:BootEnd.__init__", "src/keria/app/agenting.py:BootEnd.on_post", "src/keria/app/cli/commands/start.py:launch" ]
[ "src/keria/app/agenting.py:BootEnd.authenticate" ]
4
huggingface/transformers
huggingface__transformers-22496
41d47db90fbe9937c0941f2f9cdb2ddd83e49a2e
diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py index 91de6810b17e..96f91a0a43dd 100644 --- a/src/transformers/models/whisper/modeling_whisper.py +++ b/src/transformers/models/whisper/modeling_whisper.py @@ -34,7 +34,12 @@ SequenceClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from ...utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) from .configuration_whisper import WhisperConfig from .tokenization_whisper import TASK_IDS, TO_LANGUAGE_CODE @@ -1464,6 +1469,7 @@ def generate( task=None, language=None, is_multilingual=None, + prompt_ids: Optional[torch.Tensor] = None, **kwargs, ): """ @@ -1521,6 +1527,11 @@ def generate( find all the possible language tokens in the `model.generation_config.lang_to_id` dictionary. is_multilingual (`bool`, *optional*): Whether or not the model is multilingual. + prompt_ids (`torch.Tensor`, *optional*): + Rank-1 tensor of token IDs created by passing text to [`~WhisperProcessor.get_prompt_ids`] that is + provided as a prompt to each chunk. This can be used to provide or "prompt-engineer" a context for + transcription, e.g. custom vocabularies or proper nouns to make it more likely to predict those words + correctly. It cannot be used in conjunction with `decoder_start_token_id` as it overwrites this value. kwargs: Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. 
If the model is an encoder-decoder model, encoder @@ -1567,8 +1578,21 @@ def generate( if task is not None: generation_config.task = task - forced_decoder_ids = [] - if task is not None or language is not None: + forced_decoder_ids = None + + # Legacy code for backward compatibility + if hasattr(self.config, "forced_decoder_ids") and self.config.forced_decoder_ids is not None: + forced_decoder_ids = self.config.forced_decoder_ids + elif ( + hasattr(self.generation_config, "forced_decoder_ids") + and self.generation_config.forced_decoder_ids is not None + ): + forced_decoder_ids = self.generation_config.forced_decoder_ids + else: + forced_decoder_ids = kwargs.get("forced_decoder_ids", None) + + if task is not None or language is not None or (forced_decoder_ids is None and prompt_ids is not None): + forced_decoder_ids = [] if hasattr(generation_config, "language"): if generation_config.language in generation_config.lang_to_id.keys(): language_token = generation_config.language @@ -1593,27 +1617,48 @@ def generate( raise ValueError( f"The `{generation_config.task}`task is not supported. 
The task should be one of `{TASK_IDS}`" ) - else: + elif hasattr(generation_config, "task_to_id"): forced_decoder_ids.append((2, generation_config.task_to_id["transcribe"])) # defaults to transcribe if hasattr(generation_config, "no_timestamps_token_id") and not generation_config.return_timestamps: idx = forced_decoder_ids[-1][0] + 1 if forced_decoder_ids else 1 forced_decoder_ids.append((idx, generation_config.no_timestamps_token_id)) - # Legacy code for backward compatibility - elif hasattr(self.config, "forced_decoder_ids") and self.config.forced_decoder_ids is not None: - forced_decoder_ids = self.config.forced_decoder_ids - elif ( - hasattr(self.generation_config, "forced_decoder_ids") - and self.generation_config.forced_decoder_ids is not None - ): - forced_decoder_ids = self.generation_config.forced_decoder_ids + if forced_decoder_ids is not None: + generation_config.forced_decoder_ids = forced_decoder_ids + + if prompt_ids is not None: + if kwargs.get("decoder_start_token_id") is not None: + raise ValueError( + "When specifying `prompt_ids`, you cannot also specify `decoder_start_token_id` as it gets overwritten." 
+ ) + prompt_ids = prompt_ids.tolist() + decoder_start_token_id, *text_prompt_ids = prompt_ids + # Set the decoder_start_token_id to <|startofprev|> + kwargs.update({"decoder_start_token_id": decoder_start_token_id}) + + # Update the max generation length to include the prompt + specified_max_length = kwargs.pop("max_new_tokens", None) or kwargs.pop("max_length", None) + default_max_length = generation_config.max_new_tokens or generation_config.max_length + non_prompt_max_length = specified_max_length or default_max_length + kwargs["max_new_tokens"] = non_prompt_max_length + len(text_prompt_ids) + + # Reformat the forced_decoder_ids to incorporate the prompt + non_prompt_forced_decoder_ids = ( + kwargs.pop("forced_decoder_ids", None) or generation_config.forced_decoder_ids + ) + forced_decoder_ids = [ + # Slicing the text prompt ids in a manner consistent with the OpenAI implementation + # to accomodate context space for the prefix (see https://github.com/openai/whisper/blob/c09a7ae299c4c34c5839a76380ae407e7d785914/whisper/decoding.py#L599) + *text_prompt_ids[-self.config.max_length // 2 - 1 :], + generation_config.decoder_start_token_id, + *[token for _rank, token in non_prompt_forced_decoder_ids], + ] + forced_decoder_ids = [(rank + 1, token) for rank, token in enumerate(forced_decoder_ids)] + generation_config.forced_decoder_ids = forced_decoder_ids if generation_config.return_timestamps: logits_processor = [WhisperTimeStampLogitsProcessor(generation_config)] - if len(forced_decoder_ids) > 0: - generation_config.forced_decoder_ids = forced_decoder_ids - return super().generate( inputs, generation_config, diff --git a/src/transformers/models/whisper/processing_whisper.py b/src/transformers/models/whisper/processing_whisper.py index 8c158b041f7c..b0d0d6c95450 100644 --- a/src/transformers/models/whisper/processing_whisper.py +++ b/src/transformers/models/whisper/processing_whisper.py @@ -16,6 +16,7 @@ Speech processor class for Whisper """ + from 
...processing_utils import ProcessorMixin @@ -91,3 +92,6 @@ def decode(self, *args, **kwargs): the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) + + def get_prompt_ids(self, text: str, return_tensors="np"): + return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors) diff --git a/src/transformers/models/whisper/tokenization_whisper.py b/src/transformers/models/whisper/tokenization_whisper.py index 24eb72a0b0f9..4c7c9c89fd3b 100644 --- a/src/transformers/models/whisper/tokenization_whisper.py +++ b/src/transformers/models/whisper/tokenization_whisper.py @@ -606,6 +606,11 @@ def _decode( ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) + if skip_special_tokens: + prompt_token_id = self.convert_tokens_to_ids("<|startofprev|>") + decoder_start_token_id = self.convert_tokens_to_ids("<|startoftranscript|>") + token_ids = self._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id) + filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # To avoid mixing byte-level and unicode for byte-level BPT @@ -714,6 +719,31 @@ def _decode_asr(self, model_outputs, *, return_timestamps, return_language, time time_precision=time_precision, ) + def get_prompt_ids(self, text: str, return_tensors="np"): + """Converts prompt text to IDs that can be passed to [`~WhisperForConditionalGeneration.generate`].""" + batch_encoding = self("<|startofprev|>", text.strip(), add_prefix_space=True, add_special_tokens=False) + + # Check for special tokens + prompt_text_ids = batch_encoding["input_ids"][1:] + special_token_id = next((x for x in prompt_text_ids if x >= self.all_special_ids[0]), None) + if special_token_id is not None: + token = self.convert_ids_to_tokens(special_token_id) + raise ValueError(f"Encountered text in the prompt corresponding to disallowed special token: {token}.") + + 
batch_encoding.convert_to_tensors(tensor_type=return_tensors) + return batch_encoding["input_ids"] + + @staticmethod + def _strip_prompt(token_ids: List[int], prompt_token_id: int, decoder_start_token_id: int): + has_prompt = isinstance(token_ids, list) and token_ids and token_ids[0] == prompt_token_id + if has_prompt: + if decoder_start_token_id in token_ids: + return token_ids[token_ids.index(decoder_start_token_id) :] + else: + return [] + + return token_ids + def _decode_asr(tokenizer, model_outputs, *, return_timestamps, return_language, time_precision): """ diff --git a/src/transformers/models/whisper/tokenization_whisper_fast.py b/src/transformers/models/whisper/tokenization_whisper_fast.py index fb1bf89ed606..be4ad842a7f6 100644 --- a/src/transformers/models/whisper/tokenization_whisper_fast.py +++ b/src/transformers/models/whisper/tokenization_whisper_fast.py @@ -312,6 +312,11 @@ def decode( return text def _decode(self, *args, normalize: bool = False, **kwargs) -> str: + if kwargs["skip_special_tokens"]: + prompt_token_id = self.convert_tokens_to_ids("<|startofprev|>") + decoder_start_token_id = self.convert_tokens_to_ids("<|startoftranscript|>") + kwargs["token_ids"] = self._strip_prompt(kwargs["token_ids"], prompt_token_id, decoder_start_token_id) + text = super()._decode(*args, **kwargs) if normalize: @@ -485,3 +490,30 @@ def _decode_asr(self, model_outputs, *, return_timestamps, return_language, time return_language=return_language, time_precision=time_precision, ) + + # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_prompt_ids + def get_prompt_ids(self, text: str, return_tensors="np"): + """Converts prompt text to IDs that can be passed to [`~WhisperForConditionalGeneration.generate`].""" + batch_encoding = self("<|startofprev|>", text.strip(), add_prefix_space=True, add_special_tokens=False) + + # Check for special tokens + prompt_text_ids = batch_encoding["input_ids"][1:] + special_token_id = next((x for x in 
prompt_text_ids if x >= self.all_special_ids[0]), None) + if special_token_id is not None: + token = self.convert_ids_to_tokens(special_token_id) + raise ValueError(f"Encountered text in the prompt corresponding to disallowed special token: {token}.") + + batch_encoding.convert_to_tensors(tensor_type=return_tensors) + return batch_encoding["input_ids"] + + @staticmethod + # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._strip_prompt + def _strip_prompt(token_ids: List[int], prompt_token_id: int, decoder_start_token_id: int): + has_prompt = isinstance(token_ids, list) and token_ids and token_ids[0] == prompt_token_id + if has_prompt: + if decoder_start_token_id in token_ids: + return token_ids[token_ids.index(decoder_start_token_id) :] + else: + return [] + + return token_ids
diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index 883a2021b9bb..98bbbb3214a7 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -1013,6 +1013,48 @@ def test_mask_time_prob(self): encoder_last_hidden_state = model(**input_dict).encoder_last_hidden_state self.assertTrue(encoder_last_hidden_state.shape, (13, 30, 16)) + def test_generate_with_prompt_ids_and_task_and_language(self): + config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() + model = WhisperForConditionalGeneration(config).eval().to(torch_device) + input_features = input_dict["input_features"] + prompt_ids = np.arange(5) + language = "<|de|>" + task = "translate" + lang_id = 6 + task_id = 7 + model.generation_config.__setattr__("lang_to_id", {language: lang_id}) + model.generation_config.__setattr__("task_to_id", {task: task_id}) + + output = model.generate(input_features, max_new_tokens=5, task=task, language=language, prompt_ids=prompt_ids) + + expected_output_start = [ + *prompt_ids.tolist(), + model.generation_config.decoder_start_token_id, + lang_id, + task_id, + ] + for row in output.tolist(): + self.assertListEqual(row[: len(expected_output_start)], expected_output_start) + + def test_generate_with_prompt_ids_and_forced_decoder_ids(self): + config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() + model = WhisperForConditionalGeneration(config).eval().to(torch_device) + input_features = input_dict["input_features"] + prompt_ids = np.asarray(range(5)) + forced_decoder_ids = [(1, 6), (2, 7), (3, 8)] + + output = model.generate( + input_features, max_new_tokens=5, forced_decoder_ids=forced_decoder_ids, prompt_ids=prompt_ids + ) + + expected_output_start = [ + *prompt_ids.tolist(), + model.generation_config.decoder_start_token_id, + *[token for _rank, token in forced_decoder_ids], + ] + for row in output.tolist(): + 
self.assertListEqual(row[: len(expected_output_start)], expected_output_start) + @require_torch @require_torchaudio @@ -1429,6 +1471,60 @@ def test_tiny_specaugment_librispeech(self): # fmt: on self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) + @slow + def test_generate_with_prompt_ids(self): + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") + model.to(torch_device) + input_speech = self._load_datasamples(4)[-1:] + input_features = processor(input_speech, return_tensors="pt").input_features + + output_without_prompt = model.generate(input_features) + prompt_ids = processor.get_prompt_ids("Leighton") + output_with_prompt = model.generate(input_features, prompt_ids=prompt_ids) + + expected_without_prompt = "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can discover in it but little of Rocky Ithaca.<|endoftext|>" + expected_with_prompt = "<|startofprev|> Leighton<|startoftranscript|><|en|><|transcribe|><|notimestamps|> He has grave doubts whether Sir Frederick Leighton's work is really Greek after all and can discover in it but little of Rocky Ithaca.<|endoftext|>" + self.assertEqual(processor.decode(output_without_prompt[0]), expected_without_prompt) + self.assertEqual(processor.decode(output_with_prompt[0]), expected_with_prompt) + + @slow + def test_generate_with_prompt_ids_and_forced_decoder_ids(self): + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") + model.to(torch_device) + input_speech = self._load_datasamples(1) + input_features = processor(input_speech, return_tensors="pt").input_features + task = "translate" + language = "de" + expected_tokens = [f"<|{task}|>", f"<|{language}|>"] + prompt = "test prompt" + prompt_ids = 
processor.get_prompt_ids(prompt) + + output = model.generate(input_features, task=task, language=language, prompt_ids=prompt_ids) + text = processor.decode(output[0]) + + self.assertTrue(prompt in text) + self.assertTrue(all([token in text for token in expected_tokens])) + + @slow + def test_generate_with_prompt_ids_and_no_non_prompt_forced_decoder_ids(self): + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + model.to(torch_device) + input_speech = self._load_datasamples(1) + input_features = processor(input_speech, return_tensors="pt").input_features + prompt = "test prompt" + prompt_ids = processor.get_prompt_ids(prompt) + + model.generation_config.forced_decoder_ids = None + model.config.forced_decoder_ids = None + + output = model.generate(input_features, prompt_ids=prompt_ids, return_timestamps=True) + text = processor.decode(output[0]) + + self.assertTrue(prompt in text) + def prepare_whisper_encoder_inputs_dict(config, input_features, head_mask=None): if head_mask is None: diff --git a/tests/models/whisper/test_processor_whisper.py b/tests/models/whisper/test_processor_whisper.py index b844d433ed33..e96f4260e94c 100644 --- a/tests/models/whisper/test_processor_whisper.py +++ b/tests/models/whisper/test_processor_whisper.py @@ -16,6 +16,8 @@ import tempfile import unittest +import pytest + from transformers import WhisperTokenizer, is_speech_available from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio @@ -146,3 +148,32 @@ def test_get_decoder_prompt_ids(self): expected_ids = [TRANSCRIBE, NOTIMESTAMPS] self.assertListEqual([ids[-1] for ids in forced_decoder_ids], expected_ids) + + def test_get_prompt_ids(self): + processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) + prompt_ids = processor.get_prompt_ids("Mr. 
Quilter") + decoded_prompt = processor.tokenizer.decode(prompt_ids) + + self.assertListEqual(prompt_ids.tolist(), [50360, 1770, 13, 2264, 346, 353]) + self.assertEqual(decoded_prompt, "<|startofprev|> Mr. Quilter") + + def test_empty_get_prompt_ids(self): + processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) + prompt_ids = processor.get_prompt_ids("") + decoded_prompt = processor.tokenizer.decode(prompt_ids) + + self.assertListEqual(prompt_ids.tolist(), [50360, 220]) + self.assertEqual(decoded_prompt, "<|startofprev|> ") + + def test_get_prompt_ids_with_special_tokens(self): + processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) + + def _test_prompt_error_raised_helper(prompt, special_token): + with pytest.raises(ValueError) as excinfo: + processor.get_prompt_ids(prompt) + expected = f"Encountered text in the prompt corresponding to disallowed special token: {special_token}." + self.assertEqual(expected, str(excinfo.value)) + + _test_prompt_error_raised_helper("<|startofprev|> test", "<|startofprev|>") + _test_prompt_error_raised_helper("test <|notimestamps|>", "<|notimestamps|>") + _test_prompt_error_raised_helper("test <|zh|> test <|transcribe|>", "<|zh|>") diff --git a/tests/models/whisper/test_tokenization_whisper.py b/tests/models/whisper/test_tokenization_whisper.py index 9ceef149fab9..5022d29b730e 100644 --- a/tests/models/whisper/test_tokenization_whisper.py +++ b/tests/models/whisper/test_tokenization_whisper.py @@ -194,6 +194,25 @@ def test_find_longest_common_subsequence(self): merge = _find_longest_common_sequence([seq1, seq2, seq3]) self.assertEqual(merge, [1, 2, 3, 4, 5, 6, 7, 8]) + def test_skip_special_tokens_skips_prompt_ids(self): + tokenizer = self.get_tokenizer() + rust_tokenizer = self.get_rust_tokenizer() + # fmt: off + encoded_input = [ + 50361, 2221, 13, 2326, 388, 391, 50258, 50259, 50359, + 50363, 1282, 264, 2674, 9156, 295, 
1523, 11, 2221, 13, + 2326, 388, 391, 13657, 365, 2681, 21296, 17711, 13, 50257, + ] + # fmt: on + expected_with_special_tokens = "<|startofprev|> Mr. Quilter<|startoftranscript|><|en|><|transcribe|><|notimestamps|> On the general principles of art, Mr. Quilter writes with equal lucidity.<|endoftext|>" + expected_without_special_tokens = " On the general principles of art, Mr. Quilter writes with equal lucidity." + self.assertEqual(tokenizer.decode(encoded_input, skip_special_tokens=False), expected_with_special_tokens) + self.assertEqual(tokenizer.decode(encoded_input, skip_special_tokens=True), expected_without_special_tokens) + self.assertEqual(rust_tokenizer.decode(encoded_input, skip_special_tokens=False), expected_with_special_tokens) + self.assertEqual( + rust_tokenizer.decode(encoded_input, skip_special_tokens=True), expected_without_special_tokens + ) + class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase): checkpoint_name = "openai/whisper-small.en"
Whisper Prompting ### Feature request Add prompting for the Whisper model to control the style/formatting of the generated text. ### Motivation During training, Whisper can be fed a "previous context window" to condition on longer passages of text. The original OpenAI Whisper implementation provides the user with the option of passing an [`initial_prompt`](https://github.com/openai/whisper/blob/6dea21fd7f7253bfe450f1e2512a0fe47ee2d258/whisper/transcribe.py#L96) to the model. This prompt is replaces the "previous context window" during inference. By passing the prompt as the "previous context window", the Whisper model conditions its generation on whatever text is passed as the prompt. This allows the user to control aspects of the generation, such as spellings of named entities and punctuation formatting (see https://github.com/openai/whisper/discussions/963#discussioncomment-4987057). This is possibly a cheaper way of adapting the Whisper model to specific decoding constraints than fine-tuning. This notebook demonstrates prompting with the initial codebase, and explains how this can be achieved for HF's Whisper: https://colab.research.google.com/drive/14FSeaoRvgs5arOTfiMQBnQ5NaLyma7Tq?usp=sharing The proposed API for prompting would look something as follows: 1. Encode prompt text to prompt token ids (`processor.get_prompt_ids`) - this method is a wrapper around `processor.tokenizer.__call__` that **doesn't** add the special token ids: ```python prompt = "IR, Newswire" prompt_ids = processor.get_prompt_ids(prompt) ``` 2. Pass the input audio and prompt token ids to the `.generate` method to get the predicted ids: ```python pred_ids = model.generate(input_features, prompt_ids=prompt_ids) ``` 3. 
Decode the predicted ids and 'slice' off the prompt (we can do this by passing the `prompt_ids`): ```python pred_str = processor.batch_decode(pred_ids, prompt_ids=prompt_ids) ``` => We would need to wrap all of this `forced_decoder_ids` logic into the generate method and update the processor/tokenizer accordingly. ### Your contribution Happy to guide the integration and review any PRs!
cc @hollance Hello, I'd like to pick up this issue!
1,680,278,096,000
[]
Feature Request
[ "src/transformers/models/whisper/modeling_whisper.py:WhisperForConditionalGeneration.generate", "src/transformers/models/whisper/tokenization_whisper.py:WhisperTokenizer._decode", "src/transformers/models/whisper/tokenization_whisper_fast.py:WhisperTokenizerFast._decode" ]
[ "src/transformers/models/whisper/processing_whisper.py:WhisperProcessor.get_prompt_ids", "src/transformers/models/whisper/tokenization_whisper.py:WhisperTokenizer.get_prompt_ids", "src/transformers/models/whisper/tokenization_whisper.py:WhisperTokenizer._strip_prompt", "src/transformers/models/whisper/tokenization_whisper_fast.py:WhisperTokenizerFast.get_prompt_ids", "src/transformers/models/whisper/tokenization_whisper_fast.py:WhisperTokenizerFast._strip_prompt" ]
3
scikit-learn/scikit-learn
scikit-learn__scikit-learn-24145
55af30d981ea2f72346ff93602f0b3b740cfe8d6
"diff --git a/doc/whats_new/v1.3.rst b/doc/whats_new/v1.3.rst\nindex 9cab0db995c5d..ec1301844b877 10(...TRUNCATED)
"diff --git a/sklearn/preprocessing/tests/test_polynomial.py b/sklearn/preprocessing/tests/test_poly(...TRUNCATED)
"Add sparse matrix output to SplineTransformer\n### Describe the workflow you want to enable\n\nAs B(...TRUNCATED)
1,659,969,522,000
[ "module:preprocessing" ]
Feature Request
["sklearn/preprocessing/_polynomial.py:SplineTransformer.__init__","sklearn/preprocessing/_polynomia(...TRUNCATED)
[]
3
End of preview. Expand in Data Studio

LOC-BENCH: A Benchmark for Code Localization

LOC-BENCH is a dataset specifically designed for evaluating code localization methods in software repositories. It provides a diverse set of issues, including bug reports, feature requests, security vulnerabilities, and performance optimizations.

To reproduce the results reported in the paper, please refer to czlll/Loc-Bench_V0.

Loc-Bench is a refined benchmark for evaluating code localization methods. Compared to V0, it improves data quality by filtering out examples that do not involve any function-level code modifications. We recommend using this dataset for a more accurate and reliable evaluation of code localization performance.

The table below shows the distribution of categories in the dataset.

category count
Bug Report 275
Feature Request 216
Performance Issue 140
Security Vulnerability 29

Code: https://github.com/gersteinlab/LocAgent

πŸ”§ How to Use

You can easily load LOC-BENCH using Hugging Face's datasets library:

from datasets import load_dataset

dataset = load_dataset("czlll/Loc-Bench_V2", split="test")

πŸ“„ Citation

If you use LOC-BENCH in your research, please cite our paper:

@article{chen2025locagent,
title={LocAgent: Graph-Guided LLM Agents for Code Localization},
author={Chen, Zhaoling and Tang,Xiangru and Deng,Gangda and Wu,Fang and Wu,Jialong and Jiang,Zhiwei and Prasanna,Viktor and Cohan,Arman and Wang,Xingyao},
journal={arXiv preprint arXiv:2503.09089},
year={2025}
}
Downloads last month
1,259