| commit | old_file | new_file | old_contents | new_contents | subject | message | lang | license | repos | ndiff | instruction | content | fuzzy_diff |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 40-40 | stringlengths 4-118 | stringlengths 4-118 | stringlengths 10-2.94k | stringlengths 21-3.18k | stringlengths 16-444 | stringlengths 17-2.63k | stringclasses 1 value | stringclasses 13 values | stringlengths 5-43k | stringlengths 52-3.32k | stringlengths 16-444 | stringlengths 133-4.32k | stringlengths 16-3.18k |
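Each row below pairs a real commit (hash, file paths, full before/after file contents, subject, and message) with derived training fields: an `ndiff`-style diff, a natural-language `instruction`, a prompt-formatted `content` block (`## Code Before` / `## Instruction` / `## Code After`), and an abbreviated `fuzzy_diff` that elides unchanged code. A minimal sketch of loading and inspecting such a table with the Hugging Face `datasets` library; the repository id is a placeholder, not this dataset's real name:

```python
from datasets import load_dataset

# "user/commit-instruct-python" is a placeholder id, not the real dataset name.
ds = load_dataset("user/commit-instruct-python", split="train")

for row in ds.select(range(3)):
    print(row["commit"][:8], row["subject"])  # short hash plus commit subject
    print(row["instruction"])                 # natural-language edit request
    print(row["fuzzy_diff"][:120])            # abbreviated diff preview
```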
ffd917c5ace8e815b185495aec17cf47b0a7648a | storage_service/administration/tests/test_languages.py | storage_service/administration/tests/test_languages.py | from django.contrib.auth.models import User
from django.test import TestCase, override_settings
class TestLanguageSwitching(TestCase):
@classmethod
def setUpClass(cls):
User.objects.create_user(
username="admin", password="admin", email="admin@example.com"
)
super(TestLanguageSwitching, cls).setUpClass()
def setUp(self):
self.client.login(username="admin", password="admin")
def test_displays_language_form(self):
self.client.get("/administration/language/")
self.assertTemplateUsed("language_form.html")
@override_settings(LANGUAGE_CODE="es")
def test_selects_correct_language_on_form(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="es-es")
def test_falls_back_to_generic_language(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="en-us")
def test_switch_language(self):
response = self.client.post(
"/i18n/setlang/",
{"language": "fr", "next": "/administration/language/"},
follow=True,
)
assert response.context["language_selection"] == "fr"
| from django.contrib.auth.models import User
from django.test import TestCase, override_settings
class TestLanguageSwitching(TestCase):
@classmethod
def setUpClass(cls):
super(TestLanguageSwitching, cls).setUpClass()
User.objects.create_user(
username="admin", password="admin", email="admin@example.com"
)
def setUp(self):
self.client.login(username="admin", password="admin")
def test_displays_language_form(self):
self.client.get("/administration/language/")
self.assertTemplateUsed("language_form.html")
@override_settings(LANGUAGE_CODE="es")
def test_selects_correct_language_on_form(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="es-es")
def test_falls_back_to_generic_language(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="en-us")
def test_switch_language(self):
response = self.client.post(
"/i18n/setlang/",
{"language": "fr", "next": "/administration/language/"},
follow=True,
)
assert response.context["language_selection"] == "fr"
| Fix integrity error reusing db in tests | Fix integrity error reusing db in tests
Base `setUpClass` needs to be called first so the transaction is initialized
before we mutate the data.
This solves a conflict raised when using `--reuse-db`.
| Python | agpl-3.0 | artefactual/archivematica-storage-service,artefactual/archivematica-storage-service,artefactual/archivematica-storage-service,artefactual/archivematica-storage-service | from django.contrib.auth.models import User
from django.test import TestCase, override_settings
class TestLanguageSwitching(TestCase):
@classmethod
def setUpClass(cls):
+ super(TestLanguageSwitching, cls).setUpClass()
User.objects.create_user(
username="admin", password="admin", email="admin@example.com"
)
- super(TestLanguageSwitching, cls).setUpClass()
def setUp(self):
self.client.login(username="admin", password="admin")
def test_displays_language_form(self):
self.client.get("/administration/language/")
self.assertTemplateUsed("language_form.html")
@override_settings(LANGUAGE_CODE="es")
def test_selects_correct_language_on_form(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="es-es")
def test_falls_back_to_generic_language(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="en-us")
def test_switch_language(self):
response = self.client.post(
"/i18n/setlang/",
{"language": "fr", "next": "/administration/language/"},
follow=True,
)
assert response.context["language_selection"] == "fr"
| Fix integrity error reusing db in tests | ## Code Before:
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
class TestLanguageSwitching(TestCase):
@classmethod
def setUpClass(cls):
User.objects.create_user(
username="admin", password="admin", email="admin@example.com"
)
super(TestLanguageSwitching, cls).setUpClass()
def setUp(self):
self.client.login(username="admin", password="admin")
def test_displays_language_form(self):
self.client.get("/administration/language/")
self.assertTemplateUsed("language_form.html")
@override_settings(LANGUAGE_CODE="es")
def test_selects_correct_language_on_form(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="es-es")
def test_falls_back_to_generic_language(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="en-us")
def test_switch_language(self):
response = self.client.post(
"/i18n/setlang/",
{"language": "fr", "next": "/administration/language/"},
follow=True,
)
assert response.context["language_selection"] == "fr"
## Instruction:
Fix integrity error reusing db in tests
## Code After:
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
class TestLanguageSwitching(TestCase):
@classmethod
def setUpClass(cls):
super(TestLanguageSwitching, cls).setUpClass()
User.objects.create_user(
username="admin", password="admin", email="admin@example.com"
)
def setUp(self):
self.client.login(username="admin", password="admin")
def test_displays_language_form(self):
self.client.get("/administration/language/")
self.assertTemplateUsed("language_form.html")
@override_settings(LANGUAGE_CODE="es")
def test_selects_correct_language_on_form(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="es-es")
def test_falls_back_to_generic_language(self):
response = self.client.get("/administration/language/")
assert response.context["language_selection"] == "es"
@override_settings(LANGUAGE_CODE="en-us")
def test_switch_language(self):
response = self.client.post(
"/i18n/setlang/",
{"language": "fr", "next": "/administration/language/"},
follow=True,
)
assert response.context["language_selection"] == "fr"
| // ... existing code ...
def setUpClass(cls):
super(TestLanguageSwitching, cls).setUpClass()
User.objects.create_user(
// ... modified code ...
)
// ... rest of the code ... |
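The record above hinges on ordering inside `setUpClass`: running `super().setUpClass()` first lets Django's `TestCase` open its class-level transaction before any rows are written, so the fixture user no longer persists between runs under `--reuse-db`. A minimal sketch of the rule; the model and credentials are illustrative:

```python
from django.contrib.auth.models import User
from django.test import TestCase

class LanguageTests(TestCase):
    @classmethod
    def setUpClass(cls):
        # Initialize the base class first so the class-wide transaction
        # exists; rows created afterwards are rolled back with the class.
        super().setUpClass()
        User.objects.create_user(
            username="admin", password="admin", email="admin@example.com"
        )
```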
50488976619795621b5eb6dd3e427f6f82188426 | peanut/template.py | peanut/template.py |
"""Template"""
import peanut
import jinja2
from os import path
from jinja2 import FileSystemLoader
from jinja2.exceptions import TemplateNotFound
class SmartLoader(FileSystemLoader):
"""A smart template loader"""
available_extension = ['.html', '.xml']
def get_source(self, environment, template):
if template is None:
raise TemplateNotFound(template)
if '.' in template:
return super(SmartLoader, self).get_source(environment, template)
for extension in SmartLoader.available_extension:
try:
filename = template + extension
return super(SmartLoader, self).get_source(environment, filename)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
class Template(object):
"""Template"""
def __init__(self, path, filters=None, **kwargs):
loader = SmartLoader(path)
self.env = jinja2.Environment(
loader=loader,
)
# Update filters
if isinstance(filters, dict):
self.env.filters.update(filters)
# Update global namespace
self.env.globals.update(kwargs)
def render(self, name, **context):
"""Render template with name and context"""
template = self.env.get_template(name)
return template.render(**context)
|
"""Template"""
import peanut
import jinja2
from os import path
from jinja2 import FileSystemLoader
from jinja2.exceptions import TemplateNotFound
class SmartLoader(FileSystemLoader):
"""A smart template loader"""
available_extension = ['.html', '.xml']
def get_source(self, environment, template):
if template is None:
raise TemplateNotFound(template)
if '.' in template:
return super(SmartLoader, self).get_source(environment, template)
for extension in SmartLoader.available_extension:
try:
filename = template + extension
return super(SmartLoader, self).get_source(environment, filename)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
class Template(object):
"""Template"""
def __init__(self, path, filters=None, **kwargs):
loader = SmartLoader(path)
self.env = jinja2.Environment(
loader=loader,
)
# Update filters
if isinstance(filters, dict):
self.env.filters.update(filters)
# Update global namespace
self.env.globals.update(kwargs)
def update_context(self, **kwargs):
"""Update global context
"""
self.env.globals.update(kwargs)
def render(self, name, **context):
"""Render template with name and context"""
template = self.env.get_template(name)
return template.render(**context)
| Add an interface to update global context | Add an interface to update global context
| Python | mit | zqqf16/Peanut,zqqf16/Peanut,zqqf16/Peanut |
"""Template"""
import peanut
import jinja2
from os import path
from jinja2 import FileSystemLoader
from jinja2.exceptions import TemplateNotFound
class SmartLoader(FileSystemLoader):
"""A smart template loader"""
available_extension = ['.html', '.xml']
def get_source(self, environment, template):
if template is None:
raise TemplateNotFound(template)
if '.' in template:
return super(SmartLoader, self).get_source(environment, template)
for extension in SmartLoader.available_extension:
try:
filename = template + extension
return super(SmartLoader, self).get_source(environment, filename)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
class Template(object):
"""Template"""
def __init__(self, path, filters=None, **kwargs):
loader = SmartLoader(path)
self.env = jinja2.Environment(
loader=loader,
)
# Update filters
if isinstance(filters, dict):
self.env.filters.update(filters)
# Update global namespace
self.env.globals.update(kwargs)
+ def update_context(self, **kwargs):
+ """Update global context
+ """
+ self.env.globals.update(kwargs)
+
def render(self, name, **context):
"""Render template with name and context"""
template = self.env.get_template(name)
return template.render(**context)
| Add an interface to update global context | ## Code Before:
"""Template"""
import peanut
import jinja2
from os import path
from jinja2 import FileSystemLoader
from jinja2.exceptions import TemplateNotFound
class SmartLoader(FileSystemLoader):
"""A smart template loader"""
available_extension = ['.html', '.xml']
def get_source(self, environment, template):
if template is None:
raise TemplateNotFound(template)
if '.' in template:
return super(SmartLoader, self).get_source(environment, template)
for extension in SmartLoader.available_extension:
try:
filename = template + extension
return super(SmartLoader, self).get_source(environment, filename)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
class Template(object):
"""Template"""
def __init__(self, path, filters=None, **kwargs):
loader = SmartLoader(path)
self.env = jinja2.Environment(
loader=loader,
)
# Update filters
if isinstance(filters, dict):
self.env.filters.update(filters)
# Update global namespace
self.env.globals.update(kwargs)
def render(self, name, **context):
"""Render template with name and context"""
template = self.env.get_template(name)
return template.render(**context)
## Instruction:
Add an interface to update global context
## Code After:
"""Template"""
import peanut
import jinja2
from os import path
from jinja2 import FileSystemLoader
from jinja2.exceptions import TemplateNotFound
class SmartLoader(FileSystemLoader):
"""A smart template loader"""
available_extension = ['.html', '.xml']
def get_source(self, environment, template):
if template is None:
raise TemplateNotFound(template)
if '.' in template:
return super(SmartLoader, self).get_source(environment, template)
for extension in SmartLoader.available_extension:
try:
filename = template + extension
return super(SmartLoader, self).get_source(environment, filename)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
class Template(object):
"""Template"""
def __init__(self, path, filters=None, **kwargs):
loader = SmartLoader(path)
self.env = jinja2.Environment(
loader=loader,
)
# Update filters
if isinstance(filters, dict):
self.env.filters.update(filters)
# Update global namespace
self.env.globals.update(kwargs)
def update_context(self, **kwargs):
"""Update global context
"""
self.env.globals.update(kwargs)
def render(self, name, **context):
"""Render template with name and context"""
template = self.env.get_template(name)
return template.render(**context)
| # ... existing code ...
def update_context(self, **kwargs):
"""Update global context
"""
self.env.globals.update(kwargs)
def render(self, name, **context):
# ... rest of the code ... |
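The `update_context` method added above is a thin wrapper over Jinja2's mutable `Environment.globals` mapping, so callers can extend the global template namespace after construction instead of passing everything through `__init__`. A self-contained sketch of the same idea with an in-memory loader:

```python
from jinja2 import DictLoader, Environment

env = Environment(loader=DictLoader({"page": "{{ site }} build {{ build_id }}"}))
env.globals.update(site="Peanut")    # what Template.__init__ does with **kwargs
env.globals.update(build_id="42")    # what the new update_context exposes
print(env.get_template("page").render())  # -> Peanut build 42
```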
f1e5e2cc7fd35e0446f105d619dc01d3ba837865 | byceps/blueprints/admin/party/forms.py | byceps/blueprints/admin/party/forms.py |
from wtforms import BooleanField, DateTimeField, IntegerField, StringField
from wtforms.validators import InputRequired, Length, Optional
from ....util.l10n import LocalizedForm
class UpdateForm(LocalizedForm):
title = StringField('Titel', validators=[Length(min=1, max=40)])
starts_at = DateTimeField('Beginn', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
ends_at = DateTimeField('Ende', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
max_ticket_quantity = IntegerField('Maximale Anzahl Tickets', validators=[Optional()])
shop_id = StringField('Shop-ID', validators=[Optional()])
archived = BooleanField('archiviert')
class CreateForm(UpdateForm):
id = StringField('ID', validators=[Length(min=1, max=40)])
|
from wtforms import BooleanField, DateTimeField, IntegerField, StringField
from wtforms.validators import InputRequired, Length, Optional
from ....util.l10n import LocalizedForm
class _BaseForm(LocalizedForm):
title = StringField('Titel', validators=[Length(min=1, max=40)])
starts_at = DateTimeField('Beginn', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
ends_at = DateTimeField('Ende', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
max_ticket_quantity = IntegerField('Maximale Anzahl Tickets', validators=[Optional()])
shop_id = StringField('Shop-ID', validators=[Optional()])
class CreateForm(_BaseForm):
id = StringField('ID', validators=[Length(min=1, max=40)])
class UpdateForm(_BaseForm):
archived = BooleanField('archiviert')
| Introduce base party form, limit `archived` flag to update form | Introduce base party form, limit `archived` flag to update form
| Python | bsd-3-clause | m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps |
from wtforms import BooleanField, DateTimeField, IntegerField, StringField
from wtforms.validators import InputRequired, Length, Optional
from ....util.l10n import LocalizedForm
- class UpdateForm(LocalizedForm):
+ class _BaseForm(LocalizedForm):
title = StringField('Titel', validators=[Length(min=1, max=40)])
starts_at = DateTimeField('Beginn', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
ends_at = DateTimeField('Ende', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
max_ticket_quantity = IntegerField('Maximale Anzahl Tickets', validators=[Optional()])
shop_id = StringField('Shop-ID', validators=[Optional()])
+
+
+ class CreateForm(_BaseForm):
+ id = StringField('ID', validators=[Length(min=1, max=40)])
+
+
+ class UpdateForm(_BaseForm):
archived = BooleanField('archiviert')
-
- class CreateForm(UpdateForm):
- id = StringField('ID', validators=[Length(min=1, max=40)])
- | Introduce base party form, limit `archived` flag to update form | ## Code Before:
from wtforms import BooleanField, DateTimeField, IntegerField, StringField
from wtforms.validators import InputRequired, Length, Optional
from ....util.l10n import LocalizedForm
class UpdateForm(LocalizedForm):
title = StringField('Titel', validators=[Length(min=1, max=40)])
starts_at = DateTimeField('Beginn', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
ends_at = DateTimeField('Ende', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
max_ticket_quantity = IntegerField('Maximale Anzahl Tickets', validators=[Optional()])
shop_id = StringField('Shop-ID', validators=[Optional()])
archived = BooleanField('archiviert')
class CreateForm(UpdateForm):
id = StringField('ID', validators=[Length(min=1, max=40)])
## Instruction:
Introduce base party form, limit `archived` flag to update form
## Code After:
from wtforms import BooleanField, DateTimeField, IntegerField, StringField
from wtforms.validators import InputRequired, Length, Optional
from ....util.l10n import LocalizedForm
class _BaseForm(LocalizedForm):
title = StringField('Titel', validators=[Length(min=1, max=40)])
starts_at = DateTimeField('Beginn', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
ends_at = DateTimeField('Ende', format='%d.%m.%Y %H:%M', validators=[InputRequired()])
max_ticket_quantity = IntegerField('Maximale Anzahl Tickets', validators=[Optional()])
shop_id = StringField('Shop-ID', validators=[Optional()])
class CreateForm(_BaseForm):
id = StringField('ID', validators=[Length(min=1, max=40)])
class UpdateForm(_BaseForm):
archived = BooleanField('archiviert')
| // ... existing code ...
class _BaseForm(LocalizedForm):
title = StringField('Titel', validators=[Length(min=1, max=40)])
// ... modified code ...
shop_id = StringField('Shop-ID', validators=[Optional()])
...
class CreateForm(_BaseForm):
id = StringField('ID', validators=[Length(min=1, max=40)])
class UpdateForm(_BaseForm):
archived = BooleanField('archiviert')
// ... rest of the code ... |
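The refactor above is the usual WTForms inheritance pattern: shared fields live on a private base class and each concrete form declares only its own, which is what removes `archived` from the creation path. A compact sketch of the shape, using plain `wtforms.Form` in place of the project's `LocalizedForm` and trimmed to three fields:

```python
from wtforms import BooleanField, Form, StringField
from wtforms.validators import Length

class _BaseForm(Form):
    title = StringField('Title', validators=[Length(min=1, max=40)])

class CreateForm(_BaseForm):   # creation form: gets an id, never 'archived'
    id = StringField('ID', validators=[Length(min=1, max=40)])

class UpdateForm(_BaseForm):   # update form: may toggle 'archived'
    archived = BooleanField('archived')
```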
8ece892f01c4b32f7fa0a34c88bfdf8ea969e5ce | kobo/apps/__init__.py | kobo/apps/__init__.py | import kombu.exceptions
from django.apps import AppConfig
from django.core.checks import register, Tags
from kpi.utils.two_database_configuration_checker import \
TwoDatabaseConfigurationChecker
class KpiConfig(AppConfig):
name = 'kpi'
def ready(self, *args, **kwargs):
# Once it's okay to read from the database, apply the user-desired
# autoscaling configuration for Celery workers
from kobo.celery import update_concurrency_from_constance
try:
update_concurrency_from_constance.delay()
except kombu.exceptions.OperationalError as e:
# It's normal for Django to start without access to a message
# broker, e.g. while running `./manage.py collectstatic`
# during a Docker image build
pass
return super().ready(*args, **kwargs)
register(TwoDatabaseConfigurationChecker().as_check(), Tags.database)
| import kombu.exceptions
from django.apps import AppConfig
from django.core.checks import register, Tags
from kpi.utils.two_database_configuration_checker import \
TwoDatabaseConfigurationChecker
class KpiConfig(AppConfig):
name = 'kpi'
def ready(self, *args, **kwargs):
# Once it's okay to read from the database, apply the user-desired
# autoscaling configuration for Celery workers
from kobo.celery import update_concurrency_from_constance
try:
# Push this onto the task queue with `delay()` instead of calling
# it directly because a direct call in the absence of any Celery
# workers hangs indefinitely
update_concurrency_from_constance.delay()
except kombu.exceptions.OperationalError as e:
# It's normal for Django to start without access to a message
# broker, e.g. while running `./manage.py collectstatic`
# during a Docker image build
pass
return super().ready(*args, **kwargs)
register(TwoDatabaseConfigurationChecker().as_check(), Tags.database)
| Add explanatory comment for odd use of `delay()` | Add explanatory comment for odd use of `delay()`
| Python | agpl-3.0 | kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi | import kombu.exceptions
from django.apps import AppConfig
from django.core.checks import register, Tags
from kpi.utils.two_database_configuration_checker import \
TwoDatabaseConfigurationChecker
class KpiConfig(AppConfig):
name = 'kpi'
def ready(self, *args, **kwargs):
# Once it's okay to read from the database, apply the user-desired
# autoscaling configuration for Celery workers
from kobo.celery import update_concurrency_from_constance
try:
+ # Push this onto the task queue with `delay()` instead of calling
+ # it directly because a direct call in the absence of any Celery
+ # workers hangs indefinitely
update_concurrency_from_constance.delay()
except kombu.exceptions.OperationalError as e:
# It's normal for Django to start without access to a message
# broker, e.g. while running `./manage.py collectstatic`
# during a Docker image build
pass
return super().ready(*args, **kwargs)
register(TwoDatabaseConfigurationChecker().as_check(), Tags.database)
| Add explanatory comment for odd use of `delay()` | ## Code Before:
import kombu.exceptions
from django.apps import AppConfig
from django.core.checks import register, Tags
from kpi.utils.two_database_configuration_checker import \
TwoDatabaseConfigurationChecker
class KpiConfig(AppConfig):
name = 'kpi'
def ready(self, *args, **kwargs):
# Once it's okay to read from the database, apply the user-desired
# autoscaling configuration for Celery workers
from kobo.celery import update_concurrency_from_constance
try:
update_concurrency_from_constance.delay()
except kombu.exceptions.OperationalError as e:
# It's normal for Django to start without access to a message
# broker, e.g. while running `./manage.py collectstatic`
# during a Docker image build
pass
return super().ready(*args, **kwargs)
register(TwoDatabaseConfigurationChecker().as_check(), Tags.database)
## Instruction:
Add explanatory comment for odd use of `delay()`
## Code After:
import kombu.exceptions
from django.apps import AppConfig
from django.core.checks import register, Tags
from kpi.utils.two_database_configuration_checker import \
TwoDatabaseConfigurationChecker
class KpiConfig(AppConfig):
name = 'kpi'
def ready(self, *args, **kwargs):
# Once it's okay to read from the database, apply the user-desired
# autoscaling configuration for Celery workers
from kobo.celery import update_concurrency_from_constance
try:
# Push this onto the task queue with `delay()` instead of calling
# it directly because a direct call in the absence of any Celery
# workers hangs indefinitely
update_concurrency_from_constance.delay()
except kombu.exceptions.OperationalError as e:
# It's normal for Django to start without access to a message
# broker, e.g. while running `./manage.py collectstatic`
# during a Docker image build
pass
return super().ready(*args, **kwargs)
register(TwoDatabaseConfigurationChecker().as_check(), Tags.database)
| // ... existing code ...
try:
# Push this onto the task queue with `delay()` instead of calling
# it directly because a direct call in the absence of any Celery
# workers hangs indefinitely
update_concurrency_from_constance.delay()
// ... rest of the code ... |
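The comment added above records an operational distinction worth remembering: `.delay()` only enqueues a message, so it fails fast with `kombu.exceptions.OperationalError` when the broker is unreachable, while (per the commit) invoking the task without a worker available hangs. A hedged sketch of the guard, with a made-up task and broker URL:

```python
import kombu.exceptions
from celery import Celery

app = Celery("demo", broker="amqp://localhost")  # assumed broker URL

@app.task
def refresh_settings():
    pass  # placeholder body

def on_app_ready():
    try:
        refresh_settings.delay()  # enqueue only; does not wait for a worker
    except kombu.exceptions.OperationalError:
        pass  # no broker yet (e.g. during an image build): skip quietly
```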
73c616cc9e3d5351e0f4e41d60ff03bd58b85967 | scrapi/harvesters/scholarsbank.py | scrapi/harvesters/scholarsbank.py |
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
from scrapi.base.helpers import updated_schema
class ScholarsbankHarvester(OAIHarvester):
short_name = 'scholarsbank'
long_name = 'Scholars Bank University of Oregon'
url = 'http://scholarsbank.uoregon.edu'
timezone_granularity = True
base_url = 'http://scholarsbank.uoregon.edu/oai/request'
property_list = [
'type', 'source', 'format', 'relation',
'date', 'description', 'setSpec', 'identifier'
]
|
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
from scrapi.base.schemas import OAISCHEMA
from scrapi.base.helpers import updated_schema
def second_result(des):
return des[1] if len(des) > 1 else des[0] if des else ''
class ScholarsbankHarvester(OAIHarvester):
short_name = 'scholarsbank'
long_name = 'Scholars Bank University of Oregon'
url = 'http://scholarsbank.uoregon.edu'
timezone_granularity = True
base_url = 'http://scholarsbank.uoregon.edu/oai/request'
property_list = [
'type', 'source', 'format', 'relation',
'date', 'description', 'setSpec', 'identifier'
]
schema = updated_schema(OAISCHEMA, {
'description': ('//dc:description/node()', second_result)
})
| Update scholarsbank to grab second description if there are two | Update scholarsbank to grab second description if there are two
| Python | apache-2.0 | fabianvf/scrapi,jeffreyliu3230/scrapi,erinspace/scrapi,felliott/scrapi,CenterForOpenScience/scrapi,icereval/scrapi,alexgarciac/scrapi,CenterForOpenScience/scrapi,ostwald/scrapi,felliott/scrapi,mehanig/scrapi,erinspace/scrapi,mehanig/scrapi,fabianvf/scrapi |
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
+ from scrapi.base.schemas import OAISCHEMA
from scrapi.base.helpers import updated_schema
+
+
+ def second_result(des):
+ return des[1] if len(des) > 1 else des[0] if des else ''
class ScholarsbankHarvester(OAIHarvester):
short_name = 'scholarsbank'
long_name = 'Scholars Bank University of Oregon'
url = 'http://scholarsbank.uoregon.edu'
timezone_granularity = True
base_url = 'http://scholarsbank.uoregon.edu/oai/request'
property_list = [
'type', 'source', 'format', 'relation',
'date', 'description', 'setSpec', 'identifier'
]
+ schema = updated_schema(OAISCHEMA, {
+ 'description': ('//dc:description/node()', second_result)
+ })
+ | Update scholarsbank to grab second description if there are two | ## Code Before:
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
from scrapi.base.helpers import updated_schema
class ScholarsbankHarvester(OAIHarvester):
short_name = 'scholarsbank'
long_name = 'Scholars Bank University of Oregon'
url = 'http://scholarsbank.uoregon.edu'
timezone_granularity = True
base_url = 'http://scholarsbank.uoregon.edu/oai/request'
property_list = [
'type', 'source', 'format', 'relation',
'date', 'description', 'setSpec', 'identifier'
]
## Instruction:
Update scholarsbank to grab second description if there are two
## Code After:
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
from scrapi.base.schemas import OAISCHEMA
from scrapi.base.helpers import updated_schema
def second_result(des):
return des[1] if len(des) > 1 else des[0] if des else ''
class ScholarsbankHarvester(OAIHarvester):
short_name = 'scholarsbank'
long_name = 'Scholars Bank University of Oregon'
url = 'http://scholarsbank.uoregon.edu'
timezone_granularity = True
base_url = 'http://scholarsbank.uoregon.edu/oai/request'
property_list = [
'type', 'source', 'format', 'relation',
'date', 'description', 'setSpec', 'identifier'
]
schema = updated_schema(OAISCHEMA, {
'description': ('//dc:description/node()', second_result)
})
| # ... existing code ...
from scrapi.base import OAIHarvester
from scrapi.base.schemas import OAISCHEMA
from scrapi.base.helpers import updated_schema
def second_result(des):
return des[1] if len(des) > 1 else des[0] if des else ''
# ... modified code ...
]
schema = updated_schema(OAISCHEMA, {
'description': ('//dc:description/node()', second_result)
})
# ... rest of the code ... |
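The `second_result` helper wired into the schema above is a pure selection function over the list of matched `<dc:description>` nodes: prefer the second entry when more than one exists, fall back to the first, and default to an empty string. Its behaviour checks out in isolation:

```python
def second_result(des):
    return des[1] if len(des) > 1 else des[0] if des else ''

assert second_result(['short note', 'full abstract']) == 'full abstract'
assert second_result(['only one']) == 'only one'
assert second_result([]) == ''
```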
68a7fd8a444a8c568d716db11849f58ad7a9dee5 | django_pesapal/views.py | django_pesapal/views.py |
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.views.generic.base import RedirectView
from django.db.models.loading import get_model
from .models import Transaction
import conf
class TransactionCompletedView(RedirectView):
permanent = False
url = reverse_lazy(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL)
def get(self, request, *args, **kwargs):
'''
After Pesapal processes the transaction this will save the transaction and then redirect
to whatever redirect URL is set in your settings as `PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL`.
For further processing just create a `post_save` signal on the `Transaction` model.
'''
transaction_id = request.GET.get('pesapal_transaction_tracking_id', '')
merchant_reference = request.GET.get('pesapal_merchant_reference', '')
if transaction_id and merchant_reference:
transaction, created = Transaction.objects.get_or_create(merchant_reference=merchant_reference, pesapal_transaction=transaction_id)
return super(TransactionCompletedView, self).get(request, *args, **kwargs)
|
from django.core.urlresolvers import reverse_lazy, reverse
from django.views.generic.base import RedirectView
from django.core.urlresolvers import NoReverseMatch
from .models import Transaction
import conf
class TransactionCompletedView(RedirectView):
permanent = False
url = None
def get(self, request, *args, **kwargs):
'''
After Pesapal processes the transaction this will save the transaction and then redirect
to whatever redirect URL is set in your settings as `PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL`.
For further processing just create a `post_save` signal on the `Transaction` model.
'''
self.transaction_id = request.GET.get('pesapal_transaction_tracking_id', '')
self.merchant_reference = request.GET.get('pesapal_merchant_reference', '')
if self.transaction_id and self.merchant_reference:
transaction, created = Transaction.objects.get_or_create(merchant_reference=self.merchant_reference,
pesapal_transaction=self.transaction_id)
return super(TransactionCompletedView, self).get(request, *args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
try:
url = reverse(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL)
except NoReverseMatch:
url = reverse_lazy(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL,
kwargs={'merchant_reference': self.merchant_reference})
return url
| Add support for further processing of the payment while maintaining compatibility | Add support for further processing of the payment while maintaining compatibility
| Python | bsd-3-clause | odero/django-pesapal,odero/django-pesapal |
- from django.core.urlresolvers import reverse_lazy
+ from django.core.urlresolvers import reverse_lazy, reverse
- from django.contrib.auth.decorators import login_required
from django.views.generic.base import RedirectView
- from django.db.models.loading import get_model
+ from django.core.urlresolvers import NoReverseMatch
from .models import Transaction
import conf
class TransactionCompletedView(RedirectView):
permanent = False
- url = reverse_lazy(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL)
+ url = None
def get(self, request, *args, **kwargs):
'''
After Pesapal processes the transaction this will save the transaction and then redirect
to whatever redirect URL is set in your settings as `PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL`.
For further processing just create a `post_save` signal on the `Transaction` model.
'''
- transaction_id = request.GET.get('pesapal_transaction_tracking_id', '')
+ self.transaction_id = request.GET.get('pesapal_transaction_tracking_id', '')
- merchant_reference = request.GET.get('pesapal_merchant_reference', '')
+ self.merchant_reference = request.GET.get('pesapal_merchant_reference', '')
- if transaction_id and merchant_reference:
+ if self.transaction_id and self.merchant_reference:
- transaction, created = Transaction.objects.get_or_create(merchant_reference=merchant_reference, pesapal_transaction=transaction_id)
+ transaction, created = Transaction.objects.get_or_create(merchant_reference=self.merchant_reference,
+ pesapal_transaction=self.transaction_id)
return super(TransactionCompletedView, self).get(request, *args, **kwargs)
+ def get_redirect_url(self, *args, **kwargs):
+
+ try:
+ url = reverse(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL)
+ except NoReverseMatch:
+ url = reverse_lazy(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL,
+ kwargs={'merchant_reference': self.merchant_reference})
+ return url
+ | Add support for further processing of the payment while maintaining compatibility | ## Code Before:
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.views.generic.base import RedirectView
from django.db.models.loading import get_model
from .models import Transaction
import conf
class TransactionCompletedView(RedirectView):
permanent = False
url = reverse_lazy(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL)
def get(self, request, *args, **kwargs):
'''
After Pesapal processes the transaction this will save the transaction and then redirect
to whatever redirect URL is set in your settings as `PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL`.
For further processing just create a `post_save` signal on the `Transaction` model.
'''
transaction_id = request.GET.get('pesapal_transaction_tracking_id', '')
merchant_reference = request.GET.get('pesapal_merchant_reference', '')
if transaction_id and merchant_reference:
transaction, created = Transaction.objects.get_or_create(merchant_reference=merchant_reference, pesapal_transaction=transaction_id)
return super(TransactionCompletedView, self).get(request, *args, **kwargs)
## Instruction:
Add support for further processing of the payment while maintaining compatibility
## Code After:
from django.core.urlresolvers import reverse_lazy, reverse
from django.views.generic.base import RedirectView
from django.core.urlresolvers import NoReverseMatch
from .models import Transaction
import conf
class TransactionCompletedView(RedirectView):
permanent = False
url = None
def get(self, request, *args, **kwargs):
'''
After Pesapal processes the transaction this will save the transaction and then redirect
to whatever redirect URL is set in your settings as `PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL`.
For further processing just create a `post_save` signal on the `Transaction` model.
'''
self.transaction_id = request.GET.get('pesapal_transaction_tracking_id', '')
self.merchant_reference = request.GET.get('pesapal_merchant_reference', '')
if self.transaction_id and self.merchant_reference:
transaction, created = Transaction.objects.get_or_create(merchant_reference=self.merchant_reference,
pesapal_transaction=self.transaction_id)
return super(TransactionCompletedView, self).get(request, *args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
try:
url = reverse(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL)
except NoReverseMatch:
url = reverse_lazy(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL,
kwargs={'merchant_reference': self.merchant_reference})
return url
| ...
from django.core.urlresolvers import reverse_lazy, reverse
from django.views.generic.base import RedirectView
from django.core.urlresolvers import NoReverseMatch
...
permanent = False
url = None
...
'''
self.transaction_id = request.GET.get('pesapal_transaction_tracking_id', '')
self.merchant_reference = request.GET.get('pesapal_merchant_reference', '')
if self.transaction_id and self.merchant_reference:
transaction, created = Transaction.objects.get_or_create(merchant_reference=self.merchant_reference,
pesapal_transaction=self.transaction_id)
...
return super(TransactionCompletedView, self).get(request, *args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
try:
url = reverse(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL)
except NoReverseMatch:
url = reverse_lazy(conf.PESAPAL_TRANSACTION_DEFAULT_REDIRECT_URL,
kwargs={'merchant_reference': self.merchant_reference})
return url
... |
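The rewrite above moves the redirect target out of a class attribute and into `get_redirect_url`, computed per request: try a plain `reverse()` first, and only fall back to a reverse that carries the merchant reference when the no-argument route does not exist. The same try/fallback shape reduced to its core, written against the modern `django.urls` import path rather than the legacy `django.core.urlresolvers` used in the record:

```python
from django.urls import NoReverseMatch, reverse

def resolve_redirect(url_name, merchant_reference):
    """Prefer a no-argument route; fall back to one keyed by reference."""
    try:
        return reverse(url_name)
    except NoReverseMatch:
        return reverse(url_name,
                       kwargs={'merchant_reference': merchant_reference})
```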
c0596310d9281fc07d4db6e6fd2ed8433335edb9 | examples/build_examples.py | examples/build_examples.py |
import glob
import os
import platform
import subprocess
import sys
cx_path = sys.argv[1] if len(sys.argv) > 1 else "cx"
os.chdir(os.path.dirname(__file__))
for file in glob.glob("*.cx"):
if platform.system() == "Windows" and file == "tree.cx":
continue
extension = ".out" if platform.system() != "Windows" else ".exe"
output = os.path.splitext(file)[0] + extension
exit_status = subprocess.call([cx_path, file, "-o", output])
if exit_status != 0:
sys.exit(1)
print("All examples built successfully.")
|
import glob
import os
import platform
import subprocess
import sys
cx_path = sys.argv[1] if len(sys.argv) > 1 else "cx"
os.chdir(os.path.dirname(__file__))
for file in glob.glob("*.cx"):
if platform.system() == "Windows" and file == "tree.cx":
continue
extension = ".out" if platform.system() != "Windows" else ".exe"
output = os.path.splitext(file)[0] + extension
exit_status = subprocess.call([cx_path, file, "-o", output, "-Werror"])
if exit_status != 0:
sys.exit(1)
print("All examples built successfully.")
| Use -Werror for code examples | Use -Werror for code examples
| Python | mit | delta-lang/delta,delta-lang/delta,delta-lang/delta,delta-lang/delta |
import glob
import os
import platform
import subprocess
import sys
cx_path = sys.argv[1] if len(sys.argv) > 1 else "cx"
os.chdir(os.path.dirname(__file__))
for file in glob.glob("*.cx"):
if platform.system() == "Windows" and file == "tree.cx":
continue
extension = ".out" if platform.system() != "Windows" else ".exe"
output = os.path.splitext(file)[0] + extension
- exit_status = subprocess.call([cx_path, file, "-o", output])
+ exit_status = subprocess.call([cx_path, file, "-o", output, "-Werror"])
if exit_status != 0:
sys.exit(1)
print("All examples built successfully.")
| Use -Werror for code examples | ## Code Before:
import glob
import os
import platform
import subprocess
import sys
cx_path = sys.argv[1] if len(sys.argv) > 1 else "cx"
os.chdir(os.path.dirname(__file__))
for file in glob.glob("*.cx"):
if platform.system() == "Windows" and file == "tree.cx":
continue
extension = ".out" if platform.system() != "Windows" else ".exe"
output = os.path.splitext(file)[0] + extension
exit_status = subprocess.call([cx_path, file, "-o", output])
if exit_status != 0:
sys.exit(1)
print("All examples built successfully.")
## Instruction:
Use -Werror for code examples
## Code After:
import glob
import os
import platform
import subprocess
import sys
cx_path = sys.argv[1] if len(sys.argv) > 1 else "cx"
os.chdir(os.path.dirname(__file__))
for file in glob.glob("*.cx"):
if platform.system() == "Windows" and file == "tree.cx":
continue
extension = ".out" if platform.system() != "Windows" else ".exe"
output = os.path.splitext(file)[0] + extension
exit_status = subprocess.call([cx_path, file, "-o", output, "-Werror"])
if exit_status != 0:
sys.exit(1)
print("All examples built successfully.")
| // ... existing code ...
output = os.path.splitext(file)[0] + extension
exit_status = subprocess.call([cx_path, file, "-o", output, "-Werror"])
if exit_status != 0:
// ... rest of the code ... |
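The one-line change above threads `-Werror` into every compiler invocation so that warnings fail the example build. A modern equivalent of the loop body uses `subprocess.run` with `check=True`, which raises on a non-zero exit instead of needing a manual status test; the compiler name and flags below are illustrative:

```python
import subprocess

# CalledProcessError is raised on a non-zero exit, so warnings promoted
# to errors by -Werror abort the build loop immediately.
subprocess.run(["cx", "example.cx", "-o", "example.out", "-Werror"], check=True)
```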
45c7e910f13a43427359801782eef7ce537d6f5f | delayed_assert/__init__.py | delayed_assert/__init__.py | from delayed_assert.delayed_assert import expect, assert_expectations | import sys
if sys.version_info > (3, 0): # Python 3 and above
from delayed_assert.delayed_assert import expect, assert_expectations
else: # for Python 2
from delayed_assert import expect, assert_expectations
| Support for python 2 and 3 | Support for python 2 and 3 | Python | unlicense | pr4bh4sh/python-delayed-assert | + import sys
+
+ if sys.version_info > (3, 0): # Python 3 and above
- from delayed_assert.delayed_assert import expect, assert_expectations
+ from delayed_assert.delayed_assert import expect, assert_expectations
+ else: # for Python 2
+ from delayed_assert import expect, assert_expectations
+ | Support for python 2 and 3 | ## Code Before:
from delayed_assert.delayed_assert import expect, assert_expectations
## Instruction:
Support for python 2 and 3
## Code After:
import sys
if sys.version_info > (3, 0): # Python 3 and above
from delayed_assert.delayed_assert import expect, assert_expectations
else: # for Python 2
from delayed_assert import expect, assert_expectations
| ...
import sys
if sys.version_info > (3, 0): # Python 3 and above
from delayed_assert.delayed_assert import expect, assert_expectations
else: # for Python 2
from delayed_assert import expect, assert_expectations
... |
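The record above is the classic `sys.version_info` gate for keeping one package importable from both Python 2 (implicit relative imports) and Python 3 (absolute imports only). The same guard shown generically, with a hypothetical package layout:

```python
import sys

if sys.version_info >= (3,):          # Python 3: absolute import required
    from mypkg.helpers import expect  # hypothetical module layout
else:                                 # Python 2: implicit relative import
    from helpers import expect
```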
fa4e6e849eaff2611a5d978c7f7727a16a8c301e | daedalus/attacks/sample_attack.py | daedalus/attacks/sample_attack.py | def attack(input={}, errors=[], results={}):
return {'errors': errors, 'results': results} | def attack(input={}):
return {'errors': errors, 'results': results}
| Remove extra parameters to "attack()" | Remove extra parameters to "attack()"
The `results` and `errors` structures aren't needed as input parameters.
All we need to ensure is that these are returned by `attack()`. | Python | mit | IEEE-NITK/Daedalus,IEEE-NITK/Daedalus,chinmaydd/NITK_IEEE_SaS,IEEE-NITK/Daedalus | - def attack(input={}, errors=[], results={}):
+ def attack(input={}):
return {'errors': errors, 'results': results}
+ | Remove extra parameters to "attack()" | ## Code Before:
def attack(input={}, errors=[], results={}):
return {'errors': errors, 'results': results}
## Instruction:
Remove extra parameters to "attack()"
## Code After:
def attack(input={}):
return {'errors': errors, 'results': results}
| // ... existing code ...
def attack(input={}):
return {'errors': errors, 'results': results}
// ... rest of the code ... |
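One caution on the record above: once the parameters are removed, the returned expression still names `errors` and `results`, which no longer exist in the function's scope, so calling the new `attack()` raises `NameError` unless those names are defined at module level. A version that keeps the slimmer signature but defines its outputs locally might look like this:

```python
def attack(input={}):
    # Collected locally instead of being passed in; note that a mutable
    # default like {} is only safe while the function never mutates it.
    errors = []
    results = {}
    return {'errors': errors, 'results': results}

assert attack() == {'errors': [], 'results': {}}
```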
09418ae8fa652a5f8d2d3b3058e4acc774cbcbe9 | genes/nginx/main.py | genes/nginx/main.py | from genes.apt import commands as apt
from genes.brew import commands as brew
from genes.debian.traits import is_debian
from genes.mac.traits import is_osx
from genes.ubuntu.traits import is_ubuntu
def main():
if is_ubuntu() or is_debian():
apt.update()
apt.install('nginx')
elif is_osx():
brew.update()
brew.install('nginx')
else:
pass
| from typing import Callable, Optional
from genes.apt import commands as apt
from genes.brew import commands as brew
from genes.debian.traits import is_debian
from genes.mac.traits import is_osx
from genes.ubuntu.traits import is_ubuntu
def main(config: Optional[Callable[[], None]]=None):
# Install nginx
if is_ubuntu() or is_debian():
apt.update()
apt.install('nginx')
elif is_osx():
brew.update()
brew.install('nginx')
else:
pass
# Then configure it
if config is not None:
config()
| Add config option for nginx | Add config option for nginx | Python | mit | hatchery/genepool,hatchery/Genepool2 | + from typing import Callable, Optional
from genes.apt import commands as apt
from genes.brew import commands as brew
from genes.debian.traits import is_debian
from genes.mac.traits import is_osx
from genes.ubuntu.traits import is_ubuntu
- def main():
+ def main(config: Optional[Callable[[], None]]=None):
+ # Install nginx
if is_ubuntu() or is_debian():
apt.update()
apt.install('nginx')
elif is_osx():
brew.update()
brew.install('nginx')
else:
pass
+
+ # Then configure it
+ if config is not None:
+ config()
| Add config option for nginx | ## Code Before:
from genes.apt import commands as apt
from genes.brew import commands as brew
from genes.debian.traits import is_debian
from genes.mac.traits import is_osx
from genes.ubuntu.traits import is_ubuntu
def main():
if is_ubuntu() or is_debian():
apt.update()
apt.install('nginx')
elif is_osx():
brew.update()
brew.install('nginx')
else:
pass
## Instruction:
Add config option for nginx
## Code After:
from typing import Callable, Optional
from genes.apt import commands as apt
from genes.brew import commands as brew
from genes.debian.traits import is_debian
from genes.mac.traits import is_osx
from genes.ubuntu.traits import is_ubuntu
def main(config: Optional[Callable[[], None]]=None):
# Install nginx
if is_ubuntu() or is_debian():
apt.update()
apt.install('nginx')
elif is_osx():
brew.update()
brew.install('nginx')
else:
pass
# Then configure it
if config is not None:
config()
| // ... existing code ...
from typing import Callable, Optional
from genes.apt import commands as apt
// ... modified code ...
def main(config: Optional[Callable[[], None]]=None):
# Install nginx
if is_ubuntu() or is_debian():
...
pass
# Then configure it
if config is not None:
config()
// ... rest of the code ... |
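The new `config` parameter above is the optional-callback idiom: accept an `Optional[Callable[[], None]]`, do the unconditional work, then invoke the hook only when one was supplied. Stripped of the platform branches, the shape is:

```python
from typing import Callable, Optional

def install(config: Optional[Callable[[], None]] = None) -> None:
    print("installing package...")  # stand-in for the apt/brew branches
    if config is not None:          # run the caller's configuration hook
        config()

install()                                      # install only
install(lambda: print("writing nginx.conf"))   # install, then configure
```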
f253feac7a4c53bd17958b0c74adbec528ae2e17 | rethinkdb/setup-rethinkdb.py | rethinkdb/setup-rethinkdb.py | import rethinkdb as r
import argparse
parser = argparse.ArgumentParser(description='Set up RethinkDB locally')
args = parser.parse_args()
conn = r.connect()
r.db_create('muzhack').run(conn)
r.db('muzhack').table_create('users').run(conn)
r.db('muzhack').table_create('projects').run(conn)
r.db('muzhack').table_create('resetPasswordTokens').run(conn)
| import rethinkdb as r
import argparse
parser = argparse.ArgumentParser(description='Set up RethinkDB')
parser.add_argument('-H', '--host', default='localhost', help='Specify host')
args = parser.parse_args()
conn = r.connect(host=args.host)
r.db_create('muzhack').run(conn)
r.db('muzhack').table_create('users').run(conn)
r.db('muzhack').table_create('projects').run(conn)
r.db('muzhack').table_create('resetPasswordTokens').run(conn)
| Allow setting up rethinkdb remotely | Allow setting up rethinkdb remotely
| Python | mit | muzhack/musitechhub,muzhack/musitechhub,muzhack/musitechhub,muzhack/muzhack,muzhack/muzhack,muzhack/musitechhub,muzhack/muzhack,muzhack/muzhack | import rethinkdb as r
import argparse
- parser = argparse.ArgumentParser(description='Set up RethinkDB locally')
+ parser = argparse.ArgumentParser(description='Set up RethinkDB')
+ parser.add_argument('-H', '--host', default='localhost', help='Specify host')
args = parser.parse_args()
- conn = r.connect()
+ conn = r.connect(host=args.host)
r.db_create('muzhack').run(conn)
r.db('muzhack').table_create('users').run(conn)
r.db('muzhack').table_create('projects').run(conn)
r.db('muzhack').table_create('resetPasswordTokens').run(conn)
| Allow setting up rethinkdb remotely | ## Code Before:
import rethinkdb as r
import argparse
parser = argparse.ArgumentParser(description='Set up RethinkDB locally')
args = parser.parse_args()
conn = r.connect()
r.db_create('muzhack').run(conn)
r.db('muzhack').table_create('users').run(conn)
r.db('muzhack').table_create('projects').run(conn)
r.db('muzhack').table_create('resetPasswordTokens').run(conn)
## Instruction:
Allow setting up rethinkdb remotely
## Code After:
import rethinkdb as r
import argparse
parser = argparse.ArgumentParser(description='Set up RethinkDB')
parser.add_argument('-H', '--host', default='localhost', help='Specify host')
args = parser.parse_args()
conn = r.connect(host=args.host)
r.db_create('muzhack').run(conn)
r.db('muzhack').table_create('users').run(conn)
r.db('muzhack').table_create('projects').run(conn)
r.db('muzhack').table_create('resetPasswordTokens').run(conn)
| # ... existing code ...
parser = argparse.ArgumentParser(description='Set up RethinkDB')
parser.add_argument('-H', '--host', default='localhost', help='Specify host')
args = parser.parse_args()
# ... modified code ...
conn = r.connect(host=args.host)
r.db_create('muzhack').run(conn)
# ... rest of the code ... |
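The change above is a standard argparse move: expose the connection host as `-H/--host` with a `localhost` default, so one script serves both local and remote setup. The flag handling in isolation, parsed against a sample argv:

```python
import argparse

parser = argparse.ArgumentParser(description='Set up RethinkDB')
parser.add_argument('-H', '--host', default='localhost', help='Specify host')

args = parser.parse_args(['--host', 'db.internal'])  # sample argv
print(args.host)                                     # -> db.internal
print(parser.parse_args([]).host)                    # -> localhost
```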
12f1024d559c300c7c04256362da78ec8d3a647b | data/models.py | data/models.py | from django.db import models
class DataPoint(models.Model):
name = models.CharField(max_length=600)
exact_name = models.CharField(max_length=1000, null=True, blank=True)
decay_feature = models.CharField(max_length=1000, null=True, blank=True)
options = models.CharField(max_length=100)
homo = models.FloatField()
lumo = models.FloatField()
homo_orbital = models.IntegerField()
energy = models.FloatField()
dipole = models.FloatField()
band_gap = models.FloatField(null=True, blank=True)
def __unicode__(self):
return self.exact_name
| import numpy
import ast
from django.db import models
class DataPoint(models.Model):
name = models.CharField(max_length=600)
exact_name = models.CharField(max_length=1000, null=True, blank=True)
decay_feature = models.CharField(max_length=1000, null=True, blank=True)
options = models.CharField(max_length=100)
homo = models.FloatField()
lumo = models.FloatField()
homo_orbital = models.IntegerField()
energy = models.FloatField()
dipole = models.FloatField()
band_gap = models.FloatField(null=True, blank=True)
def __unicode__(self):
return self.exact_name
@classmethod
def get_data(cls):
data = DataPoint.objects.filter(band_gap__isnull=False,
exact_name__isnull=False,
decay_feature__isnull=False)
M = len(data)
HOMO = numpy.zeros((M, 1))
LUMO = numpy.zeros((M, 1))
GAP = numpy.zeros((M, 1))
vectors = []
for i, x in enumerate(data):
HOMO[i] = x.homo
LUMO[i] = x.lumo
GAP[i] = x.band_gap
vectors.append(ast.literal_eval(x.decay_feature))
FEATURE = numpy.matrix(vectors)
return FEATURE, HOMO, LUMO, GAP | Add method on DataPoint to get numpy matrices with all the ML data | Add method on DataPoint to get numpy matrices with all the ML data
| Python | mit | crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp | + import numpy
+ import ast
+
from django.db import models
class DataPoint(models.Model):
name = models.CharField(max_length=600)
exact_name = models.CharField(max_length=1000, null=True, blank=True)
decay_feature = models.CharField(max_length=1000, null=True, blank=True)
options = models.CharField(max_length=100)
homo = models.FloatField()
lumo = models.FloatField()
homo_orbital = models.IntegerField()
energy = models.FloatField()
dipole = models.FloatField()
band_gap = models.FloatField(null=True, blank=True)
def __unicode__(self):
return self.exact_name
+ @classmethod
+ def get_data(cls):
+ data = DataPoint.objects.filter(band_gap__isnull=False,
+ exact_name__isnull=False,
+ decay_feature__isnull=False)
+ M = len(data)
+ HOMO = numpy.zeros((M, 1))
+ LUMO = numpy.zeros((M, 1))
+ GAP = numpy.zeros((M, 1))
+ vectors = []
+ for i, x in enumerate(data):
+ HOMO[i] = x.homo
+ LUMO[i] = x.lumo
+ GAP[i] = x.band_gap
+ vectors.append(ast.literal_eval(x.decay_feature))
+ FEATURE = numpy.matrix(vectors)
+ return FEATURE, HOMO, LUMO, GAP | Add method on DataPoint to get numpy matrices with all the ML data | ## Code Before:
from django.db import models
class DataPoint(models.Model):
name = models.CharField(max_length=600)
exact_name = models.CharField(max_length=1000, null=True, blank=True)
decay_feature = models.CharField(max_length=1000, null=True, blank=True)
options = models.CharField(max_length=100)
homo = models.FloatField()
lumo = models.FloatField()
homo_orbital = models.IntegerField()
energy = models.FloatField()
dipole = models.FloatField()
band_gap = models.FloatField(null=True, blank=True)
def __unicode__(self):
return self.exact_name
## Instruction:
Add method on DataPoint to get numpy matrices with all the ML data
## Code After:
import numpy
import ast
from django.db import models
class DataPoint(models.Model):
name = models.CharField(max_length=600)
exact_name = models.CharField(max_length=1000, null=True, blank=True)
decay_feature = models.CharField(max_length=1000, null=True, blank=True)
options = models.CharField(max_length=100)
homo = models.FloatField()
lumo = models.FloatField()
homo_orbital = models.IntegerField()
energy = models.FloatField()
dipole = models.FloatField()
band_gap = models.FloatField(null=True, blank=True)
def __unicode__(self):
return self.exact_name
@classmethod
def get_data(cls):
data = DataPoint.objects.filter(band_gap__isnull=False,
exact_name__isnull=False,
decay_feature__isnull=False)
M = len(data)
HOMO = numpy.zeros((M, 1))
LUMO = numpy.zeros((M, 1))
GAP = numpy.zeros((M, 1))
vectors = []
for i, x in enumerate(data):
HOMO[i] = x.homo
LUMO[i] = x.lumo
GAP[i] = x.band_gap
vectors.append(ast.literal_eval(x.decay_feature))
FEATURE = numpy.matrix(vectors)
return FEATURE, HOMO, LUMO, GAP | # ... existing code ...
import numpy
import ast
from django.db import models
# ... modified code ...
return self.exact_name
@classmethod
def get_data(cls):
data = DataPoint.objects.filter(band_gap__isnull=False,
exact_name__isnull=False,
decay_feature__isnull=False)
M = len(data)
HOMO = numpy.zeros((M, 1))
LUMO = numpy.zeros((M, 1))
GAP = numpy.zeros((M, 1))
vectors = []
for i, x in enumerate(data):
HOMO[i] = x.homo
LUMO[i] = x.lumo
GAP[i] = x.band_gap
vectors.append(ast.literal_eval(x.decay_feature))
FEATURE = numpy.matrix(vectors)
return FEATURE, HOMO, LUMO, GAP
# ... rest of the code ... |
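`get_data` above assembles supervised-learning targets (`HOMO`, `LUMO`, `GAP`) as (M, 1) arrays and parses each stored `decay_feature` string into a feature-matrix row with `ast.literal_eval`. The same assembly over plain tuples, without the Django queryset:

```python
import ast
import numpy

rows = [("[1.0, 2.0]", -5.1), ("[0.5, 3.5]", -4.8)]  # (decay_feature, homo)

HOMO = numpy.zeros((len(rows), 1))
vectors = []
for i, (feature, homo) in enumerate(rows):
    HOMO[i] = homo
    vectors.append(ast.literal_eval(feature))  # "[1.0, 2.0]" -> [1.0, 2.0]
FEATURE = numpy.matrix(vectors)  # numpy.array is preferred in new code
print(FEATURE.shape, HOMO.ravel())
```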
7ca3b7f294b954dcd95880c938709240b268766f | test_url_runner.py | test_url_runner.py | import unittest
# This line is important so flake8 must ignore this one
from app import views # flake8: noqa
from app import mulungwishi_app
class URLTest(unittest.TestCase):
def setUp(self):
self.client = mulungwishi_app.test_client()
self.client.testing = True
def test_invalid_url_page_not_found(self):
result = self.client.get('/page/not/found')
self.assertEqual(result.status_code, 404)
def test_invalid_query(self):
result = self.client.get('/query?no_content')
self.assertEqual(result.status_code, 400)
def test_invalid_query_empty(self):
result = self.client.get('/query?content')
self.assertEqual(result.status_code, 400)
def test_invalid_query_none(self):
result = self.client.get('/query?')
self.assertEqual(result.status_code, 400)
def test_valid_url(self):
result = self.client.get('/')
self.assertEqual(result.status_code, 200)
def test_valid_query(self):
result = self.client.get('/query?content=farmer_sms')
self.assertEqual(result.status_code, 200)
| import unittest
# This line is important so flake8 must ignore this one
from app import views # flake8: noqa
from app import mulungwishi_app
class URLTest(unittest.TestCase):
def setUp(self):
self.client = mulungwishi_app.test_client()
self.client.testing = True
def test_invalid_url_page_not_found(self):
result = self.client.get('/page/not/found')
self.assertEqual(result.status_code, 404)
def test_invalid_query(self):
result = self.client.get('/query?no_content')
self.assertEqual(result.status_code, 400)
def test_invalid_query_empty(self):
result = self.client.get('/query?content')
self.assertEqual(result.status_code, 400)
def test_invalid_query_no_value_assigned(self):
result = self.client.get('/query?content=')
self.assertEqual(result.status_code, 400)
def test_invalid_query_none(self):
result = self.client.get('/query?')
self.assertEqual(result.status_code, 400)
def test_valid_url(self):
result = self.client.get('/')
self.assertEqual(result.status_code, 200)
def test_valid_query(self):
result = self.client.get('/query?content=farmer_sms')
self.assertEqual(result.status_code, 200)
| Add test for empty content string on query | Add test for empty content string on query
| Python | mit | engagespark/public-webhooks,engagespark/mulungwishi-webhook,engagespark/mulungwishi-webhook,admiral96/public-webhooks,engagespark/public-webhooks,admiral96/public-webhooks,admiral96/mulungwishi-webhook,admiral96/mulungwishi-webhook | import unittest
# This line is important so flake8 must ignore this one
from app import views # flake8: noqa
from app import mulungwishi_app
class URLTest(unittest.TestCase):
def setUp(self):
self.client = mulungwishi_app.test_client()
self.client.testing = True
def test_invalid_url_page_not_found(self):
result = self.client.get('/page/not/found')
self.assertEqual(result.status_code, 404)
def test_invalid_query(self):
result = self.client.get('/query?no_content')
self.assertEqual(result.status_code, 400)
def test_invalid_query_empty(self):
result = self.client.get('/query?content')
self.assertEqual(result.status_code, 400)
+ def test_invalid_query_no_value_assigned(self):
+ result = self.client.get('/query?content=')
+ self.assertEqual(result.status_code, 400)
+
def test_invalid_query_none(self):
result = self.client.get('/query?')
self.assertEqual(result.status_code, 400)
def test_valid_url(self):
result = self.client.get('/')
self.assertEqual(result.status_code, 200)
def test_valid_query(self):
result = self.client.get('/query?content=farmer_sms')
self.assertEqual(result.status_code, 200)
| Add test for empty content string on query | ## Code Before:
import unittest
# This line is important so flake8 must ignore this one
from app import views # flake8: noqa
from app import mulungwishi_app
class URLTest(unittest.TestCase):
def setUp(self):
self.client = mulungwishi_app.test_client()
self.client.testing = True
def test_invalid_url_page_not_found(self):
result = self.client.get('/page/not/found')
self.assertEqual(result.status_code, 404)
def test_invalid_query(self):
result = self.client.get('/query?no_content')
self.assertEqual(result.status_code, 400)
def test_invalid_query_empty(self):
result = self.client.get('/query?content')
self.assertEqual(result.status_code, 400)
def test_invalid_query_none(self):
result = self.client.get('/query?')
self.assertEqual(result.status_code, 400)
def test_valid_url(self):
result = self.client.get('/')
self.assertEqual(result.status_code, 200)
def test_valid_query(self):
result = self.client.get('/query?content=farmer_sms')
self.assertEqual(result.status_code, 200)
## Instruction:
Add test for empty content string on query
## Code After:
import unittest
# This line is important so flake8 must ignore this one
from app import views # flake8: noqa
from app import mulungwishi_app
class URLTest(unittest.TestCase):
def setUp(self):
self.client = mulungwishi_app.test_client()
self.client.testing = True
def test_invalid_url_page_not_found(self):
result = self.client.get('/page/not/found')
self.assertEqual(result.status_code, 404)
def test_invalid_query(self):
result = self.client.get('/query?no_content')
self.assertEqual(result.status_code, 400)
def test_invalid_query_empty(self):
result = self.client.get('/query?content')
self.assertEqual(result.status_code, 400)
def test_invalid_query_no_value_assigned(self):
result = self.client.get('/query?content=')
self.assertEqual(result.status_code, 400)
def test_invalid_query_none(self):
result = self.client.get('/query?')
self.assertEqual(result.status_code, 400)
def test_valid_url(self):
result = self.client.get('/')
self.assertEqual(result.status_code, 200)
def test_valid_query(self):
result = self.client.get('/query?content=farmer_sms')
self.assertEqual(result.status_code, 200)
| // ... existing code ...
def test_invalid_query_no_value_assigned(self):
result = self.client.get('/query?content=')
self.assertEqual(result.status_code, 400)
def test_invalid_query_none(self):
// ... rest of the code ... |
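
For context on why '?content=' needs its own test: with Flask, a parameter that is present but empty comes back as an empty string rather than None, so only a falsiness check rejects all three bad shapes ('?', '?content' and '?content='). A minimal, self-contained sketch (the route and messages are illustrative, not this project's actual handler):

from flask import Flask, request

app = Flask(__name__)

@app.route('/query')
def query():
    content = request.args.get('content')  # None if absent, '' for '?content' or '?content='
    if not content:  # rejects both None and the empty string
        return 'missing content', 400
    return 'ok', 200
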
36c4e01f5bfb6ba00bd018f5bca4e8652a63ca8d | main.py | main.py |
import json, sys
if len(sys.argv) > 1:
inFn = sys.argv[1]
with open(inFn, 'r') as f:
try:
defs = json.load(f)
except:
sys.exit('{} has a syntax error'.format(inFn))
sort = sorted(defs, key=str.lower)
print('# My Dictionary')
print('\n## Definitions')
curLetter = None
for k in sort:
l = k[0].upper()
if curLetter != l:
curLetter = l
print('\n### {}'.format(curLetter))
word = k[0].upper() + k[1:]
print('* *{}* - {}'.format(word, defs[k]))
|
import json, sys
if len(sys.argv) > 1:
inFn = sys.argv[1]
with open(inFn, 'r') as f:
try:
defs = json.load(f)
except ValueError as e:
sys.exit('ValueError in {}: {}'.format(inFn, e))
sort = sorted(defs, key=str.lower)
print('# My Dictionary')
print('\n## Definitions')
curLetter = None
for k in sort:
l = k[0].upper()
if curLetter != l:
curLetter = l
print('\n### {}'.format(curLetter))
word = k[0].upper() + k[1:]
print('* *{}* - {}'.format(word, defs[k]))
| Print more explicit error reports for input file parsing | Print more explicit error reports for input file parsing
| Python | mit | JoshuaBrockschmidt/dictbuilder |
import json, sys
if len(sys.argv) > 1:
inFn = sys.argv[1]
with open(inFn, 'r') as f:
try:
defs = json.load(f)
- except:
- sys.exit('{} has a syntax error'.format(inFn))
+ except ValueError as e:
+ sys.exit('ValueError in {}: {}'.format(inFn, e))
sort = sorted(defs, key=str.lower)
print('# My Dictionary')
print('\n## Definitions')
curLetter = None
for k in sort:
l = k[0].upper()
if curLetter != l:
curLetter = l
print('\n### {}'.format(curLetter))
word = k[0].upper() + k[1:]
print('* *{}* - {}'.format(word, defs[k]))
| Print more explicit error reports for input file parsing | ## Code Before:
import json, sys
if len(sys.argv) > 1:
inFn = sys.argv[1]
with open(inFn, 'r') as f:
try:
defs = json.load(f)
except:
sys.exit('{} has a syntax error'.format(inFn))
sort = sorted(defs, key=str.lower)
print('# My Dictionary')
print('\n## Definitions')
curLetter = None
for k in sort:
l = k[0].upper()
if curLetter != l:
curLetter = l
print('\n### {}'.format(curLetter))
word = k[0].upper() + k[1:]
print('* *{}* - {}'.format(word, defs[k]))
## Instruction:
Print more explicit error reports for input file parsing
## Code After:
import json, sys
if len(sys.argv) > 1:
inFn = sys.argv[1]
with open(inFn, 'r') as f:
try:
defs = json.load(f)
except ValueError as e:
sys.exit('ValueError in {}: {}'.format(inFn, e))
sort = sorted(defs, key=str.lower)
print('# My Dictionary')
print('\n## Definitions')
curLetter = None
for k in sort:
l = k[0].upper()
if curLetter != l:
curLetter = l
print('\n### {}'.format(curLetter))
word = k[0].upper() + k[1:]
print('* *{}* - {}'.format(word, defs[k]))
| // ... existing code ...
defs = json.load(f)
except ValueError as e:
sys.exit('ValueError in {}: {}'.format(inFn, e))
// ... rest of the code ... |
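
Why narrowing the handler to ValueError matters: a bare except: also swallows KeyboardInterrupt and SystemExit, while json.load reports malformed input as a ValueError (on Python 3 the more specific json.JSONDecodeError, a ValueError subclass, which carries the line and column). A quick standalone illustration:

import json

try:
    json.loads('{"broken": }')
except ValueError as e:  # json.JSONDecodeError subclasses ValueError
    print('ValueError: {}'.format(e))
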
b4d76c715810ddd30c0966df2614cd6ed7b03566 | tweets/views.py | tweets/views.py | from django.http import Http404
from django.shortcuts import render
from django.utils.translation import ugettext as _
from django.views.generic import ListView
from .models import Message
class MessageList(ListView):
template_name = "message_list.html"
model = Message
class MyMessageList(MessageList):
def get_queryset(self):
queryset = super().get_queryset()
return queryset.filter(user=self.request.user)
class FilteredMessageList(MessageList):
def get_queryset(self):
queryset = super().get_queryset()
queryset = queryset.filter(user__username=self.kwargs.get('username'))
if not queryset:
raise Http404(_('Username not found.'))
return queryset
| from django.http import Http404
from django.contrib.auth import get_user_model
from django.shortcuts import render, get_object_or_404
from django.utils.translation import ugettext as _
from django.views.generic import ListView
from .models import Message
class MessageList(ListView):
template_name = "message_list.html"
model = Message
class MyMessageList(MessageList):
def get_queryset(self):
queryset = super().get_queryset()
return queryset.filter(user=self.request.user)
class FilteredMessageList(MessageList):
def get_queryset(self):
# Check to see if user exists. 404 if not.
username = self.kwargs.get('username')
user = get_object_or_404(get_user_model(), username=username)
# Filter messages by the user as author.
queryset = super().get_queryset()
        return queryset.filter(user=user)
| Adjust user filtering logic to 404 only if user does not exist | Adjust user filtering logic to 404 only if user does not exist
| Python | mit | pennomi/openwest2015-twitter-clone,pennomi/openwest2015-twitter-clone,pennomi/openwest2015-twitter-clone | from django.http import Http404
+ from django.contrib.auth import get_user_model
- from django.shortcuts import render
+ from django.shortcuts import render, get_object_or_404
from django.utils.translation import ugettext as _
from django.views.generic import ListView
from .models import Message
class MessageList(ListView):
template_name = "message_list.html"
model = Message
class MyMessageList(MessageList):
def get_queryset(self):
queryset = super().get_queryset()
return queryset.filter(user=self.request.user)
class FilteredMessageList(MessageList):
def get_queryset(self):
+ # Check to see if user exists. 404 if not.
+ username = self.kwargs.get('username')
+ user = get_object_or_404(get_user_model(), username=username)
+
+ # Filter messages by the user as author.
queryset = super().get_queryset()
+ return queryset.filter(user=user)
- queryset = queryset.filter(user__username=self.kwargs.get('username'))
- if not queryset:
- raise Http404(_('Username not found.'))
-        return queryset
| Adjust user filtering logic to 404 only if user does not exist | ## Code Before:
from django.http import Http404
from django.shortcuts import render
from django.utils.translation import ugettext as _
from django.views.generic import ListView
from .models import Message
class MessageList(ListView):
template_name = "message_list.html"
model = Message
class MyMessageList(MessageList):
def get_queryset(self):
queryset = super().get_queryset()
return queryset.filter(user=self.request.user)
class FilteredMessageList(MessageList):
def get_queryset(self):
queryset = super().get_queryset()
queryset = queryset.filter(user__username=self.kwargs.get('username'))
if not queryset:
raise Http404(_('Username not found.'))
return queryset
## Instruction:
Adjust user filtering logic to 404 only if user does not exist
## Code After:
from django.http import Http404
from django.contrib.auth import get_user_model
from django.shortcuts import render, get_object_or_404
from django.utils.translation import ugettext as _
from django.views.generic import ListView
from .models import Message
class MessageList(ListView):
template_name = "message_list.html"
model = Message
class MyMessageList(MessageList):
def get_queryset(self):
queryset = super().get_queryset()
return queryset.filter(user=self.request.user)
class FilteredMessageList(MessageList):
def get_queryset(self):
# Check to see if user exists. 404 if not.
username = self.kwargs.get('username')
user = get_object_or_404(get_user_model(), username=username)
# Filter messages by the user as author.
queryset = super().get_queryset()
        return queryset.filter(user=user)
| // ... existing code ...
from django.http import Http404
from django.contrib.auth import get_user_model
from django.shortcuts import render, get_object_or_404
from django.utils.translation import ugettext as _
// ... modified code ...
def get_queryset(self):
# Check to see if user exists. 404 if not.
username = self.kwargs.get('username')
user = get_object_or_404(get_user_model(), username=username)
# Filter messages by the user as author.
queryset = super().get_queryset()
return queryset.filter(user=user)
// ... rest of the code ... |
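
The behavioral point in the message: filtering messages by username and raising 404 on an empty queryset conflates an unknown user with an existing user who simply has no messages. get_object_or_404 separates the two cases. A rough sketch of the distinction (Message stands for the model imported in the view above):

from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404

# 404 only when the user itself is missing:
user = get_object_or_404(get_user_model(), username='alice')
# An existing but quiet user now yields an empty, perfectly valid page:
messages = Message.objects.filter(user=user)  # may be empty; no Http404
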
b3f7b677edb0a87abff2ef64dadb64547d757d6b | elasticsearch_django/migrations/0004_auto_20161129_1135.py | elasticsearch_django/migrations/0004_auto_20161129_1135.py |
from django.db import migrations
from ..db.fields import JSONField
class Migration(migrations.Migration):
dependencies = [("elasticsearch_django", "0003_auto_20160926_2021")]
operations = [
migrations.AlterField(
model_name="searchquery",
name="hits",
field=JSONField(
help_text="The list of meta info for each of the query matches returned."
),
),
migrations.AlterField(
model_name="searchquery",
name="query",
field=JSONField(help_text="The raw ElasticSearch DSL query."),
),
]
|
from django.contrib.postgres.fields import JSONField
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("elasticsearch_django", "0003_auto_20160926_2021")]
operations = [
migrations.AlterField(
model_name="searchquery",
name="hits",
field=JSONField(
help_text="The list of meta info for each of the query matches returned."
),
),
migrations.AlterField(
model_name="searchquery",
name="query",
field=JSONField(help_text="The raw ElasticSearch DSL query."),
),
]
| Update migration to use native JSONField | Update migration to use native JSONField
| Python | mit | yunojuno/elasticsearch-django |
+ from django.contrib.postgres.fields import JSONField
from django.db import migrations
-
- from ..db.fields import JSONField
class Migration(migrations.Migration):
dependencies = [("elasticsearch_django", "0003_auto_20160926_2021")]
operations = [
migrations.AlterField(
model_name="searchquery",
name="hits",
field=JSONField(
help_text="The list of meta info for each of the query matches returned."
),
),
migrations.AlterField(
model_name="searchquery",
name="query",
field=JSONField(help_text="The raw ElasticSearch DSL query."),
),
]
| Update migration to use native JSONField | ## Code Before:
from django.db import migrations
from ..db.fields import JSONField
class Migration(migrations.Migration):
dependencies = [("elasticsearch_django", "0003_auto_20160926_2021")]
operations = [
migrations.AlterField(
model_name="searchquery",
name="hits",
field=JSONField(
help_text="The list of meta info for each of the query matches returned."
),
),
migrations.AlterField(
model_name="searchquery",
name="query",
field=JSONField(help_text="The raw ElasticSearch DSL query."),
),
]
## Instruction:
Update migration to use native JSONField
## Code After:
from django.contrib.postgres.fields import JSONField
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("elasticsearch_django", "0003_auto_20160926_2021")]
operations = [
migrations.AlterField(
model_name="searchquery",
name="hits",
field=JSONField(
help_text="The list of meta info for each of the query matches returned."
),
),
migrations.AlterField(
model_name="searchquery",
name="query",
field=JSONField(help_text="The raw ElasticSearch DSL query."),
),
]
| // ... existing code ...
from django.contrib.postgres.fields import JSONField
from django.db import migrations
// ... rest of the code ... |
0c913d4bf94637da916b609b1b1d0d34b03776b7 | tests/test_logger.py | tests/test_logger.py | import pytest
from mugloar import dragon, logger
@pytest.fixture
def log_instance():
"""Returns a Logger instance"""
return logger.Logger()
@pytest.fixture
def knight():
return {'agility': 8, 'endurance': 8, 'armor': 0, 'attack': 4}
@pytest.fixture
def dragon_instance():
return dragon.Dragon()
@pytest.fixture
def stats_map():
return {'attack': 'scaleThickness',
'armor': 'clawSharpness',
'agility': 'wingStrength',
'endurance': 'fireBreath'}
def test_comparison(log_instance, knight, dragon_instance, stats_map):
log_instance.comparison(knight, dragon_instance, stats_map)
| import pytest
from mugloar import dragon, logger
@pytest.fixture
def log_instance():
"""Returns a Logger instance"""
return logger.Logger()
@pytest.fixture
def knight():
return [('endurance', 8), ('attack', 8), ('armor', 0), ('agility', 4)]
@pytest.fixture
def dragon_instance():
return dragon.Dragon()
@pytest.fixture
def stats_map():
return {'attack': 'scaleThickness',
'armor': 'clawSharpness',
'agility': 'wingStrength',
'endurance': 'fireBreath'}
def test_comparison(log_instance, knight, dragon_instance, stats_map):
dragon_instance.set_relative_stats((5, 5, 5, 5), knight)
log_instance.comparison(knight, dragon_instance, stats_map)
| Implement rudimentary unit tests for logger class | Implement rudimentary unit tests for logger class
| Python | mit | reinikai/mugloar | import pytest
from mugloar import dragon, logger
@pytest.fixture
def log_instance():
"""Returns a Logger instance"""
return logger.Logger()
@pytest.fixture
def knight():
- return {'agility': 8, 'endurance': 8, 'armor': 0, 'attack': 4}
+ return [('endurance', 8), ('attack', 8), ('armor', 0), ('agility', 4)]
@pytest.fixture
def dragon_instance():
return dragon.Dragon()
@pytest.fixture
def stats_map():
return {'attack': 'scaleThickness',
'armor': 'clawSharpness',
'agility': 'wingStrength',
'endurance': 'fireBreath'}
def test_comparison(log_instance, knight, dragon_instance, stats_map):
+ dragon_instance.set_relative_stats((5, 5, 5, 5), knight)
log_instance.comparison(knight, dragon_instance, stats_map)
| Implement rudimentary unit tests for logger class | ## Code Before:
import pytest
from mugloar import dragon, logger
@pytest.fixture
def log_instance():
"""Returns a Logger instance"""
return logger.Logger()
@pytest.fixture
def knight():
return {'agility': 8, 'endurance': 8, 'armor': 0, 'attack': 4}
@pytest.fixture
def dragon_instance():
return dragon.Dragon()
@pytest.fixture
def stats_map():
return {'attack': 'scaleThickness',
'armor': 'clawSharpness',
'agility': 'wingStrength',
'endurance': 'fireBreath'}
def test_comparison(log_instance, knight, dragon_instance, stats_map):
log_instance.comparison(knight, dragon_instance, stats_map)
## Instruction:
Implement rudimentary unit tests for logger class
## Code After:
import pytest
from mugloar import dragon, logger
@pytest.fixture
def log_instance():
"""Returns a Logger instance"""
return logger.Logger()
@pytest.fixture
def knight():
return [('endurance', 8), ('attack', 8), ('armor', 0), ('agility', 4)]
@pytest.fixture
def dragon_instance():
return dragon.Dragon()
@pytest.fixture
def stats_map():
return {'attack': 'scaleThickness',
'armor': 'clawSharpness',
'agility': 'wingStrength',
'endurance': 'fireBreath'}
def test_comparison(log_instance, knight, dragon_instance, stats_map):
dragon_instance.set_relative_stats((5, 5, 5, 5), knight)
log_instance.comparison(knight, dragon_instance, stats_map)
| // ... existing code ...
def knight():
return [('endurance', 8), ('attack', 8), ('armor', 0), ('agility', 4)]
// ... modified code ...
def test_comparison(log_instance, knight, dragon_instance, stats_map):
dragon_instance.set_relative_stats((5, 5, 5, 5), knight)
log_instance.comparison(knight, dragon_instance, stats_map)
// ... rest of the code ... |
df264c490f8600c5047db328c9388c1d07d4cbd5 | setup.py | setup.py | import distutils.core
# Uploading to PyPI
# =================
# $ python setup.py register -r pypi
# $ python setup.py sdist upload -r pypi
version = '0.0'
distutils.core.setup(
name='vecrec',
version=version,
author='Kale Kundert and Alex Mitchell',
packages=['vecrec'],
url='https://github.com/kxgames/vecrec',
download_url='https://github.com/kxgames/vecrec/tarball/'+version,
license='LICENSE.txt',
description="A 2D vector and rectangle library.",
long_description=open('README.rst').read(),
keywords=['2D', 'vector', 'rectangle', 'library'])
| import distutils.core
# Uploading to PyPI
# =================
# $ python setup.py register -r pypi
# $ python setup.py sdist upload -r pypi
version = '0.0'
distutils.core.setup(
name='vecrec',
version=version,
author='Kale Kundert and Alex Mitchell',
url='https://github.com/kxgames/vecrec',
download_url='https://github.com/kxgames/vecrec/tarball/'+version,
license='LICENSE.txt',
description="A 2D vector and rectangle library.",
long_description=open('README.rst').read(),
keywords=['2D', 'vector', 'rectangle', 'library'],
packages=['vecrec'],
requires=['finalexam', 'coverage'],
)
| Add finalexam and coverage as dependencies. | Add finalexam and coverage as dependencies.
| Python | mit | kxgames/vecrec,kxgames/vecrec | import distutils.core
# Uploading to PyPI
# =================
# $ python setup.py register -r pypi
# $ python setup.py sdist upload -r pypi
version = '0.0'
distutils.core.setup(
name='vecrec',
version=version,
author='Kale Kundert and Alex Mitchell',
- packages=['vecrec'],
url='https://github.com/kxgames/vecrec',
download_url='https://github.com/kxgames/vecrec/tarball/'+version,
license='LICENSE.txt',
description="A 2D vector and rectangle library.",
long_description=open('README.rst').read(),
- keywords=['2D', 'vector', 'rectangle', 'library'])
+ keywords=['2D', 'vector', 'rectangle', 'library'],
+ packages=['vecrec'],
+ requires=['finalexam', 'coverage'],
+ )
| Add finalexam and coverage as dependencies. | ## Code Before:
import distutils.core
# Uploading to PyPI
# =================
# $ python setup.py register -r pypi
# $ python setup.py sdist upload -r pypi
version = '0.0'
distutils.core.setup(
name='vecrec',
version=version,
author='Kale Kundert and Alex Mitchell',
packages=['vecrec'],
url='https://github.com/kxgames/vecrec',
download_url='https://github.com/kxgames/vecrec/tarball/'+version,
license='LICENSE.txt',
description="A 2D vector and rectangle library.",
long_description=open('README.rst').read(),
keywords=['2D', 'vector', 'rectangle', 'library'])
## Instruction:
Add finalexam and coverage as dependencies.
## Code After:
import distutils.core
# Uploading to PyPI
# =================
# $ python setup.py register -r pypi
# $ python setup.py sdist upload -r pypi
version = '0.0'
distutils.core.setup(
name='vecrec',
version=version,
author='Kale Kundert and Alex Mitchell',
url='https://github.com/kxgames/vecrec',
download_url='https://github.com/kxgames/vecrec/tarball/'+version,
license='LICENSE.txt',
description="A 2D vector and rectangle library.",
long_description=open('README.rst').read(),
keywords=['2D', 'vector', 'rectangle', 'library'],
packages=['vecrec'],
requires=['finalexam', 'coverage'],
)
| # ... existing code ...
author='Kale Kundert and Alex Mitchell',
url='https://github.com/kxgames/vecrec',
# ... modified code ...
long_description=open('README.rst').read(),
keywords=['2D', 'vector', 'rectangle', 'library'],
packages=['vecrec'],
requires=['finalexam', 'coverage'],
)
# ... rest of the code ... |
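
A note on the design choice: distutils requires only records a Requires field in the package metadata; pip does not resolve or install from it. Projects that want dependencies pulled in at install time normally switch to setuptools and install_requires. A hedged alternative sketch:

from setuptools import setup

setup(
    name='vecrec',
    install_requires=['finalexam', 'coverage'],  # resolved by pip at install time
)
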
d76398b40844e969439d495d4ea3604e5b2011b4 | mock-recipe-server/test_mock_server.py | mock-recipe-server/test_mock_server.py | from utils import APIPath
def test_testcase_difference(root_path):
"""Ensure that different testcases output different data."""
recipes = set()
testcase_paths = (
APIPath(path, 'http://example.com')
for path in root_path.path.iterdir() if path.is_dir()
)
for testcase_path in testcase_paths:
recipe_path = testcase_path.add('api', 'v1', 'recipe')
recipe_data = recipe_path.read()
assert recipe_data not in recipes
recipes.add(recipe_data)
# This asserts both that testcases have differing signed data
# and that a single testcase does not have the same data for
# signed and unsigned endpoints.
signed_recipe_data = recipe_path.add('signed').read()
assert signed_recipe_data not in recipes
recipes.add(signed_recipe_data)
| from utils import APIPath
def test_testcase_difference(root_path):
"""Ensure that different testcases output different data."""
recipes = set()
testcase_paths = (
APIPath(path, 'http://example.com')
for path in root_path.path.iterdir() if path.is_dir()
)
for testcase_path in testcase_paths:
recipe_path = testcase_path.add('api', 'v1', 'recipe')
try:
recipe_data = recipe_path.read()
signed_recipe_data = recipe_path.add('signed').read()
except FileNotFoundError:
# Some error testcases are purposefully missing files,
# so we just skip checking those.
continue
assert recipe_data not in recipes
recipes.add(recipe_data)
# This asserts both that testcases have differing signed data
# and that a single testcase does not have the same data for
# signed and unsigned endpoints.
assert signed_recipe_data not in recipes
recipes.add(signed_recipe_data)
| Handle error testcases in mock-server tests. | Handle error testcases in mock-server tests.
| Python | mpl-2.0 | Osmose/normandy,Osmose/normandy,mozilla/normandy,mozilla/normandy,mozilla/normandy,Osmose/normandy,Osmose/normandy,mozilla/normandy | from utils import APIPath
def test_testcase_difference(root_path):
"""Ensure that different testcases output different data."""
recipes = set()
testcase_paths = (
APIPath(path, 'http://example.com')
for path in root_path.path.iterdir() if path.is_dir()
)
for testcase_path in testcase_paths:
recipe_path = testcase_path.add('api', 'v1', 'recipe')
+ try:
- recipe_data = recipe_path.read()
+ recipe_data = recipe_path.read()
+ signed_recipe_data = recipe_path.add('signed').read()
+ except FileNotFoundError:
+ # Some error testcases are purposefully missing files,
+ # so we just skip checking those.
+ continue
+
assert recipe_data not in recipes
recipes.add(recipe_data)
# This asserts both that testcases have differing signed data
# and that a single testcase does not have the same data for
# signed and unsigned endpoints.
- signed_recipe_data = recipe_path.add('signed').read()
assert signed_recipe_data not in recipes
recipes.add(signed_recipe_data)
| Handle error testcases in mock-server tests. | ## Code Before:
from utils import APIPath
def test_testcase_difference(root_path):
"""Ensure that different testcases output different data."""
recipes = set()
testcase_paths = (
APIPath(path, 'http://example.com')
for path in root_path.path.iterdir() if path.is_dir()
)
for testcase_path in testcase_paths:
recipe_path = testcase_path.add('api', 'v1', 'recipe')
recipe_data = recipe_path.read()
assert recipe_data not in recipes
recipes.add(recipe_data)
# This asserts both that testcases have differing signed data
# and that a single testcase does not have the same data for
# signed and unsigned endpoints.
signed_recipe_data = recipe_path.add('signed').read()
assert signed_recipe_data not in recipes
recipes.add(signed_recipe_data)
## Instruction:
Handle error testcases in mock-server tests.
## Code After:
from utils import APIPath
def test_testcase_difference(root_path):
"""Ensure that different testcases output different data."""
recipes = set()
testcase_paths = (
APIPath(path, 'http://example.com')
for path in root_path.path.iterdir() if path.is_dir()
)
for testcase_path in testcase_paths:
recipe_path = testcase_path.add('api', 'v1', 'recipe')
try:
recipe_data = recipe_path.read()
signed_recipe_data = recipe_path.add('signed').read()
except FileNotFoundError:
# Some error testcases are purposefully missing files,
# so we just skip checking those.
continue
assert recipe_data not in recipes
recipes.add(recipe_data)
# This asserts both that testcases have differing signed data
# and that a single testcase does not have the same data for
# signed and unsigned endpoints.
assert signed_recipe_data not in recipes
recipes.add(signed_recipe_data)
| # ... existing code ...
try:
recipe_data = recipe_path.read()
signed_recipe_data = recipe_path.add('signed').read()
except FileNotFoundError:
# Some error testcases are purposefully missing files,
# so we just skip checking those.
continue
assert recipe_data not in recipes
# ... modified code ...
# signed and unsigned endpoints.
assert signed_recipe_data not in recipes
# ... rest of the code ... |
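
The try/continue pattern above generalizes to any read of an optionally present file. A small standalone helper in the same spirit (the name is made up):

from pathlib import Path

def read_if_present(path):
    """Return the file's text, or None when the file is deliberately absent."""
    try:
        return Path(path).read_text()
    except FileNotFoundError:
        return None
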
ff80cfab47b03de5d86d82907de0f28caa7829e9 | test_project/dashboards.py | test_project/dashboards.py | from controlcenter import Dashboard, widgets
class EmptyDashboard(Dashboard):
pass
class MyWidget0(widgets.Widget):
pass
class MyWidget1(widgets.Widget):
pass
class NonEmptyDashboard(Dashboard):
widgets = [
MyWidget0,
widgets.Group([MyWidget1])
]
| from controlcenter import Dashboard, widgets
class EmptyDashboard(Dashboard):
pass
class MyWidget0(widgets.Widget):
template_name = 'chart.html'
class MyWidget1(widgets.Widget):
template_name = 'chart.html'
class NonEmptyDashboard(Dashboard):
widgets = [
MyWidget0,
widgets.Group([MyWidget1])
]
| Define template_name for test widgets | Tests: Define template_name for test widgets
This avoids an "AssertionError: MyWidget0.template_name is not defined."
on Django 2.1, which no longer silences {% include %} exceptions.
Django deprecation notes:
https://docs.djangoproject.com/en/2.1/internals/deprecation/#deprecation-removed-in-2-1
| Python | bsd-3-clause | byashimov/django-controlcenter,byashimov/django-controlcenter,byashimov/django-controlcenter | from controlcenter import Dashboard, widgets
class EmptyDashboard(Dashboard):
pass
class MyWidget0(widgets.Widget):
- pass
+ template_name = 'chart.html'
class MyWidget1(widgets.Widget):
- pass
+ template_name = 'chart.html'
class NonEmptyDashboard(Dashboard):
widgets = [
MyWidget0,
widgets.Group([MyWidget1])
]
| Define template_name for test widgets | ## Code Before:
from controlcenter import Dashboard, widgets
class EmptyDashboard(Dashboard):
pass
class MyWidget0(widgets.Widget):
pass
class MyWidget1(widgets.Widget):
pass
class NonEmptyDashboard(Dashboard):
widgets = [
MyWidget0,
widgets.Group([MyWidget1])
]
## Instruction:
Define template_name for test widgets
## Code After:
from controlcenter import Dashboard, widgets
class EmptyDashboard(Dashboard):
pass
class MyWidget0(widgets.Widget):
template_name = 'chart.html'
class MyWidget1(widgets.Widget):
template_name = 'chart.html'
class NonEmptyDashboard(Dashboard):
widgets = [
MyWidget0,
widgets.Group([MyWidget1])
]
| // ... existing code ...
class MyWidget0(widgets.Widget):
template_name = 'chart.html'
// ... modified code ...
class MyWidget1(widgets.Widget):
template_name = 'chart.html'
// ... rest of the code ... |
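
The Django 2.1 change behind this fix: exceptions raised while rendering {% include %} used to be silenced when template debugging was off, but from 2.1 they propagate, so a widget whose template lookup trips an assertion now fails the test loudly. A rough standalone illustration, not this project's template setup:

from django.template import Context, Engine

engine = Engine(dirs=[])  # standalone engine, no settings module needed
template = engine.from_string('{% include "does-not-exist.html" %}')
template.render(Context())  # Django >= 2.1: raises TemplateDoesNotExist
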
a476c42216af99488c2e02bacd29f7e3a869a3e7 | tests/retrieval_metrics/test_precision_at_k.py | tests/retrieval_metrics/test_precision_at_k.py | import numpy as np
import pytest
import tensorflow as tf
from tensorflow_similarity.retrieval_metrics import PrecisionAtK
testdata = [
(
"micro",
tf.constant(0.583333333),
),
(
"macro",
tf.constant(0.5),
),
]
@pytest.mark.parametrize("avg, expected", testdata, ids=["micro", "macro"])
def test_compute(avg, expected):
query_labels = tf.constant([1, 1, 1, 0])
match_mask = tf.constant(
[
[True, True, False],
[True, True, False],
[True, True, False],
[False, False, True],
],
dtype=bool,
)
rm = PrecisionAtK(k=3, average=avg)
precision = rm.compute(query_labels=query_labels, match_mask=match_mask)
np.testing.assert_allclose(precision, expected)
| import numpy as np
import pytest
import tensorflow as tf
from tensorflow_similarity.retrieval_metrics import PrecisionAtK
testdata = [
(
"micro",
tf.constant(0.583333333),
),
(
"macro",
tf.constant(0.5),
),
]
@pytest.mark.parametrize("avg, expected", testdata, ids=["micro", "macro"])
def test_compute(avg, expected):
query_labels = tf.constant([1, 1, 1, 0])
match_mask = tf.constant(
[
[True, True, False],
[True, True, False],
[True, True, False],
[False, False, True],
],
dtype=bool,
)
rm = PrecisionAtK(k=3, average=avg)
precision = rm.compute(query_labels=query_labels, match_mask=match_mask)
np.testing.assert_allclose(precision, expected, atol=1e-05)
| Update atol on precision at k test. | Update atol on precision at k test.
| Python | apache-2.0 | tensorflow/similarity | import numpy as np
import pytest
import tensorflow as tf
from tensorflow_similarity.retrieval_metrics import PrecisionAtK
testdata = [
(
"micro",
tf.constant(0.583333333),
),
(
"macro",
tf.constant(0.5),
),
]
@pytest.mark.parametrize("avg, expected", testdata, ids=["micro", "macro"])
def test_compute(avg, expected):
query_labels = tf.constant([1, 1, 1, 0])
match_mask = tf.constant(
[
[True, True, False],
[True, True, False],
[True, True, False],
[False, False, True],
],
dtype=bool,
)
rm = PrecisionAtK(k=3, average=avg)
precision = rm.compute(query_labels=query_labels, match_mask=match_mask)
- np.testing.assert_allclose(precision, expected)
+ np.testing.assert_allclose(precision, expected, atol=1e-05)
| Update atol on precision at k test. | ## Code Before:
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_similarity.retrieval_metrics import PrecisionAtK
testdata = [
(
"micro",
tf.constant(0.583333333),
),
(
"macro",
tf.constant(0.5),
),
]
@pytest.mark.parametrize("avg, expected", testdata, ids=["micro", "macro"])
def test_compute(avg, expected):
query_labels = tf.constant([1, 1, 1, 0])
match_mask = tf.constant(
[
[True, True, False],
[True, True, False],
[True, True, False],
[False, False, True],
],
dtype=bool,
)
rm = PrecisionAtK(k=3, average=avg)
precision = rm.compute(query_labels=query_labels, match_mask=match_mask)
np.testing.assert_allclose(precision, expected)
## Instruction:
Update atol on precision at k test.
## Code After:
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_similarity.retrieval_metrics import PrecisionAtK
testdata = [
(
"micro",
tf.constant(0.583333333),
),
(
"macro",
tf.constant(0.5),
),
]
@pytest.mark.parametrize("avg, expected", testdata, ids=["micro", "macro"])
def test_compute(avg, expected):
query_labels = tf.constant([1, 1, 1, 0])
match_mask = tf.constant(
[
[True, True, False],
[True, True, False],
[True, True, False],
[False, False, True],
],
dtype=bool,
)
rm = PrecisionAtK(k=3, average=avg)
precision = rm.compute(query_labels=query_labels, match_mask=match_mask)
np.testing.assert_allclose(precision, expected, atol=1e-05)
| // ... existing code ...
precision = rm.compute(query_labels=query_labels, match_mask=match_mask)
np.testing.assert_allclose(precision, expected, atol=1e-05)
// ... rest of the code ... |
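
For reference, np.testing.assert_allclose passes when |actual - desired| <= atol + rtol * |desired|, with defaults rtol=1e-07 and atol=0, so the nonzero atol here is what absorbs the truncation in 0.583333333. A toy check:

import numpy as np

# difference is about 3.3e-07: within atol=1e-05, but larger than the
# default rtol * |desired| of roughly 5.8e-08, so atol is required.
np.testing.assert_allclose(0.5833330, 0.583333333, atol=1e-05)
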
5a4fc9a89bfdb279ad0cda40f45b35ff3841c970 | voteswap/urls.py | voteswap/urls.py | from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
from voteswap.views import landing_page
from voteswap.views import signup
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^home/$', index, name='index'),
url('^$', landing_page, name='landing_page'),
url('^logout/$', 'django.contrib.auth.views.logout', name='logout'),
url('^user/', include('users.urls', namespace='users')),
url('^signup/$', signup, name='signup'),
]
| from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import logout
from voteswap.views import index
from voteswap.views import landing_page
from voteswap.views import signup
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^home/$', index, name='index'),
url('^$', landing_page, name='landing_page'),
url('^logout/$', logout, name='logout'),
url('^user/', include('users.urls', namespace='users')),
url('^signup/$', signup, name='signup'),
]
| Fix logout view so django stops complaining | Fix logout view so django stops complaining
| Python | mit | sbuss/voteswap,sbuss/voteswap,sbuss/voteswap,sbuss/voteswap | from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
+ from django.contrib.auth.views import logout
from voteswap.views import index
from voteswap.views import landing_page
from voteswap.views import signup
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^home/$', index, name='index'),
url('^$', landing_page, name='landing_page'),
- url('^logout/$', 'django.contrib.auth.views.logout', name='logout'),
+ url('^logout/$', logout, name='logout'),
url('^user/', include('users.urls', namespace='users')),
url('^signup/$', signup, name='signup'),
]
| Fix logout view so django stops complaining | ## Code Before:
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
from voteswap.views import landing_page
from voteswap.views import signup
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^home/$', index, name='index'),
url('^$', landing_page, name='landing_page'),
url('^logout/$', 'django.contrib.auth.views.logout', name='logout'),
url('^user/', include('users.urls', namespace='users')),
url('^signup/$', signup, name='signup'),
]
## Instruction:
Fix logout view so django stops complaining
## Code After:
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import logout
from voteswap.views import index
from voteswap.views import landing_page
from voteswap.views import signup
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^home/$', index, name='index'),
url('^$', landing_page, name='landing_page'),
url('^logout/$', logout, name='logout'),
url('^user/', include('users.urls', namespace='users')),
url('^signup/$', signup, name='signup'),
]
| // ... existing code ...
from django.contrib import admin
from django.contrib.auth.views import logout
// ... modified code ...
url('^$', landing_page, name='landing_page'),
url('^logout/$', logout, name='logout'),
url('^user/', include('users.urls', namespace='users')),
// ... rest of the code ... |
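
Background on the complaint: Django 1.10 removed support for passing a dotted string like 'django.contrib.auth.views.logout' to url(), so importing the view and passing the callable, as above, is the required form. On Django 1.11 and later the class-based equivalent would be (hedged sketch):

from django.conf.urls import url
from django.contrib.auth.views import LogoutView  # available from Django 1.11

urlpatterns = [
    url(r'^logout/$', LogoutView.as_view(), name='logout'),
]
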
fc7ad7d55622aa9edb77b9f7822260110a772805 | db.py | db.py | from flask.ext.script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
from credstash import getAllSecrets
import os
secrets = getAllSecrets(region="eu-west-1")
for key, val in secrets.items():
os.environ[key] = val
application = create_app()
manager = Manager(application)
migrate = Migrate(application, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| from flask.ext.script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
from credstash import getAllSecrets
import os
default_env_file = '/home/ubuntu/environment'
environment = 'live'
if os.path.isfile(default_env_file):
with open(default_env_file, 'r') as environment_file:
environment = environment_file.readline().strip()
# on aws get secrets and export to env
os.environ.update(getAllSecrets(region="eu-west-1"))
from config import configs
os.environ['NOTIFY_API_ENVIRONMENT'] = configs[environment]
application = create_app()
manager = Manager(application)
migrate = Migrate(application, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| Bring DB script into line with other prod scripts | Bring DB script into line with other prod scripts
| Python | mit | alphagov/notifications-api,alphagov/notifications-api | from flask.ext.script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
from credstash import getAllSecrets
import os
+ default_env_file = '/home/ubuntu/environment'
+ environment = 'live'
+
+ if os.path.isfile(default_env_file):
+ with open(default_env_file, 'r') as environment_file:
+ environment = environment_file.readline().strip()
+
+ # on aws get secrets and export to env
- secrets = getAllSecrets(region="eu-west-1")
+ os.environ.update(getAllSecrets(region="eu-west-1"))
- for key, val in secrets.items():
- os.environ[key] = val
+
+ from config import configs
+
+ os.environ['NOTIFY_API_ENVIRONMENT'] = configs[environment]
application = create_app()
manager = Manager(application)
migrate = Migrate(application, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| Bring DB script into line with other prod scripts | ## Code Before:
from flask.ext.script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
from credstash import getAllSecrets
import os
secrets = getAllSecrets(region="eu-west-1")
for key, val in secrets.items():
os.environ[key] = val
application = create_app()
manager = Manager(application)
migrate = Migrate(application, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
## Instruction:
Bring DB script into line with other prod scripts
## Code After:
from flask.ext.script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
from credstash import getAllSecrets
import os
default_env_file = '/home/ubuntu/environment'
environment = 'live'
if os.path.isfile(default_env_file):
with open(default_env_file, 'r') as environment_file:
environment = environment_file.readline().strip()
# on aws get secrets and export to env
os.environ.update(getAllSecrets(region="eu-west-1"))
from config import configs
os.environ['NOTIFY_API_ENVIRONMENT'] = configs[environment]
application = create_app()
manager = Manager(application)
migrate = Migrate(application, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| # ... existing code ...
default_env_file = '/home/ubuntu/environment'
environment = 'live'
if os.path.isfile(default_env_file):
with open(default_env_file, 'r') as environment_file:
environment = environment_file.readline().strip()
# on aws get secrets and export to env
os.environ.update(getAllSecrets(region="eu-west-1"))
from config import configs
os.environ['NOTIFY_API_ENVIRONMENT'] = configs[environment]
# ... rest of the code ... |
3327c204f34a725a2d070beb24a7a5a66d414930 | migrations/versions/538eeb160af6_.py | migrations/versions/538eeb160af6_.py |
# revision identifiers, used by Alembic.
revision = '538eeb160af6'
down_revision = '1727fb4309d8'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('role', sa.String(length=30), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'role')
### end Alembic commands ###
|
# revision identifiers, used by Alembic.
revision = '538eeb160af6'
down_revision = '6b9d673d8e30'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('role', sa.String(length=30), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'role')
### end Alembic commands ###
| Update alembic order for merging | Update alembic order for merging
| Python | apache-2.0 | bunjiboys/security_monkey,stackArmor/security_monkey,markofu/security_monkey,bunjiboys/security_monkey,bunjiboys/security_monkey,markofu/security_monkey,markofu/security_monkey,Netflix/security_monkey,stackArmor/security_monkey,Netflix/security_monkey,Netflix/security_monkey,stackArmor/security_monkey,Netflix/security_monkey,bunjiboys/security_monkey,Netflix/security_monkey,bunjiboys/security_monkey,stackArmor/security_monkey,stackArmor/security_monkey,markofu/security_monkey,markofu/security_monkey |
# revision identifiers, used by Alembic.
revision = '538eeb160af6'
- down_revision = '1727fb4309d8'
+ down_revision = '6b9d673d8e30'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('role', sa.String(length=30), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'role')
### end Alembic commands ###
| Update alembic order for merging | ## Code Before:
# revision identifiers, used by Alembic.
revision = '538eeb160af6'
down_revision = '1727fb4309d8'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('role', sa.String(length=30), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'role')
### end Alembic commands ###
## Instruction:
Update alembic order for merging
## Code After:
# revision identifiers, used by Alembic.
revision = '538eeb160af6'
down_revision = '6b9d673d8e30'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('role', sa.String(length=30), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'role')
### end Alembic commands ###
| # ... existing code ...
revision = '538eeb160af6'
down_revision = '6b9d673d8e30'
# ... rest of the code ... |
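
What editing down_revision does: Alembic orders migrations as a chain of revision ids, each naming its parent, so re-pointing this revision at 6b9d673d8e30 splices it in after that migration. A toy model of the chain (the parent of 6b9d673d8e30 is assumed here purely for illustration):

revisions = {
    '538eeb160af6': '6b9d673d8e30',  # this migration now follows the merged head
    '6b9d673d8e30': '1727fb4309d8',  # assumed parent, for illustration only
    '1727fb4309d8': None,
}

def lineage(rev):
    while rev is not None:
        yield rev
        rev = revisions[rev]

print(list(lineage('538eeb160af6')))
# ['538eeb160af6', '6b9d673d8e30', '1727fb4309d8']
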
5a45840e81d612e1f743ad063fd32da4d19354d4 | cacheops/signals.py | cacheops/signals.py | import django.dispatch
cache_read = django.dispatch.Signal(providing_args=["func", "hit"])
cache_invalidated = django.dispatch.Signal(providing_args=["obj_dict"])
| import django.dispatch
cache_read = django.dispatch.Signal() # args: func, hit
cache_invalidated = django.dispatch.Signal() # args: obj_dict
| Stop using Signal(providing_args) deprecated in Django 4.0 | Stop using Signal(providing_args) deprecated in Django 4.0
Closes #393
| Python | bsd-3-clause | Suor/django-cacheops | import django.dispatch
- cache_read = django.dispatch.Signal(providing_args=["func", "hit"])
+ cache_read = django.dispatch.Signal() # args: func, hit
- cache_invalidated = django.dispatch.Signal(providing_args=["obj_dict"])
+ cache_invalidated = django.dispatch.Signal() # args: obj_dict
| Stop using Signal(providing_args) deprected in Django 4.0 | ## Code Before:
import django.dispatch
cache_read = django.dispatch.Signal(providing_args=["func", "hit"])
cache_invalidated = django.dispatch.Signal(providing_args=["obj_dict"])
## Instruction:
Stop using Signal(providing_args) deprecated in Django 4.0
## Code After:
import django.dispatch
cache_read = django.dispatch.Signal() # args: func, hit
cache_invalidated = django.dispatch.Signal() # args: obj_dict
| ...
cache_read = django.dispatch.Signal() # args: func, hit
cache_invalidated = django.dispatch.Signal() # args: obj_dict
... |
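
For context, providing_args never affected dispatch, it was documentation only, which is why the replacement just moves the argument names into comments; receivers still get the same keyword arguments from send(). A small hedged usage sketch:

import django.dispatch

cache_read = django.dispatch.Signal()  # documented args: func, hit

def log_cache_read(sender, func, hit, **kwargs):
    print('cache {} for {!r}'.format('hit' if hit else 'miss', func))

cache_read.connect(log_cache_read)
cache_read.send(sender=None, func='get_user', hit=True)
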
31f81fd98a678949b1bb7d14863d497ab40d5afc | locksmith/common.py | locksmith/common.py | import hashlib
import hmac
import urllib, urllib2
API_OPERATING_STATUSES = (
(1, 'Normal'),
(2, 'Degraded Service'),
(3, 'Service Disruption'),
(4, 'Undergoing Maintenance')
)
API_STATUSES = (
(1, 'Active'),
(2, 'Deprecated'),
(3, 'Disabled')
)
KEY_STATUSES = (
('U', 'Unactivated'),
('A', 'Active'),
('S', 'Suspended')
)
UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
PUB_STATUSES = (
(UNPUBLISHED, 'Unpublished'),
(PUBLISHED, 'Published'),
(NEEDS_UPDATE, 'Needs Update'),
)
def get_signature(params, signkey):
# sorted k,v pairs of everything but signature
data = sorted([(k,v.encode('utf-8')) for k,v in params.iteritems() if k != 'signature'])
qs = urllib.urlencode(data)
return hmac.new(str(signkey), qs, hashlib.sha1).hexdigest()
def apicall(url, signkey, **params):
params['signature'] = get_signature(params, signkey)
data = sorted([(k,v) for k,v in params.iteritems()])
body = urllib.urlencode(data)
urllib2.urlopen(url, body)
| import hashlib
import hmac
import urllib, urllib2
API_OPERATING_STATUSES = (
(1, 'Normal'),
(2, 'Degraded Service'),
(3, 'Service Disruption'),
(4, 'Undergoing Maintenance')
)
API_STATUSES = (
(1, 'Active'),
(2, 'Deprecated'),
(3, 'Disabled')
)
KEY_STATUSES = (
('U', 'Unactivated'),
('A', 'Active'),
('S', 'Suspended')
)
UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
PUB_STATUSES = (
(UNPUBLISHED, 'Unpublished'),
(PUBLISHED, 'Published'),
(NEEDS_UPDATE, 'Needs Update'),
)
def get_signature(params, signkey):
# sorted k,v pairs of everything but signature
data = sorted([(k,unicode(v).encode('utf-8'))
for k,v in params.iteritems()
if k != 'signature'])
qs = urllib.urlencode(data)
return hmac.new(str(signkey), qs, hashlib.sha1).hexdigest()
def apicall(url, signkey, **params):
params['signature'] = get_signature(params, signkey)
data = sorted([(k,v) for k,v in params.iteritems()])
body = urllib.urlencode(data)
urllib2.urlopen(url, body)
| Convert url param values to unicode before encoding. | Convert url param values to unicode before encoding.
| Python | bsd-3-clause | sunlightlabs/django-locksmith,sunlightlabs/django-locksmith,sunlightlabs/django-locksmith | import hashlib
import hmac
import urllib, urllib2
API_OPERATING_STATUSES = (
(1, 'Normal'),
(2, 'Degraded Service'),
(3, 'Service Disruption'),
(4, 'Undergoing Maintenance')
)
API_STATUSES = (
(1, 'Active'),
(2, 'Deprecated'),
(3, 'Disabled')
)
KEY_STATUSES = (
('U', 'Unactivated'),
('A', 'Active'),
('S', 'Suspended')
)
UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
PUB_STATUSES = (
(UNPUBLISHED, 'Unpublished'),
(PUBLISHED, 'Published'),
(NEEDS_UPDATE, 'Needs Update'),
)
def get_signature(params, signkey):
# sorted k,v pairs of everything but signature
- data = sorted([(k,v.encode('utf-8')) for k,v in params.iteritems() if k != 'signature'])
+ data = sorted([(k,unicode(v).encode('utf-8'))
+ for k,v in params.iteritems()
+ if k != 'signature'])
qs = urllib.urlencode(data)
return hmac.new(str(signkey), qs, hashlib.sha1).hexdigest()
def apicall(url, signkey, **params):
params['signature'] = get_signature(params, signkey)
data = sorted([(k,v) for k,v in params.iteritems()])
body = urllib.urlencode(data)
urllib2.urlopen(url, body)
| Convert url param values to unicode before encoding. | ## Code Before:
import hashlib
import hmac
import urllib, urllib2
API_OPERATING_STATUSES = (
(1, 'Normal'),
(2, 'Degraded Service'),
(3, 'Service Disruption'),
(4, 'Undergoing Maintenance')
)
API_STATUSES = (
(1, 'Active'),
(2, 'Deprecated'),
(3, 'Disabled')
)
KEY_STATUSES = (
('U', 'Unactivated'),
('A', 'Active'),
('S', 'Suspended')
)
UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
PUB_STATUSES = (
(UNPUBLISHED, 'Unpublished'),
(PUBLISHED, 'Published'),
(NEEDS_UPDATE, 'Needs Update'),
)
def get_signature(params, signkey):
# sorted k,v pairs of everything but signature
data = sorted([(k,v.encode('utf-8')) for k,v in params.iteritems() if k != 'signature'])
qs = urllib.urlencode(data)
return hmac.new(str(signkey), qs, hashlib.sha1).hexdigest()
def apicall(url, signkey, **params):
params['signature'] = get_signature(params, signkey)
data = sorted([(k,v) for k,v in params.iteritems()])
body = urllib.urlencode(data)
urllib2.urlopen(url, body)
## Instruction:
Convert url param values to unicode before encoding.
## Code After:
import hashlib
import hmac
import urllib, urllib2
API_OPERATING_STATUSES = (
(1, 'Normal'),
(2, 'Degraded Service'),
(3, 'Service Disruption'),
(4, 'Undergoing Maintenance')
)
API_STATUSES = (
(1, 'Active'),
(2, 'Deprecated'),
(3, 'Disabled')
)
KEY_STATUSES = (
('U', 'Unactivated'),
('A', 'Active'),
('S', 'Suspended')
)
UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
PUB_STATUSES = (
(UNPUBLISHED, 'Unpublished'),
(PUBLISHED, 'Published'),
(NEEDS_UPDATE, 'Needs Update'),
)
def get_signature(params, signkey):
# sorted k,v pairs of everything but signature
data = sorted([(k,unicode(v).encode('utf-8'))
for k,v in params.iteritems()
if k != 'signature'])
qs = urllib.urlencode(data)
return hmac.new(str(signkey), qs, hashlib.sha1).hexdigest()
def apicall(url, signkey, **params):
params['signature'] = get_signature(params, signkey)
data = sorted([(k,v) for k,v in params.iteritems()])
body = urllib.urlencode(data)
urllib2.urlopen(url, body)
| ...
# sorted k,v pairs of everything but signature
data = sorted([(k,unicode(v).encode('utf-8'))
for k,v in params.iteritems()
if k != 'signature'])
qs = urllib.urlencode(data)
... |
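
Why the unicode(v) wrapper matters on Python 2: a parameter value that is not a string (an integer page number, say) has no .encode method, so the old expression raised AttributeError. Coercing every value to unicode first makes the whole pipeline uniform. Illustration with hypothetical params (Python 2 semantics):

# Python 2
params = {u'page': 2, u'q': u'caf\xe9'}

# old form fails on the int:
#   [(k, v.encode('utf-8')) for k, v in params.iteritems()]
#   AttributeError: 'int' object has no attribute 'encode'

data = sorted([(k, unicode(v).encode('utf-8')) for k, v in params.iteritems()])
# [(u'page', '2'), (u'q', 'caf\xc3\xa9')]
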
eda35123356edd20b361aa2f1d1f20cc7b922e39 | settings_example.py | settings_example.py | import os
import re
from imap import EmailCheckError, EmailServer
from postgresql import DatabaseServer
CSV_FOLDER = os.getcwd()
# Restrict emails by sender.
EMAIL_FROM = 'sender@example.com'
# Restrict emails by subject.
EMAIL_SUBJECT_RE = re.compile(''.join([
r'(?P<year>\d{4})',
r'(?P<month>\d{2})',
r'(?P<day>\d{2})',
r'(?P<hour>\d{2})',
r'(?P<minute>\d{2})',
r'\.csv',
]))
LOGGING_FORMAT = '''
- file: %(pathname)s
level: %(levelname)s
line: %(lineno)s
message: |
%(message)s
time: %(asctime)s
'''.strip()
TABLE_NAME_FORMAT = 'data_{year}{month}'
def get_database_client():
con = 'my_username/my_password@database.example.com:5432/my_database'
return DatabaseServer(con)
def get_email_client():
return EmailServer('mail.example.com', 'my_username', 'my_password')
| import os
import re
from imap import EmailCheckError, EmailServer
from postgresql import DatabaseServer
CSV_FOLDER = os.getcwd()
CSV_NAME_FORMAT = '{year}-{month}-{day}T{hour}{minute}.csv'
# Restrict emails by sender.
EMAIL_FROM = 'sender@example.com'
# Restrict emails by subject.
EMAIL_SUBJECT_RE = re.compile(''.join([
r'(?P<year>\d{4})',
r'(?P<month>\d{2})',
r'(?P<day>\d{2})',
r'(?P<hour>\d{2})',
r'(?P<minute>\d{2})',
r'\.csv',
]))
LOGGING_FORMAT = '''
- file: %(pathname)s
level: %(levelname)s
line: %(lineno)s
message: |
%(message)s
time: %(asctime)s
'''.strip()
TABLE_NAME_FORMAT = 'data_{year}{month}'
def get_database_client():
con = 'my_username/my_password@database.example.com:5432/my_database'
return DatabaseServer(con)
def get_email_client():
return EmailServer('mail.example.com', 'my_username', 'my_password')
| Add CSV file name format setting example | Add CSV file name format setting example
| Python | mit | AustralianAntarcticDataCentre/save_emails_to_files,AustralianAntarcticDataCentre/save_emails_to_files | import os
import re
from imap import EmailCheckError, EmailServer
from postgresql import DatabaseServer
CSV_FOLDER = os.getcwd()
+
+ CSV_NAME_FORMAT = '{year}-{month}-{day}T{hour}{minute}.csv'
# Restrict emails by sender.
EMAIL_FROM = 'sender@example.com'
# Restrict emails by subject.
EMAIL_SUBJECT_RE = re.compile(''.join([
r'(?P<year>\d{4})',
r'(?P<month>\d{2})',
r'(?P<day>\d{2})',
r'(?P<hour>\d{2})',
r'(?P<minute>\d{2})',
r'\.csv',
]))
LOGGING_FORMAT = '''
- file: %(pathname)s
level: %(levelname)s
line: %(lineno)s
message: |
%(message)s
time: %(asctime)s
'''.strip()
TABLE_NAME_FORMAT = 'data_{year}{month}'
def get_database_client():
con = 'my_username/my_password@database.example.com:5432/my_database'
return DatabaseServer(con)
def get_email_client():
return EmailServer('mail.example.com', 'my_username', 'my_password')
| Add CSV file name format setting example | ## Code Before:
import os
import re
from imap import EmailCheckError, EmailServer
from postgresql import DatabaseServer
CSV_FOLDER = os.getcwd()
# Restrict emails by sender.
EMAIL_FROM = 'sender@example.com'
# Restrict emails by subject.
EMAIL_SUBJECT_RE = re.compile(''.join([
r'(?P<year>\d{4})',
r'(?P<month>\d{2})',
r'(?P<day>\d{2})',
r'(?P<hour>\d{2})',
r'(?P<minute>\d{2})',
r'\.csv',
]))
LOGGING_FORMAT = '''
- file: %(pathname)s
level: %(levelname)s
line: %(lineno)s
message: |
%(message)s
time: %(asctime)s
'''.strip()
TABLE_NAME_FORMAT = 'data_{year}{month}'
def get_database_client():
con = 'my_username/my_password@database.example.com:5432/my_database'
return DatabaseServer(con)
def get_email_client():
return EmailServer('mail.example.com', 'my_username', 'my_password')
## Instruction:
Add CSV file name format setting example
## Code After:
import os
import re
from imap import EmailCheckError, EmailServer
from postgresql import DatabaseServer
CSV_FOLDER = os.getcwd()
CSV_NAME_FORMAT = '{year}-{month}-{day}T{hour}{minute}.csv'
# Restrict emails by sender.
EMAIL_FROM = 'sender@example.com'
# Restrict emails by subject.
EMAIL_SUBJECT_RE = re.compile(''.join([
r'(?P<year>\d{4})',
r'(?P<month>\d{2})',
r'(?P<day>\d{2})',
r'(?P<hour>\d{2})',
r'(?P<minute>\d{2})',
r'\.csv',
]))
LOGGING_FORMAT = '''
- file: %(pathname)s
level: %(levelname)s
line: %(lineno)s
message: |
%(message)s
time: %(asctime)s
'''.strip()
TABLE_NAME_FORMAT = 'data_{year}{month}'
def get_database_client():
con = 'my_username/my_password@database.example.com:5432/my_database'
return DatabaseServer(con)
def get_email_client():
return EmailServer('mail.example.com', 'my_username', 'my_password')
| // ... existing code ...
CSV_FOLDER = os.getcwd()
CSV_NAME_FORMAT = '{year}-{month}-{day}T{hour}{minute}.csv'
// ... rest of the code ... |
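
How the new setting pairs with EMAIL_SUBJECT_RE from this module: the regex's named groups line up with the format fields, so a matched subject can be turned straight into a CSV file name. A hedged usage sketch:

match = EMAIL_SUBJECT_RE.match('201601021530.csv')
if match is not None:
    csv_name = CSV_NAME_FORMAT.format(**match.groupdict())
    # '2016-01-02T1530.csv'
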
56ac100c8ca357a5600db7a16859cca1483ccb13 | blueprints/multi_node_kubernetes_cluster/teardown_kubernetes_cluster/teardown_kubernetes_cluster.py | blueprints/multi_node_kubernetes_cluster/teardown_kubernetes_cluster/teardown_kubernetes_cluster.py | from common.methods import set_progress
from containerorchestrators.kuberneteshandler.models import Kubernetes
def run(job, *args, **kwargs):
resource = job.resource_set.first()
container_orchestrator = Kubernetes.objects.get(id=resource.container_orchestrator_id)
environment = container_orchestrator.environment_set.first()
container_orchestrator.delete()
environment.delete() | from common.methods import set_progress
from containerorchestrators.kuberneteshandler.models import Kubernetes
from utilities.run_command import execute_command
def run(job, *args, **kwargs):
resource = job.resource_set.first()
container_orchestrator = Kubernetes.objects.get(id=resource.container_orchestrator_id)
environment = container_orchestrator.environment_set.first()
container_orchestrator.delete()
environment.delete()
    resource_dir = '/var/opt/cloudbolt/kubernetes/resource-{}'.format(resource.id)
    execute_command('rm -rf {}'.format(resource_dir))
| Remove config files from filesystem on teardown | Remove config files from filesystem on teardown
[DEV-13843]
| Python | apache-2.0 | CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge | from common.methods import set_progress
from containerorchestrators.kuberneteshandler.models import Kubernetes
+ from utilities.run_command import execute_command
def run(job, *args, **kwargs):
resource = job.resource_set.first()
container_orchestrator = Kubernetes.objects.get(id=resource.container_orchestrator_id)
environment = container_orchestrator.environment_set.first()
container_orchestrator.delete()
environment.delete()
+
+     resource_dir = '/var/opt/cloudbolt/kubernetes/resource-{}'.format(resource.id)
+     execute_command('rm -rf {}'.format(resource_dir))
+ | Remove config files from filesystem on teardown | ## Code Before:
from common.methods import set_progress
from containerorchestrators.kuberneteshandler.models import Kubernetes
def run(job, *args, **kwargs):
resource = job.resource_set.first()
container_orchestrator = Kubernetes.objects.get(id=resource.container_orchestrator_id)
environment = container_orchestrator.environment_set.first()
container_orchestrator.delete()
environment.delete()
## Instruction:
Remove config files from filesystem on teardown
## Code After:
from common.methods import set_progress
from containerorchestrators.kuberneteshandler.models import Kubernetes
from utilities.run_command import execute_command
def run(job, *args, **kwargs):
resource = job.resource_set.first()
container_orchestrator = Kubernetes.objects.get(id=resource.container_orchestrator_id)
environment = container_orchestrator.environment_set.first()
container_orchestrator.delete()
environment.delete()
    resource_dir = '/var/opt/cloudbolt/kubernetes/resource-{}'.format(resource.id)
    execute_command('rm -rf {}'.format(resource_dir))
| # ... existing code ...
from containerorchestrators.kuberneteshandler.models import Kubernetes
from utilities.run_command import execute_command
# ... modified code ...
environment.delete()
    resource_dir = '/var/opt/cloudbolt/kubernetes/resource-{}'.format(resource.id)
    execute_command('rm -rf {}'.format(resource_dir))
# ... rest of the code ... |
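A standard-library alternative to shelling out for the cleanup above: a minimal sketch assuming the same per-resource directory layout (the function name is illustrative).
import shutil

def remove_resource_config(resource_id):
    resource_dir = '/var/opt/cloudbolt/kubernetes/resource-{}'.format(resource_id)
    # ignore_errors=True mirrors `rm -rf`: a missing directory is not an error.
    shutil.rmtree(resource_dir, ignore_errors=True)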
049e21dd2d4e90120bfe297696cffa5000028854 | dynd/benchmarks/benchmark_arithmetic.py | dynd/benchmarks/benchmark_arithmetic.py | import numpy as np
from dynd import nd, ndt
from benchrun import Benchmark, clock
class ArithemticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000]
def run(self, size):
a = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
b = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
start = clock()
a + b
stop = clock()
return stop - start
class NumPyArithemticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000]
def run(self, size):
a = np.random.uniform(size = size)
b = np.random.uniform(size = size)
start = clock()
a + b
stop = clock()
return stop - start
if __name__ == '__main__':
benchmark = ArithemticBenchmark()
benchmark.print_result()
benchmark = NumPyArithemticBenchmark()
benchmark.print_result() | import numpy as np
from dynd import nd, ndt
from benchrun import Benchmark, clock
class ArithmeticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000, 100000000]
def run(self, size):
a = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
b = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
start = clock()
a + b
stop = clock()
return stop - start
class NumPyArithmeticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000, 100000000]
def run(self, size):
a = np.random.uniform(size = size)
b = np.random.uniform(size = size)
start = clock()
a + b
stop = clock()
return stop - start
if __name__ == '__main__':
benchmark = ArithmeticBenchmark()
benchmark.print_result()
benchmark = NumPyArithmeticBenchmark()
benchmark.print_result()
| Add one bigger size to arithmetic benchmark | Add one bigger size to arithmetic benchmark
| Python | bsd-2-clause | michaelpacer/dynd-python,insertinterestingnamehere/dynd-python,pombredanne/dynd-python,pombredanne/dynd-python,cpcloud/dynd-python,ContinuumIO/dynd-python,michaelpacer/dynd-python,izaid/dynd-python,michaelpacer/dynd-python,insertinterestingnamehere/dynd-python,izaid/dynd-python,mwiebe/dynd-python,izaid/dynd-python,mwiebe/dynd-python,pombredanne/dynd-python,cpcloud/dynd-python,insertinterestingnamehere/dynd-python,mwiebe/dynd-python,ContinuumIO/dynd-python,insertinterestingnamehere/dynd-python,cpcloud/dynd-python,mwiebe/dynd-python,ContinuumIO/dynd-python,cpcloud/dynd-python,ContinuumIO/dynd-python,michaelpacer/dynd-python,pombredanne/dynd-python,izaid/dynd-python | import numpy as np
from dynd import nd, ndt
from benchrun import Benchmark, clock
- class ArithemticBenchmark(Benchmark):
+ class ArithmeticBenchmark(Benchmark):
parameters = ('size',)
- size = [100000, 10000000]
+ size = [100000, 10000000, 100000000]
def run(self, size):
a = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
b = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
start = clock()
a + b
stop = clock()
return stop - start
- class NumPyArithemticBenchmark(Benchmark):
+ class NumPyArithmeticBenchmark(Benchmark):
parameters = ('size',)
- size = [100000, 10000000]
+ size = [100000, 10000000, 100000000]
def run(self, size):
a = np.random.uniform(size = size)
b = np.random.uniform(size = size)
start = clock()
a + b
stop = clock()
return stop - start
if __name__ == '__main__':
- benchmark = ArithemticBenchmark()
+ benchmark = ArithmeticBenchmark()
benchmark.print_result()
- benchmark = NumPyArithemticBenchmark()
+ benchmark = NumPyArithmeticBenchmark()
benchmark.print_result()
+ | Add one bigger size to arithmetic benchmark | ## Code Before:
import numpy as np
from dynd import nd, ndt
from benchrun import Benchmark, clock
class ArithemticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000]
def run(self, size):
a = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
b = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
start = clock()
a + b
stop = clock()
return stop - start
class NumPyArithemticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000]
def run(self, size):
a = np.random.uniform(size = size)
b = np.random.uniform(size = size)
start = clock()
a + b
stop = clock()
return stop - start
if __name__ == '__main__':
benchmark = ArithemticBenchmark()
benchmark.print_result()
benchmark = NumPyArithemticBenchmark()
benchmark.print_result()
## Instruction:
Add one bigger size to arithmetic benchmark
## Code After:
import numpy as np
from dynd import nd, ndt
from benchrun import Benchmark, clock
class ArithmeticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000, 100000000]
def run(self, size):
a = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
b = nd.uniform(dst_tp = ndt.type('{} * float64'.format(size)))
start = clock()
a + b
stop = clock()
return stop - start
class NumPyArithmeticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000, 100000000]
def run(self, size):
a = np.random.uniform(size = size)
b = np.random.uniform(size = size)
start = clock()
a + b
stop = clock()
return stop - start
if __name__ == '__main__':
benchmark = ArithmeticBenchmark()
benchmark.print_result()
benchmark = NumPyArithmeticBenchmark()
benchmark.print_result()
| # ... existing code ...
class ArithmeticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000, 100000000]
# ... modified code ...
class NumPyArithmeticBenchmark(Benchmark):
parameters = ('size',)
size = [100000, 10000000, 100000000]
...
if __name__ == '__main__':
benchmark = ArithmeticBenchmark()
benchmark.print_result()
...
benchmark = NumPyArithmeticBenchmark()
benchmark.print_result()
# ... rest of the code ... |
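The clock()/clock() pattern above times a single run. A stdlib-only sketch of the same measurement that repeats and keeps the minimum; dynd and benchrun are assumed unavailable here, so NumPy stands in for both operand types.
import numpy as np
from timeit import default_timer

def time_add(size, repeats=5):
    a = np.random.uniform(size=size)
    b = np.random.uniform(size=size)
    best = float('inf')
    for _ in range(repeats):
        start = default_timer()
        a + b
        best = min(best, default_timer() - start)
    return best  # the minimum over repeats is the least noisy single-run estimate

if __name__ == '__main__':
    for size in [100000, 10000000]:  # the largest size above needs roughly 2.4 GB of RAM
        print(size, time_add(size))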
d656c0117e8487b8b56b4ee3caceb2dcb38ec198 | sympy/concrete/tests/test_gosper.py | sympy/concrete/tests/test_gosper.py | def test_normal():
pass
def test_gosper():
pass
| from sympy import Symbol, normal
from sympy.abc import n
def test_normal():
assert normal(4*n+5, 2*(4*n+1)*(2*n+3), n)
def test_gosper():
pass
 | Add test for part of Gosper's algorithm. | Add test for part of Gosper's algorithm.
| Python | bsd-3-clause | abhiii5459/sympy,mafiya69/sympy,atreyv/sympy,wanglongqi/sympy,pandeyadarsh/sympy,liangjiaxing/sympy,srjoglekar246/sympy,Sumith1896/sympy,bukzor/sympy,atsao72/sympy,sunny94/temp,moble/sympy,cccfran/sympy,yashsharan/sympy,drufat/sympy,maniteja123/sympy,AunShiLord/sympy,shikil/sympy,pandeyadarsh/sympy,Davidjohnwilson/sympy,beni55/sympy,kumarkrishna/sympy,jerli/sympy,ga7g08/sympy,hargup/sympy,atsao72/sympy,madan96/sympy,emon10005/sympy,Curious72/sympy,maniteja123/sympy,lindsayad/sympy,amitjamadagni/sympy,toolforger/sympy,garvitr/sympy,ga7g08/sympy,mafiya69/sympy,kevalds51/sympy,kaushik94/sympy,Vishluck/sympy,Titan-C/sympy,oliverlee/sympy,Titan-C/sympy,iamutkarshtiwari/sympy,mcdaniel67/sympy,lidavidm/sympy,MridulS/sympy,Curious72/sympy,mafiya69/sympy,abloomston/sympy,debugger22/sympy,oliverlee/sympy,asm666/sympy,kaichogami/sympy,shikil/sympy,saurabhjn76/sympy,ga7g08/sympy,pbrady/sympy,chaffra/sympy,farhaanbukhsh/sympy,saurabhjn76/sympy,shipci/sympy,sahilshekhawat/sympy,Designist/sympy,liangjiaxing/sympy,liangjiaxing/sympy,Designist/sympy,cccfran/sympy,meghana1995/sympy,flacjacket/sympy,abloomston/sympy,asm666/sympy,yukoba/sympy,garvitr/sympy,chaffra/sympy,cswiercz/sympy,vipulroxx/sympy,sunny94/temp,toolforger/sympy,AkademieOlympia/sympy,sahilshekhawat/sympy,jerli/sympy,sampadsaha5/sympy,Titan-C/sympy,debugger22/sympy,yashsharan/sympy,Sumith1896/sympy,postvakje/sympy,maniteja123/sympy,Arafatk/sympy,pbrady/sympy,kumarkrishna/sympy,Gadal/sympy,VaibhavAgarwalVA/sympy,aktech/sympy,jamesblunt/sympy,kevalds51/sympy,sahilshekhawat/sympy,dqnykamp/sympy,drufat/sympy,sunny94/temp,wanglongqi/sympy,jaimahajan1997/sympy,pandeyadarsh/sympy,Arafatk/sympy,beni55/sympy,vipulroxx/sympy,AkademieOlympia/sympy,beni55/sympy,emon10005/sympy,farhaanbukhsh/sympy,MridulS/sympy,asm666/sympy,sampadsaha5/sympy,cccfran/sympy,Shaswat27/sympy,ChristinaZografou/sympy,MechCoder/sympy,wanglongqi/sympy,postvakje/sympy,ahhda/sympy,souravsingh/sympy,sahmed95/sympy,vipulroxx/sympy,lidavidm/sympy,kaichogami/sympy,hrashk/sympy,meghana1995/sympy,sampadsaha5/sympy,shipci/sympy,debugger22/sympy,shikil/sympy,minrk/sympy,cswiercz/sympy,hargup/sympy,jbbskinny/sympy,grevutiu-gabriel/sympy,diofant/diofant,mcdaniel67/sympy,kevalds51/sympy,hrashk/sympy,skidzo/sympy,atreyv/sympy,kumarkrishna/sympy,cswiercz/sympy,madan96/sympy,drufat/sympy,bukzor/sympy,Gadal/sympy,madan96/sympy,postvakje/sympy,wyom/sympy,kaushik94/sympy,kmacinnis/sympy,souravsingh/sympy,skirpichev/omg,rahuldan/sympy,atsao72/sympy,dqnykamp/sympy,souravsingh/sympy,rahuldan/sympy,kaushik94/sympy,farhaanbukhsh/sympy,Shaswat27/sympy,hrashk/sympy,Gadal/sympy,skidzo/sympy,iamutkarshtiwari/sympy,AunShiLord/sympy,wyom/sympy,Davidjohnwilson/sympy,abloomston/sympy,oliverlee/sympy,Mitchkoens/sympy,minrk/sympy,Vishluck/sympy,shipci/sympy,Arafatk/sympy,garvitr/sympy,saurabhjn76/sympy,Vishluck/sympy,grevutiu-gabriel/sympy,VaibhavAgarwalVA/sympy,kaichogami/sympy,jaimahajan1997/sympy,skidzo/sympy,iamutkarshtiwari/sympy,moble/sympy,Mitchkoens/sympy,kmacinnis/sympy,ahhda/sympy,MridulS/sympy,hargup/sympy,jbbskinny/sympy,jamesblunt/sympy,jamesblunt/sympy,meghana1995/sympy,ahhda/sympy,kmacinnis/sympy,jaimahajan1997/sympy,MechCoder/sympy,VaibhavAgarwalVA/sympy,yashsharan/sympy,Designist/sympy,pbrady/sympy,dqnykamp/sympy,yukoba/sympy,atreyv/sympy,aktech/sympy,AunShiLord/sympy,Curious72/sympy,ChristinaZografou/sympy,abhiii5459/sympy,emon10005/sympy,rahuldan/sympy,sahmed95/sympy,moble/sympy,bukzor/sympy,mcdaniel67/sympy,AkademieOlympia/sympy,grevutiu-gabriel/sympy,MechCoder/sympy,a
bhiii5459/sympy,jbbskinny/sympy,Sumith1896/sympy,jerli/sympy,Davidjohnwilson/sympy,lindsayad/sympy,sahmed95/sympy,Mitchkoens/sympy,ChristinaZografou/sympy,wyom/sympy,toolforger/sympy,yukoba/sympy,lidavidm/sympy,lindsayad/sympy,Shaswat27/sympy,aktech/sympy,amitjamadagni/sympy,chaffra/sympy | + from sympy import Symbol, normal
+ from sympy.abc import n
+
def test_normal():
- pass
+ assert normal(4*n+5, 2*(4*n+1)*(2*n+3), n)
def test_gosper():
pass
 | Add test for part of Gosper's algorithm. | ## Code Before:
def test_normal():
pass
def test_gosper():
pass
## Instruction:
Add test for part of Gosper's algorithm.
## Code After:
from sympy import Symbol, normal
from sympy.abc import n
def test_normal():
assert normal(4*n+5, 2*(4*n+1)*(2*n+3), n)
def test_gosper():
pass
| // ... existing code ...
from sympy import Symbol, normal
from sympy.abc import n
def test_normal():
assert normal(4*n+5, 2*(4*n+1)*(2*n+3), n)
// ... rest of the code ... |
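For context on where the normal() decomposition is headed: Gosper's algorithm decides whether a hypergeometric term has a hypergeometric antidifference. A sanity check against SymPy's public entry point, which may postdate this commit:
from sympy import simplify, symbols
from sympy.concrete.gosper import gosper_sum

k, m = symbols('k m', integer=True, positive=True)
closed = gosper_sum(k, (k, 0, m))  # the sum 0 + 1 + ... + m has a closed form
assert simplify(closed - m*(m + 1)/2) == 0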
09c0c2302460c6e32419f640b341c4b968d4227a | opendebates/tests/test_context_processors.py | opendebates/tests/test_context_processors.py | import urlparse
from django.test import TestCase, override_settings
from django.conf import settings
from mock import patch, Mock
from opendebates.context_processors import global_vars
from opendebates.tests.factories import SubmissionFactory
class NumberOfVotesTest(TestCase):
def test_number_of_votes(self):
mock_request = Mock()
with patch('opendebates.utils.cache') as mock_cache:
mock_cache.get.return_value = 2
context = global_vars(mock_request)
self.assertEqual(2, int(context['NUMBER_OF_VOTES']))
class ThemeTests(TestCase):
def setUp(self):
self.idea = SubmissionFactory()
@override_settings(SITE_THEME={'HASHTAG': 'TestHastag'})
def test_email_url(self):
settings.SITE_THEME['HASHTAG'] = 'TestHashtag'
email_url = self.idea.email_url()
fields = urlparse.parse_qs(urlparse.urlparse(email_url).query)
self.assertTrue('subject' in fields, fields)
self.assertTrue('#TestHashtag' in fields['subject'][0], fields['subject'][0])
| import urlparse
from django.test import TestCase, override_settings
from django.conf import settings
from mock import patch, Mock
from opendebates.context_processors import global_vars
from opendebates.tests.factories import SubmissionFactory
class NumberOfVotesTest(TestCase):
def test_number_of_votes(self):
mock_request = Mock()
with patch('opendebates.utils.cache') as mock_cache:
mock_cache.get.return_value = 2
context = global_vars(mock_request)
self.assertEqual(2, int(context['NUMBER_OF_VOTES']))
class ThemeTests(TestCase):
def setUp(self):
self.idea = SubmissionFactory()
@override_settings(SITE_THEME={'HASHTAG': 'TestHashtag'})
def test_email_url(self):
email_url = self.idea.email_url()
fields = urlparse.parse_qs(urlparse.urlparse(email_url).query)
self.assertTrue('subject' in fields, fields)
self.assertTrue('#TestHashtag' in fields['subject'][0], fields['subject'][0])
 | Fix typo in overridden setting | Fix typo in overridden setting
| Python | apache-2.0 | caktus/django-opendebates,ejucovy/django-opendebates,ejucovy/django-opendebates,caktus/django-opendebates,ejucovy/django-opendebates,caktus/django-opendebates,ejucovy/django-opendebates,caktus/django-opendebates | import urlparse
from django.test import TestCase, override_settings
from django.conf import settings
from mock import patch, Mock
from opendebates.context_processors import global_vars
from opendebates.tests.factories import SubmissionFactory
class NumberOfVotesTest(TestCase):
def test_number_of_votes(self):
mock_request = Mock()
with patch('opendebates.utils.cache') as mock_cache:
mock_cache.get.return_value = 2
context = global_vars(mock_request)
self.assertEqual(2, int(context['NUMBER_OF_VOTES']))
class ThemeTests(TestCase):
def setUp(self):
self.idea = SubmissionFactory()
- @override_settings(SITE_THEME={'HASHTAG': 'TestHastag'})
+ @override_settings(SITE_THEME={'HASHTAG': 'TestHashtag'})
def test_email_url(self):
- settings.SITE_THEME['HASHTAG'] = 'TestHashtag'
email_url = self.idea.email_url()
fields = urlparse.parse_qs(urlparse.urlparse(email_url).query)
self.assertTrue('subject' in fields, fields)
self.assertTrue('#TestHashtag' in fields['subject'][0], fields['subject'][0])
 | Fix typo in overridden setting | ## Code Before:
import urlparse
from django.test import TestCase, override_settings
from django.conf import settings
from mock import patch, Mock
from opendebates.context_processors import global_vars
from opendebates.tests.factories import SubmissionFactory
class NumberOfVotesTest(TestCase):
def test_number_of_votes(self):
mock_request = Mock()
with patch('opendebates.utils.cache') as mock_cache:
mock_cache.get.return_value = 2
context = global_vars(mock_request)
self.assertEqual(2, int(context['NUMBER_OF_VOTES']))
class ThemeTests(TestCase):
def setUp(self):
self.idea = SubmissionFactory()
@override_settings(SITE_THEME={'HASHTAG': 'TestHastag'})
def test_email_url(self):
settings.SITE_THEME['HASHTAG'] = 'TestHashtag'
email_url = self.idea.email_url()
fields = urlparse.parse_qs(urlparse.urlparse(email_url).query)
self.assertTrue('subject' in fields, fields)
self.assertTrue('#TestHashtag' in fields['subject'][0], fields['subject'][0])
## Instruction:
Fix typo in overridden setting
## Code After:
import urlparse
from django.test import TestCase, override_settings
from django.conf import settings
from mock import patch, Mock
from opendebates.context_processors import global_vars
from opendebates.tests.factories import SubmissionFactory
class NumberOfVotesTest(TestCase):
def test_number_of_votes(self):
mock_request = Mock()
with patch('opendebates.utils.cache') as mock_cache:
mock_cache.get.return_value = 2
context = global_vars(mock_request)
self.assertEqual(2, int(context['NUMBER_OF_VOTES']))
class ThemeTests(TestCase):
def setUp(self):
self.idea = SubmissionFactory()
@override_settings(SITE_THEME={'HASHTAG': 'TestHashtag'})
def test_email_url(self):
email_url = self.idea.email_url()
fields = urlparse.parse_qs(urlparse.urlparse(email_url).query)
self.assertTrue('subject' in fields, fields)
self.assertTrue('#TestHashtag' in fields['subject'][0], fields['subject'][0])
| # ... existing code ...
@override_settings(SITE_THEME={'HASHTAG': 'TestHashtag'})
def test_email_url(self):
email_url = self.idea.email_url()
# ... rest of the code ... |
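The pattern the diff above settles on, in isolation: override_settings both sets the value and restores the original when the test exits, whereas the removed in-body assignment leaked into later tests. A sketch assuming a configured Django project.
from django.conf import settings
from django.test import SimpleTestCase, override_settings

class HashtagOverrideTests(SimpleTestCase):
    @override_settings(SITE_THEME={'HASHTAG': 'TestHashtag'})
    def test_value_inside_override(self):
        # Inside the decorated test the override is visible; it is rolled
        # back automatically afterwards, with no manual cleanup.
        self.assertEqual(settings.SITE_THEME['HASHTAG'], 'TestHashtag')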
fc203d643aa9a69c835aebee0de9b17851ef7a58 | compose/cli/docker_client.py | compose/cli/docker_client.py | from docker import Client
from docker import tls
import ssl
import os
def docker_client():
"""
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
if cert_path == '':
cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
base_url = os.environ.get('DOCKER_HOST')
tls_config = None
if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
parts = base_url.split('://', 1)
base_url = '%s://%s' % ('https', parts[1])
client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))
ca_cert = os.path.join(cert_path, 'ca.pem')
tls_config = tls.TLSConfig(
ssl_version=ssl.PROTOCOL_TLSv1,
verify=True,
assert_hostname=False,
client_cert=client_cert,
ca_cert=ca_cert,
)
timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
return Client(base_url=base_url, tls=tls_config, version='1.18', timeout=timeout)
| from docker import Client
from docker import tls
import ssl
import os
def docker_client():
"""
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
if cert_path == '':
cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
base_url = os.environ.get('DOCKER_HOST')
api_version = os.environ.get('COMPOSE_API_VERSION', '1.18')
tls_config = None
if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
parts = base_url.split('://', 1)
base_url = '%s://%s' % ('https', parts[1])
client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))
ca_cert = os.path.join(cert_path, 'ca.pem')
tls_config = tls.TLSConfig(
ssl_version=ssl.PROTOCOL_TLSv1,
verify=True,
assert_hostname=False,
client_cert=client_cert,
ca_cert=ca_cert,
)
timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
return Client(base_url=base_url, tls=tls_config, version=api_version, timeout=timeout)
| Allow API version specification via env var | Allow API version specification via env var
Hard-coding the API version to '1.18' with the docker-py constructor will
cause the docker-py logic at
https://github.com/docker/docker-py/blob/master/docker/client.py#L143-L146
to always fail, causing authentication issues if you're talking to a
remote daemon running API version 1.19 - regardless of the API version of
the registry.
Allow the user to set the API version via an environment variable. If
the variable is not present, it will still default to '1.18' like it
does today.
Signed-off-by: Reilly Herrewig-Pope <feddfa1b1b567537c5d9b5bb606cac73059ea316@mandiant.com>
| Python | apache-2.0 | jeanpralo/compose,shubheksha/docker.github.io,saada/compose,talolard/compose,joaofnfernandes/docker.github.io,ionrock/compose,iamluc/compose,goloveychuk/compose,vdemeester/compose,menglingwei/denverdino.github.io,qzio/compose,d2bit/compose,docker/docker.github.io,GM-Alex/compose,bdwill/docker.github.io,anweiss/docker.github.io,KalleDK/compose,londoncalling/docker.github.io,jiekechoo/compose,londoncalling/docker.github.io,cgvarela/compose,cgvarela/compose,albers/compose,thaJeztah/compose,gdevillele/docker.github.io,bbirand/compose,au-phiware/compose,ph-One/compose,denverdino/docker.github.io,alexisbellido/docker.github.io,moxiegirl/compose,twitherspoon/compose,mchasal/compose,denverdino/docker.github.io,unodba/compose,vlajos/compose,lmesz/compose,mrfuxi/compose,troy0820/docker.github.io,mdaue/compose,LuisBosquez/docker.github.io,troy0820/docker.github.io,artemkaint/compose,gtrdotmcs/compose,joaofnfernandes/docker.github.io,denverdino/docker.github.io,phiroict/docker,jzwlqx/denverdino.github.io,simonista/compose,jrabbit/compose,docker/docker.github.io,rgbkrk/compose,ionrock/compose,TheDataShed/compose,jorgeLuizChaves/compose,menglingwei/denverdino.github.io,dbdd4us/compose,tiry/compose,shin-/docker.github.io,denverdino/denverdino.github.io,charleswhchan/compose,hoogenm/compose,alexisbellido/docker.github.io,KevinGreene/compose,artemkaint/compose,johnstep/docker.github.io,gdevillele/docker.github.io,danix800/docker.github.io,talolard/compose,d2bit/compose,ggtools/compose,kojiromike/compose,alexandrev/compose,jzwlqx/denverdino.github.io,jeanpralo/compose,funkyfuture/docker-compose,dopry/compose,mnuessler/compose,troy0820/docker.github.io,lmesz/compose,saada/compose,thaJeztah/docker.github.io,alexandrev/compose,sanscontext/docker.github.io,thaJeztah/docker.github.io,mbailey/compose,swoopla/compose,shubheksha/docker.github.io,shin-/compose,aanand/fig,TomasTomecek/compose,screwgoth/compose,joeuo/docker.github.io,dilgerma/compose,VinceBarresi/compose,LuisBosquez/docker.github.io,joaofnfernandes/docker.github.io,denverdino/denverdino.github.io,Dakno/compose,JimGalasyn/docker.github.io,bbirand/compose,TomasTomecek/compose,anweiss/docker.github.io,aanand/fig,browning/compose,mohitsoni/compose,shubheksha/docker.github.io,joaofnfernandes/docker.github.io,viranch/compose,thaJeztah/docker.github.io,BSWANG/denverdino.github.io,joaofnfernandes/docker.github.io,mohitsoni/compose,mnowster/compose,JimGalasyn/docker.github.io,bdwill/docker.github.io,alexisbellido/docker.github.io,BSWANG/denverdino.github.io,michael-k/docker-compose,jonaseck2/compose,simonista/compose,JimGalasyn/docker.github.io,andrewgee/compose,docker-zh/docker.github.io,joeuo/docker.github.io,unodba/compose,mrfuxi/compose,genki/compose,bdwill/docker.github.io,runcom/compose,docker-zh/docker.github.io,sdurrheimer/compose,JimGalasyn/docker.github.io,menglingwei/denverdino.github.io,aduermael/docker.github.io,thaJeztah/compose,andrewgee/compose,dbdd4us/compose,GM-Alex/compose,alunduil/fig,anweiss/docker.github.io,denverdino/denverdino.github.io,mchasal/compose,johnstep/docker.github.io,dnephin/compose,ggtools/compose,BSWANG/denverdino.github.io,aduermael/docker.github.io,browning/compose,bsmr-docker/compose,swoopla/compose,danix800/docker.github.io,mindaugasrukas/compose,jiekechoo/compose,rillig/docker.github.io,bdwill/docker.github.io,ZJaffee/compose,denverdino/denverdino.github.io,dilgerma/compose,docker/docker.github.io,docker-zh/docker.github.io,MSakamaki/compose,JimGalasyn/docker.github.io,denverdino/docker.github.io
,danix800/docker.github.io,j-fuentes/compose,londoncalling/docker.github.io,jzwlqx/denverdino.github.io,KevinGreene/compose,dopry/compose,vlajos/compose,jzwlqx/denverdino.github.io,menglingwei/denverdino.github.io,johnstep/docker.github.io,danix800/docker.github.io,tangkun75/compose,mbailey/compose,jonaseck2/compose,anweiss/docker.github.io,denverdino/compose,Dakno/compose,alunduil/fig,kikkomep/compose,londoncalling/docker.github.io,josephpage/compose,jrabbit/compose,xydinesh/compose,dockerhn/compose,docker/docker.github.io,viranch/compose,VinceBarresi/compose,denverdino/docker.github.io,londoncalling/docker.github.io,aduermael/docker.github.io,j-fuentes/compose,charleswhchan/compose,rgbkrk/compose,shin-/compose,denverdino/compose,sanscontext/docker.github.io,goloveychuk/compose,docker-zh/docker.github.io,anweiss/docker.github.io,mark-adams/compose,bsmr-docker/compose,bdwill/docker.github.io,kojiromike/compose,TheDataShed/compose,qzio/compose,mnowster/compose,screwgoth/compose,rillig/docker.github.io,mark-adams/compose,sanscontext/docker.github.io,au-phiware/compose,tangkun75/compose,shubheksha/docker.github.io,gdevillele/docker.github.io,thaJeztah/docker.github.io,nhumrich/compose,johnstep/docker.github.io,iamluc/compose,shin-/docker.github.io,runcom/compose,ChrisChinchilla/compose,moxiegirl/compose,alexisbellido/docker.github.io,jorgeLuizChaves/compose,genki/compose,tiry/compose,denverdino/denverdino.github.io,phiroict/docker,joeuo/docker.github.io,docker-zh/docker.github.io,rillig/docker.github.io,BSWANG/denverdino.github.io,phiroict/docker,phiroict/docker,ph-One/compose,ChrisChinchilla/compose,johnstep/docker.github.io,gdevillele/docker.github.io,vdemeester/compose,shin-/docker.github.io,shin-/docker.github.io,kikkomep/compose,mdaue/compose,dnephin/compose,aduermael/docker.github.io,shin-/docker.github.io,joeuo/docker.github.io,KalleDK/compose,joeuo/docker.github.io,LuisBosquez/docker.github.io,jzwlqx/denverdino.github.io,menglingwei/denverdino.github.io,docker/docker.github.io,funkyfuture/docker-compose,twitherspoon/compose,sanscontext/docker.github.io,alexisbellido/docker.github.io,gdevillele/docker.github.io,amitsaha/compose,schmunk42/compose,troy0820/docker.github.io,gtrdotmcs/compose,mindaugasrukas/compose,hoogenm/compose,mnuessler/compose,josephpage/compose,LuisBosquez/docker.github.io,sanscontext/docker.github.io,xydinesh/compose,rillig/docker.github.io,albers/compose,phiroict/docker,ZJaffee/compose,amitsaha/compose,shubheksha/docker.github.io,dockerhn/compose,nhumrich/compose,thaJeztah/docker.github.io,LuisBosquez/docker.github.io,michael-k/docker-compose,schmunk42/compose,sdurrheimer/compose,MSakamaki/compose,BSWANG/denverdino.github.io | from docker import Client
from docker import tls
import ssl
import os
def docker_client():
"""
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
if cert_path == '':
cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
base_url = os.environ.get('DOCKER_HOST')
+ api_version = os.environ.get('COMPOSE_API_VERSION', '1.18')
+
tls_config = None
if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
parts = base_url.split('://', 1)
base_url = '%s://%s' % ('https', parts[1])
client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))
ca_cert = os.path.join(cert_path, 'ca.pem')
tls_config = tls.TLSConfig(
ssl_version=ssl.PROTOCOL_TLSv1,
verify=True,
assert_hostname=False,
client_cert=client_cert,
ca_cert=ca_cert,
)
timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
- return Client(base_url=base_url, tls=tls_config, version='1.18', timeout=timeout)
+ return Client(base_url=base_url, tls=tls_config, version=api_version, timeout=timeout)
| Allow API version specification via env var | ## Code Before:
from docker import Client
from docker import tls
import ssl
import os
def docker_client():
"""
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
if cert_path == '':
cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
base_url = os.environ.get('DOCKER_HOST')
tls_config = None
if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
parts = base_url.split('://', 1)
base_url = '%s://%s' % ('https', parts[1])
client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))
ca_cert = os.path.join(cert_path, 'ca.pem')
tls_config = tls.TLSConfig(
ssl_version=ssl.PROTOCOL_TLSv1,
verify=True,
assert_hostname=False,
client_cert=client_cert,
ca_cert=ca_cert,
)
timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
return Client(base_url=base_url, tls=tls_config, version='1.18', timeout=timeout)
## Instruction:
Allow API version specification via env var
## Code After:
from docker import Client
from docker import tls
import ssl
import os
def docker_client():
"""
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
if cert_path == '':
cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
base_url = os.environ.get('DOCKER_HOST')
api_version = os.environ.get('COMPOSE_API_VERSION', '1.18')
tls_config = None
if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
parts = base_url.split('://', 1)
base_url = '%s://%s' % ('https', parts[1])
client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))
ca_cert = os.path.join(cert_path, 'ca.pem')
tls_config = tls.TLSConfig(
ssl_version=ssl.PROTOCOL_TLSv1,
verify=True,
assert_hostname=False,
client_cert=client_cert,
ca_cert=ca_cert,
)
timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
return Client(base_url=base_url, tls=tls_config, version=api_version, timeout=timeout)
| ...
base_url = os.environ.get('DOCKER_HOST')
api_version = os.environ.get('COMPOSE_API_VERSION', '1.18')
tls_config = None
...
timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
return Client(base_url=base_url, tls=tls_config, version=api_version, timeout=timeout)
... |
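The lookup the commit adds, exercised in isolation; patch.dict keeps the environment change scoped to the block (unittest.mock is assumed here as a stand-in for the standalone mock package).
import os
from unittest import mock

def resolve_api_version(default='1.18'):
    # Same lookup as docker_client() above: env override with a pinned default.
    return os.environ.get('COMPOSE_API_VERSION', default)

with mock.patch.dict(os.environ, {}, clear=True):
    assert resolve_api_version() == '1.18'   # unset, so the old default applies
with mock.patch.dict(os.environ, {'COMPOSE_API_VERSION': '1.19'}):
    assert resolve_api_version() == '1.19'   # the override wins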
e42d38f9ad3f8b5229c9618e4dd9d6b371de89c5 | test/test_am_bmi.py | test/test_am_bmi.py | import unittest
import utils
import os
import sys
import shutil
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(TOPDIR, 'lib'))
sys.path.append(TOPDIR)
import cryptosite.am_bmi
class Tests(unittest.TestCase):
def test_get_sas(self):
"""Test get_sas() function"""
with utils.temporary_working_directory() as tmpdir:
shutil.copy(os.path.join(TOPDIR, 'test', 'input', 'test.pdb'),
'1abc.pdb')
out = cryptosite.am_bmi.get_sas('1abc', 1.4)
self.assertEqual(len(out), 60)
if __name__ == '__main__':
unittest.main()
| import unittest
import utils
import os
import sys
import shutil
import subprocess
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
utils.set_search_paths(TOPDIR)
import cryptosite.am_bmi
class Tests(unittest.TestCase):
def test_get_sas(self):
"""Test get_sas() function"""
with utils.temporary_working_directory() as tmpdir:
shutil.copy(os.path.join(TOPDIR, 'test', 'input', 'test.pdb'),
'1abc.pdb')
out = cryptosite.am_bmi.get_sas('1abc', 1.4)
self.assertEqual(len(out), 60)
def test_main(self):
"""Test simple complete run of am_bmi"""
with utils.temporary_working_directory() as tmpdir:
shutil.copy(os.path.join(TOPDIR, 'test', 'input',
'pm.pdb.B10010001.pdb'), '.')
with open('SnapList.txt', 'w') as fh:
fh.write("pm.pdb.B10010001.pdb -100.0\n")
fh.write("high-energy.pdb -10.0\n")
subprocess.check_call(['cryptosite', 'am_bmi'])
with open('am_features.out') as fh:
lines = sorted(fh.readlines())
self.assertEqual(len(lines), 12)
self.assertEqual(lines[0], 'ALA\t1\tA\t17.328\t12.02\t32.6\t48.0\n')
if __name__ == '__main__':
unittest.main()
| Test simple complete run of am_bmi. | Test simple complete run of am_bmi.
| Python | lgpl-2.1 | salilab/cryptosite,salilab/cryptosite,salilab/cryptosite | import unittest
import utils
import os
import sys
import shutil
+ import subprocess
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ utils.set_search_paths(TOPDIR)
- sys.path.append(os.path.join(TOPDIR, 'lib'))
- sys.path.append(TOPDIR)
import cryptosite.am_bmi
class Tests(unittest.TestCase):
def test_get_sas(self):
"""Test get_sas() function"""
with utils.temporary_working_directory() as tmpdir:
shutil.copy(os.path.join(TOPDIR, 'test', 'input', 'test.pdb'),
'1abc.pdb')
out = cryptosite.am_bmi.get_sas('1abc', 1.4)
self.assertEqual(len(out), 60)
+ def test_main(self):
+ """Test simple complete run of am_bmi"""
+ with utils.temporary_working_directory() as tmpdir:
+ shutil.copy(os.path.join(TOPDIR, 'test', 'input',
+ 'pm.pdb.B10010001.pdb'), '.')
+ with open('SnapList.txt', 'w') as fh:
+ fh.write("pm.pdb.B10010001.pdb -100.0\n")
+ fh.write("high-energy.pdb -10.0\n")
+ subprocess.check_call(['cryptosite', 'am_bmi'])
+ with open('am_features.out') as fh:
+ lines = sorted(fh.readlines())
+ self.assertEqual(len(lines), 12)
+ self.assertEqual(lines[0], 'ALA\t1\tA\t17.328\t12.02\t32.6\t48.0\n')
+
if __name__ == '__main__':
unittest.main()
| Test simple complete run of am_bmi. | ## Code Before:
import unittest
import utils
import os
import sys
import shutil
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(TOPDIR, 'lib'))
sys.path.append(TOPDIR)
import cryptosite.am_bmi
class Tests(unittest.TestCase):
def test_get_sas(self):
"""Test get_sas() function"""
with utils.temporary_working_directory() as tmpdir:
shutil.copy(os.path.join(TOPDIR, 'test', 'input', 'test.pdb'),
'1abc.pdb')
out = cryptosite.am_bmi.get_sas('1abc', 1.4)
self.assertEqual(len(out), 60)
if __name__ == '__main__':
unittest.main()
## Instruction:
Test simple complete run of am_bmi.
## Code After:
import unittest
import utils
import os
import sys
import shutil
import subprocess
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
utils.set_search_paths(TOPDIR)
import cryptosite.am_bmi
class Tests(unittest.TestCase):
def test_get_sas(self):
"""Test get_sas() function"""
with utils.temporary_working_directory() as tmpdir:
shutil.copy(os.path.join(TOPDIR, 'test', 'input', 'test.pdb'),
'1abc.pdb')
out = cryptosite.am_bmi.get_sas('1abc', 1.4)
self.assertEqual(len(out), 60)
def test_main(self):
"""Test simple complete run of am_bmi"""
with utils.temporary_working_directory() as tmpdir:
shutil.copy(os.path.join(TOPDIR, 'test', 'input',
'pm.pdb.B10010001.pdb'), '.')
with open('SnapList.txt', 'w') as fh:
fh.write("pm.pdb.B10010001.pdb -100.0\n")
fh.write("high-energy.pdb -10.0\n")
subprocess.check_call(['cryptosite', 'am_bmi'])
with open('am_features.out') as fh:
lines = sorted(fh.readlines())
self.assertEqual(len(lines), 12)
self.assertEqual(lines[0], 'ALA\t1\tA\t17.328\t12.02\t32.6\t48.0\n')
if __name__ == '__main__':
unittest.main()
| # ... existing code ...
import shutil
import subprocess
# ... modified code ...
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
utils.set_search_paths(TOPDIR)
import cryptosite.am_bmi
...
def test_main(self):
"""Test simple complete run of am_bmi"""
with utils.temporary_working_directory() as tmpdir:
shutil.copy(os.path.join(TOPDIR, 'test', 'input',
'pm.pdb.B10010001.pdb'), '.')
with open('SnapList.txt', 'w') as fh:
fh.write("pm.pdb.B10010001.pdb -100.0\n")
fh.write("high-energy.pdb -10.0\n")
subprocess.check_call(['cryptosite', 'am_bmi'])
with open('am_features.out') as fh:
lines = sorted(fh.readlines())
self.assertEqual(len(lines), 12)
self.assertEqual(lines[0], 'ALA\t1\tA\t17.328\t12.02\t32.6\t48.0\n')
if __name__ == '__main__':
# ... rest of the code ... |
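One plausible shape for the temporary_working_directory() helper these tests lean on; the real implementation lives in the project's test utils, so this sketch only captures the contract: chdir into a fresh directory, restore and clean up on exit.
import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def temporary_working_directory():
    old_cwd = os.getcwd()
    tmpdir = tempfile.mkdtemp()
    try:
        os.chdir(tmpdir)
        yield tmpdir
    finally:
        os.chdir(old_cwd)  # restore even if the test body raised
        shutil.rmtree(tmpdir, ignore_errors=True)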
fbad1649e9939a3be4194e0d508ff5889f48bb6f | unleash/plugins/utils_assign.py | unleash/plugins/utils_assign.py | import re
# regular expression for finding assignments
_quotes = "['|\"|\"\"\"]"
BASE_ASSIGN_PATTERN = r'({}\s*=\s*[ubr]?' + _quotes + r')(.*?)(' +\
_quotes + r')'
def find_assign(data, varname):
"""Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
"""
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
if len(ASSIGN_RE.findall(data)) > 1:
raise ValueError('Found multiple {}-strings.'.format(varname))
if len(ASSIGN_RE.findall(data)) < 1:
raise ValueError('No version assignment ("{}") found.'.format(varname))
return ASSIGN_RE.search(data).group(2)
def replace_assign(data, varname, new_value):
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
def repl(m):
return m.group(1) + new_value + m.group(3)
return ASSIGN_RE.sub(repl, data)
| from unleash.exc import PluginError
import re
# regular expression for finding assignments
_quotes = "['|\"|\"\"\"]"
BASE_ASSIGN_PATTERN = r'({}\s*=\s*[ubr]?' + _quotes + r')(.*?)(' +\
_quotes + r')'
def find_assign(data, varname):
"""Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
"""
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
if len(ASSIGN_RE.findall(data)) > 1:
raise PluginError('Found multiple {}-strings.'.format(varname))
if len(ASSIGN_RE.findall(data)) < 1:
raise PluginError('No version assignment ("{}") found.'
.format(varname))
return ASSIGN_RE.search(data).group(2)
def replace_assign(data, varname, new_value):
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
def repl(m):
return m.group(1) + new_value + m.group(3)
return ASSIGN_RE.sub(repl, data)
| Raise PluginErrors instead of ValueErrors in versions. | Raise PluginErrors instead of ValueErrors in versions.
| Python | mit | mbr/unleash | + from unleash.exc import PluginError
+
import re
# regular expression for finding assignments
_quotes = "['|\"|\"\"\"]"
BASE_ASSIGN_PATTERN = r'({}\s*=\s*[ubr]?' + _quotes + r')(.*?)(' +\
_quotes + r')'
def find_assign(data, varname):
"""Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
"""
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
if len(ASSIGN_RE.findall(data)) > 1:
- raise ValueError('Found multiple {}-strings.'.format(varname))
+ raise PluginError('Found multiple {}-strings.'.format(varname))
if len(ASSIGN_RE.findall(data)) < 1:
- raise ValueError('No version assignment ("{}") found.'.format(varname))
+ raise PluginError('No version assignment ("{}") found.'
+ .format(varname))
return ASSIGN_RE.search(data).group(2)
def replace_assign(data, varname, new_value):
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
def repl(m):
return m.group(1) + new_value + m.group(3)
return ASSIGN_RE.sub(repl, data)
| Raise PluginErrors instead of ValueErrors in versions. | ## Code Before:
import re
# regular expression for finding assignments
_quotes = "['|\"|\"\"\"]"
BASE_ASSIGN_PATTERN = r'({}\s*=\s*[ubr]?' + _quotes + r')(.*?)(' +\
_quotes + r')'
def find_assign(data, varname):
"""Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
"""
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
if len(ASSIGN_RE.findall(data)) > 1:
raise ValueError('Found multiple {}-strings.'.format(varname))
if len(ASSIGN_RE.findall(data)) < 1:
raise ValueError('No version assignment ("{}") found.'.format(varname))
return ASSIGN_RE.search(data).group(2)
def replace_assign(data, varname, new_value):
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
def repl(m):
return m.group(1) + new_value + m.group(3)
return ASSIGN_RE.sub(repl, data)
## Instruction:
Raise PluginErrors instead of ValueErrors in versions.
## Code After:
from unleash.exc import PluginError
import re
# regular expression for finding assignments
_quotes = "['|\"|\"\"\"]"
BASE_ASSIGN_PATTERN = r'({}\s*=\s*[ubr]?' + _quotes + r')(.*?)(' +\
_quotes + r')'
def find_assign(data, varname):
"""Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
"""
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
if len(ASSIGN_RE.findall(data)) > 1:
raise PluginError('Found multiple {}-strings.'.format(varname))
if len(ASSIGN_RE.findall(data)) < 1:
raise PluginError('No version assignment ("{}") found.'
.format(varname))
return ASSIGN_RE.search(data).group(2)
def replace_assign(data, varname, new_value):
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
def repl(m):
return m.group(1) + new_value + m.group(3)
return ASSIGN_RE.sub(repl, data)
| # ... existing code ...
from unleash.exc import PluginError
import re
# ... modified code ...
if len(ASSIGN_RE.findall(data)) > 1:
raise PluginError('Found multiple {}-strings.'.format(varname))
...
if len(ASSIGN_RE.findall(data)) < 1:
raise PluginError('No version assignment ("{}") found.'
.format(varname))
# ... rest of the code ... |
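Usage sketch for the two helpers, assuming the module is importable as unleash.plugins.utils_assign; the expected values follow from the regex as written.
from unleash.plugins.utils_assign import find_assign, replace_assign

source = '__version__ = "1.0.0"\n'
assert find_assign(source, '__version__') == '1.0.0'
assert replace_assign(source, '__version__', '1.1.0') == '__version__ = "1.1.0"\n'
# Zero or multiple matches now surface as PluginError rather than ValueError.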
c3f94790e8d4d7bca68eb86d1172c9f69f1c070c | tests/support.py | tests/support.py | import os
def open_file(filename):
''' Load a file from the fixtures directory. '''
path = 'fixtures/' + filename
if ('tests' in os.listdir('.')):
path = 'tests/' + path
return open(path, mode='rb')
| import os
def open_file(filename, mode='rb'):
''' Load a file from the fixtures directory. '''
path = 'fixtures/' + filename
if ('tests' in os.listdir('.')):
path = 'tests/' + path
return open(path, mode=mode)
| Support opening files as text streams on tests | Support opening files as text streams on tests
| Python | mit | jaraco/ofxparse,rdsteed/ofxparse,udibr/ofxparse,jseutter/ofxparse | import os
- def open_file(filename):
+ def open_file(filename, mode='rb'):
''' Load a file from the fixtures directory. '''
path = 'fixtures/' + filename
if ('tests' in os.listdir('.')):
path = 'tests/' + path
- return open(path, mode='rb')
+ return open(path, mode=mode)
| Support opening files as text streams on tests | ## Code Before:
import os
def open_file(filename):
''' Load a file from the fixtures directory. '''
path = 'fixtures/' + filename
if ('tests' in os.listdir('.')):
path = 'tests/' + path
return open(path, mode='rb')
## Instruction:
Support opening files as text streams on tests
## Code After:
import os
def open_file(filename, mode='rb'):
''' Load a file from the fixtures directory. '''
path = 'fixtures/' + filename
if ('tests' in os.listdir('.')):
path = 'tests/' + path
return open(path, mode=mode)
| // ... existing code ...
def open_file(filename, mode='rb'):
''' Load a file from the fixtures directory. '''
// ... modified code ...
path = 'tests/' + path
return open(path, mode=mode)
// ... rest of the code ... |
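Usage sketch for the widened helper (fixture name illustrative, Python 3 semantics): the default stays binary for parser tests, while mode='r' yields a text stream for string assertions.
with open_file('example.ofx') as f:            # bytes, the unchanged default
    assert isinstance(f.read(0), bytes)
with open_file('example.ofx', mode='r') as f:  # text stream via the new parameter
    assert isinstance(f.read(0), str)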
5c28e34a795f3dfd8eebdbeb2509525ce4195bba | subversion/bindings/swig/python/tests/core.py | subversion/bindings/swig/python/tests/core.py | import unittest, os
import svn.core
class SubversionCoreTestCase(unittest.TestCase):
"""Test cases for the basic SWIG Subversion core"""
def test_SubversionException(self):
self.assertEqual(svn.core.SubversionException().args, ())
self.assertEqual(svn.core.SubversionException('error message').args,
('error message',))
self.assertEqual(svn.core.SubversionException('error message', 1).args,
('error message', 1))
def test_mime_type_is_binary(self):
self.assertEqual(0, svn.core.svn_mime_type_is_binary("text/plain"))
self.assertEqual(1, svn.core.svn_mime_type_is_binary("image/png"))
def test_mime_type_validate(self):
self.assertRaises(svn.core.SubversionException,
svn.core.svn_mime_type_validate, "this\nis\ninvalid\n")
svn.core.svn_mime_type_validate("unknown/but-valid; charset=utf8")
def suite():
return unittest.makeSuite(SubversionCoreTestCase, 'test')
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| import unittest, os
import svn.core
class SubversionCoreTestCase(unittest.TestCase):
"""Test cases for the basic SWIG Subversion core"""
def test_SubversionException(self):
self.assertEqual(svn.core.SubversionException().args, ())
self.assertEqual(svn.core.SubversionException('error message').args,
('error message',))
self.assertEqual(svn.core.SubversionException('error message', 1).args,
('error message', 1))
self.assertEqual(svn.core.SubversionException('error message', 1).apr_err,
1)
self.assertEqual(svn.core.SubversionException('error message', 1).message,
'error message')
def test_mime_type_is_binary(self):
self.assertEqual(0, svn.core.svn_mime_type_is_binary("text/plain"))
self.assertEqual(1, svn.core.svn_mime_type_is_binary("image/png"))
def test_mime_type_validate(self):
self.assertRaises(svn.core.SubversionException,
svn.core.svn_mime_type_validate, "this\nis\ninvalid\n")
svn.core.svn_mime_type_validate("unknown/but-valid; charset=utf8")
def suite():
return unittest.makeSuite(SubversionCoreTestCase, 'test')
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| Add a regression test for the bug fixed in r28485. | Add a regression test for the bug fixed in r28485.
* subversion/bindings/swig/python/tests/core.py
(SubversionCoreTestCase.test_SubversionException): Test explicit
exception fields.
| Python | apache-2.0 | jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion | import unittest, os
import svn.core
class SubversionCoreTestCase(unittest.TestCase):
"""Test cases for the basic SWIG Subversion core"""
def test_SubversionException(self):
self.assertEqual(svn.core.SubversionException().args, ())
self.assertEqual(svn.core.SubversionException('error message').args,
('error message',))
self.assertEqual(svn.core.SubversionException('error message', 1).args,
('error message', 1))
+ self.assertEqual(svn.core.SubversionException('error message', 1).apr_err,
+ 1)
+ self.assertEqual(svn.core.SubversionException('error message', 1).message,
+ 'error message')
def test_mime_type_is_binary(self):
self.assertEqual(0, svn.core.svn_mime_type_is_binary("text/plain"))
self.assertEqual(1, svn.core.svn_mime_type_is_binary("image/png"))
def test_mime_type_validate(self):
self.assertRaises(svn.core.SubversionException,
svn.core.svn_mime_type_validate, "this\nis\ninvalid\n")
svn.core.svn_mime_type_validate("unknown/but-valid; charset=utf8")
def suite():
return unittest.makeSuite(SubversionCoreTestCase, 'test')
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| Add a regression test for the bug fixed in r28485. | ## Code Before:
import unittest, os
import svn.core
class SubversionCoreTestCase(unittest.TestCase):
"""Test cases for the basic SWIG Subversion core"""
def test_SubversionException(self):
self.assertEqual(svn.core.SubversionException().args, ())
self.assertEqual(svn.core.SubversionException('error message').args,
('error message',))
self.assertEqual(svn.core.SubversionException('error message', 1).args,
('error message', 1))
def test_mime_type_is_binary(self):
self.assertEqual(0, svn.core.svn_mime_type_is_binary("text/plain"))
self.assertEqual(1, svn.core.svn_mime_type_is_binary("image/png"))
def test_mime_type_validate(self):
self.assertRaises(svn.core.SubversionException,
svn.core.svn_mime_type_validate, "this\nis\ninvalid\n")
svn.core.svn_mime_type_validate("unknown/but-valid; charset=utf8")
def suite():
return unittest.makeSuite(SubversionCoreTestCase, 'test')
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
## Instruction:
Add a regression test for the bug fixed in r28485.
## Code After:
import unittest, os
import svn.core
class SubversionCoreTestCase(unittest.TestCase):
"""Test cases for the basic SWIG Subversion core"""
def test_SubversionException(self):
self.assertEqual(svn.core.SubversionException().args, ())
self.assertEqual(svn.core.SubversionException('error message').args,
('error message',))
self.assertEqual(svn.core.SubversionException('error message', 1).args,
('error message', 1))
self.assertEqual(svn.core.SubversionException('error message', 1).apr_err,
1)
self.assertEqual(svn.core.SubversionException('error message', 1).message,
'error message')
def test_mime_type_is_binary(self):
self.assertEqual(0, svn.core.svn_mime_type_is_binary("text/plain"))
self.assertEqual(1, svn.core.svn_mime_type_is_binary("image/png"))
def test_mime_type_validate(self):
self.assertRaises(svn.core.SubversionException,
svn.core.svn_mime_type_validate, "this\nis\ninvalid\n")
svn.core.svn_mime_type_validate("unknown/but-valid; charset=utf8")
def suite():
return unittest.makeSuite(SubversionCoreTestCase, 'test')
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| ...
('error message', 1))
self.assertEqual(svn.core.SubversionException('error message', 1).apr_err,
1)
self.assertEqual(svn.core.SubversionException('error message', 1).message,
'error message')
... |
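A plain-Python sketch of the contract the new assertions pin down, namely tuple args preserved alongside named fields; this is an illustration, not the actual SWIG binding code.
class SubversionExceptionSketch(Exception):
    def __init__(self, message=None, apr_err=None):
        args = tuple(a for a in (message, apr_err) if a is not None)
        Exception.__init__(self, *args)  # keeps .args identical to the old behaviour
        self.message = message
        self.apr_err = apr_err

assert SubversionExceptionSketch().args == ()
e = SubversionExceptionSketch('error message', 1)
assert e.args == ('error message', 1)
assert e.apr_err == 1 and e.message == 'error message'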
9674a0869c2a333f74178e305677259e7ac379c3 | examples/ignore_websocket.py | examples/ignore_websocket.py |
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
if flow.response.headers.get_first("Connection", None) == "Upgrade":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
flow.reply(KILL) |
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
value = flow.response.headers.get_first("Connection", None)
if value and value.upper() == "UPGRADE":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
        flow.reply(KILL) | Make the WebSocket's connection header value case-insensitive | Make the WebSocket's connection header value case-insensitive
| Python | mit | liorvh/mitmproxy,ccccccccccc/mitmproxy,dwfreed/mitmproxy,mhils/mitmproxy,ryoqun/mitmproxy,Kriechi/mitmproxy,azureplus/mitmproxy,dufferzafar/mitmproxy,ikoz/mitmproxy,jpic/mitmproxy,tfeagle/mitmproxy,rauburtin/mitmproxy,MatthewShao/mitmproxy,pombredanne/mitmproxy,pombredanne/mitmproxy,laurmurclar/mitmproxy,StevenVanAcker/mitmproxy,fimad/mitmproxy,elitest/mitmproxy,claimsmall/mitmproxy,ikoz/mitmproxy,bazzinotti/mitmproxy,liorvh/mitmproxy,zbuc/mitmproxy,devasia1000/mitmproxy,ikoz/mitmproxy,StevenVanAcker/mitmproxy,jvillacorta/mitmproxy,tdickers/mitmproxy,StevenVanAcker/mitmproxy,syjzwjj/mitmproxy,ryoqun/mitmproxy,Endika/mitmproxy,0xwindows/InfoLeak,devasia1000/mitmproxy,elitest/mitmproxy,ParthGanatra/mitmproxy,mitmproxy/mitmproxy,noikiy/mitmproxy,jvillacorta/mitmproxy,onlywade/mitmproxy,sethp-jive/mitmproxy,cortesi/mitmproxy,dweinstein/mitmproxy,azureplus/mitmproxy,dufferzafar/mitmproxy,Fuzion24/mitmproxy,ADemonisis/mitmproxy,noikiy/mitmproxy,scriptmediala/mitmproxy,macmantrl/mitmproxy,guiquanz/mitmproxy,gzzhanghao/mitmproxy,byt3bl33d3r/mitmproxy,cortesi/mitmproxy,owers19856/mitmproxy,tdickers/mitmproxy,devasia1000/mitmproxy,syjzwjj/mitmproxy,Endika/mitmproxy,ccccccccccc/mitmproxy,xbzbing/mitmproxy,ujjwal96/mitmproxy,elitest/mitmproxy,liorvh/mitmproxy,inscriptionweb/mitmproxy,inscriptionweb/mitmproxy,tekii/mitmproxy,guiquanz/mitmproxy,vhaupert/mitmproxy,mosajjal/mitmproxy,ADemonisis/mitmproxy,sethp-jive/mitmproxy,ddworken/mitmproxy,vhaupert/mitmproxy,tfeagle/mitmproxy,jpic/mitmproxy,fimad/mitmproxy,legendtang/mitmproxy,xbzbing/mitmproxy,ujjwal96/mitmproxy,ddworken/mitmproxy,Kriechi/mitmproxy,inscriptionweb/mitmproxy,azureplus/mitmproxy,pombredanne/mitmproxy,tfeagle/mitmproxy,legendtang/mitmproxy,byt3bl33d3r/mitmproxy,rauburtin/mitmproxy,Fuzion24/mitmproxy,gzzhanghao/mitmproxy,noikiy/mitmproxy,elitest/mitmproxy,mhils/mitmproxy,ParthGanatra/mitmproxy,mosajjal/mitmproxy,owers19856/mitmproxy,tekii/mitmproxy,cortesi/mitmproxy,macmantrl/mitmproxy,bazzinotti/mitmproxy,dxq-git/mitmproxy,mitmproxy/mitmproxy,jpic/mitmproxy,mosajjal/mitmproxy,mhils/mitmproxy,dweinstein/mitmproxy,fimad/mitmproxy,dxq-git/mitmproxy,xbzbing/mitmproxy,claimsmall/mitmproxy,dwfreed/mitmproxy,xaxa89/mitmproxy,vhaupert/mitmproxy,ujjwal96/mitmproxy,Endika/mitmproxy,ParthGanatra/mitmproxy,meizhoubao/mitmproxy,meizhoubao/mitmproxy,dweinstein/mitmproxy,mhils/mitmproxy,Fuzion24/mitmproxy,gzzhanghao/mitmproxy,azureplus/mitmproxy,dxq-git/mitmproxy,ddworken/mitmproxy,ADemonisis/mitmproxy,0xwindows/InfoLeak,dufferzafar/mitmproxy,zlorb/mitmproxy,tekii/mitmproxy,scriptmediala/mitmproxy,dwfreed/mitmproxy,zlorb/mitmproxy,bazzinotti/mitmproxy,StevenVanAcker/mitmproxy,syjzwjj/mitmproxy,ccccccccccc/mitmproxy,xbzbing/mitmproxy,syjzwjj/mitmproxy,Endika/mitmproxy,onlywade/mitmproxy,sethp-jive/mitmproxy,xaxa89/mitmproxy,xaxa89/mitmproxy,jpic/mitmproxy,guiquanz/mitmproxy,rauburtin/mitmproxy,jvillacorta/mitmproxy,owers19856/mitmproxy,ZeYt/mitmproxy,ZeYt/mitmproxy,zbuc/mitmproxy,zlorb/mitmproxy,Kriechi/mitmproxy,ZeYt/mitmproxy,Kriechi/mitmproxy,ZeYt/mitmproxy,ryoqun/mitmproxy,devasia1000/mitmproxy,claimsmall/mitmproxy,laurmurclar/mitmproxy,MatthewShao/mitmproxy,noikiy/mitmproxy,onlywade/mitmproxy,macmantrl/mitmproxy,scriptmediala/mitmproxy,mitmproxy/mitmproxy,zlorb/mitmproxy,mhils/mitmproxy,sethp-jive/mitmproxy,dxq-git/mitmproxy,MatthewShao/mitmproxy,mitmproxy/mitmproxy,tdickers/mitmproxy,legendtang/mitmproxy,laurmurclar/mitmproxy,macmantrl/mitmproxy,tfeagle/mitmproxy,byt3bl33d3r/mitmproxy,ujjwal96/mitmproxy,Fuzion24/mitmproxy,owers1985
6/mitmproxy,ikoz/mitmproxy,mosajjal/mitmproxy,vhaupert/mitmproxy,zbuc/mitmproxy,onlywade/mitmproxy,0xwindows/InfoLeak,mitmproxy/mitmproxy,inscriptionweb/mitmproxy,ParthGanatra/mitmproxy,0xwindows/InfoLeak,guiquanz/mitmproxy,byt3bl33d3r/mitmproxy,meizhoubao/mitmproxy,ryoqun/mitmproxy,legendtang/mitmproxy,tdickers/mitmproxy,laurmurclar/mitmproxy,cortesi/mitmproxy,liorvh/mitmproxy,jvillacorta/mitmproxy,dwfreed/mitmproxy,gzzhanghao/mitmproxy,scriptmediala/mitmproxy,dweinstein/mitmproxy,meizhoubao/mitmproxy,rauburtin/mitmproxy,ccccccccccc/mitmproxy,tekii/mitmproxy,bazzinotti/mitmproxy,zbuc/mitmproxy,pombredanne/mitmproxy,claimsmall/mitmproxy,ddworken/mitmproxy,xaxa89/mitmproxy,fimad/mitmproxy,dufferzafar/mitmproxy,ADemonisis/mitmproxy,MatthewShao/mitmproxy |
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
- if flow.response.headers.get_first("Connection", None) == "Upgrade":
+ value = flow.response.headers.get_first("Connection", None)
+ if value and value.upper() == "UPGRADE":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
        flow.reply(KILL) | Make the WebSocket's connection header value case-insensitive | ## Code Before:
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
if flow.response.headers.get_first("Connection", None) == "Upgrade":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
flow.reply(KILL)
## Instruction:
Make the WebSocket's connection header value case-insensitive
## Code After:
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
value = flow.response.headers.get_first("Connection", None)
if value and value.upper() == "UPGRADE":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
flow.reply(KILL) | ...
def response(context, flow):
value = flow.response.headers.get_first("Connection", None)
if value and value.upper() == "UPGRADE":
# We need to send the response manually now...
... |
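The comparison change in isolation: HTTP connection options are case-insensitive tokens, and an absent header (None) must stay falsy, which is exactly what the value-then-upper() guard provides.
def is_upgrade(connection_value):
    # Mirrors the guard above: missing header is False, any casing of the token is True.
    return bool(connection_value) and connection_value.upper() == "UPGRADE"

assert is_upgrade("Upgrade") and is_upgrade("upgrade") and is_upgrade("UPGRADE")
assert not is_upgrade(None) and not is_upgrade("keep-alive")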
2e8f42c0b5eb018309d965b01659c496bc08a08b | quickstart/python/understand/example-1/update_initial_intent.6.x.py | quickstart/python/understand/example-1/update_initial_intent.6.x.py | from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
assistant_sid = 'UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
# Provide actions for your assistant: say something and listen for a response.
update_action = {
'actions': [
{'say': 'Hi there, I\'m your virtual assistant! How can I help you?'},
{'listen': True}
]
}
# Update the default intent to use your new actions.
client.preview.understand \
.assistants(assistant_sid) \
.intents('hello-world') \
.intent_actions().update(update_action)
print("Intent actions updated")
| from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
# Provide actions for your assistant: say something and listen for a response.
update_action = {
'actions': [
{'say': 'Hi there, I\'m your virtual assistant! How can I help you?'},
{'listen': True}
]
}
# Update the default intent to use your new actions.
# Replace 'UAXXX...' with your Assistant's unique SID https://www.twilio.com/console/assistant/list
client.preview.understand \
.assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.intents('hello-world') \
.intent_actions().update(update_action)
print("Intent actions updated")
| Update intent actions to use assistant SID inline | Update intent actions to use assistant SID inline
Maintaining consistency with the auto-generated code samples for Understand, which
don't allow for our variable-named placeholder values | Python | mit | TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets,TwilioDevEd/api-snippets | from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
-
- assistant_sid = 'UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
# Provide actions for your assistant: say something and listen for a response.
update_action = {
'actions': [
{'say': 'Hi there, I\'m your virtual assistant! How can I help you?'},
{'listen': True}
]
}
# Update the default intent to use your new actions.
+ # Replace 'UAXXX...' with your Assistant's unique SID https://www.twilio.com/console/assistant/list
client.preview.understand \
- .assistants(assistant_sid) \
+ .assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.intents('hello-world') \
.intent_actions().update(update_action)
print("Intent actions updated")
| Update intent actions to use assistant SID inline | ## Code Before:
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
assistant_sid = 'UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
# Provide actions for your assistant: say something and listen for a response.
update_action = {
'actions': [
{'say': 'Hi there, I\'m your virtual assistant! How can I help you?'},
{'listen': True}
]
}
# Update the default intent to use your new actions.
client.preview.understand \
.assistants(assistant_sid) \
.intents('hello-world') \
.intent_actions().update(update_action)
print("Intent actions updated")
## Instruction:
Update intent actions to use assistant SID inline
## Code After:
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
# Provide actions for your assistant: say something and listen for a response.
update_action = {
'actions': [
{'say': 'Hi there, I\'m your virtual assistant! How can I help you?'},
{'listen': True}
]
}
# Update the default intent to use your new actions.
# Replace 'UAXXX...' with your Assistant's unique SID https://www.twilio.com/console/assistant/list
client.preview.understand \
.assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.intents('hello-world') \
.intent_actions().update(update_action)
print("Intent actions updated")
| ...
client = Client(account_sid, auth_token)
...
# Update the default intent to use your new actions.
# Replace 'UAXXX...' with your Assistant's unique SID https://www.twilio.com/console/assistant/list
client.preview.understand \
.assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.intents('hello-world') \
... |
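An inlined placeholder keeps the snippet friendly to automated sample generation; an illustrative (not Twilio-official) sketch of how a docs pipeline could substitute it with a plain string replace instead of parsing variable names:
PLACEHOLDER = 'UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

def personalize(snippet, assistant_sid):
    # A literal placeholder can be swapped directly; a variable-named
    # value would require parsing the sample instead.
    return snippet.replace(PLACEHOLDER, assistant_sid)

line = ".assistants('%s') \\" % PLACEHOLDER
print(personalize(line, 'UA0123456789abcdef0123456789abcdef'))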
b595e1be84159c27b9d9bb81bbd66b78e5c084ce | pyoommf/small_example.py | pyoommf/small_example.py | from sim import Sim
from mesh import Mesh
from exchange import Exchange
from demag import Demag
from zeeman import Zeeman
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e6, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 1e-9 # simulation time (s)
# Create a mesh.
mesh = Mesh((lx, ly, lz), (dx, dy, dz))
# Create a simulation object.
sim = Sim(mesh, Ms, name='small_example')
# Add energies.
sim.add(Exchange(A))
sim.add(Demag())
sim.add(Zeeman(H))
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run_until(t_sim)
#sim.execute_mif()
| from sim import Sim
from mesh import Mesh
from exchange import Exchange
from demag import Demag
from zeeman import Zeeman
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e6, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 1e-9 # simulation time (s)
# Create a mesh.
mesh = Mesh((lx, ly, lz), (dx, dy, dz))
# Create a simulation object.
sim = Sim(mesh, Ms, name='small_example')
# Add energies.
sim.add(Exchange(A))
sim.add(Demag())
sim.add(Zeeman(H))
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run_until(t_sim)
| Remove separate execute mif command. | Remove separate execute mif command.
| Python | bsd-2-clause | ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python | from sim import Sim
from mesh import Mesh
from exchange import Exchange
from demag import Demag
from zeeman import Zeeman
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e6, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 1e-9 # simulation time (s)
# Create a mesh.
mesh = Mesh((lx, ly, lz), (dx, dy, dz))
# Create a simulation object.
sim = Sim(mesh, Ms, name='small_example')
# Add energies.
sim.add(Exchange(A))
sim.add(Demag())
sim.add(Zeeman(H))
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run_until(t_sim)
- #sim.execute_mif()
- | Remove separate execute mif command. | ## Code Before:
from sim import Sim
from mesh import Mesh
from exchange import Exchange
from demag import Demag
from zeeman import Zeeman
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e6, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 1e-9 # simulation time (s)
# Create a mesh.
mesh = Mesh((lx, ly, lz), (dx, dy, dz))
# Create a simulation object.
sim = Sim(mesh, Ms, name='small_example')
# Add energies.
sim.add(Exchange(A))
sim.add(Demag())
sim.add(Zeeman(H))
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run_until(t_sim)
#sim.execute_mif()
## Instruction:
Remove separate execute mif command.
## Code After:
from sim import Sim
from mesh import Mesh
from exchange import Exchange
from demag import Demag
from zeeman import Zeeman
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e6, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 1e-9 # simulation time (s)
# Create a mesh.
mesh = Mesh((lx, ly, lz), (dx, dy, dz))
# Create a simulation object.
sim = Sim(mesh, Ms, name='small_example')
# Add energies.
sim.add(Exchange(A))
sim.add(Demag())
sim.add(Zeeman(H))
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run_until(t_sim)
| ...
sim.run_until(t_sim)
... |
7619513d29c5f7ae886963ced70315d42dbd1a9b | ogbot/core/researcher.py | ogbot/core/researcher.py | from base import BaseBot
from scraping import research, general
class ResearcherBot(BaseBot):
def __init__(self, browser, config, planets):
self.research_client = research.Research(browser, config)
self.general_client = general.General(browser, config)
self.planets = planets
super(ResearcherBot, self).__init__(browser, config, planets)
def get_planet_for_research(self, planets=None):
if planets is None:
planets = self.planets
#for now the main planet will be used for research
return planets[0]
def get_next_research_item(self, planet):
available_research = self.research_client.get_available_research_for_planet(planet)
available_research_item = None
if available_research is not None:
available_research_item = available_research[0]
self.logger.info("Available Research:")
for item in available_research:
self.logger.info(" " + item.name)
# Favor ship upgrades
for item in available_research:
if item.id in [109, 110, 111]:
available_research_item = item
break
return available_research_item
def auto_research_next_item(self):
planet = self.get_planet_for_research(self.planets)
research = self.get_next_research_item(planet)
if research is not None:
self.research_client.research_item(research, planet)
| from base import BaseBot
from scraping import research, general
class ResearcherBot(BaseBot):
def __init__(self, browser, config, planets):
self.research_client = research.Research(browser, config)
self.general_client = general.General(browser, config)
self.planets = planets
super(ResearcherBot, self).__init__(browser, config, planets)
def get_planet_for_research(self, planets=None):
if planets is None:
planets = self.planets
#for now the main planet will be used for research
return planets[0]
def get_next_research_item(self, planet):
available_research = self.research_client.get_available_research_for_planet(planet)
available_research_item = None
if available_research is not None:
available_research_item = available_research[0]
self.logger.info("Available Research:")
for item in available_research:
self.logger.info(" " + item.name)
# Favor ship upgrades
for item in available_research:
if item.id in [109, 110, 111]:
available_research_item = item
break
return available_research_item
def auto_research_next_item(self):
planet = self.get_planet_for_research(self.planets)
research = self.get_next_research_item(planet)
if research is not None:
self.research_client.research_item(research, planet)
else:
self.logger.info("Nothing to research on planet %s" % planet)
| Add logging if no research available | Add logging if no research available
| Python | mit | yosh778/OG-Bot,yosh778/OG-Bot,yosh778/OG-Bot,winiciuscota/OG-Bot | from base import BaseBot
from scraping import research, general
class ResearcherBot(BaseBot):
def __init__(self, browser, config, planets):
self.research_client = research.Research(browser, config)
self.general_client = general.General(browser, config)
self.planets = planets
super(ResearcherBot, self).__init__(browser, config, planets)
def get_planet_for_research(self, planets=None):
if planets is None:
planets = self.planets
#for now the main planet will be used for research
return planets[0]
def get_next_research_item(self, planet):
available_research = self.research_client.get_available_research_for_planet(planet)
available_research_item = None
if available_research is not None:
available_research_item = available_research[0]
self.logger.info("Available Research:")
for item in available_research:
self.logger.info(" " + item.name)
# Favor ship upgrades
for item in available_research:
if item.id in [109, 110, 111]:
available_research_item = item
break
return available_research_item
def auto_research_next_item(self):
planet = self.get_planet_for_research(self.planets)
research = self.get_next_research_item(planet)
if research is not None:
self.research_client.research_item(research, planet)
+ else:
+ self.logger.info("Nothing to research on planet %s" % planet)
| Add logging if no research available | ## Code Before:
from base import BaseBot
from scraping import research, general
class ResearcherBot(BaseBot):
def __init__(self, browser, config, planets):
self.research_client = research.Research(browser, config)
self.general_client = general.General(browser, config)
self.planets = planets
super(ResearcherBot, self).__init__(browser, config, planets)
def get_planet_for_research(self, planets=None):
if planets is None:
planets = self.planets
#for now the main planet will be used for research
return planets[0]
def get_next_research_item(self, planet):
available_research = self.research_client.get_available_research_for_planet(planet)
available_research_item = None
if available_research is not None:
available_research_item = available_research[0]
self.logger.info("Available Research:")
for item in available_research:
self.logger.info(" " + item.name)
# Favor ship upgrades
for item in available_research:
if item.id in [109, 110, 111]:
available_research_item = item
break
return available_research_item
def auto_research_next_item(self):
planet = self.get_planet_for_research(self.planets)
research = self.get_next_research_item(planet)
if research is not None:
self.research_client.research_item(research, planet)
## Instruction:
Add logging if no research available
## Code After:
from base import BaseBot
from scraping import research, general
class ResearcherBot(BaseBot):
def __init__(self, browser, config, planets):
self.research_client = research.Research(browser, config)
self.general_client = general.General(browser, config)
self.planets = planets
super(ResearcherBot, self).__init__(browser, config, planets)
def get_planet_for_research(self, planets=None):
if planets is None:
planets = self.planets
#for now the main planet will be used for research
return planets[0]
def get_next_research_item(self, planet):
available_research = self.research_client.get_available_research_for_planet(planet)
available_research_item = None
if available_research is not None:
available_research_item = available_research[0]
self.logger.info("Available Research:")
for item in available_research:
self.logger.info(" " + item.name)
# Favor ship upgrades
for item in available_research:
if item.id in [109, 110, 111]:
available_research_item = item
break
return available_research_item
def auto_research_next_item(self):
planet = self.get_planet_for_research(self.planets)
research = self.get_next_research_item(planet)
if research is not None:
self.research_client.research_item(research, planet)
else:
self.logger.info("Nothing to research on planet %s" % planet)
| ...
self.research_client.research_item(research, planet)
else:
self.logger.info("Nothing to research on planet %s" % planet)
... |
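The pattern above — log the idle branch instead of returning silently — in isolation, so unattended bot runs leave a trace (logger setup assumed, names illustrative):
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("researcher")

def research_or_log(research_client, item, planet):
    if item is not None:
        research_client.research_item(item, planet)
    else:
        # Without this branch there is no record of why nothing was queued.
        logger.info("Nothing to research on planet %s", planet)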
29e1c2e30d284e1992bae59fe522c31b4e627f0d | dataset/dataset/pipelines.py | dataset/dataset/pipelines.py |
class DatasetPipeline(object):
def process_item(self, item, spider):
return item
| import re
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class DatasetPipeline(object):
title_regex = re.compile('(((((\\(?[A-Za-z]{1}[-A-Za-z]+,?\\)?)|[-0-9]+)|-)|\\(?[A-Za-z0-9]+\\)?) *)+')
frequency_regex = re.compile('([A-Z]{1}[a-z]+ *)+')
def process_item(self, item, spider):
if item['name']:
item['name'] = self.title_regex.search(item['name'][0].encode('ascii', 'ignore')).group()
else:
item['name'] = 'Dataset Title Regex Matching Unsuccessful'
if item['frequency']:
item['frequency'] = self.frequency_regex.search(item['frequency'][0].encode('ascii','ignore')).group()
else:
item['frequency'] = 'Dataset Frequency Attribute Regex Matching Unsuccessful'
return item
| Convert item processing to pipeline module | Convert item processing to pipeline module
| Python | mit | MaxLikelihood/CODE | + import re
+ # Define your item pipelines here
+ #
+ # Don't forget to add your pipeline to the ITEM_PIPELINES setting
+ # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class DatasetPipeline(object):
+
+ title_regex = re.compile('(((((\\(?[A-Za-z]{1}[-A-Za-z]+,?\\)?)|[-0-9]+)|-)|\\(?[A-Za-z0-9]+\\)?) *)+')
+ frequency_regex = re.compile('([A-Z]{1}[a-z]+ *)+')
+
def process_item(self, item, spider):
+
+ if item['name']:
+ item['name'] = self.title_regex.search(item['name'][0].encode('ascii', 'ignore')).group()
+ else:
+ item['name'] = 'Dataset Title Regex Matching Unsuccessful'
+
+ if item['frequency']:
+ item['frequency'] = self.frequency_regex.search(item['frequency'][0].encode('ascii','ignore')).group()
+ else:
+ item['frequency'] = 'Dataset Frequency Attribute Regex Matching Unsuccessful'
+
return item
| Convert item processing to pipeline module | ## Code Before:
class DatasetPipeline(object):
def process_item(self, item, spider):
return item
## Instruction:
Convert item processing to pipeline module
## Code After:
import re
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class DatasetPipeline(object):
title_regex = re.compile('(((((\\(?[A-Za-z]{1}[-A-Za-z]+,?\\)?)|[-0-9]+)|-)|\\(?[A-Za-z0-9]+\\)?) *)+')
frequency_regex = re.compile('([A-Z]{1}[a-z]+ *)+')
def process_item(self, item, spider):
if item['name']:
item['name'] = self.title_regex.search(item['name'][0].encode('ascii', 'ignore')).group()
else:
item['name'] = 'Dataset Title Regex Matching Unsuccessful'
if item['frequency']:
item['frequency'] = self.frequency_regex.search(item['frequency'][0].encode('ascii','ignore')).group()
else:
item['frequency'] = 'Dataset Frequency Attribute Regex Matching Unsuccessful'
return item
| // ... existing code ...
import re
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
// ... modified code ...
class DatasetPipeline(object):
title_regex = re.compile('(((((\\(?[A-Za-z]{1}[-A-Za-z]+,?\\)?)|[-0-9]+)|-)|\\(?[A-Za-z0-9]+\\)?) *)+')
frequency_regex = re.compile('([A-Z]{1}[a-z]+ *)+')
def process_item(self, item, spider):
if item['name']:
item['name'] = self.title_regex.search(item['name'][0].encode('ascii', 'ignore')).group()
else:
item['name'] = 'Dataset Title Regex Matching Unsuccessful'
if item['frequency']:
item['frequency'] = self.frequency_regex.search(item['frequency'][0].encode('ascii','ignore')).group()
else:
item['frequency'] = 'Dataset Frequency Attribute Regex Matching Unsuccessful'
return item
// ... rest of the code ... |
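A quick standalone exercise of the two regexes above (sample strings invented for illustration):
import re
title_regex = re.compile('(((((\\(?[A-Za-z]{1}[-A-Za-z]+,?\\)?)|[-0-9]+)|-)|\\(?[A-Za-z0-9]+\\)?) *)+')
frequency_regex = re.compile('([A-Z]{1}[a-z]+ *)+')
# Leading garbage is skipped by search(); the match group is the cleaned value.
print(title_regex.search('Census of Population, 2011').group())
print(frequency_regex.search('\n  Annually').group())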
84e964eba11e344f6f0ec612b5743e693a8825bd | thoonk/config.py | thoonk/config.py | import json
import threading
import uuid
from thoonk.consts import *
class ConfigCache(object):
def __init__(self, pubsub):
self._feeds = {}
self.pubsub = pubsub
self.lock = threading.Lock()
self.instance = uuid.uuid4().hex
def __getitem__(self, feed):
with self.lock:
if feed in self._feeds:
return self._feeds[feed]
else:
if not self.pubsub.feed_exists(feed):
raise FeedDoesNotExist
config = json.loads(self.pubsub.redis.get(FEEDCONFIG % feed))
self._feeds[feed] = self.pubsub.feedtypes[config.get(u'type', u'feed')](self.pubsub, feed, config)
return self._feeds[feed]
def invalidate(self, feed, instance, delete=False):
if instance != self.instance:
with self.lock:
if feed in self._feeds:
if delete:
del self._feeds[feed]
else:
del self._feeds[feed].config
| import json
import threading
import uuid
class ConfigCache(object):
"""
The ConfigCache class stores an in-memory version of each
feed's configuration. As there may be multiple systems using
Thoonk with the same Redis server, and each with its own
ConfigCache instance, each ConfigCache has a self.instance
field to uniquely identify itself.
Attributes:
thoonk -- The main Thoonk object.
instance -- A hex string for uniquely identifying this
ConfigCache instance.
Methods:
invalidate -- Force a feed's config to be retrieved from
Redis instead of in-memory.
"""
def __init__(self, thoonk):
"""
Create a new configuration cache.
Arguments:
thoonk -- The main Thoonk object.
"""
self._feeds = {}
self.thoonk = thoonk
self.lock = threading.Lock()
self.instance = uuid.uuid4().hex
def __getitem__(self, feed):
"""
Return a feed object for a given feed name.
Arguments:
feed -- The name of the requested feed.
"""
with self.lock:
if feed in self._feeds:
return self._feeds[feed]
else:
if not self.thoonk.feed_exists(feed):
raise FeedDoesNotExist
config = self.thoonk.redis.get('feed.config:%s' % feed)
config = json.loads(config)
feed_type = config.get(u'type', u'feed')
feed_class = self.thoonk.feedtypes[feed_type]
self._feeds[feed] = feed_class(self.thoonk, feed, config)
return self._feeds[feed]
def invalidate(self, feed, instance, delete=False):
"""
Delete a configuration so that it will be retrieved from Redis
instead of from the cache.
Arguments:
feed -- The name of the feed to invalidate.
instance -- A UUID identifying the cache which made the
invalidation request.
delete -- Indicates if the entire feed object should be
invalidated, or just its configuration.
"""
if instance != self.instance:
with self.lock:
if feed in self._feeds:
if delete:
del self._feeds[feed]
else:
del self._feeds[feed].config
| Add docs to the ConfigCache. | Add docs to the ConfigCache.
| Python | mit | andyet/thoonk.py,fritzy/thoonk.py | import json
import threading
import uuid
- from thoonk.consts import *
-
class ConfigCache(object):
+
+ """
+ The ConfigCache class stores an in-memory version of each
+ feed's configuration. As there may be multiple systems using
+ Thoonk with the same Redis server, and each with its own
+ ConfigCache instance, each ConfigCache has a self.instance
+ field to uniquely identify itself.
+
+ Attributes:
+ thoonk -- The main Thoonk object.
+ instance -- A hex string for uniquely identifying this
+ ConfigCache instance.
+
+ Methods:
+ invalidate -- Force a feed's config to be retrieved from
+ Redis instead of in-memory.
+ """
+
- def __init__(self, pubsub):
+ def __init__(self, thoonk):
+ """
+ Create a new configuration cache.
+
+ Arguments:
+ thoonk -- The main Thoonk object.
+ """
self._feeds = {}
- self.pubsub = pubsub
+ self.thoonk = thoonk
self.lock = threading.Lock()
self.instance = uuid.uuid4().hex
def __getitem__(self, feed):
+ """
+ Return a feed object for a given feed name.
+
+ Arguments:
+ feed -- The name of the requested feed.
+ """
with self.lock:
if feed in self._feeds:
return self._feeds[feed]
else:
- if not self.pubsub.feed_exists(feed):
+ if not self.thoonk.feed_exists(feed):
raise FeedDoesNotExist
- config = json.loads(self.pubsub.redis.get(FEEDCONFIG % feed))
- self._feeds[feed] = self.pubsub.feedtypes[config.get(u'type', u'feed')](self.pubsub, feed, config)
+ config = self.thoonk.redis.get('feed.config:%s' % feed)
+ config = json.loads(config)
+ feed_type = config.get(u'type', u'feed')
+ feed_class = self.thoonk.feedtypes[feed_type]
+ self._feeds[feed] = feed_class(self.thoonk, feed, config)
return self._feeds[feed]
def invalidate(self, feed, instance, delete=False):
+ """
+ Delete a configuration so that it will be retrieved from Redis
+ instead of from the cache.
+
+ Arguments:
+ feed -- The name of the feed to invalidate.
+ instance -- A UUID identifying the cache which made the
+ invalidation request.
+ delete -- Indicates if the entire feed object should be
+ invalidated, or just its configuration.
+ """
if instance != self.instance:
with self.lock:
if feed in self._feeds:
if delete:
del self._feeds[feed]
else:
del self._feeds[feed].config
| Add docs to the ConfigCache. | ## Code Before:
import json
import threading
import uuid
from thoonk.consts import *
class ConfigCache(object):
def __init__(self, pubsub):
self._feeds = {}
self.pubsub = pubsub
self.lock = threading.Lock()
self.instance = uuid.uuid4().hex
def __getitem__(self, feed):
with self.lock:
if feed in self._feeds:
return self._feeds[feed]
else:
if not self.pubsub.feed_exists(feed):
raise FeedDoesNotExist
config = json.loads(self.pubsub.redis.get(FEEDCONFIG % feed))
self._feeds[feed] = self.pubsub.feedtypes[config.get(u'type', u'feed')](self.pubsub, feed, config)
return self._feeds[feed]
def invalidate(self, feed, instance, delete=False):
if instance != self.instance:
with self.lock:
if feed in self._feeds:
if delete:
del self._feeds[feed]
else:
del self._feeds[feed].config
## Instruction:
Add docs to the ConfigCache.
## Code After:
import json
import threading
import uuid
class ConfigCache(object):
"""
The ConfigCache class stores an in-memory version of each
feed's configuration. As there may be multiple systems using
Thoonk with the same Redis server, and each with its own
ConfigCache instance, each ConfigCache has a self.instance
field to uniquely identify itself.
Attributes:
thoonk -- The main Thoonk object.
instance -- A hex string for uniquely identifying this
ConfigCache instance.
Methods:
invalidate -- Force a feed's config to be retrieved from
Redis instead of in-memory.
"""
def __init__(self, thoonk):
"""
Create a new configuration cache.
Arguments:
thoonk -- The main Thoonk object.
"""
self._feeds = {}
self.thoonk = thoonk
self.lock = threading.Lock()
self.instance = uuid.uuid4().hex
def __getitem__(self, feed):
"""
Return a feed object for a given feed name.
Arguments:
feed -- The name of the requested feed.
"""
with self.lock:
if feed in self._feeds:
return self._feeds[feed]
else:
if not self.thoonk.feed_exists(feed):
raise FeedDoesNotExist
config = self.thoonk.redis.get('feed.config:%s' % feed)
config = json.loads(config)
feed_type = config.get(u'type', u'feed')
feed_class = self.thoonk.feedtypes[feed_type]
self._feeds[feed] = feed_class(self.thoonk, feed, config)
return self._feeds[feed]
def invalidate(self, feed, instance, delete=False):
"""
Delete a configuration so that it will be retrieved from Redis
instead of from the cache.
Arguments:
feed -- The name of the feed to invalidate.
instance -- A UUID identifying the cache which made the
invalidation request.
delete -- Indicates if the entire feed object should be
invalidated, or just its configuration.
"""
if instance != self.instance:
with self.lock:
if feed in self._feeds:
if delete:
del self._feeds[feed]
else:
del self._feeds[feed].config
| # ... existing code ...
# ... modified code ...
class ConfigCache(object):
"""
The ConfigCache class stores an in-memory version of each
feed's configuration. As there may be multiple systems using
Thoonk with the same Redis server, and each with its own
ConfigCache instance, each ConfigCache has a self.instance
field to uniquely identify itself.
Attributes:
thoonk -- The main Thoonk object.
instance -- A hex string for uniquely identifying this
ConfigCache instance.
Methods:
invalidate -- Force a feed's config to be retrieved from
Redis instead of in-memory.
"""
def __init__(self, thoonk):
"""
Create a new configuration cache.
Arguments:
thoonk -- The main Thoonk object.
"""
self._feeds = {}
self.thoonk = thoonk
self.lock = threading.Lock()
...
def __getitem__(self, feed):
"""
Return a feed object for a given feed name.
Arguments:
feed -- The name of the requested feed.
"""
with self.lock:
...
else:
if not self.thoonk.feed_exists(feed):
raise FeedDoesNotExist
config = self.thoonk.redis.get('feed.config:%s' % feed)
config = json.loads(config)
feed_type = config.get(u'type', u'feed')
feed_class = self.thoonk.feedtypes[feed_type]
self._feeds[feed] = feed_class(self.thoonk, feed, config)
return self._feeds[feed]
...
def invalidate(self, feed, instance, delete=False):
"""
Delete a configuration so that it will be retrieved from Redis
instead of from the cache.
Arguments:
feed -- The name of the feed to invalidate.
instance -- A UUID identifying the cache which made the
invalidation request.
delete -- Indicates if the entire feed object should be
invalidated, or just its configuration.
"""
if instance != self.instance:
# ... rest of the code ... |
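The instance field documented above exists so a cache can ignore its own invalidation broadcasts; a self-contained illustration of that pattern:
import uuid

class TinyCache(object):
    def __init__(self):
        self.instance = uuid.uuid4().hex
        self._store = {}
    def invalidate(self, key, sender_instance):
        # Drop the entry only when some *other* cache asked for it.
        if sender_instance != self.instance and key in self._store:
            del self._store[key]

a, b = TinyCache(), TinyCache()
a._store['feed'] = b._store['feed'] = 'config'
a.invalidate('feed', a.instance)  # self-published event: ignored
assert 'feed' in a._store
a.invalidate('feed', b.instance)  # event from another cache: evicted
assert 'feed' not in a._store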
c44be6418bbf92121e56bf68d6c8e2ebef483e17 | script/generate_amalgamation.py | script/generate_amalgamation.py |
import sys
import os.path
import re
INCLUDE_PATTERN = re.compile(r'^\s*#include "([\w.]+)"')
seen_files = set()
def add_file(filename):
basename = os.path.basename(filename)
# Only include each file at most once.
if basename in seen_files:
return
seen_files.add(basename)
path = os.path.dirname(filename)
with open(filename, 'r') as f:
for line in f:
m = INCLUDE_PATTERN.match(line)
if m:
add_file(os.path.join(path, m.group(1)))
else:
sys.stdout.write(line)
for f in sys.argv[1:]:
add_file(f)
|
import sys
from os.path import basename, dirname, join
import re
INCLUDE_PATTERN = re.compile(r'^\s*#include "([\w.]+)"')
seen_files = set()
out = sys.stdout
def add_file(filename):
bname = basename(filename)
# Only include each file at most once.
if bname in seen_files:
return
seen_files.add(bname)
path = dirname(filename)
out.write('// Begin file "{0}"\n'.format(filename))
with open(filename, 'r') as f:
for line in f:
m = INCLUDE_PATTERN.match(line)
if m:
add_file(join(path, m.group(1)))
else:
out.write(line)
out.write('// End file "{0}"\n'.format(filename))
for f in sys.argv[1:]:
add_file(f)
| Add comments for file start/end | Add comments for file start/end
| Python | mit | Nave-Neel/wren,Nave-Neel/wren,minirop/wren,bigdimboom/wren,foresterre/wren,Nelarius/wren,minirop/wren,Nave-Neel/wren,foresterre/wren,Rohansi/wren,Rohansi/wren,minirop/wren,foresterre/wren,Nave-Neel/wren,minirop/wren,foresterre/wren,foresterre/wren,munificent/wren,bigdimboom/wren,Rohansi/wren,Nelarius/wren,Nelarius/wren,Nelarius/wren,Nelarius/wren,munificent/wren,munificent/wren,minirop/wren,munificent/wren,munificent/wren,bigdimboom/wren,bigdimboom/wren,munificent/wren,Rohansi/wren |
import sys
- import os.path
+ from os.path import basename, dirname, join
import re
INCLUDE_PATTERN = re.compile(r'^\s*#include "([\w.]+)"')
seen_files = set()
+ out = sys.stdout
def add_file(filename):
- basename = os.path.basename(filename)
+ bname = basename(filename)
# Only include each file at most once.
- if basename in seen_files:
+ if bname in seen_files:
return
- seen_files.add(basename)
+ seen_files.add(bname)
- path = os.path.dirname(filename)
+ path = dirname(filename)
+ out.write('// Begin file "{0}"\n'.format(filename))
with open(filename, 'r') as f:
for line in f:
m = INCLUDE_PATTERN.match(line)
if m:
- add_file(os.path.join(path, m.group(1)))
+ add_file(join(path, m.group(1)))
else:
- sys.stdout.write(line)
+ out.write(line)
+ out.write('// End file "{0}"\n'.format(filename))
for f in sys.argv[1:]:
add_file(f)
| Add comments for file start/end | ## Code Before:
import sys
import os.path
import re
INCLUDE_PATTERN = re.compile(r'^\s*#include "([\w.]+)"')
seen_files = set()
def add_file(filename):
basename = os.path.basename(filename)
# Only include each file at most once.
if basename in seen_files:
return
seen_files.add(basename)
path = os.path.dirname(filename)
with open(filename, 'r') as f:
for line in f:
m = INCLUDE_PATTERN.match(line)
if m:
add_file(os.path.join(path, m.group(1)))
else:
sys.stdout.write(line)
for f in sys.argv[1:]:
add_file(f)
## Instruction:
Add comments for file start/end
## Code After:
import sys
from os.path import basename, dirname, join
import re
INCLUDE_PATTERN = re.compile(r'^\s*#include "([\w.]+)"')
seen_files = set()
out = sys.stdout
def add_file(filename):
bname = basename(filename)
# Only include each file at most once.
if bname in seen_files:
return
seen_files.add(bname)
path = dirname(filename)
out.write('// Begin file "{0}"\n'.format(filename))
with open(filename, 'r') as f:
for line in f:
m = INCLUDE_PATTERN.match(line)
if m:
add_file(join(path, m.group(1)))
else:
out.write(line)
out.write('// End file "{0}"\n'.format(filename))
for f in sys.argv[1:]:
add_file(f)
| ...
import sys
from os.path import basename, dirname, join
import re
...
seen_files = set()
out = sys.stdout
...
def add_file(filename):
bname = basename(filename)
# Only include each file at most once.
if bname in seen_files:
return
seen_files.add(bname)
path = dirname(filename)
out.write('// Begin file "{0}"\n'.format(filename))
with open(filename, 'r') as f:
...
if m:
add_file(join(path, m.group(1)))
else:
out.write(line)
out.write('// End file "{0}"\n'.format(filename))
... |
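A filesystem-free sketch of the same logic, showing the begin/end markers around each included file (file contents are made up):
import re
INCLUDE = re.compile(r'^\s*#include "([\w.]+)"')
FILES = {'vm.c': '#include "vm.h"\nint x;', 'vm.h': 'typedef int Value;'}
seen = set()

def emit(name, out):
    if name in seen:
        return
    seen.add(name)
    out.append('// Begin file "%s"' % name)
    for line in FILES[name].splitlines():
        m = INCLUDE.match(line)
        if m:
            emit(m.group(1), out)
        else:
            out.append(line)
    out.append('// End file "%s"' % name)

out = []
emit('vm.c', out)
print('\n'.join(out))  # vm.h's markers appear nested inside vm.c's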
879b15779c921445ca4412d5e63319408d8e32bf | python/islp/02statlearn-ex.py | python/islp/02statlearn-ex.py | import pandas as pd
print('\nKNN\n---')
d = {'X1': [ 0, 2, 0, 0, -1, 1 ],
'X2': [ 3, 0, 1, 1, 0, 1 ],
'X3': [ 0, 0, 3, 2, 1, 1 ],
'Y': ['R', 'R', 'R', 'G', 'G', 'R']}
df = pd.DataFrame(data = d)
df = df.assign(dist = (df.X1**2 + df.X2**2 + df.X3**2)**(0.5))
df = df.sort_values(by='dist')
print(df)
print('K=1 =>', df.head(1).Y.to_numpy()[0])
print('K=3 =>', df.head(3).groupby('Y').count().sort_values(by='dist', # arbitrary
ascending=False).index.values[0])
print('\nCollege.csv\n-----------')
df = pd.read_csv('College.csv')
print(df)
| import matplotlib.pyplot as plt
import pandas as pd
print('\nKNN\n---')
d = {'X1': [ 0, 2, 0, 0, -1, 1 ],
'X2': [ 3, 0, 1, 1, 0, 1 ],
'X3': [ 0, 0, 3, 2, 1, 1 ],
'Y': ['R', 'R', 'R', 'G', 'G', 'R']}
df = pd.DataFrame(data = d)
df = df.assign(dist = (df.X1**2 + df.X2**2 + df.X3**2)**(0.5))
df = df.sort_values(by='dist')
print(df)
print('K=1 =>', df.head(1).Y.to_numpy()[0])
print('K=3 =>', df.head(3).groupby('Y').count().sort_values(by='dist', # arbitrary
ascending=False).index.values[0])
print('\nCollege.csv\n-----------')
df = pd.read_csv('College.csv')
df.rename(columns={'Unnamed: 0': 'Name'}, inplace=True)
df.set_index('Name', inplace=True)
print(df.describe())
fig = plt.figure()
gs = fig.add_gridspec(10, 10)
for r in range(10):
for c in range(10):
axes = fig.add_subplot(gs[r, c])
axes.xaxis.set_visible(False)
axes.yaxis.set_visible(False)
if r == c:
axes.annotate(df.columns.values[r], (0.5, 0.5),
xycoords='axes fraction', ha='center', va='center')
else:
df.plot.scatter(x=r, y=c, ax=axes)
plt.show()
| Add scatterplot matrix for college.csv. | Add scatterplot matrix for college.csv.
| Python | apache-2.0 | pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff | + import matplotlib.pyplot as plt
import pandas as pd
print('\nKNN\n---')
d = {'X1': [ 0, 2, 0, 0, -1, 1 ],
'X2': [ 3, 0, 1, 1, 0, 1 ],
'X3': [ 0, 0, 3, 2, 1, 1 ],
'Y': ['R', 'R', 'R', 'G', 'G', 'R']}
df = pd.DataFrame(data = d)
df = df.assign(dist = (df.X1**2 + df.X2**2 + df.X3**2)**(0.5))
df = df.sort_values(by='dist')
print(df)
print('K=1 =>', df.head(1).Y.to_numpy()[0])
print('K=3 =>', df.head(3).groupby('Y').count().sort_values(by='dist', # arbitrary
ascending=False).index.values[0])
print('\nCollege.csv\n-----------')
df = pd.read_csv('College.csv')
- print(df)
+ df.rename(columns={'Unnamed: 0': 'Name'}, inplace=True)
+ df.set_index('Name', inplace=True)
+ print(df.describe())
+ fig = plt.figure()
+ gs = fig.add_gridspec(10, 10)
+ for r in range(10):
+ for c in range(10):
+ axes = fig.add_subplot(gs[r, c])
+ axes.xaxis.set_visible(False)
+ axes.yaxis.set_visible(False)
+ if r == c:
+ axes.annotate(df.columns.values[r], (0.5, 0.5),
+ xycoords='axes fraction', ha='center', va='center')
+ else:
+ df.plot.scatter(x=r, y=c, ax=axes)
+ plt.show()
| Add scatterplot matrix for college.csv. | ## Code Before:
import pandas as pd
print('\nKNN\n---')
d = {'X1': [ 0, 2, 0, 0, -1, 1 ],
'X2': [ 3, 0, 1, 1, 0, 1 ],
'X3': [ 0, 0, 3, 2, 1, 1 ],
'Y': ['R', 'R', 'R', 'G', 'G', 'R']}
df = pd.DataFrame(data = d)
df = df.assign(dist = (df.X1**2 + df.X2**2 + df.X3**2)**(0.5))
df = df.sort_values(by='dist')
print(df)
print('K=1 =>', df.head(1).Y.to_numpy()[0])
print('K=3 =>', df.head(3).groupby('Y').count().sort_values(by='dist', # arbitrary
ascending=False).index.values[0])
print('\nCollege.csv\n-----------')
df = pd.read_csv('College.csv')
print(df)
## Instruction:
Add scatterplot matrix for college.csv.
## Code After:
import matplotlib.pyplot as plt
import pandas as pd
print('\nKNN\n---')
d = {'X1': [ 0, 2, 0, 0, -1, 1 ],
'X2': [ 3, 0, 1, 1, 0, 1 ],
'X3': [ 0, 0, 3, 2, 1, 1 ],
'Y': ['R', 'R', 'R', 'G', 'G', 'R']}
df = pd.DataFrame(data = d)
df = df.assign(dist = (df.X1**2 + df.X2**2 + df.X3**2)**(0.5))
df = df.sort_values(by='dist')
print(df)
print('K=1 =>', df.head(1).Y.to_numpy()[0])
print('K=3 =>', df.head(3).groupby('Y').count().sort_values(by='dist', # arbitrary
ascending=False).index.values[0])
print('\nCollege.csv\n-----------')
df = pd.read_csv('College.csv')
df.rename(columns={'Unnamed: 0': 'Name'}, inplace=True)
df.set_index('Name', inplace=True)
print(df.describe())
fig = plt.figure()
gs = fig.add_gridspec(10, 10)
for r in range(10):
for c in range(10):
axes = fig.add_subplot(gs[r, c])
axes.xaxis.set_visible(False)
axes.yaxis.set_visible(False)
if r == c:
axes.annotate(df.columns.values[r], (0.5, 0.5),
xycoords='axes fraction', ha='center', va='center')
else:
df.plot.scatter(x=r, y=c, ax=axes)
plt.show()
| // ... existing code ...
import matplotlib.pyplot as plt
import pandas as pd
// ... modified code ...
df = pd.read_csv('College.csv')
df.rename(columns={'Unnamed: 0': 'Name'}, inplace=True)
df.set_index('Name', inplace=True)
print(df.describe())
fig = plt.figure()
gs = fig.add_gridspec(10, 10)
for r in range(10):
for c in range(10):
axes = fig.add_subplot(gs[r, c])
axes.xaxis.set_visible(False)
axes.yaxis.set_visible(False)
if r == c:
axes.annotate(df.columns.values[r], (0.5, 0.5),
xycoords='axes fraction', ha='center', va='center')
else:
df.plot.scatter(x=r, y=c, ax=axes)
plt.show()
// ... rest of the code ... |
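For comparison, pandas ships a one-call scatter-matrix helper that yields a similar grid (histograms on the diagonal instead of the column-name labels used above); this sketch assumes College.csv is on disk with the standard ISLR column order:
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import scatter_matrix

df = pd.read_csv('College.csv', index_col=0)
scatter_matrix(df.iloc[:, 1:11], figsize=(10, 10))  # skip non-numeric 'Private'
plt.show()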
035ff2c50c5611406af172c6215f712086b75335 | tfr/sklearn.py | tfr/sklearn.py | from sklearn.base import BaseEstimator, TransformerMixin
from .signal import SignalFrames
from .reassignment import pitchgram
class PitchgramTransformer(BaseEstimator, TransformerMixin):
def __init__(self, sample_rate=44100, frame_size=4096, hop_size=2048,
bin_range=[-48, 67], bin_division=1):
self.sample_rate = sample_rate
self.frame_size = frame_size
self.hop_size = hop_size
# TODO: make this configurable
self.output_frame_size = hop_size
self.bin_range = bin_range
self.bin_division = bin_division
def transform(self, X, **transform_params):
"""
Transforms audio clip X into a normalized pitchgram.
Input: X - mono audio clip - numpy array of shape (samples,)
Output: X_pitchgram - numpy array of shape (frames, bins)
"""
signal_frames = SignalFrames(X, self.frame_size, self.hop_size,
self.sample_rate, mono_mix=True)
X_pitchgram = pitchgram(
signal_frames,
self.output_frame_size,
magnitudes='power_db_normalized',
bin_range=self.bin_range,
bin_division=self.bin_division)
return X_pitchgram
def fit(self, X, y=None, **fit_params):
return self
| from sklearn.base import BaseEstimator, TransformerMixin
from .signal import SignalFrames
from .reassignment import pitchgram
class PitchgramTransformer(BaseEstimator, TransformerMixin):
def __init__(self, sample_rate=44100, frame_size=4096, hop_size=2048,
output_frame_size=None,
bin_range=[-48, 67], bin_division=1):
self.sample_rate = sample_rate
self.frame_size = frame_size
self.hop_size = hop_size
# if no output frame size is specified the input hop size is the default
self.output_frame_size = output_frame_size if output_frame_size is not None else hop_size
self.bin_range = bin_range
self.bin_division = bin_division
def transform(self, X, **transform_params):
"""
Transforms audio clip X into a normalized pitchgram.
Input: X - mono audio clip - numpy array of shape (samples,)
Output: X_pitchgram - numpy array of shape (frames, bins)
"""
signal_frames = SignalFrames(X, self.frame_size, self.hop_size,
self.sample_rate, mono_mix=True)
X_pitchgram = pitchgram(
signal_frames,
self.output_frame_size,
magnitudes='power_db_normalized',
bin_range=self.bin_range,
bin_division=self.bin_division)
return X_pitchgram
def fit(self, X, y=None, **fit_params):
return self
| Add the output_frame_size parameter to PitchgramTransformer. | Add the output_frame_size parameter to PitchgramTransformer.
Without it the deserialization via jsonpickle fails.
| Python | mit | bzamecnik/tfr,bzamecnik/tfr | from sklearn.base import BaseEstimator, TransformerMixin
from .signal import SignalFrames
from .reassignment import pitchgram
class PitchgramTransformer(BaseEstimator, TransformerMixin):
def __init__(self, sample_rate=44100, frame_size=4096, hop_size=2048,
+ output_frame_size=None,
bin_range=[-48, 67], bin_division=1):
self.sample_rate = sample_rate
self.frame_size = frame_size
self.hop_size = hop_size
- # TODO: make this configurable
- self.output_frame_size = hop_size
+ # if no output frame size is specified the input hop size is the default
+ self.output_frame_size = output_frame_size if output_frame_size is not None else hop_size
self.bin_range = bin_range
self.bin_division = bin_division
def transform(self, X, **transform_params):
"""
Transforms audio clip X into a normalized pitchgram.
Input: X - mono audio clip - numpy array of shape (samples,)
Output: X_pitchgram - numpy array of shape (frames, bins)
"""
signal_frames = SignalFrames(X, self.frame_size, self.hop_size,
self.sample_rate, mono_mix=True)
X_pitchgram = pitchgram(
signal_frames,
self.output_frame_size,
magnitudes='power_db_normalized',
bin_range=self.bin_range,
bin_division=self.bin_division)
return X_pitchgram
def fit(self, X, y=None, **fit_params):
return self
| Add the output_frame_size parameter to PitchgramTransformer. | ## Code Before:
from sklearn.base import BaseEstimator, TransformerMixin
from .signal import SignalFrames
from .reassignment import pitchgram
class PitchgramTransformer(BaseEstimator, TransformerMixin):
def __init__(self, sample_rate=44100, frame_size=4096, hop_size=2048,
bin_range=[-48, 67], bin_division=1):
self.sample_rate = sample_rate
self.frame_size = frame_size
self.hop_size = hop_size
# TODO: make this configurable
self.output_frame_size = hop_size
self.bin_range = bin_range
self.bin_division = bin_division
def transform(self, X, **transform_params):
"""
Transforms audio clip X into a normalized pitchgram.
Input: X - mono audio clip - numpy array of shape (samples,)
Output: X_pitchgram - numpy array of shape (frames, bins)
"""
signal_frames = SignalFrames(X, self.frame_size, self.hop_size,
self.sample_rate, mono_mix=True)
X_pitchgram = pitchgram(
signal_frames,
self.output_frame_size,
magnitudes='power_db_normalized',
bin_range=self.bin_range,
bin_division=self.bin_division)
return X_pitchgram
def fit(self, X, y=None, **fit_params):
return self
## Instruction:
Add the output_frame_size parameter to PitchgramTransformer.
## Code After:
from sklearn.base import BaseEstimator, TransformerMixin
from .signal import SignalFrames
from .reassignment import pitchgram
class PitchgramTransformer(BaseEstimator, TransformerMixin):
def __init__(self, sample_rate=44100, frame_size=4096, hop_size=2048,
output_frame_size=None,
bin_range=[-48, 67], bin_division=1):
self.sample_rate = sample_rate
self.frame_size = frame_size
self.hop_size = hop_size
# if no output frame size is specified the input hop size is the default
self.output_frame_size = output_frame_size if output_frame_size is not None else hop_size
self.bin_range = bin_range
self.bin_division = bin_division
def transform(self, X, **transform_params):
"""
Transforms audio clip X into a normalized pitchgram.
Input: X - mono audio clip - numpy array of shape (samples,)
Output: X_pitchgram - numpy array of shape (frames, bins)
"""
signal_frames = SignalFrames(X, self.frame_size, self.hop_size,
self.sample_rate, mono_mix=True)
X_pitchgram = pitchgram(
signal_frames,
self.output_frame_size,
magnitudes='power_db_normalized',
bin_range=self.bin_range,
bin_division=self.bin_division)
return X_pitchgram
def fit(self, X, y=None, **fit_params):
return self
| // ... existing code ...
def __init__(self, sample_rate=44100, frame_size=4096, hop_size=2048,
output_frame_size=None,
bin_range=[-48, 67], bin_division=1):
// ... modified code ...
self.hop_size = hop_size
# if no output frame size is specified the input hop size is the default
self.output_frame_size = output_frame_size if output_frame_size is not None else hop_size
self.bin_range = bin_range
// ... rest of the code ... |
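Why the explicit keyword matters, sketched with scikit-learn's parameter protocol: get_params() only reports __init__ arguments, so the attribute now survives a round-trip (import path assumed from this module layout, tfr installed):
from tfr.sklearn import PitchgramTransformer

t = PitchgramTransformer(sample_rate=22050, hop_size=1024)
params = t.get_params()  # now includes output_frame_size
clone = PitchgramTransformer(**params)
assert clone.output_frame_size == t.output_frame_size == 1024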
cd621061773b7eafcea9358c9b762663a070ccc5 | cc/license/jurisdiction.py | cc/license/jurisdiction.py | import RDF
import zope.interface
import interfaces
import rdf_helper
class Jurisdiction(object):
zope.interface.implements(interfaces.IJurisdiction)
def __init__(self, short_name):
'''@param short_name can be e.g. mx'''
model = rdf_helper.init_model(
rdf_helper.JURI_RDF_PATH)
self.code = short_name
self.id = 'http://creativecommons.org/international/%s/' % short_name
id_uri = RDF.Uri(self.id)
self.local_url = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
self.launched = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
| import RDF
import zope.interface
import interfaces
import rdf_helper
class Jurisdiction(object):
zope.interface.implements(interfaces.IJurisdiction)
def __init__(self, short_name):
"""Creates an object representing a jurisdiction.
short_name is a (usually) two-letter code representing
the same jurisdiction; for a complete list, see
cc.license.jurisdiction_codes()"""
model = rdf_helper.init_model(
rdf_helper.JURI_RDF_PATH)
self.code = short_name
self.id = 'http://creativecommons.org/international/%s/' % short_name
id_uri = RDF.Uri(self.id)
try:
self.local_url = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
except rdf_helper.NoValuesFoundException:
self.local_url = None
try:
self.launched = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
except rdf_helper.NoValuesFoundException:
self.launched = None
| Add documentation and make Jurisdiction calls not fail when some of the values aren't found. | Add documentation and make Jurisdiction calls not fail when some of the values aren't found.
| Python | mit | creativecommons/cc.license,creativecommons/cc.license | import RDF
import zope.interface
import interfaces
import rdf_helper
class Jurisdiction(object):
zope.interface.implements(interfaces.IJurisdiction)
def __init__(self, short_name):
- '''@param short_name can be e.g. mx'''
+ """Creates an object representing a jurisdiction.
+ short_name is a (usually) two-letter code representing
+ the same jurisdiction; for a complete list, see
+ cc.license.jurisdiction_codes()"""
model = rdf_helper.init_model(
rdf_helper.JURI_RDF_PATH)
self.code = short_name
self.id = 'http://creativecommons.org/international/%s/' % short_name
id_uri = RDF.Uri(self.id)
+ try:
- self.local_url = rdf_helper.query_to_single_value(model,
+ self.local_url = rdf_helper.query_to_single_value(model,
- id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
+ id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
+ except rdf_helper.NoValuesFoundException:
+ self.local_url = None
+ try:
- self.launched = rdf_helper.query_to_single_value(model,
+ self.launched = rdf_helper.query_to_single_value(model,
- id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
+ id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
+ except rdf_helper.NoValuesFoundException:
+ self.launched = None
| Add documentation and make Jurisdiction calls not fail when some of the values aren't found. | ## Code Before:
import RDF
import zope.interface
import interfaces
import rdf_helper
class Jurisdiction(object):
zope.interface.implements(interfaces.IJurisdiction)
def __init__(self, short_name):
'''@param short_name can be e.g. mx'''
model = rdf_helper.init_model(
rdf_helper.JURI_RDF_PATH)
self.code = short_name
self.id = 'http://creativecommons.org/international/%s/' % short_name
id_uri = RDF.Uri(self.id)
self.local_url = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
self.launched = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
## Instruction:
Add documentation and make Jurisdiction calls not fail when some of the values aren't found.
## Code After:
import RDF
import zope.interface
import interfaces
import rdf_helper
class Jurisdiction(object):
zope.interface.implements(interfaces.IJurisdiction)
def __init__(self, short_name):
"""Creates an object representing a jurisdiction.
short_name is a (usually) two-letter code representing
the same jurisdiction; for a complete list, see
cc.license.jurisdiction_codes()"""
model = rdf_helper.init_model(
rdf_helper.JURI_RDF_PATH)
self.code = short_name
self.id = 'http://creativecommons.org/international/%s/' % short_name
id_uri = RDF.Uri(self.id)
try:
self.local_url = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
except rdf_helper.NoValuesFoundException:
self.local_url = None
try:
self.launched = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
except rdf_helper.NoValuesFoundException:
self.launched = None
| // ... existing code ...
def __init__(self, short_name):
"""Creates an object representing a jurisdiction.
short_name is a (usually) two-letter code representing
the same jurisdiction; for a complete list, see
cc.license.jurisdiction_codes()"""
model = rdf_helper.init_model(
// ... modified code ...
id_uri = RDF.Uri(self.id)
try:
self.local_url = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
except rdf_helper.NoValuesFoundException:
self.local_url = None
try:
self.launched = rdf_helper.query_to_single_value(model,
id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
except rdf_helper.NoValuesFoundException:
self.launched = None
// ... rest of the code ... |
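The try/except pairs above share one shape; a generic, self-contained version of that "missing value becomes None" pattern (exception class stubbed for the demo):
class NoValuesFoundException(Exception):
    pass

def single_value_or_none(lookup, *args):
    # Treat an absent RDF value as None rather than a hard failure.
    try:
        return lookup(*args)
    except NoValuesFoundException:
        return None

def missing(*args):
    raise NoValuesFoundException()

assert single_value_or_none(missing) is None
assert single_value_or_none(lambda: 'http://example.org/') == 'http://example.org/'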
0f7ebf148ab3f88fc983e60f689a9c740ae64e47 | outgoing_mail.py | outgoing_mail.py |
from google.appengine.api import mail
from google.appengine.ext.webapp import template
import os
from_address = 'admin@' + os.environ['APPLICATION_ID'] + '.appspotmail.com'
def send(to, template_name, values):
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
|
from google.appengine.api import mail
from google.appengine.ext.webapp import template
import os
from_address = 'EventBot <admin@' + os.environ['APPLICATION_ID'] + '.appspotmail.com>'
def send(to, template_name, values):
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
| Add display name for from address | Add display name for from address
| Python | mit | eentzel/myeventbot,eentzel/myeventbot,eentzel/myeventbot,eentzel/myeventbot,eentzel/myeventbot |
from google.appengine.api import mail
from google.appengine.ext.webapp import template
import os
- from_address = 'admin@' + os.environ['APPLICATION_ID'] + '.appspotmail.com'
+ from_address = 'EventBot <admin@' + os.environ['APPLICATION_ID'] + '.appspotmail.com>'
def send(to, template_name, values):
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
| Add display name for from address | ## Code Before:
from google.appengine.api import mail
from google.appengine.ext.webapp import template
import os
from_address = 'admin@' + os.environ['APPLICATION_ID'] + '.appspotmail.com'
def send(to, template_name, values):
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
## Instruction:
Add display name for from address
## Code After:
from google.appengine.api import mail
from google.appengine.ext.webapp import template
import os
from_address = 'EventBot <admin@' + os.environ['APPLICATION_ID'] + '.appspotmail.com>'
def send(to, template_name, values):
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
| # ... existing code ...
from_address = 'EventBot <admin@' + os.environ['APPLICATION_ID'] + '.appspotmail.com>'
# ... rest of the code ... |
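The angle-bracket form is the RFC 5322 "display-name <addr-spec>" syntax; a quick check with a made-up application id:
app_id = 'myeventbot'  # stand-in for os.environ['APPLICATION_ID']
from_address = 'EventBot <admin@' + app_id + '.appspotmail.com>'
assert from_address == 'EventBot <admin@myeventbot.appspotmail.com>'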
5ff2a8655caa66369733d7c151f36737217498f8 | scoring_engine/db.py | scoring_engine/db.py | import bcrypt
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from scoring_engine.config import config
isolation_level = "READ COMMITTED"
if 'sqlite' in config.db_uri:
# sqlite db does not support transaction based statements
# so we have to manually set it to something else
isolation_level = "READ UNCOMMITTED"
engine = create_engine(config.db_uri, isolation_level=isolation_level)
session = scoped_session(sessionmaker(bind=engine))
db_salt = bcrypt.gensalt()
| import bcrypt
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from scoring_engine.config import config
isolation_level = "READ COMMITTED"
if 'sqlite' in config.db_uri:
# sqlite db does not support transaction based statements
# so we have to manually set it to something else
isolation_level = "READ UNCOMMITTED"
engine = create_engine(config.db_uri, isolation_level=isolation_level)
session = scoped_session(sessionmaker(bind=engine))
db_salt = bcrypt.gensalt()
# This is a monkey patch so that we
# don't need to commit before every query
# We got weird results in the web ui when we didn't
# have this
def query_monkeypatch(classname):
session.commit()
return session.orig_query(classname)
session.orig_query = session.query
session.query = query_monkeypatch
| Add monkeypatch for session query problems | Add monkeypatch for session query problems
| Python | mit | pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine | import bcrypt
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from scoring_engine.config import config
isolation_level = "READ COMMITTED"
if 'sqlite' in config.db_uri:
# sqlite db does not support transaction based statements
# so we have to manually set it to something else
isolation_level = "READ UNCOMMITTED"
engine = create_engine(config.db_uri, isolation_level=isolation_level)
session = scoped_session(sessionmaker(bind=engine))
db_salt = bcrypt.gensalt()
+
+ # This is a monkey patch so that we
+ # don't need to commit before every query
+ # We got weird results in the web ui when we didn't
+ # have this
+ def query_monkeypatch(classname):
+ session.commit()
+ return session.orig_query(classname)
+
+
+ session.orig_query = session.query
+ session.query = query_monkeypatch
+ | Add monkeypatch for session query problems | ## Code Before:
import bcrypt
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from scoring_engine.config import config
isolation_level = "READ COMMITTED"
if 'sqlite' in config.db_uri:
# sqlite db does not support transaction based statements
# so we have to manually set it to something else
isolation_level = "READ UNCOMMITTED"
engine = create_engine(config.db_uri, isolation_level=isolation_level)
session = scoped_session(sessionmaker(bind=engine))
db_salt = bcrypt.gensalt()
## Instruction:
Add monkeypatch for session query problems
## Code After:
import bcrypt
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from scoring_engine.config import config
isolation_level = "READ COMMITTED"
if 'sqlite' in config.db_uri:
# sqlite db does not support transaction based statements
# so we have to manually set it to something else
isolation_level = "READ UNCOMMITTED"
engine = create_engine(config.db_uri, isolation_level=isolation_level)
session = scoped_session(sessionmaker(bind=engine))
db_salt = bcrypt.gensalt()
# This is a monkey patch so that we
# don't need to commit before every query
# We got weird results in the web ui when we didn't
# have this
def query_monkeypatch(classname):
session.commit()
return session.orig_query(classname)
session.orig_query = session.query
session.query = query_monkeypatch
| # ... existing code ...
db_salt = bcrypt.gensalt()
# This is a monkey patch so that we
# don't need to commit before every query
# We got weird results in the web ui when we didn't
# have this
def query_monkeypatch(classname):
session.commit()
return session.orig_query(classname)
session.orig_query = session.query
session.query = query_monkeypatch
# ... rest of the code ... |
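Note: a minimal standalone sketch of the same wrap-and-replace pattern for any SQLAlchemy scoped session; the helper name and the use of functools.wraps are illustrative additions, not part of the commit.

import functools

def install_commit_before_query(session):
    """Replace session.query so each call first ends the open
    transaction, giving the next read a fresh snapshot."""
    original = session.query

    @functools.wraps(original)
    def query(*args, **kwargs):
        session.commit()
        return original(*args, **kwargs)

    session.query = query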
59fef68bee92c45438a87336c92bce031de21139 | tests/test_utils.py | tests/test_utils.py |
from datetime import timedelta
from jose import utils
class TestUtils:
def test_total_seconds(self):
td = timedelta(seconds=5)
assert utils.timedelta_total_seconds(td) == 5
def test_long_to_base64(self):
assert utils.long_to_base64(0xDEADBEEF) == b'3q2-7w'
|
from datetime import timedelta
from jose import utils
class TestUtils:
def test_total_seconds(self):
td = timedelta(seconds=5)
assert utils.timedelta_total_seconds(td) == 5
def test_long_to_base64(self):
assert utils.long_to_base64(0xDEADBEEF) == b'3q2-7w'
assert utils.long_to_base64(0xCAFED00D, size=10) == b'AAAAAAAAyv7QDQ'
| Add test for size parameter of long_to_base64. | Add test for size parameter of long_to_base64.
| Python | mit | mpdavis/python-jose |
from datetime import timedelta
from jose import utils
class TestUtils:
def test_total_seconds(self):
td = timedelta(seconds=5)
assert utils.timedelta_total_seconds(td) == 5
def test_long_to_base64(self):
assert utils.long_to_base64(0xDEADBEEF) == b'3q2-7w'
+ assert utils.long_to_base64(0xCAFED00D, size=10) == b'AAAAAAAAyv7QDQ'
| Add test for size parameter of long_to_base64. | ## Code Before:
from datetime import timedelta
from jose import utils
class TestUtils:
def test_total_seconds(self):
td = timedelta(seconds=5)
assert utils.timedelta_total_seconds(td) == 5
def test_long_to_base64(self):
assert utils.long_to_base64(0xDEADBEEF) == b'3q2-7w'
## Instruction:
Add test for size parameter of long_to_base64.
## Code After:
from datetime import timedelta
from jose import utils
class TestUtils:
def test_total_seconds(self):
td = timedelta(seconds=5)
assert utils.timedelta_total_seconds(td) == 5
def test_long_to_base64(self):
assert utils.long_to_base64(0xDEADBEEF) == b'3q2-7w'
assert utils.long_to_base64(0xCAFED00D, size=10) == b'AAAAAAAAyv7QDQ'
| // ... existing code ...
assert utils.long_to_base64(0xDEADBEEF) == b'3q2-7w'
assert utils.long_to_base64(0xCAFED00D, size=10) == b'AAAAAAAAyv7QDQ'
// ... rest of the code ... |
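Note: a sketch of what the size parameter plausibly does (left-pad the big-endian bytes before URL-safe base64 encoding and strip the '=' padding). The implementation below is an assumption about the library, but it reproduces both expected values from the test.

import base64

def long_to_base64_sketch(n, size=0):
    nbytes = max(size, (n.bit_length() + 7) // 8)
    raw = n.to_bytes(nbytes, byteorder='big')  # zero bytes pad on the left
    return base64.urlsafe_b64encode(raw).rstrip(b'=')

assert long_to_base64_sketch(0xDEADBEEF) == b'3q2-7w'
assert long_to_base64_sketch(0xCAFED00D, size=10) == b'AAAAAAAAyv7QDQ'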
ee32d3746a9fa788a06931063a8242f936b6ed18 | src/data/meta.py | src/data/meta.py | import collections
class Meta(collections.OrderedDict):
def __init__(self, *args, **kwargs):
self._smallest = float('inf')
self._largest = 0
self._ordered = True
super(Meta, self).__init__(*args, **kwargs)
def __setitem__(self, key, value, *args, **kwargs):
if key in self and self[key] == value:
raise AssertionError('Redundant assignment')
if value > self._smallest:
self._ordered = False
else:
self._smallest = value
if value > self._largest:
self._largest = value
super(Meta, self).__setitem__(key, value, *args, **kwargs)
self._changed()
def items(self):
self._reorder()
return super(Meta, self).items()
def first(self):
self._reorder()
for k, v in self.items():
return k, v
def peek(self):
self._reorder()
for first in self:
return first
def magnitude(self):
return self._largest
def _reorder(self):
if self._ordered:
return
order = sorted(super(Meta, self).items(), key=lambda x: x[1], reverse=True)
for k, v in order:
self.move_to_end(k)
self._ordered = True
def _changed(self):
pass
| import collections
import typing
class Meta(collections.OrderedDict, typing.MutableMapping[str, float]):
def __init__(self, *args, **kwargs) -> None:
self._smallest = float('inf')
self._largest = 0
self._ordered = True
super(Meta, self).__init__(*args, **kwargs)
def __setitem__(self, key: str, value: float) -> None:
if key in self and self[key] == value:
raise AssertionError('Redundant assignment')
if value > self._smallest:
self._ordered = False
else:
self._smallest = value
if value > self._largest:
self._largest = value
super(Meta, self).__setitem__(key, value)
self._changed()
def items(self) -> typing.ItemsView[str, float]:
self._reorder()
return super(Meta, self).items()
def first(self) -> typing.Tuple[str, float]:
self._reorder()
for k, v in self.items():
return k, v
def peek(self) -> str:
self._reorder()
for first in self:
return first
def magnitude(self) -> float:
return self._largest
def _reorder(self) -> None:
if self._ordered:
return
order = sorted(super(Meta, self).items(), key=lambda x: x[1], reverse=True)
for k, v in order:
self.move_to_end(k)
self._ordered = True
def _changed(self):
pass
| Add typing information to Meta. | Add typing information to Meta.
| Python | mit | PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge | import collections
+ import typing
- class Meta(collections.OrderedDict):
+ class Meta(collections.OrderedDict, typing.MutableMapping[str, float]):
- def __init__(self, *args, **kwargs):
+ def __init__(self, *args, **kwargs) -> None:
self._smallest = float('inf')
self._largest = 0
self._ordered = True
super(Meta, self).__init__(*args, **kwargs)
- def __setitem__(self, key, value, *args, **kwargs):
+ def __setitem__(self, key: str, value: float) -> None:
if key in self and self[key] == value:
raise AssertionError('Redundant assignment')
if value > self._smallest:
self._ordered = False
else:
self._smallest = value
if value > self._largest:
self._largest = value
- super(Meta, self).__setitem__(key, value, *args, **kwargs)
+ super(Meta, self).__setitem__(key, value)
self._changed()
- def items(self):
+ def items(self) -> typing.ItemsView[str, float]:
self._reorder()
return super(Meta, self).items()
- def first(self):
+ def first(self) -> typing.Tuple[str, float]:
self._reorder()
for k, v in self.items():
return k, v
- def peek(self):
+ def peek(self) -> str:
self._reorder()
for first in self:
return first
- def magnitude(self):
+ def magnitude(self) -> float:
return self._largest
- def _reorder(self):
+ def _reorder(self) -> None:
if self._ordered:
return
order = sorted(super(Meta, self).items(), key=lambda x: x[1], reverse=True)
for k, v in order:
self.move_to_end(k)
self._ordered = True
def _changed(self):
pass
| Add typing information to Meta. | ## Code Before:
import collections
class Meta(collections.OrderedDict):
def __init__(self, *args, **kwargs):
self._smallest = float('inf')
self._largest = 0
self._ordered = True
super(Meta, self).__init__(*args, **kwargs)
def __setitem__(self, key, value, *args, **kwargs):
if key in self and self[key] == value:
raise AssertionError('Redundant assignment')
if value > self._smallest:
self._ordered = False
else:
self._smallest = value
if value > self._largest:
self._largest = value
super(Meta, self).__setitem__(key, value, *args, **kwargs)
self._changed()
def items(self):
self._reorder()
return super(Meta, self).items()
def first(self):
self._reorder()
for k, v in self.items():
return k, v
def peek(self):
self._reorder()
for first in self:
return first
def magnitude(self):
return self._largest
def _reorder(self):
if self._ordered:
return
order = sorted(super(Meta, self).items(), key=lambda x: x[1], reverse=True)
for k, v in order:
self.move_to_end(k)
self._ordered = True
def _changed(self):
pass
## Instruction:
Add typing information to Meta.
## Code After:
import collections
import typing
class Meta(collections.OrderedDict, typing.MutableMapping[str, float]):
def __init__(self, *args, **kwargs) -> None:
self._smallest = float('inf')
self._largest = 0
self._ordered = True
super(Meta, self).__init__(*args, **kwargs)
def __setitem__(self, key: str, value: float) -> None:
if key in self and self[key] == value:
raise AssertionError('Redundant assignment')
if value > self._smallest:
self._ordered = False
else:
self._smallest = value
if value > self._largest:
self._largest = value
super(Meta, self).__setitem__(key, value)
self._changed()
def items(self) -> typing.ItemsView[str, float]:
self._reorder()
return super(Meta, self).items()
def first(self) -> typing.Tuple[str, float]:
self._reorder()
for k, v in self.items():
return k, v
def peek(self) -> str:
self._reorder()
for first in self:
return first
def magnitude(self) -> float:
return self._largest
def _reorder(self) -> None:
if self._ordered:
return
order = sorted(super(Meta, self).items(), key=lambda x: x[1], reverse=True)
for k, v in order:
self.move_to_end(k)
self._ordered = True
def _changed(self):
pass
| # ... existing code ...
import collections
import typing
# ... modified code ...
class Meta(collections.OrderedDict, typing.MutableMapping[str, float]):
def __init__(self, *args, **kwargs) -> None:
self._smallest = float('inf')
...
def __setitem__(self, key: str, value: float) -> None:
if key in self and self[key] == value:
...
self._largest = value
super(Meta, self).__setitem__(key, value)
self._changed()
...
def items(self) -> typing.ItemsView[str, float]:
self._reorder()
...
def first(self) -> typing.Tuple[str, float]:
self._reorder()
...
def peek(self) -> str:
self._reorder()
...
def magnitude(self) -> float:
return self._largest
...
def _reorder(self) -> None:
if self._ordered:
# ... rest of the code ... |
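Note: a short usage sketch of what the annotations buy, assuming the class above is importable as Meta; the keys and values are illustrative.

from typing import Tuple

def top_entry(meta: Meta) -> Tuple[str, float]:
    # Static checkers now know keys are str and values are float.
    return meta.first()

m = Meta()
m['alpha'] = 0.75
m['beta'] = 0.5  # not larger than the current smallest, so order holds
assert m.peek() == 'alpha'
assert m.magnitude() == 0.75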
6d6d1af248ce555cca56521bba5e7c356817c74e | account/forms.py | account/forms.py | from django.contrib.auth.models import User
from django import forms
from account.models import UserProfile
attributes = {"class": "required"}
class RegistrationForm(forms.Form):
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
widget=forms.TextInput(attrs=attributes),
label="Username",
error_message={'invalid': "This value may contain only letters, numbers and @.+- characters."}
)
email = forms.EmailField()
def clean_username(self):
username = self.cleaned_data["username"]
existing = User.objects.filter(username__iexact=username)
if existing.exists():
raise forms.ValidationError("A user with that username already exists.")
else:
return self.cleaned_data["username"]
class SettingsForm(forms.Form):
email = forms.EmailField()
xsede_username = forms.CharField(max_length=50,
required=False,
label="XSEDE Username")
new_ssh_keypair = forms.BooleanField(required=False)
def clean(self):
if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
raise forms.ValidationError("The two password fields did not match.")
return self.cleaned_data
class UserProfileForm(forms.ModelForm):
private_key = forms.CharField(widget=forms.Textarea)
public_key = forms.CharField(widget=forms.Textarea)
class Meta:
model = UserProfile
fields = ("xsede_username", "public_key", "activation_key", "password_reset_key", "reset_expires")
| from django.contrib.auth.models import User
from django import forms
from account.models import UserProfile
attributes = {"class": "required"}
class RegistrationForm(forms.Form):
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
widget=forms.TextInput(attrs=attributes),
label="Username",
error_message={'invalid': "This value may contain only letters, numbers and @.+- characters."}
)
email = forms.EmailField()
def clean_username(self):
username = self.cleaned_data["username"]
existing = User.objects.filter(username__iexact=username)
if existing.exists():
raise forms.ValidationError("A user with that username already exists.")
else:
return self.cleaned_data["username"]
class SettingsForm(forms.Form):
email = forms.EmailField()
xsede_username = forms.CharField(max_length=50,
required=False,
label="XSEDE Username")
new_ssh_keypair = forms.BooleanField(required=False)
class UserProfileForm(forms.ModelForm):
private_key = forms.CharField(widget=forms.Textarea)
public_key = forms.CharField(widget=forms.Textarea)
class Meta:
model = UserProfile
fields = ("xsede_username", "public_key", "activation_key", "password_reset_key", "reset_expires")
| Remove unused section of SettingsForm | Remove unused section of SettingsForm
| Python | mit | crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp | from django.contrib.auth.models import User
from django import forms
from account.models import UserProfile
attributes = {"class": "required"}
class RegistrationForm(forms.Form):
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
widget=forms.TextInput(attrs=attributes),
label="Username",
error_message={'invalid': "This value may contain only letters, numbers and @.+- characters."}
)
email = forms.EmailField()
def clean_username(self):
username = self.cleaned_data["username"]
existing = User.objects.filter(username__iexact=username)
if existing.exists():
raise forms.ValidationError("A user with that username already exists.")
else:
return self.cleaned_data["username"]
class SettingsForm(forms.Form):
email = forms.EmailField()
xsede_username = forms.CharField(max_length=50,
required=False,
label="XSEDE Username")
new_ssh_keypair = forms.BooleanField(required=False)
- def clean(self):
- if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
- if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
- raise forms.ValidationError("The two password fields did not match.")
- return self.cleaned_data
-
class UserProfileForm(forms.ModelForm):
private_key = forms.CharField(widget=forms.Textarea)
public_key = forms.CharField(widget=forms.Textarea)
class Meta:
model = UserProfile
fields = ("xsede_username", "public_key", "activation_key", "password_reset_key", "reset_expires")
| Remove unused section of SettingsForm | ## Code Before:
from django.contrib.auth.models import User
from django import forms
from account.models import UserProfile
attributes = {"class": "required"}
class RegistrationForm(forms.Form):
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
widget=forms.TextInput(attrs=attributes),
label="Username",
error_message={'invalid': "This value may contain only letters, numbers and @.+- characters."}
)
email = forms.EmailField()
def clean_username(self):
username = self.cleaned_data["username"]
existing = User.objects.filter(username__iexact=username)
if existing.exists():
raise forms.ValidationError("A user with that username already exists.")
else:
return self.cleaned_data["username"]
class SettingsForm(forms.Form):
email = forms.EmailField()
xsede_username = forms.CharField(max_length=50,
required=False,
label="XSEDE Username")
new_ssh_keypair = forms.BooleanField(required=False)
def clean(self):
if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
raise forms.ValidationError("The two password fields did not match.")
return self.cleaned_data
class UserProfileForm(forms.ModelForm):
private_key = forms.CharField(widget=forms.Textarea)
public_key = forms.CharField(widget=forms.Textarea)
class Meta:
model = UserProfile
fields = ("xsede_username", "public_key", "activation_key", "password_reset_key", "reset_expires")
## Instruction:
Remove unused section of SettingsForm
## Code After:
from django.contrib.auth.models import User
from django import forms
from account.models import UserProfile
attributes = {"class": "required"}
class RegistrationForm(forms.Form):
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
widget=forms.TextInput(attrs=attributes),
label="Username",
error_message={'invalid': "This value may contain only letters, numbers and @.+- characters."}
)
email = forms.EmailField()
def clean_username(self):
username = self.cleaned_data["username"]
existing = User.objects.filter(username__iexact=username)
if existing.exists():
raise forms.ValidationError("A user with that username already exists.")
else:
return self.cleaned_data["username"]
class SettingsForm(forms.Form):
email = forms.EmailField()
xsede_username = forms.CharField(max_length=50,
required=False,
label="XSEDE Username")
new_ssh_keypair = forms.BooleanField(required=False)
class UserProfileForm(forms.ModelForm):
private_key = forms.CharField(widget=forms.Textarea)
public_key = forms.CharField(widget=forms.Textarea)
class Meta:
model = UserProfile
fields = ("xsede_username", "public_key", "activation_key", "password_reset_key", "reset_expires")
| # ... existing code ...
# ... rest of the code ... |
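Note: the removed clean() compared password1/password2 fields that SettingsForm never declares, so the check could never fire. A hypothetical form where that validation would belong:

from django import forms

class PasswordPairForm(forms.Form):
    password1 = forms.CharField(widget=forms.PasswordInput)
    password2 = forms.CharField(widget=forms.PasswordInput)

    def clean(self):
        cleaned = super(PasswordPairForm, self).clean()
        if cleaned.get('password1') != cleaned.get('password2'):
            raise forms.ValidationError('The two password fields did not match.')
        return cleaned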
63bf9c267ff891f1a2bd1f472a5d77f8df1e0209 | tests/iam/test_iam_valid_json.py | tests/iam/test_iam_valid_json.py | """Test IAM Policy templates are valid JSON."""
import jinja2
from foremast.iam.construct_policy import render_policy_template
from foremast.utils.templates import LOCAL_TEMPLATES
def iam_templates():
"""Generate list of IAM templates."""
jinjaenv = jinja2.Environment(loader=jinja2.FileSystemLoader([LOCAL_TEMPLATES]))
iam_template_names = jinjaenv.list_templates(filter_func=lambda x: all([
x.startswith('infrastructure/iam/'),
'trust' not in x,
'wrapper' not in x, ]))
for iam_template_name in iam_template_names:
yield iam_template_name
items = ['resource1', 'resource2']
if service == 'rds-db':
items = {
'resource1': 'user1',
'resource2': 'user2',
}
rendered = render_policy_template(
account_number='',
app='coreforrest',
env='dev',
group='forrest',
items=items,
pipeline_settings={
'lambda': {
'vpc_enabled': False,
},
},
region='us-east-1',
service=service)
assert isinstance(rendered, list)
| """Test IAM Policy templates are valid JSON."""
import json
import jinja2
import pytest
from foremast.iam.construct_policy import render_policy_template
from foremast.utils.templates import LOCAL_TEMPLATES
def iam_templates():
"""Generate list of IAM templates."""
jinjaenv = jinja2.Environment(loader=jinja2.FileSystemLoader([LOCAL_TEMPLATES]))
iam_template_names = jinjaenv.list_templates(filter_func=lambda x: all([
x.startswith('infrastructure/iam/'),
'trust' not in x,
'wrapper' not in x, ]))
for iam_template_name in iam_template_names:
yield iam_template_name
@pytest.mark.parametrize(argnames='template_name', argvalues=iam_templates())
def test_all_iam_templates(template_name):
"""Verify all IAM templates render as proper JSON."""
*_, service_json = template_name.split('/')
service, *_ = service_json.split('.')
items = ['resource1', 'resource2']
if service == 'rds-db':
items = {
'resource1': 'user1',
'resource2': 'user2',
}
try:
rendered = render_policy_template(
account_number='',
app='coreforrest',
env='dev',
group='forrest',
items=items,
pipeline_settings={
'lambda': {
'vpc_enabled': False,
},
},
region='us-east-1',
service=service)
except json.decoder.JSONDecodeError:
pytest.fail('Bad template: {0}'.format(template_name), pytrace=False)
assert isinstance(rendered, list)
| Split IAM template tests with parametrize | test: Split IAM template tests with parametrize
See also: #208
| Python | apache-2.0 | gogoair/foremast,gogoair/foremast | """Test IAM Policy templates are valid JSON."""
+ import json
+
import jinja2
+ import pytest
from foremast.iam.construct_policy import render_policy_template
from foremast.utils.templates import LOCAL_TEMPLATES
def iam_templates():
"""Generate list of IAM templates."""
jinjaenv = jinja2.Environment(loader=jinja2.FileSystemLoader([LOCAL_TEMPLATES]))
iam_template_names = jinjaenv.list_templates(filter_func=lambda x: all([
x.startswith('infrastructure/iam/'),
'trust' not in x,
'wrapper' not in x, ]))
for iam_template_name in iam_template_names:
yield iam_template_name
+ @pytest.mark.parametrize(argnames='template_name', argvalues=iam_templates())
+ def test_all_iam_templates(template_name):
+ """Verify all IAM templates render as proper JSON."""
+ *_, service_json = template_name.split('/')
+ service, *_ = service_json.split('.')
- items = ['resource1', 'resource2']
+ items = ['resource1', 'resource2']
- if service == 'rds-db':
+ if service == 'rds-db':
- items = {
+ items = {
- 'resource1': 'user1',
+ 'resource1': 'user1',
- 'resource2': 'user2',
+ 'resource2': 'user2',
- }
+ }
+ try:
rendered = render_policy_template(
account_number='',
app='coreforrest',
env='dev',
group='forrest',
items=items,
pipeline_settings={
'lambda': {
'vpc_enabled': False,
},
},
region='us-east-1',
service=service)
+ except json.decoder.JSONDecodeError:
+ pytest.fail('Bad template: {0}'.format(template_name), pytrace=False)
- assert isinstance(rendered, list)
+ assert isinstance(rendered, list)
| Split IAM template tests with paramtrize | ## Code Before:
"""Test IAM Policy templates are valid JSON."""
import jinja2
from foremast.iam.construct_policy import render_policy_template
from foremast.utils.templates import LOCAL_TEMPLATES
def iam_templates():
"""Generate list of IAM templates."""
jinjaenv = jinja2.Environment(loader=jinja2.FileSystemLoader([LOCAL_TEMPLATES]))
iam_template_names = jinjaenv.list_templates(filter_func=lambda x: all([
x.startswith('infrastructure/iam/'),
'trust' not in x,
'wrapper' not in x, ]))
for iam_template_name in iam_template_names:
yield iam_template_name
items = ['resource1', 'resource2']
if service == 'rds-db':
items = {
'resource1': 'user1',
'resource2': 'user2',
}
rendered = render_policy_template(
account_number='',
app='coreforrest',
env='dev',
group='forrest',
items=items,
pipeline_settings={
'lambda': {
'vpc_enabled': False,
},
},
region='us-east-1',
service=service)
assert isinstance(rendered, list)
## Instruction:
Split IAM template tests with paramtrize
## Code After:
"""Test IAM Policy templates are valid JSON."""
import json
import jinja2
import pytest
from foremast.iam.construct_policy import render_policy_template
from foremast.utils.templates import LOCAL_TEMPLATES
def iam_templates():
"""Generate list of IAM templates."""
jinjaenv = jinja2.Environment(loader=jinja2.FileSystemLoader([LOCAL_TEMPLATES]))
iam_template_names = jinjaenv.list_templates(filter_func=lambda x: all([
x.startswith('infrastructure/iam/'),
'trust' not in x,
'wrapper' not in x, ]))
for iam_template_name in iam_template_names:
yield iam_template_name
@pytest.mark.parametrize(argnames='template_name', argvalues=iam_templates())
def test_all_iam_templates(template_name):
"""Verify all IAM templates render as proper JSON."""
*_, service_json = template_name.split('/')
service, *_ = service_json.split('.')
items = ['resource1', 'resource2']
if service == 'rds-db':
items = {
'resource1': 'user1',
'resource2': 'user2',
}
try:
rendered = render_policy_template(
account_number='',
app='coreforrest',
env='dev',
group='forrest',
items=items,
pipeline_settings={
'lambda': {
'vpc_enabled': False,
},
},
region='us-east-1',
service=service)
except json.decoder.JSONDecodeError:
pytest.fail('Bad template: {0}'.format(template_name), pytrace=False)
assert isinstance(rendered, list)
| // ... existing code ...
"""Test IAM Policy templates are valid JSON."""
import json
import jinja2
import pytest
// ... modified code ...
@pytest.mark.parametrize(argnames='template_name', argvalues=iam_templates())
def test_all_iam_templates(template_name):
"""Verify all IAM templates render as proper JSON."""
*_, service_json = template_name.split('/')
service, *_ = service_json.split('.')
items = ['resource1', 'resource2']
if service == 'rds-db':
items = {
'resource1': 'user1',
'resource2': 'user2',
}
try:
rendered = render_policy_template(
...
service=service)
except json.decoder.JSONDecodeError:
pytest.fail('Bad template: {0}'.format(template_name), pytrace=False)
assert isinstance(rendered, list)
// ... rest of the code ... |
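Note: a minimal standalone illustration of the pattern adopted above, parametrizing over a generator so each template becomes its own reported test case; the template names here are hypothetical.

import pytest

def names():
    yield 'infrastructure/iam/s3.json.j2'      # hypothetical
    yield 'infrastructure/iam/lambda.json.j2'  # hypothetical

@pytest.mark.parametrize('name', names())
def test_each_template_separately(name):
    # A failure in one template no longer hides failures in the others.
    assert name.startswith('infrastructure/iam/')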
e5d88beba41de18ebab33e0770ddd8bb5174491e | pyfr/quadrules/__init__.py | pyfr/quadrules/__init__.py |
import re
from pyfr.quadrules.base import BaseQuadRule, BaseTabulatedQuadRule
from pyfr.quadrules.line import BaseLineQuadRule
from pyfr.quadrules.tri import BaseTriQuadRule
from pyfr.util import subclass_map
def get_quadrule(basecls, rule, npts):
# See if rule looks like the name of a scheme
if re.match(r'[a-zA-Z0-9\-+_]+$', rule):
rule_map = subclass_map(basecls, 'name')
return rule_map[rule](npts)
# Otherwise see if it looks like a tabulation
elif 'PTS' in rule.upper():
# Create a suitable subclass
rulecls = type(basecls.eletype, (BaseTabulatedQuadRule, basecls), {})
# Instantiate and validate
r = rulecls(rule)
if len(r.points) != npts:
raise ValueError('Invalid number of points for quad rule')
return r
# Invalid
else:
raise ValueError('Invalid quadrature rule')
|
import re
from pyfr.quadrules.base import BaseQuadRule, BaseTabulatedQuadRule
from pyfr.quadrules.line import BaseLineQuadRule
from pyfr.util import subclass_map
def get_quadrule(basecls, rule, npts):
# See if rule looks like the name of a scheme
if re.match(r'[a-zA-Z0-9\-+_]+$', rule):
rule_map = subclass_map(basecls, 'name')
return rule_map[rule](npts)
# Otherwise see if it looks like a tabulation
elif 'PTS' in rule.upper():
# Create a suitable subclass
rulecls = type(basecls.eletype, (BaseTabulatedQuadRule, basecls), {})
# Instantiate and validate
r = rulecls(rule)
if len(r.points) != npts:
raise ValueError('Invalid number of points for quad rule')
return r
# Invalid
else:
raise ValueError('Invalid quadrature rule')
| Fix a bug in the quadrules. | Fix a bug in the quadrules.
| Python | bsd-3-clause | tjcorona/PyFR,tjcorona/PyFR,iyer-arvind/PyFR,BrianVermeire/PyFR,tjcorona/PyFR,Aerojspark/PyFR |
import re
from pyfr.quadrules.base import BaseQuadRule, BaseTabulatedQuadRule
from pyfr.quadrules.line import BaseLineQuadRule
- from pyfr.quadrules.tri import BaseTriQuadRule
from pyfr.util import subclass_map
def get_quadrule(basecls, rule, npts):
# See if rule looks like the name of a scheme
if re.match(r'[a-zA-Z0-9\-+_]+$', rule):
rule_map = subclass_map(basecls, 'name')
return rule_map[rule](npts)
# Otherwise see if it looks like a tabulation
elif 'PTS' in rule.upper():
# Create a suitable subclass
rulecls = type(basecls.eletype, (BaseTabulatedQuadRule, basecls), {})
# Instantiate and validate
r = rulecls(rule)
if len(r.points) != npts:
raise ValueError('Invalid number of points for quad rule')
return r
# Invalid
else:
raise ValueError('Invalid quadrature rule')
| Fix a bug in the quadrules. | ## Code Before:
import re
from pyfr.quadrules.base import BaseQuadRule, BaseTabulatedQuadRule
from pyfr.quadrules.line import BaseLineQuadRule
from pyfr.quadrules.tri import BaseTriQuadRule
from pyfr.util import subclass_map
def get_quadrule(basecls, rule, npts):
# See if rule looks like the name of a scheme
if re.match(r'[a-zA-Z0-9\-+_]+$', rule):
rule_map = subclass_map(basecls, 'name')
return rule_map[rule](npts)
# Otherwise see if it looks like a tabulation
elif 'PTS' in rule.upper():
# Create a suitable subclass
rulecls = type(basecls.eletype, (BaseTabulatedQuadRule, basecls), {})
# Instantiate and validate
r = rulecls(rule)
if len(r.points) != npts:
raise ValueError('Invalid number of points for quad rule')
return r
# Invalid
else:
raise ValueError('Invalid quadrature rule')
## Instruction:
Fix a bug in the quadrules.
## Code After:
import re
from pyfr.quadrules.base import BaseQuadRule, BaseTabulatedQuadRule
from pyfr.quadrules.line import BaseLineQuadRule
from pyfr.util import subclass_map
def get_quadrule(basecls, rule, npts):
# See if rule looks like the name of a scheme
if re.match(r'[a-zA-Z0-9\-+_]+$', rule):
rule_map = subclass_map(basecls, 'name')
return rule_map[rule](npts)
# Otherwise see if it looks like a tabulation
elif 'PTS' in rule.upper():
# Create a suitable subclass
rulecls = type(basecls.eletype, (BaseTabulatedQuadRule, basecls), {})
# Instantiate and validate
r = rulecls(rule)
if len(r.points) != npts:
raise ValueError('Invalid number of points for quad rule')
return r
# Invalid
else:
raise ValueError('Invalid quadrature rule')
| // ... existing code ...
from pyfr.quadrules.line import BaseLineQuadRule
from pyfr.util import subclass_map
// ... rest of the code ... |
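Note: a behaviour check grounded in the dispatch code above, assuming the module is imported. A string that matches neither the scheme-name regex nor the 'PTS' tabulation marker raises ValueError.

try:
    get_quadrule(BaseLineQuadRule, '***not a rule***', npts=3)
except ValueError as exc:
    assert str(exc) == 'Invalid quadrature rule'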
5a885124432ccb33d180a8e73c753ceab54ffdf5 | src/Itemizers.py | src/Itemizers.py |
from Foundation import objc
from Foundation import NSBundle
from AppKit import NSImage
def iconForName(klass, name):
"""Return the NSImage instance representing a `name` item."""
imgpath = NSBundle.bundleForClass_(klass).pathForResource_ofType_(name, 'png')
img = NSImage.alloc().initWithContentsOfFile_(imgpath)
img.autorelease()
return img
class HaskellModuleItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for modules"""
def isDecorator(self):
return True
def image(self):
return iconForName(self.class__(), 'module')
class HaskellTypeItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for datatypes"""
def isDecorator(self):
return True
def image(self):
return iconForName(self.class__(), 'type')
def isTextualizer(self):
return True
def title(self):
return self.text().lstrip()
class HaskellFunctionItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for functions"""
pass
class HaskellCodeBlockItem(objc.lookUpClass('ESCodeBlockItem')):
"""Itemizer for code blocks"""
def isTextualizer(self):
return True
def title(self):
return '%s %s' % (u'{…}', self.text().lstrip())
|
from Foundation import objc
from Foundation import NSBundle
from AppKit import NSImage
haskellBundleIdentifier = 'org.purl.net.mkhl.haskell'
def iconForName(name):
"""Return the NSImage instance representing a `name` item."""
bundle = NSBundle.bundleWithIdentifier_(haskellBundleIdentifier)
imgpath = bundle.pathForResource_ofType_(name, 'png')
img = NSImage.alloc().initWithContentsOfFile_(imgpath)
img.autorelease()
return img
class HaskellModuleItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for modules"""
def isDecorator(self):
return True
def image(self):
return iconForName('module')
class HaskellTypeItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for datatypes"""
def isDecorator(self):
return True
def image(self):
return iconForName('type')
def isTextualizer(self):
return True
def title(self):
return self.text().lstrip()
class HaskellFunctionItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for functions"""
pass
class HaskellCodeBlockItem(objc.lookUpClass('ESCodeBlockItem')):
"""Itemizer for code blocks"""
def isTextualizer(self):
return True
def title(self):
return '%s %s' % (u'{…}', self.text().lstrip())
| Simplify the icon finder function. | Simplify the icon finder function.
We statically know our bundle identifier, so we don’t have to find the bundle by runtime class.
| Python | mit | mkhl/haskell.sugar |
from Foundation import objc
from Foundation import NSBundle
from AppKit import NSImage
+ haskellBundleIdentifier = 'org.purl.net.mkhl.haskell'
+
- def iconForName(klass, name):
+ def iconForName(name):
"""Return the NSImage instance representing a `name` item."""
+ bundle = NSBundle.bundleWithIdentifier_(haskellBundleIdentifier)
- imgpath = NSBundle.bundleForClass_(klass).pathForResource_ofType_(name, 'png')
+ imgpath = bundle.pathForResource_ofType_(name, 'png')
img = NSImage.alloc().initWithContentsOfFile_(imgpath)
img.autorelease()
return img
class HaskellModuleItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for modules"""
def isDecorator(self):
return True
def image(self):
- return iconForName(self.class__(), 'module')
+ return iconForName('module')
class HaskellTypeItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for datatypes"""
def isDecorator(self):
return True
def image(self):
- return iconForName(self.class__(), 'type')
+ return iconForName('type')
def isTextualizer(self):
return True
def title(self):
return self.text().lstrip()
class HaskellFunctionItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for functions"""
pass
class HaskellCodeBlockItem(objc.lookUpClass('ESCodeBlockItem')):
"""Itemizer for code blocks"""
def isTextualizer(self):
return True
def title(self):
return '%s %s' % (u'{…}', self.text().lstrip())
| Simplify the icon finder function. | ## Code Before:
from Foundation import objc
from Foundation import NSBundle
from AppKit import NSImage
def iconForName(klass, name):
"""Return the NSImage instance representing a `name` item."""
imgpath = NSBundle.bundleForClass_(klass).pathForResource_ofType_(name, 'png')
img = NSImage.alloc().initWithContentsOfFile_(imgpath)
img.autorelease()
return img
class HaskellModuleItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for modules"""
def isDecorator(self):
return True
def image(self):
return iconForName(self.class__(), 'module')
class HaskellTypeItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for datatypes"""
def isDecorator(self):
return True
def image(self):
return iconForName(self.class__(), 'type')
def isTextualizer(self):
return True
def title(self):
return self.text().lstrip()
class HaskellFunctionItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for functions"""
pass
class HaskellCodeBlockItem(objc.lookUpClass('ESCodeBlockItem')):
"""Itemizer for code blocks"""
def isTextualizer(self):
return True
def title(self):
return '%s %s' % (u'{…}', self.text().lstrip())
## Instruction:
Simplify the icon finder function.
## Code After:
from Foundation import objc
from Foundation import NSBundle
from AppKit import NSImage
haskellBundleIdentifier = 'org.purl.net.mkhl.haskell'
def iconForName(name):
"""Return the NSImage instance representing a `name` item."""
bundle = NSBundle.bundleWithIdentifier_(haskellBundleIdentifier)
imgpath = bundle.pathForResource_ofType_(name, 'png')
img = NSImage.alloc().initWithContentsOfFile_(imgpath)
img.autorelease()
return img
class HaskellModuleItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for modules"""
def isDecorator(self):
return True
def image(self):
return iconForName('module')
class HaskellTypeItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for datatypes"""
def isDecorator(self):
return True
def image(self):
return iconForName('type')
def isTextualizer(self):
return True
def title(self):
return self.text().lstrip()
class HaskellFunctionItem(objc.lookUpClass('ESBaseItem')):
"""Itemizer for functions"""
pass
class HaskellCodeBlockItem(objc.lookUpClass('ESCodeBlockItem')):
"""Itemizer for code blocks"""
def isTextualizer(self):
return True
def title(self):
return '%s %s' % (u'{…}', self.text().lstrip())
| # ... existing code ...
haskellBundleIdentifier = 'org.purl.net.mkhl.haskell'
def iconForName(name):
"""Return the NSImage instance representing a `name` item."""
bundle = NSBundle.bundleWithIdentifier_(haskellBundleIdentifier)
imgpath = bundle.pathForResource_ofType_(name, 'png')
img = NSImage.alloc().initWithContentsOfFile_(imgpath)
# ... modified code ...
def image(self):
return iconForName('module')
...
def image(self):
return iconForName('type')
# ... rest of the code ... |
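Note: an optional refinement that is not part of the commit. Because iconForName() re-reads the PNG on every call, a module-level cache avoids repeated disk access:

_icon_cache = {}

def iconForNameCached(name):
    if name not in _icon_cache:
        _icon_cache[name] = iconForName(name)
    return _icon_cache[name]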
c8b86afc53af25c845c8303111a6e7b17d8c26b4 | ciscripts/check/psqcppconan/check.py | ciscripts/check/psqcppconan/check.py | """Run tests and static analysis checks on a polysquare conan c++ project."""
import argparse
import os
def run(cont, util, shell, argv=None):
"""Run checks on this conan project."""
parser = argparse.ArgumentParser(description="""Run conan checks""")
parser.add_argument("--run-test-binaries",
nargs="*",
type=str,
help="""Files relative to the build dir to run""")
result, remainder = parser.parse_known_args(argv or list())
conan_check_script = "check/conan/check.py"
conan_check = cont.fetch_and_import(conan_check_script)
def _during_test(cont, executor, util, build):
"""Run the specified test binaries with the --tap switch.
We then pipe the output into tap-mocha-reporter.
"""
del build
for binary in result.run_test_binaries or list():
executor(cont,
util.running_output,
os.path.join(os.getcwd(), binary))
util.print_message(binary)
kwargs = {
"kind": "polysquare conan c++",
"during_test": _during_test
}
return conan_check.run(cont,
util,
shell,
argv=remainder,
override_kwargs=kwargs)
| """Run tests and static analysis checks on a polysquare conan c++ project."""
import argparse
import os
def run(cont, util, shell, argv=None):
"""Run checks on this conan project."""
parser = argparse.ArgumentParser(description="""Run conan checks""")
parser.add_argument("--run-test-binaries",
nargs="*",
type=str,
help="""Files relative to the build dir to run""")
result, remainder = parser.parse_known_args(argv or list())
conan_check_script = "check/conan/check.py"
conan_check = cont.fetch_and_import(conan_check_script)
def _during_test(cont, executor, util, build):
"""Run the specified test binaries with the --tap switch.
We then pipe the output into tap-mocha-reporter.
"""
del build
for binary in result.run_test_binaries or list():
if not os.path.exists(binary) and os.path.exists(binary + ".exe"):
binary = binary + ".exe"
executor(cont,
util.running_output,
os.path.join(os.getcwd(), binary))
util.print_message(binary)
kwargs = {
"kind": "polysquare conan c++",
"during_test": _during_test
}
return conan_check.run(cont,
util,
shell,
argv=remainder,
override_kwargs=kwargs)
| Allow the use of .exe | psqcppconan: Allow the use of .exe
| Python | mit | polysquare/polysquare-ci-scripts,polysquare/polysquare-ci-scripts | """Run tests and static analysis checks on a polysquare conan c++ project."""
import argparse
import os
def run(cont, util, shell, argv=None):
"""Run checks on this conan project."""
parser = argparse.ArgumentParser(description="""Run conan checks""")
parser.add_argument("--run-test-binaries",
nargs="*",
type=str,
help="""Files relative to the build dir to run""")
result, remainder = parser.parse_known_args(argv or list())
conan_check_script = "check/conan/check.py"
conan_check = cont.fetch_and_import(conan_check_script)
def _during_test(cont, executor, util, build):
"""Run the specified test binaries with the --tap switch.
We then pipe the output into tap-mocha-reporter.
"""
del build
for binary in result.run_test_binaries or list():
+ if not os.path.exists(binary) and os.path.exists(binary + ".exe"):
+ binary = binary + ".exe"
+
executor(cont,
util.running_output,
os.path.join(os.getcwd(), binary))
util.print_message(binary)
kwargs = {
"kind": "polysquare conan c++",
"during_test": _during_test
}
return conan_check.run(cont,
util,
shell,
argv=remainder,
override_kwargs=kwargs)
| Allow the use of .exe | ## Code Before:
"""Run tests and static analysis checks on a polysquare conan c++ project."""
import argparse
import os
def run(cont, util, shell, argv=None):
"""Run checks on this conan project."""
parser = argparse.ArgumentParser(description="""Run conan checks""")
parser.add_argument("--run-test-binaries",
nargs="*",
type=str,
help="""Files relative to the build dir to run""")
result, remainder = parser.parse_known_args(argv or list())
conan_check_script = "check/conan/check.py"
conan_check = cont.fetch_and_import(conan_check_script)
def _during_test(cont, executor, util, build):
"""Run the specified test binaries with the --tap switch.
We then pipe the output into tap-mocha-reporter.
"""
del build
for binary in result.run_test_binaries or list():
executor(cont,
util.running_output,
os.path.join(os.getcwd(), binary))
util.print_message(binary)
kwargs = {
"kind": "polysquare conan c++",
"during_test": _during_test
}
return conan_check.run(cont,
util,
shell,
argv=remainder,
override_kwargs=kwargs)
## Instruction:
Allow the use of .exe
## Code After:
"""Run tests and static analysis checks on a polysquare conan c++ project."""
import argparse
import os
def run(cont, util, shell, argv=None):
"""Run checks on this conan project."""
parser = argparse.ArgumentParser(description="""Run conan checks""")
parser.add_argument("--run-test-binaries",
nargs="*",
type=str,
help="""Files relative to the build dir to run""")
result, remainder = parser.parse_known_args(argv or list())
conan_check_script = "check/conan/check.py"
conan_check = cont.fetch_and_import(conan_check_script)
def _during_test(cont, executor, util, build):
"""Run the specified test binaries with the --tap switch.
We then pipe the output into tap-mocha-reporter.
"""
del build
for binary in result.run_test_binaries or list():
if not os.path.exists(binary) and os.path.exists(binary + ".exe"):
binary = binary + ".exe"
executor(cont,
util.running_output,
os.path.join(os.getcwd(), binary))
util.print_message(binary)
kwargs = {
"kind": "polysquare conan c++",
"during_test": _during_test
}
return conan_check.run(cont,
util,
shell,
argv=remainder,
override_kwargs=kwargs)
| // ... existing code ...
for binary in result.run_test_binaries or list():
if not os.path.exists(binary) and os.path.exists(binary + ".exe"):
binary = binary + ".exe"
executor(cont,
// ... rest of the code ... |
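Note: a generalised sketch of the fallback added above; the helper name and suffix tuple are illustrative.

import os

def resolve_test_binary(path, suffixes=('.exe',)):
    if os.path.exists(path):
        return path
    for suffix in suffixes:
        if os.path.exists(path + suffix):
            return path + suffix
    return path  # fall through unchanged, as the committed code does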
252cfa3baa7973a923952ecb3c83cdfb9f28ab67 | l10n_br_account/models/fiscal_document.py | l10n_br_account/models/fiscal_document.py |
from odoo import api, models
class FiscalDocument(models.Model):
_inherit = 'l10n_br_fiscal.document'
@api.multi
def unlink(self):
invoices = self.env['account.invoice'].search(
[('fiscal_document_id', 'in', self.ids)])
invoices.unlink()
return super().unlink()
|
from odoo import _, api, models
from odoo.exceptions import UserError
from odoo.addons.l10n_br_fiscal.constants.fiscal import (
SITUACAO_EDOC_EM_DIGITACAO,
)
class FiscalDocument(models.Model):
_inherit = 'l10n_br_fiscal.document'
@api.multi
def unlink(self):
draft_documents = self.filtered(
lambda d: d.state == SITUACAO_EDOC_EM_DIGITACAO)
if draft_documents:
UserError(_("You cannot delete a fiscal document "
"which is not draft state."))
invoices = self.env['account.invoice'].search(
[('fiscal_document_id', 'in', self.ids)])
invoices.unlink()
return super().unlink()
| Allow delete only fiscal documents with draft state | [REF] Allow delete only fiscal documents with draft state
| Python | agpl-3.0 | OCA/l10n-brazil,akretion/l10n-brazil,akretion/l10n-brazil,akretion/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil |
- from odoo import api, models
+ from odoo import _, api, models
+ from odoo.exceptions import UserError
+
+ from odoo.addons.l10n_br_fiscal.constants.fiscal import (
+ SITUACAO_EDOC_EM_DIGITACAO,
+ )
class FiscalDocument(models.Model):
_inherit = 'l10n_br_fiscal.document'
@api.multi
def unlink(self):
+ draft_documents = self.filtered(
+ lambda d: d.state == SITUACAO_EDOC_EM_DIGITACAO)
+
+ if draft_documents:
+ UserError(_("You cannot delete a fiscal document "
+ "which is not draft state."))
+
invoices = self.env['account.invoice'].search(
[('fiscal_document_id', 'in', self.ids)])
invoices.unlink()
return super().unlink()
| Allow delete only fiscal documents with draft state | ## Code Before:
from odoo import api, models
class FiscalDocument(models.Model):
_inherit = 'l10n_br_fiscal.document'
@api.multi
def unlink(self):
invoices = self.env['account.invoice'].search(
[('fiscal_document_id', 'in', self.ids)])
invoices.unlink()
return super().unlink()
## Instruction:
Allow delete only fiscal documents with draft state
## Code After:
from odoo import _, api, models
from odoo.exceptions import UserError
from odoo.addons.l10n_br_fiscal.constants.fiscal import (
SITUACAO_EDOC_EM_DIGITACAO,
)
class FiscalDocument(models.Model):
_inherit = 'l10n_br_fiscal.document'
@api.multi
def unlink(self):
draft_documents = self.filtered(
lambda d: d.state == SITUACAO_EDOC_EM_DIGITACAO)
if draft_documents:
UserError(_("You cannot delete a fiscal document "
"which is not draft state."))
invoices = self.env['account.invoice'].search(
[('fiscal_document_id', 'in', self.ids)])
invoices.unlink()
return super().unlink()
| # ... existing code ...
from odoo import _, api, models
from odoo.exceptions import UserError
from odoo.addons.l10n_br_fiscal.constants.fiscal import (
SITUACAO_EDOC_EM_DIGITACAO,
)
# ... modified code ...
def unlink(self):
draft_documents = self.filtered(
lambda d: d.state == SITUACAO_EDOC_EM_DIGITACAO)
if draft_documents:
UserError(_("You cannot delete a fiscal document "
"which is not draft state."))
invoices = self.env['account.invoice'].search(
# ... rest of the code ... |
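Note: as committed, the guard is inert: UserError(...) is instantiated but never raised, and the filter collects the draft documents rather than the non-draft ones the message describes. A sketch of the guard inside unlink() that would match the stated intent (allow deleting only drafts):

non_draft = self.filtered(
    lambda d: d.state != SITUACAO_EDOC_EM_DIGITACAO)
if non_draft:
    raise UserError(_("You cannot delete a fiscal document "
                      "which is not draft state."))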
fb08c6cfe6b6295a9aca9e579a067f34ee1c69c2 | test/get-gh-comment-info.py | test/get-gh-comment-info.py | import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases
parser.add_argument('--focus', type=str, default="")
parser.add_argument('--kernel_version', type=str, default="")
parser.add_argument('--k8s_version', type=str, default="")
parser.add_argument('--retrieve', type=str, default="focus")
args = parser.parse_args()
print(args.__dict__[args.retrieve])
| import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases
parser.add_argument('--focus', type=str, default="")
parser.add_argument('--kernel_version', type=str, default="")
parser.add_argument('--k8s_version', type=str, default="")
parser.add_argument('--retrieve', type=str, default="focus")
args = parser.parse_args()
# Update kernel_version to expected format
args.kernel_version = args.kernel_version.replace('.', '')
if args.kernel_version == "netnext":
args.kernel_version = "net-next"
print(args.__dict__[args.retrieve])
| Format test-only's kernel_version to avoid mistakes | test: Format test-only's kernel_version to avoid mistakes
I often try to start test-only builds with e.g.:
test-only --kernel_version=4.19 --focus="..."
That fails because our tests expect "419". We can extend the Python
script used to parse arguments to recognize that and update
kernel_version to the expected format.
Signed-off-by: Paul Chaignon <a027184a55211cd23e3f3094f1fdc728df5e0500@cilium.io>
| Python | apache-2.0 | cilium/cilium,tklauser/cilium,tgraf/cilium,tklauser/cilium,michi-covalent/cilium,tklauser/cilium,cilium/cilium,tgraf/cilium,cilium/cilium,michi-covalent/cilium,tgraf/cilium,tgraf/cilium,michi-covalent/cilium,michi-covalent/cilium,tgraf/cilium,cilium/cilium,tklauser/cilium,michi-covalent/cilium,tklauser/cilium,cilium/cilium,tgraf/cilium | import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases
parser.add_argument('--focus', type=str, default="")
parser.add_argument('--kernel_version', type=str, default="")
parser.add_argument('--k8s_version', type=str, default="")
parser.add_argument('--retrieve', type=str, default="focus")
args = parser.parse_args()
+ # Update kernel_version to expected format
+ args.kernel_version = args.kernel_version.replace('.', '')
+ if args.kernel_version == "netnext":
+ args.kernel_version = "net-next"
+
print(args.__dict__[args.retrieve])
| Format test-only's kernel_version to avoid mistakes | ## Code Before:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases
parser.add_argument('--focus', type=str, default="")
parser.add_argument('--kernel_version', type=str, default="")
parser.add_argument('--k8s_version', type=str, default="")
parser.add_argument('--retrieve', type=str, default="focus")
args = parser.parse_args()
print(args.__dict__[args.retrieve])
## Instruction:
Format test-only's kernel_version to avoid mistakes
## Code After:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases
parser.add_argument('--focus', type=str, default="")
parser.add_argument('--kernel_version', type=str, default="")
parser.add_argument('--k8s_version', type=str, default="")
parser.add_argument('--retrieve', type=str, default="focus")
args = parser.parse_args()
# Update kernel_version to expected format
args.kernel_version = args.kernel_version.replace('.', '')
if args.kernel_version == "netnext":
args.kernel_version = "net-next"
print(args.__dict__[args.retrieve])
| ...
# Update kernel_version to expected format
args.kernel_version = args.kernel_version.replace('.', '')
if args.kernel_version == "netnext":
args.kernel_version = "net-next"
print(args.__dict__[args.retrieve])
... |
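Note: a behaviour check for the normalisation above; the sample versions are illustrative.

samples = {
    '4.19': '419',
    '5.4': '54',
    'netnext': 'net-next',
    'net-next': 'net-next',  # already canonical, contains no dots
}
for raw, expected in samples.items():
    kv = raw.replace('.', '')
    if kv == 'netnext':
        kv = 'net-next'
    assert kv == expected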
0d475a69ca53eee62aeb39f35b3d3a8f875d5e71 | tests/menu_test_5.py | tests/menu_test_5.py | """Tests the menu features."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
from qprompt import enum_menu
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(unittest.TestCase):
def test_menu_1(test):
"""Check for main() call from console functionality."""
test.assertFalse(op.exists("generated_file.txt"))
subprocess.call("menu_helper_1.py g q", shell=True)
test.assertTrue(op.exists("generated_file.txt"))
subprocess.call("menu_helper_1.py d q", shell=True)
test.assertFalse(op.exists("generated_file.txt"))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
| """Tests the menu features."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
from qprompt import enum_menu
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(unittest.TestCase):
def test_menu_1(test):
"""Check for main() call from console functionality."""
test.assertFalse(op.exists("generated_file.txt"))
subprocess.call("python ./menu_helper_1.py g q", shell=True)
test.assertTrue(op.exists("generated_file.txt"))
subprocess.call("python ./menu_helper_1.py d q", shell=True)
test.assertFalse(op.exists("generated_file.txt"))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
| Change to get test to pass. | Change to get test to pass.
| Python | mit | jeffrimko/Qprompt | """Tests the menu features."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
from qprompt import enum_menu
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(unittest.TestCase):
def test_menu_1(test):
"""Check for main() call from console functionality."""
test.assertFalse(op.exists("generated_file.txt"))
- subprocess.call("menu_helper_1.py g q", shell=True)
+ subprocess.call("python ./menu_helper_1.py g q", shell=True)
test.assertTrue(op.exists("generated_file.txt"))
- subprocess.call("menu_helper_1.py d q", shell=True)
+ subprocess.call("python ./menu_helper_1.py d q", shell=True)
test.assertFalse(op.exists("generated_file.txt"))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
| Change to get test to pass. | ## Code Before:
"""Tests the menu features."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
from qprompt import enum_menu
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(unittest.TestCase):
def test_menu_1(test):
"""Check for main() call from console functionality."""
test.assertFalse(op.exists("generated_file.txt"))
subprocess.call("menu_helper_1.py g q", shell=True)
test.assertTrue(op.exists("generated_file.txt"))
subprocess.call("menu_helper_1.py d q", shell=True)
test.assertFalse(op.exists("generated_file.txt"))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
## Instruction:
Change to get test to pass.
## Code After:
"""Tests the menu features."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
from qprompt import enum_menu
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(unittest.TestCase):
def test_menu_1(test):
"""Check for main() call from console functionality."""
test.assertFalse(op.exists("generated_file.txt"))
subprocess.call("python ./menu_helper_1.py g q", shell=True)
test.assertTrue(op.exists("generated_file.txt"))
subprocess.call("python ./menu_helper_1.py d q", shell=True)
test.assertFalse(op.exists("generated_file.txt"))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
| // ... existing code ...
test.assertFalse(op.exists("generated_file.txt"))
subprocess.call("python ./menu_helper_1.py g q", shell=True)
test.assertTrue(op.exists("generated_file.txt"))
subprocess.call("python ./menu_helper_1.py d q", shell=True)
test.assertFalse(op.exists("generated_file.txt"))
// ... rest of the code ... |
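Note: a hypothetical variant of the fix; sys.executable pins the interpreter that is running the tests instead of whatever "python" resolves to on PATH, and an argument list avoids shell quoting.

import subprocess
import sys

subprocess.call([sys.executable, './menu_helper_1.py', 'g', 'q'])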
8beaab317d5da25edd093be42f57e35ac12408b8 | feincms3/plugins/html.py | feincms3/plugins/html.py |
from django.db import models
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
from content_editor.admin import ContentEditorInline
__all__ = ("HTML", "HTMLInline", "render_html")
class HTML(models.Model):
"""
Raw HTML plugin
"""
html = models.TextField(
"HTML",
help_text=_(
"The content will be inserted directly into the page."
" It is VERY important that the HTML snippet is well-formed!"
),
)
class Meta:
abstract = True
verbose_name = "HTML"
verbose_name_plural = "HTML"
def __str__(self):
return ""
class HTMLInline(ContentEditorInline):
"""
Just available for consistency, absolutely no difference to a standard
``ContentEditorInline``.
"""
pass
def render_html(plugin, **kwargs):
"""
Return the HTML code as safe string so that it is not escaped. Of course
the contents are not guaranteed to be safe at all
"""
return mark_safe(plugin.html)
|
from django import forms
from django.db import models
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
from content_editor.admin import ContentEditorInline
__all__ = ("HTML", "HTMLInline", "render_html")
class HTML(models.Model):
"""
Raw HTML plugin
"""
html = models.TextField(
"HTML",
help_text=_(
"The content will be inserted directly into the page."
" It is VERY important that the HTML snippet is well-formed!"
),
)
class Meta:
abstract = True
verbose_name = "HTML"
verbose_name_plural = "HTML"
def __str__(self):
return ""
class HTMLInline(ContentEditorInline):
"""
Just available for consistency, absolutely no difference to a standard
``ContentEditorInline``.
"""
formfield_overrides = {
models.TextField: {
"widget": forms.Textarea(
attrs={"rows": 3, "cols": 40, "class": "vLargeTextField"}
)
}
}
def render_html(plugin, **kwargs):
"""
Return the HTML code as safe string so that it is not escaped. Of course
the contents are not guaranteed to be safe at all
"""
return mark_safe(plugin.html)
| Make the default HTML textarea smaller | Make the default HTML textarea smaller
| Python | bsd-3-clause | matthiask/feincms3,matthiask/feincms3,matthiask/feincms3 |
+ from django import forms
from django.db import models
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
from content_editor.admin import ContentEditorInline
__all__ = ("HTML", "HTMLInline", "render_html")
class HTML(models.Model):
"""
Raw HTML plugin
"""
html = models.TextField(
"HTML",
help_text=_(
"The content will be inserted directly into the page."
" It is VERY important that the HTML snippet is well-formed!"
),
)
class Meta:
abstract = True
verbose_name = "HTML"
verbose_name_plural = "HTML"
def __str__(self):
return ""
class HTMLInline(ContentEditorInline):
"""
Just available for consistency, absolutely no difference to a standard
``ContentEditorInline``.
"""
- pass
+ formfield_overrides = {
+ models.TextField: {
+ "widget": forms.Textarea(
+ attrs={"rows": 3, "cols": 40, "class": "vLargeTextField"}
+ )
+ }
+ }
def render_html(plugin, **kwargs):
"""
Return the HTML code as safe string so that it is not escaped. Of course
the contents are not guaranteed to be safe at all
"""
return mark_safe(plugin.html)
| Make the default HTML textarea smaller | ## Code Before:
from django.db import models
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
from content_editor.admin import ContentEditorInline
__all__ = ("HTML", "HTMLInline", "render_html")
class HTML(models.Model):
"""
Raw HTML plugin
"""
html = models.TextField(
"HTML",
help_text=_(
"The content will be inserted directly into the page."
" It is VERY important that the HTML snippet is well-formed!"
),
)
class Meta:
abstract = True
verbose_name = "HTML"
verbose_name_plural = "HTML"
def __str__(self):
return ""
class HTMLInline(ContentEditorInline):
"""
Just available for consistency, absolutely no difference to a standard
``ContentEditorInline``.
"""
pass
def render_html(plugin, **kwargs):
"""
Return the HTML code as safe string so that it is not escaped. Of course
the contents are not guaranteed to be safe at all
"""
return mark_safe(plugin.html)
## Instruction:
Make the default HTML textarea smaller
## Code After:
from django import forms
from django.db import models
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
from content_editor.admin import ContentEditorInline
__all__ = ("HTML", "HTMLInline", "render_html")
class HTML(models.Model):
"""
Raw HTML plugin
"""
html = models.TextField(
"HTML",
help_text=_(
"The content will be inserted directly into the page."
" It is VERY important that the HTML snippet is well-formed!"
),
)
class Meta:
abstract = True
verbose_name = "HTML"
verbose_name_plural = "HTML"
def __str__(self):
return ""
class HTMLInline(ContentEditorInline):
"""
Just available for consistency, absolutely no difference to a standard
``ContentEditorInline``.
"""
formfield_overrides = {
models.TextField: {
"widget": forms.Textarea(
attrs={"rows": 3, "cols": 40, "class": "vLargeTextField"}
)
}
}
def render_html(plugin, **kwargs):
"""
Return the HTML code as safe string so that it is not escaped. Of course
the contents are not guaranteed to be safe at all
"""
return mark_safe(plugin.html)
| ...
from django import forms
from django.db import models
...
formfield_overrides = {
models.TextField: {
"widget": forms.Textarea(
attrs={"rows": 3, "cols": 40, "class": "vLargeTextField"}
)
}
}
... |
d2051073d48873408a711b56676ee099e5ff685a | sunpy/timeseries/__init__.py | sunpy/timeseries/__init__.py | from __future__ import absolute_import
from sunpy.timeseries.metadata import TimeSeriesMetaData
from sunpy.timeseries.timeseries_factory import TimeSeries
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.timeseries.sources.eve import EVESpWxTimeSeries
from sunpy.timeseries.sources.goes import XRSTimeSeries
from sunpy.timeseries.sources.noaa import NOAAIndicesTimeSeries, NOAAPredictIndicesTimeSeries
from sunpy.timeseries.sources.lyra import LYRATimeSeries
from sunpy.timeseries.sources.norh import NoRHTimeSeries
from sunpy.timeseries.sources.rhessi import RHESSISummaryTimeSeries
from sunpy.timeseries.sources.fermi_gbm import GBMSummaryTimeSeries
| from __future__ import absolute_import
from sunpy.timeseries.metadata import TimeSeriesMetaData
from sunpy.timeseries.timeseries_factory import TimeSeries
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.timeseries.sources.eve import EVESpWxTimeSeries
from sunpy.timeseries.sources.goes import XRSTimeSeries
from sunpy.timeseries.sources.noaa import NOAAIndicesTimeSeries, NOAAPredictIndicesTimeSeries
from sunpy.timeseries.sources.lyra import LYRATimeSeries
from sunpy.timeseries.sources.norh import NoRHTimeSeries
from sunpy.timeseries.sources.rhessi import RHESSISummaryTimeSeries
from sunpy.timeseries.sources.fermi_gbm import GBMSummaryTimeSeries
# register pandas datetime converter with matplotlib
# This is to work around the change in pandas-dev/pandas#17710
import pandas.plotting._converter
pandas.plotting._converter.register()
| Fix matplotlib / pandas 0.21 bug in examples | Fix matplotlib / pandas 0.21 bug in examples
Here we manually register the pandas matplotlib converters so that
manual plotting with pandas keeps working under pandas 0.21
| Python | bsd-2-clause | dpshelio/sunpy,dpshelio/sunpy,dpshelio/sunpy | from __future__ import absolute_import
from sunpy.timeseries.metadata import TimeSeriesMetaData
from sunpy.timeseries.timeseries_factory import TimeSeries
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.timeseries.sources.eve import EVESpWxTimeSeries
from sunpy.timeseries.sources.goes import XRSTimeSeries
from sunpy.timeseries.sources.noaa import NOAAIndicesTimeSeries, NOAAPredictIndicesTimeSeries
from sunpy.timeseries.sources.lyra import LYRATimeSeries
from sunpy.timeseries.sources.norh import NoRHTimeSeries
from sunpy.timeseries.sources.rhessi import RHESSISummaryTimeSeries
from sunpy.timeseries.sources.fermi_gbm import GBMSummaryTimeSeries
+ # register pandas datetime converter with matplotlib
+ # This is to work around the change in pandas-dev/pandas#17710
+ import pandas.plotting._converter
+ pandas.plotting._converter.register()
+ | Fix matplotlib / pandas 0.21 bug in examples | ## Code Before:
from __future__ import absolute_import
from sunpy.timeseries.metadata import TimeSeriesMetaData
from sunpy.timeseries.timeseries_factory import TimeSeries
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.timeseries.sources.eve import EVESpWxTimeSeries
from sunpy.timeseries.sources.goes import XRSTimeSeries
from sunpy.timeseries.sources.noaa import NOAAIndicesTimeSeries, NOAAPredictIndicesTimeSeries
from sunpy.timeseries.sources.lyra import LYRATimeSeries
from sunpy.timeseries.sources.norh import NoRHTimeSeries
from sunpy.timeseries.sources.rhessi import RHESSISummaryTimeSeries
from sunpy.timeseries.sources.fermi_gbm import GBMSummaryTimeSeries
## Instruction:
Fix matplotlib / pandas 0.21 bug in examples
## Code After:
from __future__ import absolute_import
from sunpy.timeseries.metadata import TimeSeriesMetaData
from sunpy.timeseries.timeseries_factory import TimeSeries
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.timeseries.sources.eve import EVESpWxTimeSeries
from sunpy.timeseries.sources.goes import XRSTimeSeries
from sunpy.timeseries.sources.noaa import NOAAIndicesTimeSeries, NOAAPredictIndicesTimeSeries
from sunpy.timeseries.sources.lyra import LYRATimeSeries
from sunpy.timeseries.sources.norh import NoRHTimeSeries
from sunpy.timeseries.sources.rhessi import RHESSISummaryTimeSeries
from sunpy.timeseries.sources.fermi_gbm import GBMSummaryTimeSeries
# register pandas datetime converter with matplotlib
# This is to work around the change in pandas-dev/pandas#17710
import pandas.plotting._converter
pandas.plotting._converter.register()
| # ... existing code ...
from sunpy.timeseries.sources.fermi_gbm import GBMSummaryTimeSeries
# register pandas datetime converter with matplotlib
# This is to work around the change in pandas-dev/pandas#17710
import pandas.plotting._converter
pandas.plotting._converter.register()
# ... rest of the code ... |
7d3ffe4582a5b4032f9a59a3ea8edfded57a7a1f | src/nodeconductor_openstack/openstack/migrations/0031_tenant_backup_storage.py | src/nodeconductor_openstack/openstack/migrations/0031_tenant_backup_storage.py | from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import migrations
from nodeconductor.quotas import models as quotas_models
from .. import models
def cleanup_tenant_quotas(apps, schema_editor):
for obj in models.Tenant.objects.all():
quotas_names = models.Tenant.QUOTAS_NAMES + [f.name for f in models.Tenant.get_quotas_fields()]
obj.quotas.exclude(name__in=quotas_names).delete()
class Migration(migrations.Migration):
dependencies = [
('openstack', '0030_subnet_dns_nameservers'),
]
operations = [
migrations.RunPython(cleanup_tenant_quotas),
]
| from __future__ import unicode_literals
from django.db import migrations
from .. import models
def cleanup_tenant_quotas(apps, schema_editor):
quota_names = models.Tenant.get_quotas_names()
for obj in models.Tenant.objects.all():
obj.quotas.exclude(name__in=quota_names).delete()
class Migration(migrations.Migration):
dependencies = [
('openstack', '0030_subnet_dns_nameservers'),
]
operations = [
migrations.RunPython(cleanup_tenant_quotas),
]
| Clean up quota cleanup migration | Clean up quota cleanup migration [WAL-433]
| Python | mit | opennode/nodeconductor-openstack | from __future__ import unicode_literals
- from django.contrib.contenttypes.models import ContentType
from django.db import migrations
-
- from nodeconductor.quotas import models as quotas_models
from .. import models
def cleanup_tenant_quotas(apps, schema_editor):
+ quota_names = models.Tenant.get_quotas_names()
for obj in models.Tenant.objects.all():
- quotas_names = models.Tenant.QUOTAS_NAMES + [f.name for f in models.Tenant.get_quotas_fields()]
- obj.quotas.exclude(name__in=quotas_names).delete()
+ obj.quotas.exclude(name__in=quota_names).delete()
class Migration(migrations.Migration):
dependencies = [
('openstack', '0030_subnet_dns_nameservers'),
]
operations = [
migrations.RunPython(cleanup_tenant_quotas),
]
| Clean up quota cleanup migration | ## Code Before:
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import migrations
from nodeconductor.quotas import models as quotas_models
from .. import models
def cleanup_tenant_quotas(apps, schema_editor):
for obj in models.Tenant.objects.all():
quotas_names = models.Tenant.QUOTAS_NAMES + [f.name for f in models.Tenant.get_quotas_fields()]
obj.quotas.exclude(name__in=quotas_names).delete()
class Migration(migrations.Migration):
dependencies = [
('openstack', '0030_subnet_dns_nameservers'),
]
operations = [
migrations.RunPython(cleanup_tenant_quotas),
]
## Instruction:
Clean up quota cleanup migration
## Code After:
from __future__ import unicode_literals
from django.db import migrations
from .. import models
def cleanup_tenant_quotas(apps, schema_editor):
quota_names = models.Tenant.get_quotas_names()
for obj in models.Tenant.objects.all():
obj.quotas.exclude(name__in=quota_names).delete()
class Migration(migrations.Migration):
dependencies = [
('openstack', '0030_subnet_dns_nameservers'),
]
operations = [
migrations.RunPython(cleanup_tenant_quotas),
]
| // ... existing code ...
from django.db import migrations
// ... modified code ...
def cleanup_tenant_quotas(apps, schema_editor):
quota_names = models.Tenant.get_quotas_names()
for obj in models.Tenant.objects.all():
obj.quotas.exclude(name__in=quota_names).delete()
// ... rest of the code ... |
6a8f5bcc6dd42e125f7219d7d692c3af610c38c3 | masters/master.client.polymer/polymer_repos.py | masters/master.client.polymer/polymer_repos.py |
REPOS = (
'polymer',
'platform',
'CustomElements',
'mdv',
'PointerGestures',
'ShadowDOM',
'HTMLImports',
)
|
REPOS = (
'polymer',
'platform',
'CustomElements',
'mdv',
'PointerGestures',
'PointerEvents',
'ShadowDOM',
'HTMLImports',
)
| Add PointerEvents repo to master.client.polymer. | Add PointerEvents repo to master.client.polymer.
R=hinoka@google.com
BUG=chromium:237914
Review URL: https://codereview.chromium.org/15783003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@201643 0039d316-1c4b-4281-b951-d872f2087c98
| Python | bsd-3-clause | eunchong/build,eunchong/build,eunchong/build,eunchong/build |
REPOS = (
'polymer',
'platform',
'CustomElements',
'mdv',
'PointerGestures',
+ 'PointerEvents',
'ShadowDOM',
'HTMLImports',
)
| Add PointerEvents repo to master.client.polymer. | ## Code Before:
REPOS = (
'polymer',
'platform',
'CustomElements',
'mdv',
'PointerGestures',
'ShadowDOM',
'HTMLImports',
)
## Instruction:
Add PointerEvents repo to master.client.polymer.
## Code After:
REPOS = (
'polymer',
'platform',
'CustomElements',
'mdv',
'PointerGestures',
'PointerEvents',
'ShadowDOM',
'HTMLImports',
)
| ...
'PointerGestures',
'PointerEvents',
'ShadowDOM',
... |
b0236a2cb936df9571139f074b35c178e2573593 | dadi/__init__.py | dadi/__init__.py | import numpy
# This gives a nicer printout for masked arrays.
numpy.ma.default_real_fill_value = numpy.nan
import Integration
import PhiManip
import Numerics
import SFS
import ms
try:
import Plotting
except ImportError:
pass
try:
import os
__DIRECTORY__ = os.path.dirname(Integration.__file__)
__svn_file__ = os.path.join(__DIRECTORY__, 'svnversion')
__SVNVERSION__ = file(__svn_file__).read().strip()
except:
__SVNVERSION__ = 'Unknown'
| import Integration
import PhiManip
import Numerics
import SFS
import ms
try:
import Plotting
except ImportError:
pass
try:
import os
__DIRECTORY__ = os.path.dirname(Integration.__file__)
__svn_file__ = os.path.join(__DIRECTORY__, 'svnversion')
__SVNVERSION__ = file(__svn_file__).read().strip()
except:
__SVNVERSION__ = 'Unknown'
| Remove extraneous setting of masked fill value. | Remove extraneous setting of masked fill value.
git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@115 979d6bd5-6d4d-0410-bece-f567c23bd345
| Python | bsd-3-clause | RyanGutenkunst/dadi,niuhuifei/dadi,cheese1213/dadi,yangjl/dadi,yangjl/dadi,ChenHsiang/dadi,paulirish/dadi,beni55/dadi,ChenHsiang/dadi,beni55/dadi,RyanGutenkunst/dadi,paulirish/dadi,cheese1213/dadi,niuhuifei/dadi | - import numpy
- # This gives a nicer printout for masked arrays.
- numpy.ma.default_real_fill_value = numpy.nan
-
import Integration
import PhiManip
import Numerics
import SFS
import ms
try:
import Plotting
except ImportError:
pass
try:
import os
__DIRECTORY__ = os.path.dirname(Integration.__file__)
__svn_file__ = os.path.join(__DIRECTORY__, 'svnversion')
__SVNVERSION__ = file(__svn_file__).read().strip()
except:
__SVNVERSION__ = 'Unknown'
| Remove extraneous setting of masked fill value. | ## Code Before:
import numpy
# This gives a nicer printout for masked arrays.
numpy.ma.default_real_fill_value = numpy.nan
import Integration
import PhiManip
import Numerics
import SFS
import ms
try:
import Plotting
except ImportError:
pass
try:
import os
__DIRECTORY__ = os.path.dirname(Integration.__file__)
__svn_file__ = os.path.join(__DIRECTORY__, 'svnversion')
__SVNVERSION__ = file(__svn_file__).read().strip()
except:
__SVNVERSION__ = 'Unknown'
## Instruction:
Remove extraneous setting of masked fill value.
## Code After:
import Integration
import PhiManip
import Numerics
import SFS
import ms
try:
import Plotting
except ImportError:
pass
try:
import os
__DIRECTORY__ = os.path.dirname(Integration.__file__)
__svn_file__ = os.path.join(__DIRECTORY__, 'svnversion')
__SVNVERSION__ = file(__svn_file__).read().strip()
except:
__SVNVERSION__ = 'Unknown'
| // ... existing code ...
import Integration
// ... rest of the code ... |
dc786699618e6ebc1206080d9c0fdb697d519668 | pydy/viz/server.py | pydy/viz/server.py |
import os
import webbrowser
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
__all__ = ['run_server']
def run_server(port=8000,scene_file="Null"):
#change dir to static first.
os.chdir("static/")
HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
server_address = ('127.0.0.1', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
print("hit ctrl+c to stop the server..")
print("To view visualization, open:\n")
url = "http://localhost:"+ str(sa[1]) + "/index.html?load=" + scene_file
print(url)
webbrowser.open(url)
httpd.serve_forever()
if __name__ == "__main__":
run_server()
|
import os
import sys
import webbrowser
if sys.version_info < (3, 0):
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
else:
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
__all__ = ['run_server']
def run_server(port=8000,scene_file="Null"):
#change dir to static first.
os.chdir("static/")
HandlerClass = SimpleHTTPRequestHandler
ServerClass = HTTPServer
Protocol = "HTTP/1.0"
server_address = ('127.0.0.1', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
print("hit ctrl+c to stop the server..")
print("To view visualization, open:\n")
url = "http://localhost:"+ str(sa[1]) + "/index.html?load=" + scene_file
print(url)
webbrowser.open(url)
httpd.serve_forever()
if __name__ == "__main__":
run_server()
| Fix HTTPServer imports with Python 3 | Fix HTTPServer imports with Python 3
| Python | bsd-3-clause | Shekharrajak/pydy,Shekharrajak/pydy,skidzo/pydy,skidzo/pydy,oliverlee/pydy,Shekharrajak/pydy,oliverlee/pydy,skidzo/pydy,skidzo/pydy,Shekharrajak/pydy,oliverlee/pydy |
import os
+ import sys
import webbrowser
- import BaseHTTPServer
+ if sys.version_info < (3, 0):
- from SimpleHTTPServer import SimpleHTTPRequestHandler
+ from SimpleHTTPServer import SimpleHTTPRequestHandler
+ from BaseHTTPServer import HTTPServer
+ else:
+ from http.server import SimpleHTTPRequestHandler
+ from http.server import HTTPServer
+
__all__ = ['run_server']
def run_server(port=8000,scene_file="Null"):
#change dir to static first.
os.chdir("static/")
HandlerClass = SimpleHTTPRequestHandler
- ServerClass = BaseHTTPServer.HTTPServer
+ ServerClass = HTTPServer
Protocol = "HTTP/1.0"
server_address = ('127.0.0.1', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
print("hit ctrl+c to stop the server..")
print("To view visualization, open:\n")
url = "http://localhost:"+ str(sa[1]) + "/index.html?load=" + scene_file
print(url)
webbrowser.open(url)
httpd.serve_forever()
if __name__ == "__main__":
run_server()
| Fix HTTPServer imports with Python 3 | ## Code Before:
import os
import webbrowser
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
__all__ = ['run_server']
def run_server(port=8000,scene_file="Null"):
#change dir to static first.
os.chdir("static/")
HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
server_address = ('127.0.0.1', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
print("hit ctrl+c to stop the server..")
print("To view visualization, open:\n")
url = "http://localhost:"+ str(sa[1]) + "/index.html?load=" + scene_file
print(url)
webbrowser.open(url)
httpd.serve_forever()
if __name__ == "__main__":
run_server()
## Instruction:
Fix HTTPServer imports with Python 3
## Code After:
import os
import sys
import webbrowser
if sys.version_info < (3, 0):
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
else:
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
__all__ = ['run_server']
def run_server(port=8000,scene_file="Null"):
#change dir to static first.
os.chdir("static/")
HandlerClass = SimpleHTTPRequestHandler
ServerClass = HTTPServer
Protocol = "HTTP/1.0"
server_address = ('127.0.0.1', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
print("hit ctrl+c to stop the server..")
print("To view visualization, open:\n")
url = "http://localhost:"+ str(sa[1]) + "/index.html?load=" + scene_file
print(url)
webbrowser.open(url)
httpd.serve_forever()
if __name__ == "__main__":
run_server()
| // ... existing code ...
import os
import sys
import webbrowser
if sys.version_info < (3, 0):
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
else:
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
// ... modified code ...
HandlerClass = SimpleHTTPRequestHandler
ServerClass = HTTPServer
Protocol = "HTTP/1.0"
// ... rest of the code ... |
0daae44acaefcc40b749166a1ee4ab8fe6ace368 | fix_virtualenv.py | fix_virtualenv.py | from __future__ import unicode_literals, print_function
import os
import argparse
import sys
import shutil
def main():
ap = argparse.ArgumentParser()
ap.add_argument("virtualenv", help="The path to the virtual environment.")
args = ap.parse_args()
target = "{}/include/python{}.{}".format(args.virtualenv, sys.version_info.major, sys.version_info.minor)
try:
source = os.readlink(target)
except:
print(target, "is not a symlink. Perhaps this script has already been run.")
sys.exit(1)
tmp = target + ".tmp"
if os.path.exists(tmp):
shutil.rmtree(tmp)
os.mkdir(tmp)
for i in os.listdir(source):
if i == "pygame_sdl2":
continue
os.symlink(os.path.join(source, i), os.path.join(tmp, i))
os.unlink(target)
os.rename(tmp, target)
if __name__ == "__main__":
main()
| from __future__ import unicode_literals, print_function
import os
import argparse
import sys
import shutil
import sysconfig
def main():
target = os.path.dirname(sysconfig.get_config_h_filename())
try:
source = os.readlink(target)
except:
print(target, "is not a symlink. Perhaps this script has already been run.")
sys.exit(1)
tmp = target + ".tmp"
if os.path.exists(tmp):
shutil.rmtree(tmp)
os.mkdir(tmp)
for i in os.listdir(source):
if i == "pygame_sdl2":
continue
os.symlink(os.path.join(source, i), os.path.join(tmp, i))
os.unlink(target)
os.rename(tmp, target)
if __name__ == "__main__":
main()
| Use sysconfig to find the include directory. | Use sysconfig to find the include directory.
| Python | lgpl-2.1 | renpy/pygame_sdl2,renpy/pygame_sdl2,renpy/pygame_sdl2,renpy/pygame_sdl2 | from __future__ import unicode_literals, print_function
import os
import argparse
import sys
import shutil
+ import sysconfig
def main():
+ target = os.path.dirname(sysconfig.get_config_h_filename())
- ap = argparse.ArgumentParser()
- ap.add_argument("virtualenv", help="The path to the virtual environment.")
- args = ap.parse_args()
-
- target = "{}/include/python{}.{}".format(args.virtualenv, sys.version_info.major, sys.version_info.minor)
-
try:
source = os.readlink(target)
except:
print(target, "is not a symlink. Perhaps this script has already been run.")
sys.exit(1)
tmp = target + ".tmp"
if os.path.exists(tmp):
shutil.rmtree(tmp)
os.mkdir(tmp)
for i in os.listdir(source):
if i == "pygame_sdl2":
continue
os.symlink(os.path.join(source, i), os.path.join(tmp, i))
os.unlink(target)
os.rename(tmp, target)
if __name__ == "__main__":
main()
| Use sysconfig to find the include directory. | ## Code Before:
from __future__ import unicode_literals, print_function
import os
import argparse
import sys
import shutil
def main():
ap = argparse.ArgumentParser()
ap.add_argument("virtualenv", help="The path to the virtual environment.")
args = ap.parse_args()
target = "{}/include/python{}.{}".format(args.virtualenv, sys.version_info.major, sys.version_info.minor)
try:
source = os.readlink(target)
except:
print(target, "is not a symlink. Perhaps this script has already been run.")
sys.exit(1)
tmp = target + ".tmp"
if os.path.exists(tmp):
shutil.rmtree(tmp)
os.mkdir(tmp)
for i in os.listdir(source):
if i == "pygame_sdl2":
continue
os.symlink(os.path.join(source, i), os.path.join(tmp, i))
os.unlink(target)
os.rename(tmp, target)
if __name__ == "__main__":
main()
## Instruction:
Use sysconfig to find the include directory.
## Code After:
from __future__ import unicode_literals, print_function
import os
import argparse
import sys
import shutil
import sysconfig
def main():
target = os.path.dirname(sysconfig.get_config_h_filename())
try:
source = os.readlink(target)
except:
print(target, "is not a symlink. Perhaps this script has already been run.")
sys.exit(1)
tmp = target + ".tmp"
if os.path.exists(tmp):
shutil.rmtree(tmp)
os.mkdir(tmp)
for i in os.listdir(source):
if i == "pygame_sdl2":
continue
os.symlink(os.path.join(source, i), os.path.join(tmp, i))
os.unlink(target)
os.rename(tmp, target)
if __name__ == "__main__":
main()
| // ... existing code ...
import shutil
import sysconfig
// ... modified code ...
def main():
target = os.path.dirname(sysconfig.get_config_h_filename())
// ... rest of the code ... |
4688d48ceeb365174353ab710d03c39dda10a115 | tssim/__init__.py | tssim/__init__.py |
__author__ = """Franz Woellert"""
__email__ = 'franz.woellert@gmail.com'
__version__ = '0.1.0'
|
__author__ = """Franz Woellert"""
__email__ = 'franz.woellert@gmail.com'
__version__ = '0.1.0'
from tssim.core.series import TimeSeries
from tssim.core.function import TimeFunction
from tssim.core.track import TimeTrack
from tssim.functions import random
 | Adjust module and class references to be accessible from package top level. | Adjust module and class references to be accessible from package top level.
| Python | mit | mansenfranzen/tssim |
__author__ = """Franz Woellert"""
__email__ = 'franz.woellert@gmail.com'
__version__ = '0.1.0'
+
+ from tssim.core.series import TimeSeries
+ from tssim.core.function import TimeFunction
+ from tssim.core.track import TimeTrack
+ from tssim.functions import random
+ | Adjust module and class references to be accessible from package top level. | ## Code Before:
__author__ = """Franz Woellert"""
__email__ = 'franz.woellert@gmail.com'
__version__ = '0.1.0'
## Instruction:
Adjust module and class references to be accessible from package top level.
## Code After:
__author__ = """Franz Woellert"""
__email__ = 'franz.woellert@gmail.com'
__version__ = '0.1.0'
from tssim.core.series import TimeSeries
from tssim.core.function import TimeFunction
from tssim.core.track import TimeTrack
from tssim.functions import random
| # ... existing code ...
__version__ = '0.1.0'
from tssim.core.series import TimeSeries
from tssim.core.function import TimeFunction
from tssim.core.track import TimeTrack
from tssim.functions import random
# ... rest of the code ... |
722c3dad6d0a0cc34955ab4a5cfafb90a7cf0e64 | scaffold/twork_app/twork_app/web/action/not_found.py | scaffold/twork_app/twork_app/web/action/not_found.py |
'''NotFoundHandler
'''
from tornado.web import HTTPError
from twork_app.web.action.base import BaseHandler
class NotFoundHandler(BaseHandler):
'''NotFoundHandler, RESTFUL SUPPORTED.
'''
ST_ITEM = 'NOT_FOUND'
def post(self, *args, **kwargs):
raise HTTPError(404)
def delete(self, *args, **kwargs):
raise HTTPError(404)
def get(self, *args, **kwargs):
raise HTTPError(404)
|
'''NotFoundHandler
'''
from tornado.web import HTTPError
from twork_app.web.action.base import BaseHandler
class NotFoundHandler(BaseHandler):
'''NotFoundHandler, RESTFUL SUPPORTED.
'''
ST_ITEM = 'NOT_FOUND'
def post(self, *args, **kwargs):
raise HTTPError(404)
def put(self, *args, **kwargs):
raise HTTPError(404)
def delete(self, *args, **kwargs):
raise HTTPError(404)
def get(self, *args, **kwargs):
raise HTTPError(404)
| Rewrite put method for not found handler | Rewrite put method for not found handler
| Python | apache-2.0 | bufferx/twork,bufferx/twork |
'''NotFoundHandler
'''
from tornado.web import HTTPError
from twork_app.web.action.base import BaseHandler
class NotFoundHandler(BaseHandler):
'''NotFoundHandler, RESTFUL SUPPORTED.
'''
ST_ITEM = 'NOT_FOUND'
def post(self, *args, **kwargs):
raise HTTPError(404)
+ def put(self, *args, **kwargs):
+ raise HTTPError(404)
+
def delete(self, *args, **kwargs):
raise HTTPError(404)
def get(self, *args, **kwargs):
raise HTTPError(404)
| Rewrite put method for not found handler | ## Code Before:
'''NotFoundHandler
'''
from tornado.web import HTTPError
from twork_app.web.action.base import BaseHandler
class NotFoundHandler(BaseHandler):
'''NotFoundHandler, RESTFUL SUPPORTED.
'''
ST_ITEM = 'NOT_FOUND'
def post(self, *args, **kwargs):
raise HTTPError(404)
def delete(self, *args, **kwargs):
raise HTTPError(404)
def get(self, *args, **kwargs):
raise HTTPError(404)
## Instruction:
Rewrite put method for not found handler
## Code After:
'''NotFoundHandler
'''
from tornado.web import HTTPError
from twork_app.web.action.base import BaseHandler
class NotFoundHandler(BaseHandler):
'''NotFoundHandler, RESTFUL SUPPORTED.
'''
ST_ITEM = 'NOT_FOUND'
def post(self, *args, **kwargs):
raise HTTPError(404)
def put(self, *args, **kwargs):
raise HTTPError(404)
def delete(self, *args, **kwargs):
raise HTTPError(404)
def get(self, *args, **kwargs):
raise HTTPError(404)
| # ... existing code ...
def put(self, *args, **kwargs):
raise HTTPError(404)
def delete(self, *args, **kwargs):
# ... rest of the code ... |