This is required over access_stream_* in certain cases where
we need the stream data only to prepare a response that the user can access
and not send it out to unauthorized recipients. | def get_stream_by_narrow_operand_access_unchecked(operand: Union[str, int], realm: Realm) -> Stream:
"""This is required over access_stream_* in certain cases where
we need the stream data only to prepare a response that the user can access
and not send it out to unauthorized recipients.
"""
if isinstance(operand, str):
return get_stream(operand, realm)
return get_stream_by_id_in_realm(operand, realm) |
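A minimal usage sketch of the narrow-operand lookup above; `realm` is assumed to be a Realm object the caller has already resolved, and the stream name and ID are illustrative values only.
# Hypothetical call sites: a string operand is treated as a stream name,
# an integer operand as a stream ID (both scoped to the given realm).
stream_by_name = get_stream_by_narrow_operand_access_unchecked("design", realm)
stream_by_id = get_stream_by_narrow_operand_access_unchecked(42, realm)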
Get streams with subscribers | def get_occupied_streams(realm: Realm) -> QuerySet[Stream]:
"""Get streams with subscribers"""
exists_expression = Exists(
Subscription.objects.filter(
active=True,
is_user_active=True,
user_profile__realm=realm,
recipient_id=OuterRef("recipient_id"),
),
)
occupied_streams = (
Stream.objects.filter(realm=realm, deactivated=False)
.annotate(occupied=exists_expression)
.filter(occupied=True)
)
return occupied_streams |
Fetch which stream colors have already been used for each user in
user_ids. Uses an optimized query designed to support picking
colors when bulk-adding users to streams, which requires
inspecting all Subscription objects for the users, which can often
end up being all Subscription objects in the realm. | def get_used_colors_for_user_ids(user_ids: List[int]) -> Dict[int, Set[str]]:
"""Fetch which stream colors have already been used for each user in
user_ids. Uses an optimized query designed to support picking
colors when bulk-adding users to streams, which requires
inspecting all Subscription objects for the users, which can often
end up being all Subscription objects in the realm.
"""
query = (
Subscription.objects.filter(
user_profile_id__in=user_ids,
recipient__type=Recipient.STREAM,
)
.values("user_profile_id", "color")
.distinct()
)
result: Dict[int, Set[str]] = defaultdict(set)
for row in query:
assert row["color"] is not None
result[row["user_profile_id"]].add(row["color"])
return result |
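A sketch of the bulk color-picking use described in the docstring; the palette constant and user objects here are hypothetical placeholders, not necessarily the names Zulip itself uses.
STREAM_ASSIGNMENT_COLORS = ["#76ce90", "#fae589", "#a6c7e5"]  # hypothetical palette

used_colors_by_user = get_used_colors_for_user_ids([alice.id, bob.id])
for user_id in [alice.id, bob.id]:
    used = used_colors_by_user.get(user_id, set())
    available = [color for color in STREAM_ASSIGNMENT_COLORS if color not in used]
    # Reuse a color once the palette is exhausted; the real picking logic may differ.
    color = available[0] if available else STREAM_ASSIGNMENT_COLORS[0]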
Glossary:
subscribed_ids:
This shows the users who are actually subscribed to the
stream, which we generally send to the person subscribing
to the stream.
private_peer_dict:
These are the folks that need to know about a new subscriber.
It's usually a superset of the subscribers.
Note that we only compute this for PRIVATE streams. We
let other code handle peers for public streams, since the
peers for all public streams are actually the same group
of users, and downstream code can use that property of
public streams to avoid extra work. | def bulk_get_subscriber_peer_info(
realm: Realm,
streams: Collection[Stream] | QuerySet[Stream],
) -> SubscriberPeerInfo:
"""
Glossary:
subscribed_ids:
This shows the users who are actually subscribed to the
stream, which we generally send to the person subscribing
to the stream.
private_peer_dict:
These are the folks that need to know about a new subscriber.
It's usually a superset of the subscribers.
Note that we only compute this for PRIVATE streams. We
let other code handle peers for public streams, since the
peers for all public streams are actually the same group
of users, and downstream code can use that property of
public streams to avoid extra work.
"""
subscribed_ids = {}
private_peer_dict = {}
private_stream_ids = {stream.id for stream in streams if stream.invite_only}
public_stream_ids = {stream.id for stream in streams if not stream.invite_only}
stream_user_ids = get_user_ids_for_streams(private_stream_ids | public_stream_ids)
if private_stream_ids:
realm_admin_ids = {user.id for user in realm.get_admin_users_and_bots()}
for stream_id in private_stream_ids:
# Realm admins can see all private stream
# subscribers.
subscribed_user_ids = stream_user_ids.get(stream_id, set())
subscribed_ids[stream_id] = subscribed_user_ids
private_peer_dict[stream_id] = subscribed_user_ids | realm_admin_ids
for stream_id in public_stream_ids:
subscribed_user_ids = stream_user_ids.get(stream_id, set())
subscribed_ids[stream_id] = subscribed_user_ids
return SubscriberPeerInfo(
subscribed_ids=subscribed_ids,
private_peer_dict=private_peer_dict,
) |
Returns the set of active user IDs who can access any message
history on this stream (regardless of whether they have a
UserMessage) based on the stream's configuration.
1. if !history_public_to_subscribers:
History is not available to anyone
2. if history_public_to_subscribers:
All subscribers can access the history including guests
The results of this function need to be kept consistent with
what can_access_stream_history would dictate. | def subscriber_ids_with_stream_history_access(stream: Stream) -> Set[int]:
"""Returns the set of active user IDs who can access any message
history on this stream (regardless of whether they have a
UserMessage) based on the stream's configuration.
1. if !history_public_to_subscribers:
History is not available to anyone
2. if history_public_to_subscribers:
All subscribers can access the history including guests
The results of this function need to be kept consistent with
what can_access_stream_history would dictate.
"""
if not stream.is_history_public_to_subscribers():
return set()
return set(
get_active_subscriptions_for_stream_id(
stream.id, include_deactivated_users=False
).values_list("user_profile_id", flat=True)
) |
This function optimizes an important use case for large
streams. Open realms often have many long_term_idle users, which
can result in 10,000s of long_term_idle recipients in default
streams. do_send_messages has an optimization to avoid doing work
for long_term_idle unless message flags or notifications should be
generated.
However, it's expensive even to fetch and process them all in
Python. This function returns all recipients of a stream
message that could possibly require action in the send-message
codepath.
Basically, it returns all subscribers, excluding all long-term
idle users who it can prove will not receive a UserMessage row or
notification for the message (i.e. no alert words, mentions, or
email/push notifications are configured) and thus are not needed
for processing the message send.
Critically, this function is called before the Markdown
processor. As a result, it returns all subscribers who have ANY
configured alert words, even if their alert words aren't present
in the message. Similarly, it returns all subscribers who match
the "possible mention" parameters.
Downstream logic, which runs after the Markdown processor has
parsed the message, will do the precise determination. | def get_subscriptions_for_send_message(
*,
realm_id: int,
stream_id: int,
topic_name: str,
possible_stream_wildcard_mention: bool,
topic_participant_user_ids: AbstractSet[int],
possibly_mentioned_user_ids: AbstractSet[int],
) -> QuerySet[Subscription]:
"""This function optimizes an important use case for large
streams. Open realms often have many long_term_idle users, which
can result in 10,000s of long_term_idle recipients in default
streams. do_send_messages has an optimization to avoid doing work
for long_term_idle unless message flags or notifications should be
generated.
However, it's expensive even to fetch and process them all in
Python. This function returns all recipients of a stream
message that could possibly require action in the send-message
codepath.
Basically, it returns all subscribers, excluding all long-term
idle users who it can prove will not receive a UserMessage row or
notification for the message (i.e. no alert words, mentions, or
email/push notifications are configured) and thus are not needed
for processing the message send.
Critically, this function is called before the Markdown
processor. As a result, it returns all subscribers who have ANY
configured alert words, even if their alert words aren't present
in the message. Similarly, it returns all subscribers who match
the "possible mention" parameters.
Downstream logic, which runs after the Markdown processor has
parsed the message, will do the precise determination.
"""
query = get_active_subscriptions_for_stream_id(
stream_id,
include_deactivated_users=False,
)
if possible_stream_wildcard_mention:
return query
query = query.filter(
Q(user_profile__long_term_idle=False)
| Q(push_notifications=True)
| (Q(push_notifications=None) & Q(user_profile__enable_stream_push_notifications=True))
| Q(email_notifications=True)
| (Q(email_notifications=None) & Q(user_profile__enable_stream_email_notifications=True))
| Q(user_profile_id__in=possibly_mentioned_user_ids)
| Q(user_profile_id__in=topic_participant_user_ids)
| Q(
user_profile_id__in=AlertWord.objects.filter(realm_id=realm_id).values_list(
"user_profile_id"
)
)
| Q(
user_profile_id__in=UserTopic.objects.filter(
stream_id=stream_id,
topic_name__iexact=topic_name,
visibility_policy=UserTopic.VisibilityPolicy.FOLLOWED,
).values_list("user_profile_id")
)
)
return query |
Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream. | def validate_user_access_to_subscribers(
user_profile: Optional[UserProfile], stream: Stream
) -> None:
"""Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{
"realm_id": stream.realm_id,
"is_web_public": stream.is_web_public,
"invite_only": stream.invite_only,
},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda user_profile: subscribed_to_stream(user_profile, stream.id),
) |
Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors. | def validate_user_access_to_subscribers_helper(
user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[UserProfile], bool],
) -> None:
"""Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors.
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
# Even guest users can access subscribers to web-public streams,
# since they can freely become subscribers to these streams.
if stream_dict["is_web_public"]:
return
# With the exception of web-public streams, a guest must
# be subscribed to a stream (even a public one) in order
# to see subscribers.
if user_profile.is_guest and check_user_subscribed(user_profile):
return
# We could explicitly handle the case where guests aren't
# subscribed here in an `else` statement or we can fall
# through to the subsequent logic. Tim prefers the latter.
# Adding an `else` would ensure better code coverage.
if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
raise JsonableError(_("Subscriber data is not available for this channel"))
# Organization administrators can view subscribers for all streams.
if user_profile.is_realm_admin:
return
if stream_dict["invite_only"] and not check_user_subscribed(user_profile):
raise JsonableError(_("Unable to retrieve subscribers for private channel")) |
sub_dict maps stream_id => whether the user is subscribed to that stream. | def bulk_get_subscriber_user_ids(
stream_dicts: Collection[Mapping[str, Any]],
user_profile: UserProfile,
subscribed_stream_ids: Set[int],
) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
is_subscribed: bool
check_user_subscribed = lambda user_profile: is_subscribed
for stream_dict in stream_dicts:
stream_id = stream_dict["id"]
is_subscribed = stream_id in subscribed_stream_ids
try:
validate_user_access_to_subscribers_helper(
user_profile,
stream_dict,
check_user_subscribed,
)
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
recip_to_stream_id = {stream["recipient_id"]: stream["id"] for stream in target_stream_dicts}
recipient_ids = sorted(stream["recipient_id"] for stream in target_stream_dicts)
result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
if not recipient_ids:
return result
"""
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
"""
query = SQL(
"""
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
WHERE
zerver_subscription.recipient_id in %(recipient_ids)s AND
zerver_subscription.active AND
zerver_subscription.is_user_active
ORDER BY
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
"""
)
cursor = connection.cursor()
cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
rows = cursor.fetchall()
cursor.close()
"""
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
"""
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result |
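To illustrate the groupby/itemgetter pattern relied on above, here is a self-contained sketch with in-memory rows; note that itertools.groupby only groups adjacent items, which is why the SQL orders by recipient_id first.
import itertools
from operator import itemgetter

# (recipient_id, user_profile_id) rows, already sorted by recipient_id,
# mirroring the ORDER BY clause in the raw SQL above.
rows = [(7, 101), (7, 102), (9, 103)]
by_recipient = {
    recip_id: [user_id for _, user_id in group]
    for recip_id, group in itertools.groupby(rows, itemgetter(0))
}
# by_recipient == {7: [101, 102], 9: [103]}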
Build a query to get the subscribers list for a stream, raising a JsonableError if
the requesting user is not permitted to view the stream's subscribers.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields. | def get_subscribers_query(
stream: Stream, requesting_user: Optional[UserProfile]
) -> QuerySet[Subscription]:
"""Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
return get_active_subscriptions_for_stream_id(stream.id, include_deactivated_users=False) |
Given a list of values, return a string nicely formatting those values,
summarizing when you have more than `display_limit`. E.g., for a
`display_limit` of 3 we get the following possible cases:
Jessica
Jessica and Waseem
Jessica, Waseem, and Tim
Jessica, Waseem, Tim, and 1 other
Jessica, Waseem, Tim, and 2 others | def display_list(values: List[str], display_limit: int) -> str:
"""
Given a list of values, return a string nicely formatting those values,
summarizing when you have more than `display_limit`. E.g., for a
`display_limit` of 3 we get the following possible cases:
Jessica
Jessica and Waseem
Jessica, Waseem, and Tim
Jessica, Waseem, Tim, and 1 other
Jessica, Waseem, Tim, and 2 others
"""
if len(values) == 1:
# One value, show it.
display_string = f"{values[0]}"
elif len(values) <= display_limit:
# Fewer than `display_limit` values, show all of them.
display_string = ", ".join(f"{value}" for value in values[:-1])
display_string += f" and {values[-1]}"
else:
# More than `display_limit` values, only mention a few.
display_string = ", ".join(f"{value}" for value in values[:display_limit])
display_string += and_n_others(values, display_limit)
return display_string |
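A few illustrative calls; the exact wording of the over-limit case comes from the and_n_others helper, which is not shown here, so that output is taken from the docstring rather than verified.
names = ["Jessica", "Waseem", "Tim", "Max"]
display_list(names[:2], display_limit=3)  # "Jessica and Waseem"
display_list(names, display_limit=3)      # e.g. "Jessica, Waseem, Tim, and 1 other"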
Given a path to a Markdown file, return the rendered HTML.
Note that this assumes that any HTML in the Markdown file is
trusted; it is intended to be used for documentation, not user
data. | def render_markdown_path(
markdown_file_path: str,
context: Optional[Dict[str, Any]] = None,
integration_doc: bool = False,
help_center: bool = False,
) -> str:
"""Given a path to a Markdown file, return the rendered HTML.
Note that this assumes that any HTML in the Markdown file is
trusted; it is intended to be used for documentation, not user
data."""
# We set this global hackishly
from zerver.lib.markdown.help_settings_links import set_relative_settings_links
set_relative_settings_links(bool(context is not None and context.get("html_settings_links")))
from zerver.lib.markdown.help_relative_links import set_relative_help_links
set_relative_help_links(bool(context is not None and context.get("html_settings_links")))
global md_extensions
global md_macro_extension
if md_extensions is None:
md_extensions = [
markdown.extensions.extra.makeExtension(),
markdown.extensions.toc.makeExtension(),
markdown.extensions.admonition.makeExtension(),
markdown.extensions.codehilite.makeExtension(
linenums=False,
guess_lang=False,
),
zerver.lib.markdown.fenced_code.makeExtension(
run_content_validators=bool(
context is not None and context.get("run_content_validators", False)
),
),
zerver.lib.markdown.api_arguments_table_generator.makeExtension(),
zerver.lib.markdown.api_return_values_table_generator.makeExtension(),
zerver.lib.markdown.nested_code_blocks.makeExtension(),
zerver.lib.markdown.tabbed_sections.makeExtension(),
zerver.lib.markdown.help_settings_links.makeExtension(),
zerver.lib.markdown.help_relative_links.makeExtension(),
zerver.lib.markdown.help_emoticon_translations_table.makeExtension(),
zerver.lib.markdown.static.makeExtension(),
]
if context is not None and "api_url" in context:
# We need to generate the API code examples extension each
# time so the `api_url` config parameter can be set dynamically.
#
# TODO: Convert this to something more efficient involving
# passing the API URL as a direct parameter.
extensions = [
zerver.openapi.markdown_extension.makeExtension(
api_url=context["api_url"],
),
*md_extensions,
]
else:
extensions = md_extensions
if integration_doc:
md_macro_extension = zerver.lib.markdown.include.makeExtension(
base_path="templates/zerver/integrations/include/"
)
elif help_center:
md_macro_extension = zerver.lib.markdown.include.makeExtension(base_path="help/include/")
else:
md_macro_extension = zerver.lib.markdown.include.makeExtension(
base_path="api_docs/include/"
)
if not any(doc in markdown_file_path for doc in docs_without_macros):
extensions = [md_macro_extension, *extensions]
md_engine = markdown.Markdown(extensions=extensions)
md_engine.reset()
jinja = engines["Jinja2"]
assert isinstance(jinja, Jinja2)
if markdown_file_path.startswith("/"):
with open(markdown_file_path) as fp:
markdown_string = fp.read()
else:
markdown_string = jinja.env.loader.get_source(jinja.env, markdown_file_path)[0]
API_ENDPOINT_NAME = context.get("API_ENDPOINT_NAME", "") if context is not None else ""
markdown_string = markdown_string.replace("API_ENDPOINT_NAME", API_ENDPOINT_NAME)
html = md_engine.convert(markdown_string)
if context is None:
return mark_safe(html) # noqa: S308
return mark_safe(jinja.from_string(html).render(context)) |
Checks whether the zulip_test_template database template is
consistent with our database migrations; if not, it updates it
in the fastest way possible:
* If all we need to do is add some migrations, just runs those
migrations on the template database.
* Otherwise, we rebuild the test template database from scratch.
The default behavior is sufficient for the `test-backend` use
case, where the test runner code will clone directly from the
template database.
The `rebuild_test_database` option (used by our frontend and API
tests) asks us to drop and re-clone the zulip_test database from
the template so those test suites can run with a fresh copy. | def update_test_databases_if_required(rebuild_test_database: bool = False) -> None:
"""Checks whether the zulip_test_template database template, is
consistent with our database migrations; if not, it updates it
in the fastest way possible:
* If all we need to do is add some migrations, just runs those
migrations on the template database.
* Otherwise, we rebuild the test template database from scratch.
The default behavior is sufficient for the `test-backend` use
case, where the test runner code will clone directly from the
template database.
The `rebuild_test_database` option (used by our frontend and API
tests) asks us to drop and re-clone the zulip_test database from
the template so those test suites can run with a fresh copy.
"""
test_template_db_status = TEST_DATABASE.template_status()
if test_template_db_status == "needs_rebuild":
run(["tools/rebuild-test-database"])
TEST_DATABASE.write_new_db_digest()
return
if test_template_db_status == "run_migrations":
TEST_DATABASE.run_db_migrations()
run(["tools/setup/generate-fixtures"])
return
if rebuild_test_database:
run(["tools/setup/generate-fixtures"]) |
The logic in zerver/lib/test_runner.py tries to delete all the
temporary test databases generated by test-backend threads, but it
cannot guarantee it handles all race conditions correctly. This
is a catch-all function designed to delete any that might have
been leaked due to crashes (etc.). The high-level algorithm is to:
* Delete every database with a name like zulip_test_template_*
* Unless it is registered in a file under TEMPLATE_DATABASE_DIR as
part of a currently running test-backend invocation
* And that file is less than expiry_time old.
This should ensure we ~never break a running test-backend process,
while also ensuring we will eventually delete all leaked databases. | def destroy_leaked_test_databases(expiry_time: int = 60 * 60) -> int:
"""The logic in zerver/lib/test_runner.py tries to delete all the
temporary test databases generated by test-backend threads, but it
cannot guarantee it handles all race conditions correctly. This
is a catch-all function designed to delete any that might have
been leaked due to crashes (etc.). The high-level algorithm is to:
* Delete every database with a name like zulip_test_template_*
* Unless it is registered in a file under TEMPLATE_DATABASE_DIR as
part of a currently running test-backend invocation
* And that file is less than expiry_time old.
This should ensure we ~never break a running test-backend process,
while also ensuring we will eventually delete all leaked databases.
"""
files = glob.glob(os.path.join(UUID_VAR_DIR, TEMPLATE_DATABASE_DIR, "*"))
test_databases: Set[str] = set()
try:
with connection.cursor() as cursor:
cursor.execute("SELECT datname FROM pg_database;")
rows = cursor.fetchall()
for row in rows:
if "zulip_test_template_" in row[0]:
test_databases.add(row[0])
except ProgrammingError:
pass
databases_in_use: Set[str] = set()
for file in files:
if round(time.time()) - os.path.getmtime(file) < expiry_time:
with open(file) as f:
for line in f:
databases_in_use.add(f"zulip_test_template_{line}".rstrip())
else:
# Any test-backend run older than expiry_time can be
# cleaned up, both the database and the file listing its
# databases.
os.remove(file)
databases_to_drop = test_databases - databases_in_use
if not databases_to_drop:
return 0
commands = "\n".join(f"DROP DATABASE IF EXISTS {db};" for db in databases_to_drop)
subprocess.run(
["psql", "-q", "-v", "ON_ERROR_STOP=1", "-h", "localhost", "postgres", "zulip_test"],
input=commands,
check=True,
text=True,
)
return len(databases_to_drop) |
This function resets the zulip_test database in the fastest way possible:
first, it deletes the database and then clones it from zulip_test_template.
This function is used with puppeteer tests, so it can quickly reset the test
database after each run. | def reset_zulip_test_database() -> None:
"""
This function resets the zulip_test database in the fastest way possible:
first, it deletes the database and then clones it from zulip_test_template.
This function is used with puppeteer tests, so it can quickly reset the test
database after each run.
"""
from zerver.lib.test_runner import destroy_test_databases
# Make sure default database is 'zulip_test'.
assert connections["default"].settings_dict["NAME"] == "zulip_test"
# Clearing all the active PSQL sessions with 'zulip_test'.
run(
[
"env",
"PGHOST=localhost",
"PGUSER=zulip_test",
"scripts/setup/terminate-psql-sessions",
"zulip_test",
]
)
destroy_test_databases()
# Pointing default database to test database template, so we can instantly clone it.
settings.DATABASES["default"]["NAME"] = BACKEND_DATABASE_TEMPLATE
connection = connections["default"]
clone_database_suffix = "clone"
connection.creation.clone_test_db(
suffix=clone_database_suffix,
)
settings_dict = connection.creation.get_test_db_clone_settings(clone_database_suffix)
# We manually rename the clone database to 'zulip_test' because when cloning it,
# its name is set to original database name + some suffix.
# Also, we need it to be 'zulip_test' so that our running server can recognize it.
with connection.cursor() as cursor:
cursor.execute("ALTER DATABASE zulip_test_template_clone RENAME TO zulip_test;")
settings_dict["NAME"] = "zulip_test"
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close() |
Allow a user to capture just the queries executed during
the with statement. | def queries_captured(
include_savepoints: bool = False, keep_cache_warm: bool = False
) -> Iterator[List[CapturedQuery]]:
"""
Allow a user to capture just the queries executed during
the with statement.
"""
queries: List[CapturedQuery] = []
def cursor_execute(self: TimeTrackingCursor, sql: Query, vars: Optional[Params] = None) -> None:
start = time.time()
try:
return super(TimeTrackingCursor, self).execute(sql, vars)
finally:
stop = time.time()
duration = stop - start
if include_savepoints or not isinstance(sql, str) or "SAVEPOINT" not in sql:
queries.append(
CapturedQuery(
sql=self.mogrify(sql, vars).decode(),
time=f"{duration:.3f}",
)
)
def cursor_executemany(
self: TimeTrackingCursor, sql: Query, vars_list: Iterable[Params]
) -> None: # nocoverage -- doesn't actually get used in tests
vars_list, vars_list1 = itertools.tee(vars_list)
start = time.time()
try:
return super(TimeTrackingCursor, self).executemany(sql, vars_list)
finally:
stop = time.time()
duration = stop - start
queries.extend(
CapturedQuery(
sql=self.mogrify(sql, vars).decode(),
time=f"{duration:.3f}",
)
for vars in vars_list1
)
if not keep_cache_warm:
cache = get_cache_backend(None)
cache.clear()
flush_per_request_caches()
clear_client_cache()
with mock.patch.multiple(
TimeTrackingCursor, execute=cursor_execute, executemany=cursor_executemany
):
yield queries |
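A usage sketch for tests, assuming queries_captured is wrapped as a context manager (its body yields) and that CapturedQuery exposes a sql attribute, as the keyword construction above suggests; the function under test is a hypothetical placeholder.
with queries_captured() as queries:
    fetch_dashboard_data()  # hypothetical code under test
assert len(queries) <= 5, [q.sql for q in queries]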
Redirect stdout to /dev/null. | def stdout_suppressed() -> Iterator[IO[str]]:
"""Redirect stdout to /dev/null."""
with open(os.devnull, "a") as devnull:
stdout, sys.stdout = sys.stdout, devnull
try:
yield stdout
finally:
sys.stdout = stdout |
This function resets email visibility for all users and the
RealmUserDefault object in the zulip realm in the development environment
to "EMAIL_ADDRESS_VISIBILITY_EVERYONE", since the default value is
"EMAIL_ADDRESS_VISIBILITY_ADMINS". It is needed in tests that want
the "email" field of users to be set to their real email. | def reset_email_visibility_to_everyone_in_zulip_realm() -> None:
"""
This function resets email visibility for all users and the
RealmUserDefault object in the zulip realm in the development environment
to "EMAIL_ADDRESS_VISIBILITY_EVERYONE", since the default value is
"EMAIL_ADDRESS_VISIBILITY_ADMINS". It is needed in tests that want
the "email" field of users to be set to their real email.
"""
realm = get_realm("zulip")
realm_user_default = RealmUserDefault.objects.get(realm=realm)
do_set_realm_user_default_setting(
realm_user_default,
"email_address_visibility",
RealmUserDefault.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
acting_user=None,
)
users = UserProfile.objects.filter(realm=realm)
for user in users:
do_change_user_setting(
user,
"email_address_visibility",
UserProfile.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
acting_user=None,
) |
Temporarily add a rate-limiting rule to the ratelimiter | def ratelimit_rule(
range_seconds: int,
num_requests: int,
domain: str = "api_by_user",
) -> Iterator[None]:
"""Temporarily add a rate-limiting rule to the ratelimiter"""
RateLimitedIPAddr("127.0.0.1", domain=domain).clear_history()
domain_rules = rules.get(domain, []).copy()
domain_rules.append((range_seconds, num_requests))
domain_rules.sort(key=lambda x: x[0])
with patch.dict(rules, {domain: domain_rules}), override_settings(RATE_LIMITING=True):
yield |
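A usage sketch, assuming ratelimit_rule is applied as a context manager in tests (its body yields); the request-issuing helper is hypothetical.
# Allow only 5 requests per 60 seconds within the block.
with ratelimit_rule(60, 5, domain="api_by_user"):
    issue_api_requests()  # hypothetical test helper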
This function runs only under parallel mode. It initializes the
individual processes which are also called workers. | def init_worker(
counter: "multiprocessing.sharedctypes.Synchronized[int]",
initial_settings: Optional[Dict[str, Any]] = None,
serialized_contents: Optional[Dict[str, str]] = None,
process_setup: Optional[Callable[..., None]] = None,
process_setup_args: Optional[Tuple[Any, ...]] = None,
debug_mode: Optional[bool] = None,
used_aliases: Optional[Set[str]] = None,
) -> None:
"""
This function runs only under parallel mode. It initializes the
individual processes which are also called workers.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
"""
You can now use _worker_id.
"""
# Clear the cache
from zerver.lib.cache import get_cache_backend
cache = get_cache_backend(None)
cache.clear()
# Close all connections
connections.close_all()
destroy_test_databases(_worker_id)
create_test_databases(_worker_id)
initialize_worker_path(_worker_id) |
Render a TeX string into HTML using KaTeX
Returns the HTML string, or None if there was some error in the TeX syntax
Keyword arguments:
tex -- Text string with the TeX to render
Don't include delimiters ('$$', '\[ \]', etc.)
is_inline -- Boolean setting that indicates whether the render should be
inline (i.e. for embedding it in text) or not. The latter
will show the content centered, and in the "expanded" form
(default True) | def render_tex(tex: str, is_inline: bool = True) -> Optional[str]:
r"""Render a TeX string into HTML using KaTeX
Returns the HTML string, or None if there was some error in the TeX syntax
Keyword arguments:
tex -- Text string with the TeX to render
Don't include delimiters ('$$', '\[ \]', etc.)
is_inline -- Boolean setting that indicates whether the render should be
inline (i.e. for embedding it in text) or not. The latter
will show the content centered, and in the "expanded" form
(default True)
"""
if settings.KATEX_SERVER:
try:
resp = KatexSession().post(
# We explicitly disable the Smokescreen proxy for this
# call, since it intentionally connects to localhost.
# This is safe because the host is explicitly fixed, and
# the port is pulled from our own configuration.
f"http://localhost:{settings.KATEX_SERVER_PORT}/",
data={
"content": tex,
"is_display": "false" if is_inline else "true",
"shared_secret": settings.SHARED_SECRET,
},
proxies={"http": ""},
)
except requests.exceptions.Timeout:
logging.warning("KaTeX rendering service timed out with %d byte long input", len(tex))
return None
except requests.exceptions.RequestException as e:
logging.warning("KaTeX rendering service failed: %s", type(e).__name__)
return None
if resp.status_code == 200:
return resp.content.decode().strip()
elif resp.status_code == 400:
return None
else:
logging.warning(
"KaTeX rendering service failed: (%s) %s", resp.status_code, resp.content.decode()
)
return None
katex_path = (
static_path("webpack-bundles/katex-cli.js")
if settings.PRODUCTION
else os.path.join(settings.DEPLOY_ROOT, "node_modules/katex/cli.js")
)
if not os.path.isfile(katex_path):
logging.error("Cannot find KaTeX for latex rendering!")
return None
command = ["node", katex_path]
if not is_inline:
command.extend(["--display-mode"])
try:
stdout = subprocess.check_output(command, input=tex, stderr=subprocess.DEVNULL, text=True)
# stdout contains a newline at the end
return stdout.strip()
except subprocess.CalledProcessError:
return None |
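A small usage sketch; only the contract visible above is assumed (an HTML string on success, None when the TeX is invalid or the rendering backend fails).
html = render_tex(r"\sqrt{x^2 + y^2}", is_inline=True)
if html is None:
    # Invalid TeX or the KaTeX backend was unavailable.
    html = "<code>rendering failed</code>"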
Call the function in a separate thread.
Return its return value, or raise an exception,
within approximately 'timeout' seconds.
The function may receive a TimeoutExpiredError exception
anywhere in its code, which could have arbitrary
unsafe effects (resources not released, etc.).
It might also fail to receive the exception and
keep running in the background even though
timeout() has returned.
This may also fail to interrupt functions which are
stuck in a long-running primitive interpreter
operation. | def unsafe_timeout(timeout: float, func: Callable[[], ResultT]) -> ResultT:
"""Call the function in a separate thread.
Return its return value, or raise an exception,
within approximately 'timeout' seconds.
The function may receive a TimeoutExpiredError exception
anywhere in its code, which could have arbitrary
unsafe effects (resources not released, etc.).
It might also fail to receive the exception and
keep running in the background even though
timeout() has returned.
This may also fail to interrupt functions which are
stuck in a long-running primitive interpreter
operation."""
class TimeoutThread(threading.Thread):
def __init__(self) -> None:
threading.Thread.__init__(self)
self.result: Optional[ResultT] = None
self.exc_info: Tuple[
Optional[Type[BaseException]],
Optional[BaseException],
Optional[TracebackType],
] = (None, None, None)
# Don't block the whole program from exiting
# if this is the only thread left.
self.daemon = True
@override
def run(self) -> None:
try:
self.result = func()
except BaseException:
self.exc_info = sys.exc_info()
def raise_async_timeout(self) -> None:
# This function is called from another thread; we attempt
# to raise a TimeoutExpiredError in _this_ thread.
assert self.ident is not None
ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_ulong(self.ident),
ctypes.py_object(TimeoutExpiredError),
)
thread = TimeoutThread()
thread.start()
thread.join(timeout)
if thread.is_alive():
# We need to retry, because an async exception received while
# the thread is in a system call is simply ignored.
for i in range(10):
thread.raise_async_timeout()
time.sleep(0.1)
if not thread.is_alive():
break
if thread.exc_info[1] is not None:
# Re-raise the exception we sent, if possible, so the
# stacktrace originates in the slow code
raise thread.exc_info[1].with_traceback(thread.exc_info[2])
# If we don't have that for some reason (e.g. we failed to
# kill it), just raise from here; the thread _may still be
# running_ because it failed to see any of our exceptions, and
# we just ignore it.
if thread.is_alive(): # nocoverage
logging.warning("Failed to time out backend thread")
raise TimeoutExpiredError # nocoverage
if thread.exc_info[1] is not None:
# Died with some other exception; re-raise it
raise thread.exc_info[1].with_traceback(thread.exc_info[2])
assert thread.result is not None
return thread.result |
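A usage sketch; slow_operation is a hypothetical callable, and TimeoutExpiredError is the exception class referenced in the function body.
try:
    result = unsafe_timeout(5.0, lambda: slow_operation())
except TimeoutExpiredError:
    # The worker thread may still be running in the background.
    result = None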
Use this where you are getting dicts that are based off of messages
that may come from the outside world, especially from third party
APIs and bots.
We prefer 'topic' to 'subject' here. We expect at least one field
to be present (or the caller must know how to handle KeyError). | def get_topic_from_message_info(message_info: Dict[str, Any]) -> str:
"""
Use this where you are getting dicts that are based off of messages
that may come from the outside world, especially from third party
APIs and bots.
We prefer 'topic' to 'subject' here. We expect at least one field
to be present (or the caller must know how to handle KeyError).
"""
if "topic" in message_info:
return message_info["topic"]
return message_info["subject"] |
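A few illustrative calls showing the 'topic'-over-'subject' preference and the KeyError case the docstring warns about.
get_topic_from_message_info({"topic": "deploys", "subject": "ignored"})  # "deploys"
get_topic_from_message_info({"subject": "deploys"})                      # "deploys"
get_topic_from_message_info({})  # raises KeyError; callers must handle it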
Resolved topics are denoted only by a title change, not by a boolean toggle in a database column. This
method inspects the topic name and returns a tuple of:
- Whether the topic has been resolved
- The topic name with the resolution prefix, if present in stored_name, removed | def get_topic_resolution_and_bare_name(stored_name: str) -> Tuple[bool, str]:
"""
Resolved topics are denoted only by a title change, not by a boolean toggle in a database column. This
method inspects the topic name and returns a tuple of:
- Whether the topic has been resolved
- The topic name with the resolution prefix, if present in stored_name, removed
"""
if stored_name.startswith(RESOLVED_TOPIC_PREFIX):
return (True, stored_name[len(RESOLVED_TOPIC_PREFIX) :])
return (False, stored_name) |
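An illustrative call, assuming RESOLVED_TOPIC_PREFIX is the checkmark-and-space marker prepended to resolved topic names; the exact prefix value is an assumption.
# Assuming RESOLVED_TOPIC_PREFIX == "✔ ":
get_topic_resolution_and_bare_name("✔ database migration")  # (True, "database migration")
get_topic_resolution_and_bare_name("database migration")    # (False, "database migration")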
Users who either sent or reacted to the messages in the topic.
The function is expensive for large numbers of messages in the topic. | def participants_for_topic(realm_id: int, recipient_id: int, topic_name: str) -> Set[int]:
"""
Users who either sent or reacted to the messages in the topic.
The function is expensive for large numbers of messages in the topic.
"""
messages = Message.objects.filter(
# Uses index: zerver_message_realm_recipient_upper_subject
realm_id=realm_id,
recipient_id=recipient_id,
subject__iexact=topic_name,
)
participants = set(
UserProfile.objects.filter(
Q(id__in=Subquery(messages.values("sender_id")))
| Q(
id__in=Subquery(
Reaction.objects.filter(message__in=messages).values("user_profile_id")
)
)
).values_list("id", flat=True)
)
return participants |
This is responsible for inspecting the function signature and getting the
metadata from the parameters. We want to keep this function as pure as
possible, not leaking side effects to the global state. Side effects should
be executed separately after the ViewFuncInfo is returned. | def parse_view_func_signature(
view_func: Callable[Concatenate[HttpRequest, ParamT], object],
) -> ViewFuncInfo:
"""This is responsible for inspecting the function signature and getting the
metadata from the parameters. We want to keep this function as pure as
possible, not leaking side effects to the global state. Side effects should
be executed separately after the ViewFuncInfo is returned.
"""
type_hints = get_type_hints(view_func, include_extras=True)
parameters = inspect.signature(view_func).parameters
view_func_full_name = f"{view_func.__module__}.{view_func.__name__}"
process_parameters: List[FuncParam[object]] = []
for param_name, parameter in parameters.items():
assert param_name in type_hints
if parameter.kind != inspect.Parameter.KEYWORD_ONLY:
continue
param_info = parse_single_parameter(
param_name=param_name, param_type=type_hints[param_name], parameter=parameter
)
process_parameters.append(param_info)
return ViewFuncInfo(
view_func_full_name=view_func_full_name,
parameters=process_parameters,
) |
Master function for accessing another user by ID in API code;
verifies the user ID is in the same realm, and if requested checks
for administrative privileges, with flags for various special
cases. | def access_user_by_id(
user_profile: UserProfile,
target_user_id: int,
*,
allow_deactivated: bool = False,
allow_bots: bool = False,
for_admin: bool,
) -> UserProfile:
"""Master function for accessing another user by ID in API code;
verifies the user ID is in the same realm, and if requested checks
for administrative privileges, with flags for various special
cases.
"""
try:
target = get_user_profile_by_id_in_realm(target_user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such user"))
return access_user_common(target, user_profile, allow_deactivated, allow_bots, for_admin) |
Variant of access_user_by_id allowing cross-realm bots to be accessed. | def access_user_by_id_including_cross_realm(
user_profile: UserProfile,
target_user_id: int,
*,
allow_deactivated: bool = False,
allow_bots: bool = False,
for_admin: bool,
) -> UserProfile:
"""Variant of access_user_by_id allowing cross-realm bots to be accessed."""
try:
target = get_user_by_id_in_realm_including_cross_realm(target_user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such user"))
return access_user_common(target, user_profile, allow_deactivated, allow_bots, for_admin) |
Formats a user row returned by a database fetch using
.values(*realm_user_dict_fields) into a dictionary representation
of that user for API delivery to clients. The acting_user
argument is used for permissions checks. | def format_user_row(
realm_id: int,
acting_user: Optional[UserProfile],
row: RawUserDict,
client_gravatar: bool,
user_avatar_url_field_optional: bool,
custom_profile_field_data: Optional[Dict[str, Any]] = None,
) -> APIUserDict:
"""Formats a user row returned by a database fetch using
.values(*realm_user_dict_fields) into a dictionary representation
of that user for API delivery to clients. The acting_user
argument is used for permissions checks.
"""
is_admin = is_administrator_role(row["role"])
is_owner = row["role"] == UserProfile.ROLE_REALM_OWNER
is_guest = row["role"] == UserProfile.ROLE_GUEST
is_bot = row["is_bot"]
delivery_email = None
if acting_user is not None and can_access_delivery_email(
acting_user, row["id"], row["email_address_visibility"]
):
delivery_email = row["delivery_email"]
result = APIUserDict(
email=row["email"],
user_id=row["id"],
avatar_version=row["avatar_version"],
is_admin=is_admin,
is_owner=is_owner,
is_guest=is_guest,
is_billing_admin=row["is_billing_admin"],
role=row["role"],
is_bot=is_bot,
full_name=row["full_name"],
timezone=canonicalize_timezone(row["timezone"]),
is_active=row["is_active"],
date_joined=row["date_joined"].isoformat(),
delivery_email=delivery_email,
)
if acting_user is None:
# Remove data about other users which are not useful to spectators
# or can reveal personal information about a user.
# Only send day level precision date_joined data to spectators.
del result["is_billing_admin"]
del result["timezone"]
assert isinstance(result["date_joined"], str)
result["date_joined"] = str(date_parser.parse(result["date_joined"]).date())
# Zulip clients that support using `GET /avatar/{user_id}` as a
# fallback if we didn't send an avatar URL in the user object pass
# user_avatar_url_field_optional in client_capabilities.
#
# This is a major network performance optimization for
# organizations with 10,000s of users where we would otherwise
# send avatar URLs in the payload (either because most users have
# uploaded avatars or because EMAIL_ADDRESS_VISIBILITY_ADMINS
# prevents the older client_gravatar optimization from helping).
# The performance impact is large, largely because the hashes in
# avatar URLs structurally cannot compress well.
#
# The user_avatar_url_field_optional gives the server sole
# discretion in deciding for which users we want to send the
# avatar URL (Which saves clients an RTT at the cost of some
# bandwidth). At present, the server looks at `long_term_idle` to
# decide which users to include avatars for, piggy-backing on a
# different optimization for organizations with 10,000s of users.
include_avatar_url = not user_avatar_url_field_optional or not row["long_term_idle"]
if include_avatar_url:
result["avatar_url"] = get_avatar_field(
user_id=row["id"],
realm_id=realm_id,
email=row["delivery_email"],
avatar_source=row["avatar_source"],
avatar_version=row["avatar_version"],
medium=False,
client_gravatar=client_gravatar,
)
if is_bot:
result["bot_type"] = row["bot_type"]
if is_cross_realm_bot_email(row["email"]):
result["is_system_bot"] = True
# Note that bot_owner_id can be None with legacy data.
result["bot_owner_id"] = row["bot_owner_id"]
elif custom_profile_field_data is not None:
result["profile_data"] = custom_profile_field_data
return result |
Fetches data about the target user(s) appropriate for sending to
acting_user via the standard format for the Zulip API. If
target_user is None, we fetch all users in the realm. | def get_users_for_api(
realm: Realm,
acting_user: Optional[UserProfile],
*,
target_user: Optional[UserProfile] = None,
client_gravatar: bool,
user_avatar_url_field_optional: bool,
include_custom_profile_fields: bool = True,
user_list_incomplete: bool = False,
) -> Dict[int, APIUserDict]:
"""Fetches data about the target user(s) appropriate for sending to
acting_user via the standard format for the Zulip API. If
target_user is None, we fetch all users in the realm.
"""
profiles_by_user_id = None
custom_profile_field_data = None
# target_user is an optional parameter which is passed when user data of a specific user
# is required. It is 'None' otherwise.
accessible_user_dicts: List[RawUserDict] = []
inaccessible_user_dicts: List[APIUserDict] = []
if target_user is not None:
accessible_user_dicts = [user_profile_to_user_row(target_user)]
else:
accessible_user_dicts, inaccessible_user_dicts = get_user_dicts_in_realm(realm, acting_user)
if include_custom_profile_fields:
base_query = CustomProfileFieldValue.objects.select_related("field")
# TODO: Consider optimizing this query away with caching.
if target_user is not None:
custom_profile_field_values = base_query.filter(user_profile=target_user)
else:
custom_profile_field_values = base_query.filter(field__realm_id=realm.id)
profiles_by_user_id = get_custom_profile_field_values(custom_profile_field_values)
result = {}
for row in accessible_user_dicts:
if profiles_by_user_id is not None:
custom_profile_field_data = profiles_by_user_id.get(row["id"], {})
client_gravatar_for_user = (
client_gravatar
and row["email_address_visibility"] == UserProfile.EMAIL_ADDRESS_VISIBILITY_EVERYONE
)
result[row["id"]] = format_user_row(
realm.id,
acting_user=acting_user,
row=row,
client_gravatar=client_gravatar_for_user,
user_avatar_url_field_optional=user_avatar_url_field_optional,
custom_profile_field_data=custom_profile_field_data,
)
if not user_list_incomplete:
for inaccessible_user_row in inaccessible_user_dicts:
# We already have the required data for inaccessible users
# in row object, so we can just add it to result directly.
user_id = inaccessible_user_row["user_id"]
result[user_id] = inaccessible_user_row
return result |
It is generally unsafe to call is_verified directly on `request.user` since
the attribute `otp_device` does not exist on an `AnonymousUser`, and `is_verified`
does not make sense without 2FA being enabled.
This wraps the checks for all these assumptions to make sure the call is safe. | def is_2fa_verified(user: UserProfile) -> bool:
"""
It is generally unsafe to call is_verified directly on `request.user` since
the attribute `otp_device` does not exist on an `AnonymousUser`, and `is_verified`
does not make sense without 2FA being enabled.
This wraps the checks for all these assumptions to make sure the call is safe.
"""
# Explicitly require the caller to ensure that settings.TWO_FACTOR_AUTHENTICATION_ENABLED
# is True before calling `is_2fa_verified`.
assert settings.TWO_FACTOR_AUTHENTICATION_ENABLED
return is_verified(user) |
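A sketch of the calling pattern the assertion enforces: gate on the setting first, then ask about 2FA state; grant_sensitive_access is a hypothetical placeholder.
if settings.TWO_FACTOR_AUTHENTICATION_ENABLED and is_2fa_verified(user_profile):
    grant_sensitive_access(user_profile)  # hypothetical follow-up action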
This locks the user groups with the given potential_subgroup_ids, as well
as their indirect subgroups, followed by the potential supergroup. It
ensures that we lock the user groups in a consistent order topologically to
avoid unnecessary deadlocks on non-conflicting queries.
Regardless of whether the user groups returned are used, always call this
helper before making changes to subgroup memberships. This avoids
introducing cycles among user groups when there is a race condition in
which one of these subgroups becomes an ancestor of the parent user group in
another transaction.
Note that it only does a permission check on the potential supergroup,
not the potential subgroups or their recursive subgroups. | def lock_subgroups_with_respect_to_supergroup(
potential_subgroup_ids: Collection[int], potential_supergroup_id: int, acting_user: UserProfile
) -> Iterator[LockedUserGroupContext]:
"""This locks the user groups with the given potential_subgroup_ids, as well
as their indirect subgroups, followed by the potential supergroup. It
ensures that we lock the user groups in a consistent order topologically to
avoid unnecessary deadlocks on non-conflicting queries.
Regardless of whether the user groups returned are used, always call this
helper before making changes to subgroup memberships. This avoids
introducing cycles among user groups when there is a race condition in
which one of these subgroups becomes an ancestor of the parent user group in
another transaction.
Note that it only does a permission check on the potential supergroup,
not the potential subgroups or their recursive subgroups.
"""
with transaction.atomic(savepoint=False):
# Calling list with the QuerySet forces its evaluation putting a lock on
# the queried rows.
recursive_subgroups = list(
get_recursive_subgroups_for_groups(
potential_subgroup_ids, acting_user.realm
).select_for_update(nowait=True)
)
# TODO: This select_for_update query is subject to deadlocking, and
# better error handling is needed. We may use
# select_for_update(nowait=True) and release the locks held by ending
# the transaction with a JsonableError by handling the DatabaseError.
# But at the current scale of concurrent requests, we rely on
# Postgres's deadlock detection when it occurs.
potential_supergroup = access_user_group_by_id(
potential_supergroup_id, acting_user, for_read=False
)
# We avoid making a separate query for user_group_ids because the
# recursive query already returns those user groups.
potential_subgroups = [
user_group
for user_group in recursive_subgroups
if user_group.id in potential_subgroup_ids
]
# We expect that the passed user_group_ids each corresponds to an
# existing user group.
group_ids_found = [group.id for group in potential_subgroups]
group_ids_not_found = [
group_id for group_id in potential_subgroup_ids if group_id not in group_ids_found
]
if group_ids_not_found:
raise JsonableError(
_("Invalid user group ID: {group_id}").format(group_id=group_ids_not_found[0])
)
for subgroup in potential_subgroups:
# At this time, we only do a check on the realm ID of the fetched
# subgroup. This would be caught by the check earlier, so there is
# no coverage here.
if not has_user_group_access(subgroup, acting_user, for_read=False, as_subgroup=True):
raise JsonableError(_("Insufficient permission")) # nocoverage
yield LockedUserGroupContext(
direct_subgroups=potential_subgroups,
recursive_subgroups=recursive_subgroups,
supergroup=potential_supergroup,
) |
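A usage sketch, assuming the function is used as a context manager (its body yields) and that LockedUserGroupContext exposes its fields as attributes, as the keyword construction suggests; the membership-update call is a hypothetical placeholder.
with lock_subgroups_with_respect_to_supergroup(
    [subgroup_a.id, subgroup_b.id], supergroup.id, acting_user
) as locked:
    # Safe to mutate memberships here: the relevant groups are locked
    # in a consistent order for the duration of the transaction.
    add_subgroups_to_supergroup(locked.supergroup, locked.direct_subgroups)  # hypothetical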
This function is used in do_events_register code path so this code
should be performant. We need to do 2 database queries because
Django's ORM doesn't properly support the left join between
UserGroup and UserGroupMembership that we need. | def user_groups_in_realm_serialized(realm: Realm) -> List[UserGroupDict]:
"""This function is used in do_events_register code path so this code
should be performant. We need to do 2 database queries because
Django's ORM doesn't properly support the left join between
UserGroup and UserGroupMembership that we need.
"""
realm_groups = NamedUserGroup.objects.filter(realm=realm)
group_dicts: Dict[int, UserGroupDict] = {}
for user_group in realm_groups:
group_dicts[user_group.id] = dict(
id=user_group.id,
name=user_group.name,
description=user_group.description,
members=[],
direct_subgroup_ids=[],
is_system_group=user_group.is_system_group,
can_mention_group=user_group.can_mention_group_id,
)
membership = UserGroupMembership.objects.filter(user_group__realm=realm).values_list(
"user_group_id", "user_profile_id"
)
for user_group_id, user_profile_id in membership:
group_dicts[user_group_id]["members"].append(user_profile_id)
group_membership = GroupGroupMembership.objects.filter(subgroup__realm=realm).values_list(
"subgroup_id", "supergroup_id"
)
for subgroup_id, supergroup_id in group_membership:
group_dicts[supergroup_id]["direct_subgroup_ids"].append(subgroup_id)
for group_dict in group_dicts.values():
group_dict["members"] = sorted(group_dict["members"])
group_dict["direct_subgroup_ids"] = sorted(group_dict["direct_subgroup_ids"])
return sorted(group_dicts.values(), key=lambda group_dict: group_dict["id"]) |
Any changes to this function likely require a migration to adjust
existing realms. See e.g. migration 0382_create_role_based_system_groups.py,
which is a copy of this function from when we introduced system groups. | def create_system_user_groups_for_realm(realm: Realm) -> Dict[int, NamedUserGroup]:
"""Any changes to this function likely require a migration to adjust
existing realms. See e.g. migration 0382_create_role_based_system_groups.py,
which is a copy of this function from when we introduced system groups.
"""
role_system_groups_dict: Dict[int, NamedUserGroup] = {}
system_groups_info_list: List[Dict[str, str]] = []
nobody_group_info = {
"name": SystemGroups.NOBODY,
"description": "Nobody",
}
full_members_group_info = {
"name": SystemGroups.FULL_MEMBERS,
"description": "Members of this organization, not including new accounts and guests",
}
everyone_on_internet_group_info = {
"name": SystemGroups.EVERYONE_ON_INTERNET,
"description": "Everyone on the Internet",
}
system_groups_info_list = [
nobody_group_info,
NamedUserGroup.SYSTEM_USER_GROUP_ROLE_MAP[UserProfile.ROLE_REALM_OWNER],
NamedUserGroup.SYSTEM_USER_GROUP_ROLE_MAP[UserProfile.ROLE_REALM_ADMINISTRATOR],
NamedUserGroup.SYSTEM_USER_GROUP_ROLE_MAP[UserProfile.ROLE_MODERATOR],
full_members_group_info,
NamedUserGroup.SYSTEM_USER_GROUP_ROLE_MAP[UserProfile.ROLE_MEMBER],
NamedUserGroup.SYSTEM_USER_GROUP_ROLE_MAP[UserProfile.ROLE_GUEST],
everyone_on_internet_group_info,
]
bulk_create_system_user_groups(system_groups_info_list, realm)
system_groups_name_dict: Dict[str, NamedUserGroup] = get_role_based_system_groups_dict(realm)
for role in NamedUserGroup.SYSTEM_USER_GROUP_ROLE_MAP:
group_name = NamedUserGroup.SYSTEM_USER_GROUP_ROLE_MAP[role]["name"]
role_system_groups_dict[role] = system_groups_name_dict[group_name]
# Order of this list here is important to create correct GroupGroupMembership objects
# Note that because we do not create user memberships here, no audit log entries for
# user memberships are populated either.
system_user_groups_list = [
system_groups_name_dict[SystemGroups.NOBODY],
system_groups_name_dict[SystemGroups.OWNERS],
system_groups_name_dict[SystemGroups.ADMINISTRATORS],
system_groups_name_dict[SystemGroups.MODERATORS],
system_groups_name_dict[SystemGroups.FULL_MEMBERS],
system_groups_name_dict[SystemGroups.MEMBERS],
system_groups_name_dict[SystemGroups.EVERYONE],
system_groups_name_dict[SystemGroups.EVERYONE_ON_INTERNET],
]
creation_time = timezone_now()
realmauditlog_objects = [
RealmAuditLog(
realm=realm,
acting_user=None,
event_type=RealmAuditLog.USER_GROUP_CREATED,
event_time=creation_time,
modified_user_group=user_group,
)
for user_group in system_user_groups_list
]
groups_with_updated_settings = []
for group in system_user_groups_list:
user_group = set_defaults_for_group_settings(group, {}, system_groups_name_dict)
groups_with_updated_settings.append(user_group)
NamedUserGroup.objects.bulk_update(groups_with_updated_settings, ["can_mention_group"])
subgroup_objects: List[GroupGroupMembership] = []
# "Nobody" system group is not a subgroup of any user group, since it is already empty.
subgroup, remaining_groups = system_user_groups_list[1], system_user_groups_list[2:]
for supergroup in remaining_groups:
subgroup_objects.append(GroupGroupMembership(subgroup=subgroup, supergroup=supergroup))
now = timezone_now()
realmauditlog_objects.extend(
[
RealmAuditLog(
realm=realm,
modified_user_group=supergroup,
event_type=RealmAuditLog.USER_GROUP_DIRECT_SUBGROUP_MEMBERSHIP_ADDED,
event_time=now,
acting_user=None,
extra_data={"subgroup_ids": [subgroup.id]},
),
RealmAuditLog(
realm=realm,
modified_user_group=subgroup,
event_type=RealmAuditLog.USER_GROUP_DIRECT_SUPERGROUP_MEMBERSHIP_ADDED,
event_time=now,
acting_user=None,
extra_data={"supergroup_ids": [supergroup.id]},
),
]
)
subgroup = supergroup
GroupGroupMembership.objects.bulk_create(subgroup_objects)
RealmAuditLog.objects.bulk_create(realmauditlog_objects)
return role_system_groups_dict |
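A minimal usage sketch (not part of the source above): this function is normally invoked while provisioning a new realm, and the returned mapping is keyed by the role constants used in the code above. The `realm` variable below is assumed to be a freshly created Realm.
# Hypothetical caller, e.g. inside realm-creation code.
role_system_groups_dict = create_system_user_groups_for_realm(realm)
owners_group = role_system_groups_dict[UserProfile.ROLE_REALM_OWNER]
admins_group = role_system_groups_dict[UserProfile.ROLE_REALM_ADMINISTRATOR]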
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
users shows a speedup of 0.436 -> 0.027 seconds, so we're
talking about a 15x speedup. | def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
"""
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
users shows a speedup of 0.436 -> 0.027 seconds, so we're
talking about a 15x speedup.
"""
if not ums:
return
vals = [(um.user_profile_id, um.message_id, um.flags) for um in ums]
query = SQL(
"""
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES %s
ON CONFLICT DO NOTHING
"""
)
with connection.cursor() as cursor:
execute_values(cursor.cursor, query, vals) |
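A minimal sketch of how a caller might use this helper, assuming UserMessageLite is constructed from the same three fields the INSERT reads; `message` and `recipient_user_ids` are hypothetical names.
# Hypothetical usage: fan a single message out to many users in one INSERT.
ums = [
    UserMessageLite(user_profile_id=user_id, message_id=message.id, flags=0)
    for user_id in recipient_user_ids
]
bulk_insert_ums(ums)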
Fetches UserTopic objects associated with the target user.
* include_deactivated: Whether to include those associated with
deactivated streams.
* include_stream_name: Whether to include stream names in the
returned dictionaries.
* visibility_policy: If specified, returns only UserTopic objects
with the specified visibility_policy value. | def get_user_topics(
user_profile: UserProfile,
include_deactivated: bool = False,
include_stream_name: bool = False,
visibility_policy: Optional[int] = None,
) -> List[UserTopicDict]:
"""
Fetches UserTopic objects associated with the target user.
* include_deactivated: Whether to include those associated with
deactivated streams.
* include_stream_name: Whether to include stream names in the
returned dictionaries.
* visibility_policy: If specified, returns only UserTopic objects
with the specified visibility_policy value.
"""
query = UserTopic.objects.filter(user_profile=user_profile)
if visibility_policy is not None:
query = query.filter(visibility_policy=visibility_policy)
# Exclude user topics that are part of deactivated streams unless
# explicitly requested.
if not include_deactivated:
query = query.filter(stream__deactivated=False)
rows = query.values(
"stream_id", "stream__name", "topic_name", "last_updated", "visibility_policy"
)
result = []
for row in rows:
user_topic_dict: UserTopicDict = {
"stream_id": row["stream_id"],
"topic_name": row["topic_name"],
"visibility_policy": row["visibility_policy"],
"last_updated": datetime_to_timestamp(row["last_updated"]),
}
if include_stream_name:
user_topic_dict["stream__name"] = row["stream__name"]
result.append(user_topic_dict)
return result |
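For example, a caller might fetch only the muted topics for a user, including stream names for display; the MUTED constant below is assumed to live on UserTopic.VisibilityPolicy.
# Hypothetical call: muted topics only, with stream names included.
muted_topics = get_user_topics(
    user_profile,
    include_stream_name=True,
    visibility_policy=UserTopic.VisibilityPolicy.MUTED,
)
for row in muted_topics:
    print(row["stream__name"], row["topic_name"], row["last_updated"])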
This is only used in tests. | def set_topic_visibility_policy(
user_profile: UserProfile,
topics: List[List[str]],
visibility_policy: int,
last_updated: Optional[datetime] = None,
) -> None:
"""
This is only used in tests.
"""
UserTopic.objects.filter(
user_profile=user_profile,
visibility_policy=visibility_policy,
).delete()
if last_updated is None:
last_updated = timezone_now()
for stream_name, topic_name in topics:
stream = get_stream(stream_name, user_profile.realm)
recipient_id = stream.recipient_id
assert recipient_id is not None
bulk_set_user_topic_visibility_policy_in_database(
user_profiles=[user_profile],
stream_id=stream.id,
recipient_id=recipient_id,
topic_name=topic_name,
visibility_policy=visibility_policy,
last_updated=last_updated,
) |
Prefetch the visibility policies the user has configured for
various topics.
The prefetching helps to avoid the db queries later in the loop
to determine the user's visibility policy for a topic. | def build_get_topic_visibility_policy(
user_profile: UserProfile,
) -> Callable[[int, str], int]:
"""Prefetch the visibility policies the user has configured for
various topics.
The prefetching helps to avoid the db queries later in the loop
to determine the user's visibility policy for a topic.
"""
rows = UserTopic.objects.filter(user_profile=user_profile).values(
"recipient_id",
"topic_name",
"visibility_policy",
)
topic_to_visibility_policy: Dict[Tuple[int, str], int] = defaultdict(int)
for row in rows:
recipient_id = row["recipient_id"]
topic_name = row["topic_name"]
visibility_policy = row["visibility_policy"]
topic_to_visibility_policy[(recipient_id, topic_name)] = visibility_policy
def get_topic_visibility_policy(recipient_id: int, topic_name: str) -> int:
return topic_to_visibility_policy[(recipient_id, topic_name.lower())]
return get_topic_visibility_policy |
Assert that the input is an integer and is contained in `possible_values`. If the input is not in
`possible_values`, a `ValidationError` is raised containing the failing field's name. | def check_int_in(possible_values: List[int]) -> Validator[int]:
"""
Assert that the input is an integer and is contained in `possible_values`. If the input is not in
`possible_values`, a `ValidationError` is raised containing the failing field's name.
"""
def validator(var_name: str, val: object) -> int:
n = check_int(var_name, val)
if n not in possible_values:
raise ValidationError(_("Invalid {var_name}").format(var_name=var_name))
return n
return validator |
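A quick illustration of the returned closure (the field name and values are made up):
check_priority = check_int_in([1, 2, 3])
check_priority("priority", 2)   # returns 2
check_priority("priority", 9)   # raises ValidationError ("Invalid priority")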
Use this validator if an argument is of a variable type (e.g. processing
properties that might be strings or booleans).
`allowed_type_funcs`: the check_* validator functions for the possible data
types for this variable. | def check_union(allowed_type_funcs: Collection[Validator[ResultT]]) -> Validator[ResultT]:
"""
Use this validator if an argument is of a variable type (e.g. processing
properties that might be strings or booleans).
`allowed_type_funcs`: the check_* validator functions for the possible data
types for this variable.
"""
def enumerated_type_check(var_name: str, val: object) -> ResultT:
for func in allowed_type_funcs:
try:
return func(var_name, val)
except ValidationError:
pass
raise ValidationError(_("{var_name} is not an allowed_type").format(var_name=var_name))
return enumerated_type_check |
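For instance, combined with the sibling check_string and check_bool validators from this module, a value that may be either a string or a boolean can be validated like this (illustrative only):
check_string_or_bool = check_union([check_string, check_bool])
check_string_or_bool("default_language", "en")   # returns "en"
check_string_or_bool("default_language", True)   # returns True
check_string_or_bool("default_language", 42)     # raises ValidationError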
This function is used to validate the data sent to the server while
creating/editing choices of the choice field in Organization settings. | def validate_select_field_data(field_data: ProfileFieldData) -> Dict[str, Dict[str, str]]:
"""
This function is used to validate the data sent to the server while
creating/editing choices of the choice field in Organization settings.
"""
validator = check_dict_only(
[
("text", check_required_string),
("order", check_required_string),
]
)
# Collect the distinct text of each option, used below to detect duplicates
distinct_field_names: Set[str] = set()
for key, value in field_data.items():
if not key.strip():
raise ValidationError(_("'{item}' cannot be blank.").format(item="value"))
valid_value = validator("field_data", value)
assert value is valid_value # To justify the unchecked cast below
distinct_field_names.add(valid_value["text"])
# Raise an error if any of the choices are duplicated
if len(field_data) != len(distinct_field_names):
raise ValidationError(_("Field must not have duplicate choices."))
return cast(Dict[str, Dict[str, str]], field_data) |
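Per the validator above, field_data is a dict keyed by option value whose entries carry "text" and "order" strings; the values below are illustrative.
field_data = {
    "1": {"text": "Low priority", "order": "1"},
    "2": {"text": "High priority", "order": "2"},
}
validate_select_field_data(field_data)  # passes; duplicate "text" values would raise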
This function is used to validate the value selected by the user against a
choice field. This is not used to validate admin data. | def validate_select_field(var_name: str, field_data: str, value: object) -> str:
"""
This function is used to validate the value selected by the user against a
choice field. This is not used to validate admin data.
"""
s = check_string(var_name, value)
field_data_dict = orjson.loads(field_data)
if s not in field_data_dict:
msg = _("'{value}' is not a valid choice for '{field_name}'.")
raise ValidationError(msg.format(value=value, field_name=var_name))
return s |
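Here field_data is the JSON-encoded choices stored on the custom profile field, and the user-submitted value must be one of its keys (illustrative values):
field_data = '{"1": {"text": "Engineering", "order": "1"}, "2": {"text": "Design", "order": "2"}}'
validate_select_field("team", field_data, "1")   # returns "1"
validate_select_field("team", field_data, "9")   # raises ValidationError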
This code works with the web app; mobile and other
clients should also start supporting this soon. | def do_widget_post_save_actions(send_request: SendMessageRequest) -> None:
"""
This code works with the web app; mobile and other
clients should also start supporting this soon.
"""
message_content = send_request.message.content
sender_id = send_request.message.sender_id
message_id = send_request.message.id
widget_type = None
extra_data = None
widget_type, extra_data = get_widget_data(message_content)
widget_content = send_request.widget_content
if widget_content is not None:
# Note that we validate this data in check_message,
# so we can trust it here.
widget_type = widget_content["widget_type"]
extra_data = widget_content["extra_data"]
if widget_type:
content = dict(
widget_type=widget_type,
extra_data=extra_data,
)
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type="widget",
content=json.dumps(content),
)
submessage.save()
send_request.submessages = SubMessage.get_raw_db_rows([message_id]) |
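For reference, a widget_content payload along these lines is what causes a "widget" SubMessage to be attached; the poll fields shown are illustrative, not a schema definition, since the exact shape is validated earlier in check_message.
# Illustrative payload only.
widget_content = {
    "widget_type": "poll",
    "extra_data": {"question": "Where should we get lunch?", "options": ["Cafe", "Deli"]},
}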
If the link points to a local destination (e.g. #narrow/...),
generate a relative link that will open it in the current window. | def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
"""If the link points to a local destination (e.g. #narrow/...),
generate a relative link that will open it in the current window.
"""
if db_data:
realm_uri_prefix = db_data.realm_uri + "/"
if link.startswith((realm_uri_prefix + "#", realm_uri_prefix + "user_uploads/")):
return link[len(realm_uri_prefix) :]
return link |
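Concretely, with a hypothetical realm whose db_data.realm_uri is "https://chat.example.com":
# rewrite_local_links_to_relative(db_data, "https://chat.example.com/#narrow/stream/9-design")
#     -> "#narrow/stream/9-design"
# rewrite_local_links_to_relative(db_data, "https://chat.example.com/user_uploads/2/ab/file.png")
#     -> "user_uploads/2/ab/file.png"
# rewrite_local_links_to_relative(db_data, "https://example.org/some/page")
#     -> returned unchanged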
Sanitize a URL against XSS attacks.
See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url. | def sanitize_url(url: str) -> Optional[str]:
"""
Sanitize a URL against XSS attacks.
See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
"""
try:
parts = urlsplit(url.replace(" ", "%20"))
scheme, netloc, path, query, fragment = parts
except ValueError:
# Bad URL - so bad it couldn't be parsed.
return ""
# If there is no scheme or netloc and there is a '@' in the path,
# treat it as a mailto: and set the appropriate scheme
if scheme == "" and netloc == "" and "@" in path:
scheme = "mailto"
elif scheme == "" and netloc == "" and len(path) > 0 and path[0] == "/":
# Allow domain-relative links
return urlunsplit(("", "", path, query, fragment))
elif (scheme, netloc, path, query) == ("", "", "", "") and len(fragment) > 0:
# Allow fragment links
return urlunsplit(("", "", "", "", fragment))
# Zulip modification: If scheme is not specified, assume http://
# We re-enter sanitize_url because netloc etc. need to be re-parsed.
if not scheme:
return sanitize_url("http://" + url)
# Upstream code will accept a URL like javascript://foo because it
# appears to have a netloc. Additionally there are plenty of other
# schemes that do weird things like launch external programs. To be
# on the safe side, we allow a fixed set of schemes.
if scheme not in allowed_schemes:
return None
# Upstream code scans path, parameters, and query for colon characters
# because
#
# some aliases [for javascript:] will appear to urllib.parse to have
# no scheme. On top of that relative links (i.e.: "foo/bar.html")
# have no scheme.
#
# We already converted an empty scheme to http:// above, so we skip
# the colon check, which would also forbid a lot of legitimate URLs.
# URL passes all tests. Return URL as-is.
return urlunsplit((scheme, netloc, path, query, fragment)) |
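A few illustrative inputs and outputs, assuming http, https, and mailto are among allowed_schemes:
sanitize_url("javascript:alert(1)")    # None: scheme not in allowed_schemes
sanitize_url("zulip.com/some page")    # "http://zulip.com/some%20page": scheme added, space encoded
sanitize_url("user@example.com")       # "mailto:user@example.com"
sanitize_url("/integrations/")         # "/integrations/": domain-relative links pass through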
Augment a linkifier so it only matches after start-of-string,
whitespace, or opening delimiters, won't match if there are word
characters directly after, and saves what was matched as
OUTER_CAPTURE_GROUP. | def prepare_linkifier_pattern(source: str) -> str:
"""Augment a linkifier so it only matches after start-of-string,
whitespace, or opening delimiters, won't match if there are word
characters directly after, and saves what was matched as
OUTER_CAPTURE_GROUP."""
# This NEL character (0x85) is interpolated via a variable,
# because r"" strings cannot use backslash escapes.
next_line = "\u0085"
# We use an extended definition of 'whitespace' which is
# equivalent to \p{White_Space} -- since \s in re2 only matches
# ASCII spaces, and re2 does not support \p{White_Space}.
return rf"""(?P<{BEFORE_CAPTURE_GROUP}>^|\s|{next_line}|\pZ|['"\(,:<])(?P<{OUTER_CAPTURE_GROUP}>{source})(?P<{AFTER_CAPTURE_GROUP}>$|[^\pL\pN])""" |
Convert Markdown to HTML, with Zulip-specific settings and hacks. | def do_convert(
content: str,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
message: Optional[Message] = None,
message_realm: Optional[Realm] = None,
sent_by_bot: bool = False,
translate_emoticons: bool = False,
url_embed_data: Optional[Dict[str, Optional[UrlEmbedData]]] = None,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
no_previews: bool = False,
) -> MessageRenderingResult:
"""Convert Markdown to HTML, with Zulip-specific settings and hacks."""
# This logic is a bit convoluted, but the overall goal is to support a range of use cases:
# * Nothing is passed in other than content -> just run default options (e.g. for docs)
# * message is passed, but no realm is -> look up realm from message
# * message_realm is passed -> use that realm for Markdown purposes
if message is not None and message_realm is None:
message_realm = message.get_realm()
if message_realm is None:
linkifiers_key = DEFAULT_MARKDOWN_KEY
else:
linkifiers_key = message_realm.id
if message and hasattr(message, "id") and message.id:
logging_message_id = "id# " + str(message.id)
else:
logging_message_id = "unknown"
if (
message is not None
and message_realm is not None
and message_realm.is_zephyr_mirror_realm
and message.sending_client.name == "zephyr_mirror"
):
# Use slightly customized Markdown processor for content
# delivered via zephyr_mirror
linkifiers_key = ZEPHYR_MIRROR_MARKDOWN_KEY
maybe_update_markdown_engines(linkifiers_key, email_gateway)
md_engine_key = (linkifiers_key, email_gateway)
_md_engine = md_engines[md_engine_key]
# Reset the parser; otherwise it will get slower over time.
_md_engine.reset()
# Filters such as UserMentionPattern need a message.
rendering_result: MessageRenderingResult = MessageRenderingResult(
rendered_content="",
mentions_topic_wildcard=False,
mentions_stream_wildcard=False,
mentions_user_ids=set(),
mentions_user_group_ids=set(),
alert_words=set(),
links_for_preview=set(),
user_ids_with_alert_words=set(),
potential_attachment_path_ids=[],
)
_md_engine.zulip_message = message
_md_engine.zulip_rendering_result = rendering_result
_md_engine.zulip_realm = message_realm
_md_engine.zulip_db_data = None # for now
_md_engine.image_preview_enabled = image_preview_enabled(message, message_realm, no_previews)
_md_engine.url_embed_preview_enabled = url_embed_preview_enabled(
message, message_realm, no_previews
)
_md_engine.url_embed_data = url_embed_data
# Pre-fetch data from the DB that is used in the Markdown thread
if message_realm is not None:
# Here we fetch the data structures needed to render
# mentions/stream mentions from the database, but only
# if there is syntax in the message that might use them, since
# the fetches are somewhat expensive and these types of syntax
# are uncommon enough that it's a useful optimization.
if mention_data is None:
mention_backend = MentionBackend(message_realm.id)
message_sender = None
if message is not None:
message_sender = message.sender
mention_data = MentionData(mention_backend, content, message_sender)
stream_names = possible_linked_stream_names(content)
stream_name_info = mention_data.get_stream_name_map(stream_names)
if content_has_emoji_syntax(content):
active_realm_emoji = get_name_keyed_dict_for_active_realm_emoji(message_realm.id)
else:
active_realm_emoji = {}
_md_engine.zulip_db_data = DbData(
realm_alert_words_automaton=realm_alert_words_automaton,
mention_data=mention_data,
active_realm_emoji=active_realm_emoji,
realm_uri=message_realm.uri,
sent_by_bot=sent_by_bot,
stream_names=stream_name_info,
translate_emoticons=translate_emoticons,
)
try:
# Spend at most 5 seconds rendering; this protects the backend
# from being overloaded by bugs (e.g. Markdown logic that is
# extremely inefficient in corner cases) as well as user
# errors (e.g. a linkifier that makes some syntax
# infinite-loop).
rendering_result.rendered_content = unsafe_timeout(5, lambda: _md_engine.convert(content))
# Throw an exception if the content is huge; this protects the
# rest of the codebase from any bugs where we end up rendering
# something huge.
MAX_MESSAGE_LENGTH = settings.MAX_MESSAGE_LENGTH
if len(rendering_result.rendered_content) > MAX_MESSAGE_LENGTH * 100:
raise MarkdownRenderingError(
f"Rendered content exceeds {MAX_MESSAGE_LENGTH * 100} characters (message {logging_message_id})"
)
return rendering_result
except Exception:
cleaned = privacy_clean_markdown(content)
markdown_logger.exception(
"Exception in Markdown parser; input (sanitized) was: %s\n (message %s)",
cleaned,
logging_message_id,
)
raise MarkdownRenderingError
finally:
# These next three lines are slightly paranoid, since
# we always set these right before actually using the
# engine, but better safe than sorry.
_md_engine.zulip_message = None
_md_engine.zulip_realm = None
_md_engine.zulip_db_data = None |
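Per the use cases listed at the top of the function, the simplest invocation passes only content and runs with the default options (e.g. when rendering documentation); a sketch:
# Default-options path: no message, no realm.
result = do_convert("Check out the **Markdown** docs!")
html = result.rendered_content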
This is basically just a wrapper for markdown_convert. | def render_message_markdown(
message: Message,
content: str,
realm: Optional[Realm] = None,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
url_embed_data: Optional[Dict[str, Optional[UrlEmbedData]]] = None,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
) -> MessageRenderingResult:
"""
This is basically just a wrapper for markdown_convert.
"""
if realm is None:
realm = message.get_realm()
sender = message.sender
sent_by_bot = sender.is_bot
translate_emoticons = sender.translate_emoticons
rendering_result = markdown_convert(
content,
realm_alert_words_automaton=realm_alert_words_automaton,
message=message,
message_realm=realm,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
url_embed_data=url_embed_data,
mention_data=mention_data,
email_gateway=email_gateway,
)
return rendering_result |
Sanitizes a value to be safe to store in a Linux filesystem, in
S3, and in a URL. So Unicode is allowed, but not special
characters other than ".", "-", and "_".
This implementation is based on django.utils.text.slugify; it is
modified by:
* adding '.' to the list of allowed characters.
* preserving the case of the value.
* not stripping trailing dashes and underscores. | def sanitize_name(value: str) -> str:
"""
Sanitizes a value to be safe to store in a Linux filesystem, in
S3, and in a URL. So Unicode is allowed, but not special
characters other than ".", "-", and "_".
This implementation is based on django.utils.text.slugify; it is
modified by:
* adding '.' to the list of allowed characters.
* preserving the case of the value.
* not stripping trailing dashes and underscores.
"""
value = unicodedata.normalize("NFKC", value)
value = re.sub(r"[^\w\s.-]", "", value).strip()
value = re.sub(r"[-\s]+", "-", value)
if value in {"", ".", ".."}:
return "uploaded-file"
return value |
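A couple of illustrative transformations:
sanitize_name("quarterly report (final).pdf")  # "quarterly-report-final.pdf"
sanitize_name("..")                            # "uploaded-file"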
Verify that we are only reading and writing files under the
expected paths. This is expected to be already enforced at other
layers, via cleaning of user input, but we assert it here for
defense in depth. | def assert_is_local_storage_path(type: Literal["avatars", "files"], full_path: str) -> None:
"""
Verify that we are only reading and writing files under the
expected paths. This is expected to be already enforced at other
layers, via cleaning of user input, but we assert it here for
defense in depth.
"""
assert settings.LOCAL_UPLOADS_DIR is not None
type_path = os.path.join(settings.LOCAL_UPLOADS_DIR, type)
assert os.path.commonpath([type_path, full_path]) == type_path |
This method can be used to standardize a dictionary of headers with
the standard format that Django expects. For reference, refer to:
https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.headers
NOTE: Historically, Django's headers were not case-insensitive. We're still
capitalizing our headers to make it easier to compare/search later if required. | def standardize_headers(input_headers: Union[None, Dict[str, Any]]) -> Dict[str, str]:
"""This method can be used to standardize a dictionary of headers with
the standard format that Django expects. For reference, refer to:
https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.headers
NOTE: Historically, Django's headers were not case-insensitive. We're still
capitalizing our headers to make it easier to compare/search later if required.
"""
canonical_headers = {}
if not input_headers:
return {}
for raw_header in input_headers:
polished_header = raw_header.upper().replace("-", "_")
if polished_header not in [
"CONTENT_TYPE",
"CONTENT_LENGTH",
] and not polished_header.startswith("HTTP_"):
polished_header = "HTTP_" + polished_header
canonical_headers[polished_header] = str(input_headers[raw_header])
return canonical_headers |
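A worked example:
standardize_headers({"Content-Type": "application/json", "X-Custom-Header": "value"})
# -> {"CONTENT_TYPE": "application/json", "HTTP_X_CUSTOM_HEADER": "value"}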
For integrations that require custom HTTP headers for some (or all)
of their test fixtures, this method will call a specially named
function from the target integration module to determine what set
of HTTP headers goes with the given test fixture. | def get_fixture_http_headers(integration_name: str, fixture_name: str) -> Dict["str", "str"]:
"""For integrations that require custom HTTP headers for some (or all)
of their test fixtures, this method will call a specially named
function from the target integration module to determine what set
of HTTP headers goes with the given test fixture.
"""
view_module_name = f"zerver.webhooks.{integration_name}.view"
try:
# TODO: We may want to migrate to a more explicit registration
# strategy for this behavior rather than a try/except import.
view_module = importlib.import_module(view_module_name)
fixture_to_headers = view_module.fixture_to_headers
except (ImportError, AttributeError):
return {}
return fixture_to_headers(fixture_name) |
If an integration requires an event type kind of HTTP header which can
be easily (statically) determined, then name the fixtures in the format
of "header_value__other_details" or even "header_value" and then use this
method in the headers.py file for the integration. | def get_http_headers_from_filename(http_header_key: str) -> Callable[[str], Dict[str, str]]:
"""If an integration requires an event type kind of HTTP header which can
be easily (statically) determined, then name the fixtures in the format
of "header_value__other_details" or even "header_value" and then use this
method in the headers.py file for the integration."""
def fixture_to_headers(filename: str) -> Dict[str, str]:
if "__" in filename:
event_type = filename.split("__")[0]
else:
event_type = filename
return {http_header_key: event_type}
return fixture_to_headers |
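For example, a webhook whose event type travels in an X-Event-Key header could use (the header key shown is illustrative):
fixture_to_headers = get_http_headers_from_filename("HTTP_X_EVENT_KEY")
fixture_to_headers("push__add_commits")  # {"HTTP_X_EVENT_KEY": "push"}
fixture_to_headers("ping")               # {"HTTP_X_EVENT_KEY": "ping"}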
If an integration requires time input in unix milliseconds, this helper
checks to ensure correct type and will catch any errors related to type or
value and raise a JsonableError.
Returns a datetime representing the time. | def unix_milliseconds_to_timestamp(milliseconds: Any, webhook: str) -> datetime:
"""If an integration requires time input in unix milliseconds, this helper
checks to ensure correct type and will catch any errors related to type or
value and raise a JsonableError.
Returns a datetime representing the time."""
try:
# timestamps are in milliseconds so divide by 1000
seconds = milliseconds / 1000
return timestamp_to_datetime(seconds)
except (ValueError, TypeError):
raise JsonableError(
_("The {webhook} webhook expects time in milliseconds.").format(webhook=webhook)
) |
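For instance (the webhook name passed for the error message is arbitrary):
unix_milliseconds_to_timestamp(1704067200000, "examplewebhook")
# -> datetime for 2024-01-01 00:00:00 UTC
unix_milliseconds_to_timestamp("soon", "examplewebhook")
# -> raises JsonableError about expecting time in milliseconds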
Migration 0041 had a bug, where if multiple messages referenced the
same attachment, rather than creating a single attachment object
for all of them, we would incorrectly create one for each message.
This results in exceptions looking up the Attachment object
corresponding to a file that was used in multiple messages that
predate migration 0041.
This migration fixes this by removing the duplicates, moving their
messages onto a single canonical Attachment object (per path_id). | def fix_duplicate_attachments(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""Migration 0041 had a bug, where if multiple messages referenced the
same attachment, rather than creating a single attachment object
for all of them, we would incorrectly create one for each message.
This results in exceptions looking up the Attachment object
corresponding to a file that was used in multiple messages that
predate migration 0041.
This migration fixes this by removing the duplicates, moving their
messages onto a single canonical Attachment object (per path_id).
"""
Attachment = apps.get_model("zerver", "Attachment")
# Loop through all groups of Attachment objects with the same `path_id`
for group in (
Attachment.objects.values("path_id")
.annotate(Count("id"))
.order_by()
.filter(id__count__gt=1)
):
# Sort by the minimum message ID, to find the first attachment
attachments = sorted(
Attachment.objects.filter(path_id=group["path_id"]).order_by("id"),
key=lambda x: min(x.messages.all().values_list("id")[0]),
)
surviving = attachments[0]
to_cleanup = attachments[1:]
for a in to_cleanup:
# For each duplicate attachment, we transfer its messages
# to the canonical attachment object for that path, and
# then delete the original attachment.
for msg in a.messages.all():
surviving.messages.add(msg)
surviving.is_realm_public = surviving.is_realm_public or a.is_realm_public
surviving.save()
a.delete() |
Delete any old scheduled jobs, to handle changes in the format of
that table. Ideally, we'd translate the jobs, but it's not really
worth the development effort to save a few invitation reminders
and day2 followup emails. | def delete_old_scheduled_jobs(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""Delete any old scheduled jobs, to handle changes in the format of
that table. Ideally, we'd translate the jobs, but it's not really
worth the development effort to save a few invitation reminders
and day2 followup emails.
"""
ScheduledJob = apps.get_model("zerver", "ScheduledJob")
ScheduledJob.objects.all().delete() |
Delete any old scheduled jobs, to handle changes in the format of
send_email. Ideally, we'd translate the jobs, but it's not really
worth the development effort to save a few invitation reminders
and day2 followup emails. | def delete_old_scheduled_jobs(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""Delete any old scheduled jobs, to handle changes in the format of
send_email. Ideally, we'd translate the jobs, but it's not really
worth the development effort to save a few invitation reminders
and day2 followup emails.
"""
ScheduledJob = apps.get_model("zerver", "ScheduledJob")
ScheduledJob.objects.all().delete() |
Fixes UserProfile objects that incorrectly had a bot_owner set | def migrate_fix_invalid_bot_owner_values(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
"""Fixes UserProfile objects that incorrectly had a bot_owner set"""
UserProfile = apps.get_model("zerver", "UserProfile")
UserProfile.objects.filter(is_bot=False).exclude(bot_owner=None).update(bot_owner=None) |
With CVE-2019-18933, it was possible for certain users created
using social login (e.g. Google/GitHub auth) to have the empty
string as their password in the Zulip database, rather than
Django's "unusable password" (i.e. no password at all). This was a
serious security issue for organizations with both password and
Google/GitHub authentication enabled.
Combined with the code changes to prevent new users from entering
this buggy state, this migration sets the intended "no password"
state for any users who are in this buggy state, as had been
intended.
While this bug was discovered by our own development team and we
believe it hasn't been exploited in the wild, out of an abundance
of caution, this migration also resets the personal API keys for
all users where Zulip's database-level logging cannot **prove**
that user's current personal API key was never accessed using this
bug.
There are a few ways this can be proven: (1) the user's password
has never been changed and is not the empty string,
or (2) the user's personal API key has changed since that user last
changed their password (which is not ''). Both constitute proof
because this bug cannot be used to gain the access required to change
or reset a user's password.
Resetting those API keys has the effect of logging many users out
of the Zulip mobile and terminal apps unnecessarily (e.g. because
the user changed their password at any point in the past, even
though the user never was affected by the bug), but we're
comfortable with that cost for ensuring that this bug is
completely fixed.
To avoid this inconvenience for self-hosted servers which don't
even have EmailAuthBackend enabled, we skip resetting any API keys
if the server doesn't have EmailAuthBackend configured. | def ensure_no_empty_passwords(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""With CVE-2019-18933, it was possible for certain users created
using social login (e.g. Google/GitHub auth) to have the empty
string as their password in the Zulip database, rather than
Django's "unusable password" (i.e. no password at all). This was a
serious security issue for organizations with both password and
Google/GitHub authentication enabled.
Combined with the code changes to prevent new users from entering
this buggy state, this migration sets the intended "no password"
state for any users who are in this buggy state, as had been
intended.
While this bug was discovered by our own development team and we
believe it hasn't been exploited in the wild, out of an abundance
of caution, this migration also resets the personal API keys for
all users where Zulip's database-level logging cannot **prove**
that user's current personal API key was never accessed using this
bug.
There are a few ways this can be proven: (1) the user's password
has never been changed and is not the empty string,
or (2) the user's personal API key has changed since that user last
changed their password (which is not ''). Both constitute proof
because this bug cannot be used to gain the access required to change
or reset a user's password.
Resetting those API keys has the effect of logging many users out
of the Zulip mobile and terminal apps unnecessarily (e.g. because
the user changed their password at any point in the past, even
though the user never was affected by the bug), but we're
comfortable with that cost for ensuring that this bug is
completely fixed.
To avoid this inconvenience for self-hosted servers which don't
even have EmailAuthBackend enabled, we skip resetting any API keys
if the server doesn't have EmailAuthBackend configured.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
RealmAuditLog = apps.get_model("zerver", "RealmAuditLog")
# Because we're backporting this migration to the Zulip 2.0.x
# series, we've given it migration number 0209, which is a
# duplicate with an existing migration already merged into Zulip
# main. Migration 0247_realmauditlog_event_type_to_int.py
# changes the format of RealmAuditLog.event_type, so we need the
# following conditional block to determine what values to use when
# searching for the relevant events in that log.
event_type_class = RealmAuditLog._meta.get_field("event_type").get_internal_type()
if event_type_class == "CharField":
USER_PASSWORD_CHANGED: Union[int, str] = "user_password_changed"
USER_API_KEY_CHANGED: Union[int, str] = "user_api_key_changed"
else:
USER_PASSWORD_CHANGED = 122
USER_API_KEY_CHANGED = 127
# First, we do some bulk queries to collect data we'll find useful
# in the loop over all users below.
# Users who changed their password at any time since account
# creation. These users could theoretically have started with an
# empty password, but set a password later via the password reset
# flow. If their API key has changed since they changed their
# password, we can prove their current API key cannot have been
# exposed; we store those users in
# password_change_user_ids_no_reset_needed.
password_change_user_ids = set(
RealmAuditLog.objects.filter(event_type=USER_PASSWORD_CHANGED).values_list(
"modified_user_id", flat=True
)
)
password_change_user_ids_api_key_reset_needed: Set[int] = set()
password_change_user_ids_no_reset_needed: Set[int] = set()
for user_id in password_change_user_ids:
# Here, we check the timing for users who have changed
# their password.
# We check if the user changed their API key since their first password change.
query = RealmAuditLog.objects.filter(
modified_user=user_id,
event_type__in=[USER_PASSWORD_CHANGED, USER_API_KEY_CHANGED],
).order_by("event_time")
earliest_password_change = query.filter(event_type=USER_PASSWORD_CHANGED).first()
# Since these users are in password_change_user_ids, this must not be None.
assert earliest_password_change is not None
latest_api_key_change = query.filter(event_type=USER_API_KEY_CHANGED).last()
if latest_api_key_change is None:
# This user has never changed their API key. As a
# result, even though it's very likely this user never
# had an empty password, they have changed their
# password, and we have no record of the password's
# original hash, so we can't prove the user's API key
# was never affected. We schedule this user's API key
# to be reset.
password_change_user_ids_api_key_reset_needed.add(user_id)
elif earliest_password_change.event_time <= latest_api_key_change.event_time:
# This user has changed their password before
# generating their current personal API key, so we can
# prove their current personal API key could not have
# been exposed by this bug.
password_change_user_ids_no_reset_needed.add(user_id)
else:
password_change_user_ids_api_key_reset_needed.add(user_id)
if password_change_user_ids_no_reset_needed and settings.PRODUCTION:
# We record in this log file users whose current API key was
# generated after a real password was set, so there's no need
# to reset their API key, but because they've changed their
# password, we don't know whether or not they originally had a
# buggy password.
#
# In theory, this list can be recalculated using the above
# algorithm modified to only look at events before the time
# this migration was installed, but it's helpful to log it as well.
with open("/var/log/zulip/0209_password_migration.log", "w") as log_file:
line = "No reset needed, but changed password: {}\n"
log_file.write(line.format(password_change_user_ids_no_reset_needed))
AFFECTED_USER_TYPE_EMPTY_PASSWORD = "empty_password"
AFFECTED_USER_TYPE_CHANGED_PASSWORD = "changed_password"
MIGRATION_ID = "0209_user_profile_no_empty_password"
def write_realm_audit_log_entry(
user_profile: Any, event_time: Any, event_type: Any, affected_user_type: str
) -> None:
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
event_type=event_type,
event_time=event_time,
extra_data=orjson.dumps(
{
"migration_id": MIGRATION_ID,
"affected_user_type": affected_user_type,
}
).decode(),
)
# If Zulip's built-in password authentication is not enabled on
# the server level, then we plan to skip resetting any users' API
# keys, since the bug requires EmailAuthBackend.
email_auth_enabled = "zproject.backends.EmailAuthBackend" in settings.AUTHENTICATION_BACKENDS
# A quick note: This query could in theory exclude users with
# is_active=False, is_bot=True, or realm__deactivated=True here to
# access only active human users in non-deactivated realms.
# But it's better to just be thorough; users can be reactivated,
# and e.g. a server admin could manually edit the database to
# change a bot into a human user if they really wanted to. And
# there's essentially no harm in rewriting state for a deactivated
# account.
for user_profile in UserProfile.objects.all():
event_time = timezone_now()
if check_password("", user_profile.password):
# This user currently has the empty string as their password.
# Change their password and record that we did so.
user_profile.password = make_password(None)
update_fields = ["password"]
write_realm_audit_log_entry(
user_profile, event_time, USER_PASSWORD_CHANGED, AFFECTED_USER_TYPE_EMPTY_PASSWORD
)
if email_auth_enabled and not user_profile.is_bot:
# As explained above, if the built-in password authentication
# is enabled, reset the API keys. We can skip bot accounts here,
# because the `password` attribute on a bot user is useless.
reset_user_api_key(user_profile)
update_fields.append("api_key")
event_time = timezone_now()
write_realm_audit_log_entry(
user_profile,
event_time,
USER_API_KEY_CHANGED,
AFFECTED_USER_TYPE_EMPTY_PASSWORD,
)
user_profile.save(update_fields=update_fields)
continue
elif (
email_auth_enabled and user_profile.id in password_change_user_ids_api_key_reset_needed
):
# For these users, we just need to reset the API key.
reset_user_api_key(user_profile)
user_profile.save(update_fields=["api_key"])
write_realm_audit_log_entry(
user_profile, event_time, USER_API_KEY_CHANGED, AFFECTED_USER_TYPE_CHANGED_PASSWORD
) |
This migration fixes any PreregistrationUser objects that might
have been already corrupted to have the administrator role by the
buggy original version of migration
0198_preregistrationuser_invited_as.
Since invitations that create new users as administrators are
rare, it is cleaner to just remove the role from all
PreregistrationUser objects than to filter for just those older
invitation objects that could have been corrupted by the original
migration, which would have been possible using the
django_migrations table to check the date when the buggy migration
was run. | def clear_preregistrationuser_invited_as_admin(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
"""This migration fixes any PreregistrationUser objects that might
have been already corrupted to have the administrator role by the
buggy original version of migration
0198_preregistrationuser_invited_as.
Since invitations that create new users as administrators are
rare, it is cleaner to just remove the role from all
PreregistrationUser objects than to filter for just those older
invitation objects that could have been corrupted by the original
migration, which would have been possible using the
django_migrations table to check the date when the buggy migration
was run.
"""
INVITED_AS_MEMBER = 1
INVITED_AS_REALM_ADMIN = 2
PreregistrationUser = apps.get_model("zerver", "PreregistrationUser")
PreregistrationUser.objects.filter(invited_as=INVITED_AS_REALM_ADMIN).update(
invited_as=INVITED_AS_MEMBER
) |
Conceptually, this migration cleans up the old NEW_USER_BOT and FEEDBACK_BOT
UserProfile objects (their implementations were removed long ago).
We do this by:
* Changing their sent messages to have been sent by NOTIFICATION_BOT.
* Changing their 1:1 PMs to be PMs with NOTIFICATION_BOT and deleting their
PM recipient object.
* Deleting any Huddles that involve them (zulip.com data suggests there are none,
so this is mainly out of caution) and Recipient objects (which will cascade to
associated Subscription, Message, and UserMessage objects if they exist).
* Deleting their UserProfile objects.
The end result is that these users are completely removed, with any
messages that might have been associated with them transferred
to NOTIFICATION_BOT to preserve history. | def fix_messages(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""Conceptually, this migration cleans up the old NEW_USER_BOT and FEEDBACK_BOT
UserProfile objects (their implementations were removed long ago).
We do this by:
* Changing their sent messages to have been sent by NOTIFICATION_BOT.
* Changing their 1:1 PMs to be PMs with NOTIFICATION_BOT and deleting their
PM recipient object.
* Deleting any Huddles that involve them (zulip.com data suggests there are none,
so this is mainly out of caution) and Recipient objects (which will cascade to
associated Subscription, Message, and UserMessage objects if they exist).
* Deleting their UserProfile objects.
The end result is that these users are completely removed, with any
messages that might have been associated with them transferred
to NOTIFICATION_BOT to preserve history.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
Huddle = apps.get_model("zerver", "Huddle")
Subscription = apps.get_model("zerver", "Subscription")
Recipient = apps.get_model("zerver", "Recipient")
RECIPIENT_HUDDLE = 3
Message = apps.get_model("zerver", "Message")
Realm = apps.get_model("zerver", "Realm")
try:
internal_realm = Realm.objects.get(string_id=settings.SYSTEM_BOT_REALM)
except Realm.DoesNotExist:
# Server not initialized, or no system bot realm. Either way, we shouldn't do anything.
return
def get_bot_by_delivery_email(email: str) -> Any:
return UserProfile.objects.get(delivery_email__iexact=email.strip(), realm=internal_realm)
notification_bot = get_bot_by_delivery_email(settings.NOTIFICATION_BOT)
def fix_messages_by_bot(bot_profile: Any) -> None:
Message.objects.filter(sender=bot_profile).update(sender=notification_bot)
Message.objects.filter(recipient=bot_profile.recipient).update(
recipient=notification_bot.recipient
)
def clean_up_bot(bot_profile: Any) -> None:
huddle_recipient_ids = Subscription.objects.filter(
user_profile_id=bot_profile.id, recipient__type=RECIPIENT_HUDDLE
).values_list("recipient_id", flat=True)
Huddle.objects.filter(recipient_id__in=huddle_recipient_ids).delete()
Recipient.objects.filter(id__in=huddle_recipient_ids).delete()
personal_recipient_id = bot_profile.recipient_id
bot_profile.delete()
Recipient.objects.filter(id=personal_recipient_id).delete()
new_user_bot_email = getattr(settings, "NEW_USER_BOT", "[email protected]")
try:
new_user_bot = get_bot_by_delivery_email(new_user_bot_email)
fix_messages_by_bot(new_user_bot)
clean_up_bot(new_user_bot)
except UserProfile.DoesNotExist:
pass
feedback_bot_email = getattr(settings, "FEEDBACK_BOT", "[email protected]")
try:
feedback_bot = get_bot_by_delivery_email(feedback_bot_email)
fix_messages_by_bot(feedback_bot)
clean_up_bot(feedback_bot)
except UserProfile.DoesNotExist:
pass |
Zulip's data model for reactions has enforced via code,
nontransactionally, that they can only react with one emoji_code
for a given reaction_type. This fixes any that were stored in the
database via a race; the next migration will add the appropriate
database-level unique constraint. | def clear_duplicate_reactions(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""Zulip's data model for reactions has enforced via code,
nontransactionally, that they can only react with one emoji_code
for a given reaction_type. This fixes any that were stored in the
database via a race; the next migration will add the appropriate
database-level unique constraint.
"""
Reaction = apps.get_model("zerver", "Reaction")
duplicate_reactions = (
Reaction.objects.all()
.values("user_profile_id", "message_id", "reaction_type", "emoji_code")
.annotate(Count("id"))
.filter(id__count__gt=1)
)
for duplicate_reaction in duplicate_reactions:
duplicate_reaction.pop("id__count")
to_cleanup = Reaction.objects.filter(**duplicate_reaction)[1:]
for reaction in to_cleanup:
reaction.delete() |
This migration fixes two issues with the RealmAuditLog format for certain event types:
* The notifications_stream and signup_notifications_stream fields had the
Stream objects passed into `ujson.dumps()` and thus marshalled as a giant
JSON object, when the intent was to store the stream ID.
* The default_sending_stream entries would also have been marshalled wrong, but they are part
of a feature that nobody should be using, so we simply assert that's the case.
* Changes the structure of the extra_data JSON dictionaries for those
RealmAuditLog entries with a sub-property field from:
{
OLD_VALUE: {"property": property, "value": old_value},
NEW_VALUE: {"property": property, "value": new_value},
}
to the more natural:
{
OLD_VALUE: old_value,
NEW_VALUE: new_value,
"property": property,
} | def update_realmauditlog_values(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""
This migration fixes two issues with the RealmAuditLog format for certain event types:
* The notifications_stream and signup_notifications_stream fields had the
Stream objects passed into `ujson.dumps()` and thus marshalled as a giant
JSON object, when the intent was to store the stream ID.
* The default_sending_stream entries would also have been marshalled wrong, but they are part
of a feature that nobody should be using, so we simply assert that's the case.
* Changes the structure of the extra_data JSON dictionaries for those
RealmAuditLog entries with a sub-property field from:
{
OLD_VALUE: {"property": property, "value": old_value},
NEW_VALUE: {"property": property, "value": new_value},
}
to the more natural:
{
OLD_VALUE: old_value,
NEW_VALUE: new_value,
"property": property,
}
"""
RealmAuditLog = apps.get_model("zerver", "RealmAuditLog")
# Constants from models/realm_audit_logs.py
USER_DEFAULT_SENDING_STREAM_CHANGED = 129
USER_DEFAULT_REGISTER_STREAM_CHANGED = 130
USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED = 131
# Note that this was renamed to USER_SETTING_CHANGED sometime
# after this migration; we preserve the original name here to
# highlight that as of this migration, only notification settings
# had RealmAuditLog entries for changes.
USER_NOTIFICATION_SETTINGS_CHANGED = 132
REALM_PROPERTY_CHANGED = 207
SUBSCRIPTION_PROPERTY_CHANGED = 304
OLD_VALUE = "1"
NEW_VALUE = "2"
unlikely_event_types = [
USER_DEFAULT_SENDING_STREAM_CHANGED,
USER_DEFAULT_REGISTER_STREAM_CHANGED,
USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED,
]
# These 3 event types are the ones that used a format with
# OLD_VALUE containing a dictionary with a `property` key.
affected_event_types = [
REALM_PROPERTY_CHANGED,
USER_NOTIFICATION_SETTINGS_CHANGED,
SUBSCRIPTION_PROPERTY_CHANGED,
]
improperly_marshalled_properties = [
"notifications_stream",
"signup_notifications_stream",
]
# These are also corrupted but are part of a feature nobody uses,
# so it's not worth writing code to fix them.
assert not RealmAuditLog.objects.filter(event_type__in=unlikely_event_types).exists()
for ra in RealmAuditLog.objects.filter(event_type__in=affected_event_types):
extra_data = json.loads(ra.extra_data)
old_key = extra_data[OLD_VALUE]
new_key = extra_data[NEW_VALUE]
# Skip any already-migrated values in case we're running this
# migration a second time.
if not isinstance(old_key, dict) and not isinstance(new_key, dict):
continue
if "value" not in old_key or "value" not in new_key:
continue
old_value = old_key["value"]
new_value = new_key["value"]
prop = old_key["property"]
# The `authentication_methods` key is the only event whose
# action value type is expected to be a dictionary. That
# property is marshalled properly but still wants the second
# migration below.
if prop != "authentication_methods":
# For the other properties, we have `stream` rather than `stream['id']`
# in the original extra_data object; the fix is simply to extract
# the intended ID field via `value = value['id']`.
if isinstance(old_value, dict):
assert prop in improperly_marshalled_properties
old_value = old_value["id"]
if isinstance(new_value, dict):
assert prop in improperly_marshalled_properties
new_value = new_value["id"]
# Sanity check that the original event has exactly the keys we expect.
assert set(extra_data.keys()) <= {OLD_VALUE, NEW_VALUE}
ra.extra_data = json.dumps(
{
OLD_VALUE: old_value,
NEW_VALUE: new_value,
"property": prop,
}
)
ra.save(update_fields=["extra_data"]) |
Taken from zerver.models. Adjusted to work in a migration without changing
behavior. | def get_fake_email_domain(realm: Any) -> str:
"""
Taken from zerver.models. Adjusted to work in a migration without changing
behavior.
"""
try:
# Check that realm.host can be used to form valid email addresses.
realm_host = host_for_subdomain(realm.string_id)
validate_email(Address(username="bot", domain=realm_host).addr_spec)
return realm_host
except ValidationError:
pass
try:
# Check that the fake email domain can be used to form valid email addresses.
validate_email(Address(username="bot", domain=settings.FAKE_EMAIL_DOMAIN).addr_spec)
except ValidationError:
raise Exception(
settings.FAKE_EMAIL_DOMAIN + " is not a valid domain. "
"Consider setting the FAKE_EMAIL_DOMAIN setting."
)
return settings.FAKE_EMAIL_DOMAIN |
do_delete_users had two bugs:
1. Creating the replacement dummy users with active=True
2. Creating the replacement dummy users with email domain set to realm.uri,
which may not be a valid email domain.
Prior commits fixed the bugs, and this migration fixes the pre-existing objects. | def fix_dummy_users(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""
do_delete_users had two bugs:
1. Creating the replacement dummy users with active=True
2. Creating the replacement dummy users with email domain set to realm.uri,
which may not be a valid email domain.
Prior commits fixed the bugs, and this migration fixes the pre-existing objects.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
Subscription = apps.get_model("zerver", "Subscription")
users_to_fix = UserProfile.objects.filter(
is_mirror_dummy=True, is_active=True, delivery_email__regex=r"^deleteduser\d+@.+"
)
update_fields = ["is_active"]
for user_profile in users_to_fix:
user_profile.is_active = False
try:
validate_email(user_profile.delivery_email)
except ValidationError:
user_profile.delivery_email = Address(
username=f"deleteduser{user_profile.id}",
domain=get_fake_email_domain(user_profile.realm),
).addr_spec
update_fields.append("delivery_email")
UserProfile.objects.bulk_update(users_to_fix, update_fields)
# The denormalized is_user_active field needs to be updated too.
Subscription.objects.filter(user_profile__in=users_to_fix).update(is_user_active=False) |
This migration establishes the invariant that all RealmEmoji objects have .author set
and queues events for reuploading all RealmEmoji. | def set_emoji_author(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""
This migration establishes the invariant that all RealmEmoji objects have .author set
and queues events for reuploading all RealmEmoji.
"""
RealmEmoji = apps.get_model("zerver", "RealmEmoji")
UserProfile = apps.get_model("zerver", "UserProfile")
ROLE_REALM_OWNER = 100
RealmEmoji.objects.filter(author=None).update(
author=Subquery(
UserProfile.objects.filter(
realm=OuterRef("realm"), is_active=True, role=ROLE_REALM_OWNER
)
.order_by("id")[:1]
.values("pk")
)
) |
Migrate edit history events for the messages in the provided range to:
* Rename prev_subject => prev_topic.
* Provide topic and stream fields with the current values.
The range of message IDs to be processed is inclusive on both ends. | def backfill_message_edit_history_chunk(
first_id: int, last_id: int, message_model: Type[Any]
) -> None:
"""
Migrate edit history events for the messages in the provided range to:
* Rename prev_subject => prev_topic.
* Provide topic and stream fields with the current values.
The range of message IDs to be processed is inclusive on both ends.
"""
messages = (
message_model.objects.select_for_update()
.only(
"recipient",
"recipient__type",
"recipient__type_id",
"subject",
"edit_history",
)
.filter(edit_history__isnull=False, id__range=(first_id, last_id))
)
for message in messages:
legacy_edit_history: List[LegacyEditHistoryEvent] = orjson.loads(message.edit_history)
message_type = message.recipient.type
modern_edit_history: List[EditHistoryEvent] = []
# Only Stream messages have topic / stream edit history data.
if message_type == STREAM:
topic = message.subject
stream_id = message.recipient.type_id
for edit_history_event in legacy_edit_history:
modern_entry: EditHistoryEvent = {
"user_id": edit_history_event.get("user_id"),
"timestamp": edit_history_event["timestamp"],
}
if "prev_content" in edit_history_event:
modern_entry["prev_content"] = edit_history_event["prev_content"]
modern_entry["prev_rendered_content"] = edit_history_event["prev_rendered_content"]
modern_entry["prev_rendered_content_version"] = edit_history_event[
"prev_rendered_content_version"
]
if message_type == STREAM:
if "prev_subject" in edit_history_event:
# Add topic edit key/value pairs from legacy format.
modern_entry["topic"] = topic
modern_entry["prev_topic"] = edit_history_event["prev_subject"]
# Because edit_history is ordered chronologically,
# most recent to least recent, we set the topic
# variable to the `prev_topic` value for this edit
# for any subsequent topic edits in the loop.
topic = edit_history_event["prev_subject"]
elif "prev_topic" in edit_history_event:
# Add topic edit key/value pairs from modern format.
modern_entry["topic"] = topic
modern_entry["prev_topic"] = edit_history_event["prev_topic"]
# Same logic as above but for modern format.
topic = edit_history_event["prev_topic"]
if "prev_stream" in edit_history_event:
# Add stream edit key/value pairs.
modern_entry["stream"] = stream_id
modern_entry["prev_stream"] = edit_history_event["prev_stream"]
# Same logic as above for the topic variable.
stream_id = edit_history_event["prev_stream"]
modern_edit_history.append(modern_entry)
message.edit_history = orjson.dumps(modern_edit_history).decode()
message_model.objects.bulk_update(messages, ["edit_history"]) |
As detailed in https://github.com/zulip/zulip/issues/21608, it is
possible for the deferred_work queue from Zulip 4.x to have been
started up by puppet during the deployment before migrations were
run on Zulip 5.0.
This means that the deferred_work events originally produced by
migration 0376 might have been processed and discarded without
effect.
That code has been removed from the 0376 migration, and we run it
here, after the upgrade code has been fixed; servers which already
processed that migration might at worst do this work twice, which
is harmless aside from being a small waste of resources. | def reupload_realm_emoji(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""As detailed in https://github.com/zulip/zulip/issues/21608, it is
possible for the deferred_work queue from Zulip 4.x to have been
started up by puppet during the deployment before migrations were
run on Zulip 5.0.
This means that the deferred_work events originally produced by
migration 0376 might have been processed and discarded without
effect.
That code has been removed from the 0376 migration, and we run it
here, after the upgrade code has been fixed; servers which already
processed that migration might at worst do this work twice, which
is harmless aside from being a small waste of resources.
"""
Realm = apps.get_model("zerver", "Realm")
if settings.TEST_SUITE:
# There are no custom emoji in the test suite data set, and
# the below code won't work because RabbitMQ isn't enabled for
# the test suite.
return
for realm_id in Realm.objects.order_by("id").values_list("id", flat=True):
event = {
"type": "reupload_realm_emoji",
"realm_id": realm_id,
}
queue_json_publish("deferred_work", event) |
Migration 0400_realmreactivationstatus changed REALM_REACTIVATION Confirmation
to have a RealmReactivationStatus instance as .content_object. Now we need to migrate
pre-existing REALM_REACTIVATION Confirmations to follow this format.
The process is a bit fiddly because Confirmation.content_object is a GenericForeignKey,
which can't be directly accessed in migration code, so changing it involves manually
updating the .object_id and .content_type attributes underpinning it.
For these old Confirmations we don't have a mechanism for tracking which have been used,
so it's safest to just revoke them all. If any users need a realm reactivation link, it
can just be re-generated. | def fix_old_realm_reactivation_confirmations(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
"""
Migration 0400_realmreactivationstatus changed REALM_REACTIVATION Confirmation
to have a RealmReactivationStatus instance as .content_object. Now we need to migrate
pre-existing REALM_REACTIVATION Confirmations to follow this format.
The process is a bit fiddly because Confirmation.content_object is a GenericForeignKey,
which can't be directly accessed in migration code, so changing it involves manually
updating the .object_id and .content_type attributes underpinning it.
For these old Confirmations we don't have a mechanism for tracking which have been used,
so it's safest to just revoke them all. If any users need a realm reactivation link, it
can just be re-generated.
"""
REALM_REACTIVATION = 8
RealmReactivationStatus = apps.get_model("zerver", "RealmReactivationStatus")
Realm = apps.get_model("zerver", "Realm")
Confirmation = apps.get_model("confirmation", "Confirmation")
ContentType = apps.get_model("contenttypes", "ContentType")
if not Confirmation.objects.filter(type=REALM_REACTIVATION).exists():
# No relevant Confirmations so nothing to do, and the database may actually
        # not be provisioned yet, which would make the code below break.
return
# .content_type of these old Confirmation will be changed to this.
realm_reactivation_status_content_type, created = ContentType.objects.get_or_create(
model="realmreactivationstatus", app_label="zerver"
)
for confirmation in Confirmation.objects.filter(type=REALM_REACTIVATION):
if confirmation.content_type_id == realm_reactivation_status_content_type.id:
# This Confirmation is already in the new format.
continue
assert confirmation.content_type.model == "realm"
realm_object_id = confirmation.object_id
# Sanity check that the realm exists.
try:
Realm.objects.get(id=realm_object_id)
except Realm.DoesNotExist:
print(
f"Confirmation {confirmation.id} is tied to realm_id {realm_object_id} which doesn't exist. "
"This is unexpected! Skipping migrating it."
)
continue
# We create the object with STATUS_REVOKED.
new_content_object = RealmReactivationStatus(realm_id=realm_object_id, status=2)
new_content_object.save()
# Now we can finally change the .content_object. This is done by setting
# .content_type to the correct ContentType as mentioned above and the object_id
# to the id of the RealmReactivationStatus instance that's supposed to be
# the content_object. This works because .content_object is dynamically
# derived by django from the .content_type and object_id values.
        confirmation.content_type = realm_reactivation_status_content_type
confirmation.object_id = new_content_object.id
confirmation.save() |
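Outside of migration code, where the GenericForeignKey is directly accessible, the relationship this loop maintains by hand can be expressed in a few lines. A minimal sketch, assuming an old-style `confirmation` and its `realm` are in scope and that RealmReactivationStatus is importable from zerver.models:

from django.contrib.contenttypes.models import ContentType

from zerver.models import RealmReactivationStatus  # import path assumed

status = RealmReactivationStatus.objects.create(realm=realm, status=2)  # STATUS_REVOKED
confirmation.content_type = ContentType.objects.get_for_model(RealmReactivationStatus)
confirmation.object_id = status.id
confirmation.save()
# .content_object is derived from (.content_type, .object_id); it is not a stored column.
assert confirmation.content_object == status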
This migration updates the emoji style for users who are using the
deprecated Google blob style. Unless they are part of an organization
which has Google blob as an organization default, these users will
now use the modern Google emoji style. | def update_deprecated_emoji_style(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""
This migration updates the emoji style for users who are using the
deprecated Google blob style. Unless they are part of an organization
which has Google blob as an organization default, these users will
now use the modern Google emoji style.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
RealmUserDefault = apps.get_model("zerver", "RealmUserDefault")
UserProfile.objects.filter(emojiset="google-blob").exclude(
realm__in=RealmUserDefault.objects.filter(emojiset="google-blob").values("realm")
).update(emojiset="google") |
This adds the "property" field to any STREAM_GROUP_BASED_SETTING_CHANGED
audit log entries that were created before the previous commit. | def fix_audit_log_objects_for_group_based_stream_settings(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
"""
    This adds the "property" field to any STREAM_GROUP_BASED_SETTING_CHANGED
audit log entries that were created before the previous commit.
"""
RealmAuditLog = apps.get_model("zerver", "RealmAuditLog")
STREAM_GROUP_BASED_SETTING_CHANGED = 608
OLD_VALUE = "1"
NEW_VALUE = "2"
for audit_log_object in RealmAuditLog.objects.filter(
event_type=STREAM_GROUP_BASED_SETTING_CHANGED
):
extra_data = json.loads(audit_log_object.extra_data)
old_value = extra_data[OLD_VALUE]
new_value = extra_data[NEW_VALUE]
audit_log_object.extra_data = json.dumps(
{
OLD_VALUE: old_value,
NEW_VALUE: new_value,
"property": "can_remove_subscribers_group",
}
)
audit_log_object.save(update_fields=["extra_data"]) |
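To make the rewrite concrete, here is a hedged before/after for a single row's extra_data; the group IDs are invented for illustration, and "1" and "2" are the OLD_VALUE and NEW_VALUE keys used above:

before = json.loads('{"1": 9, "2": 10}')
after = {**before, "property": "can_remove_subscribers_group"}
assert after == {"1": 9, "2": 10, "property": "can_remove_subscribers_group"}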
Find deleted users prior to the fix in 208c0c303405, which have
invalid delivery_email values; fixing them allows them to survive
an export/import. | def fix_invalid_emails(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""Find deleted users prior to the fix in 208c0c303405, which have
invalid delivery_email values; fixing them allows them to survive
an export/import.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
invalid_users = UserProfile.objects.filter(is_active=False).filter(
Q(delivery_email__icontains="@https://") | Q(delivery_email__icontains="@http://")
)
for invalid_user in invalid_users:
local_part = invalid_user.delivery_email.split("@")[0]
invalid_user.delivery_email = (
local_part + "@" + get_fake_email_domain(apps, invalid_user.realm_id)
)
invalid_user.save(update_fields=["delivery_email"]) |
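A hedged illustration of the rewrite above; the address and the get_fake_email_domain() result are both invented:

bad = "deleted-user-34@https://chat.example.com"
local_part = bad.split("@")[0]
fixed = local_part + "@" + "fake-domain.example.com"  # stand-in for get_fake_email_domain()
assert fixed == "deleted-user-34@fake-domain.example.com"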
Backfill subscription realm audit log events for users which are
currently subscribed but don't have any, presumably due to some
historical bug. This is important because those rows are
necessary when reactivating a user who is currently
soft-deactivated.
For each stream, we find the subscribed users who have no relevant
realm audit log entries, and create a backfill=True subscription
audit log entry which is the latest it could have been, based on
UserMessage rows. | def backfill_missing_subscriptions(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
"""Backfill subscription realm audit log events for users which are
currently subscribed but don't have any, presumably due to some
historical bug. This is important because those rows are
necessary when reactivating a user who is currently
soft-deactivated.
For each stream, we find the subscribed users who have no relevant
realm audit log entries, and create a backfill=True subscription
audit log entry which is the latest it could have been, based on
UserMessage rows.
"""
Stream = apps.get_model("zerver", "Stream")
RealmAuditLog = apps.get_model("zerver", "RealmAuditLog")
Subscription = apps.get_model("zerver", "Subscription")
UserMessage = apps.get_model("zerver", "UserMessage")
Message = apps.get_model("zerver", "Message")
def get_last_message_id() -> int:
# We generally use this function to populate RealmAuditLog, and
# the max id here is actually system-wide, not per-realm. I
# assume there's some advantage in not filtering by realm.
last_id = Message.objects.aggregate(Max("id"))["id__max"]
if last_id is None:
# During initial realm creation, there might be 0 messages in
# the database; in that case, the `aggregate` query returns
# None. Since we want an int for "beginning of time", use -1.
last_id = -1
return last_id
for stream in Stream.objects.all():
with transaction.atomic():
subscribed_user_ids = set(
Subscription.objects.filter(recipient_id=stream.recipient_id).values_list(
"user_profile_id", flat=True
)
)
user_ids_in_audit_log = set(
RealmAuditLog.objects.filter(
realm=stream.realm,
event_type__in=[
301, # RealmAuditLog.SUBSCRIPTION_CREATED
302, # RealmAuditLog.SUBSCRIPTION_ACTIVATED
303, # RealmAuditLog.SUBSCRIPTION_DEACTIVATED
],
modified_stream=stream,
)
.distinct("modified_user_id")
.values_list("modified_user_id", flat=True)
)
user_ids_missing_events = subscribed_user_ids - user_ids_in_audit_log
if not user_ids_missing_events:
continue
last_message_id = get_last_message_id()
now = timezone_now()
backfills = []
for user_id in sorted(user_ids_missing_events):
print(
f"Backfilling subscription event for {user_id} in stream {stream.id} in realm {stream.realm.string_id}"
)
aggregated = UserMessage.objects.filter(
user_profile_id=user_id,
message__recipient=stream.recipient_id,
).aggregate(
earliest_date=Min("message__date_sent"),
earliest_message_id=Min("message_id"),
latest_date=Max("message__date_sent"),
latest_message_id=Max("message_id"),
)
# Assume we subscribed right before the first message we
# saw -- or, if we don't see any, right now. This makes
# this safe for streams which do not have shared history.
if aggregated["earliest_message_id"] is not None:
event_last_message_id = aggregated["earliest_message_id"] - 1
else:
event_last_message_id = last_message_id
if aggregated["earliest_date"] is not None:
event_time = aggregated["earliest_date"]
else:
event_time = now
log_event = RealmAuditLog(
event_time=event_time,
event_last_message_id=event_last_message_id,
backfilled=True,
event_type=301, # RealmAuditLog.SUBSCRIPTION_CREATED
realm_id=stream.realm_id,
modified_user_id=user_id,
modified_stream_id=stream.id,
)
backfills.append(log_event)
# If the subscription is not active, then we also need
# to manufacture a SUBSCRIPTION_DEACTIVATED event,
# which we assume to be whenever the last received
# UserMessage row was.
sub = Subscription.objects.get(
user_profile_id=user_id, recipient_id=stream.recipient_id
)
if sub.active:
continue
if aggregated["latest_message_id"] is not None:
event_last_message_id = aggregated["latest_message_id"]
else:
event_last_message_id = last_message_id
if aggregated["latest_date"] is not None:
event_time = aggregated["latest_date"]
else:
event_time = now
deactivated_log_event = RealmAuditLog(
event_time=event_time,
event_last_message_id=event_last_message_id,
backfilled=True,
event_type=303, # RealmAuditLog.SUBSCRIPTION_DEACTIVATED
realm_id=stream.realm_id,
modified_user_id=user_id,
modified_stream_id=stream.id,
)
backfills.append(deactivated_log_event)
RealmAuditLog.objects.bulk_create(backfills) |
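A toy walkthrough of the heuristic, with all IDs hypothetical: if the user's earliest UserMessage in the stream has id 500, the backfilled SUBSCRIPTION_CREATED event claims the subscription started just before it; if the Subscription row is inactive and the latest UserMessage has id 750, the SUBSCRIPTION_DEACTIVATED event is pinned to that message:

earliest_message_id, latest_message_id = 500, 750
created_event_last_message_id = earliest_message_id - 1  # subscribed just before the first message seen
deactivated_event_last_message_id = latest_message_id  # unsubscribed right after the last message seen
assert (created_event_last_message_id, deactivated_event_last_message_id) == (499, 750)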
This is a copy of zerver.lib.utils.generate_api_key. Importing code that's prone
to change in a migration is something we generally avoid, to ensure predictable,
consistent behavior of the migration across time. | def generate_api_key() -> str:
"""
This is a copy of zerver.lib.utils.generate_api_key. Importing code that's prone
to change in a migration is something we generally avoid, to ensure predictable,
consistent behavior of the migration across time.
"""
api_key = ""
while len(api_key) < 32:
api_key += secrets.token_urlsafe(3 * 9).replace("_", "").replace("-", "")
return api_key[:32] |
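A quick, hedged sanity check of the key format this produces: 32 characters drawn from the urlsafe base64 alphabet with "-" and "_" stripped out, i.e. [A-Za-z0-9]:

import re

key = generate_api_key()
assert len(key) == 32
assert re.fullmatch(r"[A-Za-z0-9]{32}", key) is not None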
Because the 'topic_wildcard_mentioned' and 'group_mentioned' flags are
reused flag slots (ref: c37871a) in the 'flags' bitfield, we cannot be
confident that they are still zeroed on very old servers; this
migration ensures that they are.
Additionally, we clear the unused 'force_expand' and 'force_collapse'
flags to save future work. | def clear_old_data_for_unused_usermessage_flags(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
"""Because 'topic_wildcard_mentioned' and 'group_mentioned' flags are
reused flag slots (ref: c37871a) in the 'flags' bitfield, we're not
confident that their value is in 0 state on very old servers, and this
migration is to ensure that's the case.
Additionally, we are clearing 'force_expand' and 'force_collapse' unused
flags to save future work.
"""
with connection.cursor() as cursor:
cursor.execute(SQL("SELECT MAX(id) FROM zerver_usermessage WHERE flags & 480 <> 0;"))
(max_id,) = cursor.fetchone()
# nothing to update
if not max_id:
return
BATCH_SIZE = 5000
lower_id_bound = 0
while lower_id_bound < max_id:
upper_id_bound = min(lower_id_bound + BATCH_SIZE, max_id)
with connection.cursor() as cursor:
query = SQL(
"""
UPDATE zerver_usermessage
SET flags = (flags & ~(1 << 5) & ~(1 << 6) & ~(1 << 7) & ~(1 << 8))
WHERE flags & 480 <> 0
AND id > %(lower_id_bound)s AND id <= %(upper_id_bound)s;
"""
)
cursor.execute(
query,
{"lower_id_bound": lower_id_bound, "upper_id_bound": upper_id_bound},
)
print(f"Processed {upper_id_bound} / {max_id}")
lower_id_bound = lower_id_bound + BATCH_SIZE |
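As a hedged arithmetic check of the mask used above: bits 5 through 8 sum to 480, and AND-ing a row's flags with the complement clears exactly those bits while leaving every other bit untouched:

mask = (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8)
assert mask == 480
flags = 0b111100011  # hypothetical row with bits 0, 1 and 5-8 set
assert flags & ~mask == 0b000000011  # bits 5-8 cleared, low bits preserved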
Validate as a URL template | def url_template_validator(value: str) -> None:
"""Validate as a URL template"""
if not uri_template.validate(value):
raise ValidationError(_("Invalid URL template.")) |
If invite_expires_in_minutes is specified, we return only those PreregistrationUser
objects that were created at most that many minutes in the past. | def filter_to_valid_prereg_users(
query: QuerySet[PreregistrationUser],
invite_expires_in_minutes: Union[Optional[int], UnspecifiedValue] = UnspecifiedValue(),
) -> QuerySet[PreregistrationUser]:
"""
    If invite_expires_in_minutes is specified, we return only those PreregistrationUser
    objects that were created at most that many minutes in the past.
"""
used_value = confirmation_settings.STATUS_USED
revoked_value = confirmation_settings.STATUS_REVOKED
query = query.exclude(status__in=[used_value, revoked_value])
if invite_expires_in_minutes is None:
        # Since invite_expires_in_minutes is None, the invitation will never
        # expire; we do not need to check anything else and can simply return
        # after excluding objects with used and revoked status.
return query
assert invite_expires_in_minutes is not None
if not isinstance(invite_expires_in_minutes, UnspecifiedValue):
lowest_datetime = timezone_now() - timedelta(minutes=invite_expires_in_minutes)
return query.filter(invited_at__gte=lowest_datetime)
else:
return query.filter(
Q(confirmation__expiry_date=None) | Q(confirmation__expiry_date__gte=timezone_now())
) |
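A hedged usage sketch; the realm-scoped queryset and the 7-day (10080-minute) window are illustrative, and `realm` is assumed to be in scope:

pending = PreregistrationUser.objects.filter(realm=realm)
valid_invites = filter_to_valid_prereg_users(pending, invite_expires_in_minutes=7 * 24 * 60)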
Takes a list of huddle-type recipient_ids, returns a dict
mapping recipient id to the set of user ids in the huddle.
We rely on our caller to pass us recipient_ids that correspond
to huddles, but technically this function is valid for any type
of subscription. | def bulk_get_huddle_user_ids(recipient_ids: List[int]) -> Dict[int, Set[int]]:
"""
Takes a list of huddle-type recipient_ids, returns a dict
    mapping recipient id to the set of user ids in the huddle.
We rely on our caller to pass us recipient_ids that correspond
to huddles, but technically this function is valid for any type
of subscription.
"""
from zerver.models import Subscription
if not recipient_ids:
return {}
subscriptions = Subscription.objects.filter(
recipient_id__in=recipient_ids,
).only("user_profile_id", "recipient_id")
result_dict: Dict[int, Set[int]] = defaultdict(set)
for subscription in subscriptions:
result_dict[subscription.recipient_id].add(subscription.user_profile_id)
return result_dict |
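A hedged usage sketch with invented recipient IDs:

members_by_recipient = bulk_get_huddle_user_ids([101, 102])
# e.g. {101: {4, 9, 17}, 102: {4, 23}} -- each value is the set of user ids
# subscribed to that huddle-type recipient.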
Takes a list of user IDs and returns the Huddle object for the
group consisting of these users. If the Huddle object does not
yet exist, it will be transparently created. | def get_or_create_huddle(id_list: List[int]) -> Huddle:
"""
Takes a list of user IDs and returns the Huddle object for the
group consisting of these users. If the Huddle object does not
yet exist, it will be transparently created.
"""
from zerver.models import Subscription, UserProfile
huddle_hash = get_huddle_hash(id_list)
with transaction.atomic():
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
recipient = Recipient.objects.create(
type_id=huddle.id, type=Recipient.DIRECT_MESSAGE_GROUP
)
huddle.recipient = recipient
huddle.save(update_fields=["recipient"])
subs_to_create = [
Subscription(
recipient=recipient,
user_profile_id=user_profile_id,
is_user_active=is_active,
)
for user_profile_id, is_active in UserProfile.objects.filter(id__in=id_list)
.distinct("id")
.values_list("id", "is_active")
]
Subscription.objects.bulk_create(subs_to_create)
return huddle |
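A hedged usage sketch of the invariance this provides, with invented user IDs; it assumes get_huddle_hash() canonicalizes the ID list (as its use as a lookup key implies), so the same set of users maps to the same Huddle row regardless of ordering:

huddle_a = get_or_create_huddle([31, 7, 12])
huddle_b = get_or_create_huddle([7, 12, 31])
assert huddle_a.id == huddle_b.id  # same user set -> same Huddle row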
Return all streams (including invite-only streams) that have not been deactivated. | def get_active_streams(realm: Realm) -> QuerySet[Stream]:
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False) |
This returns the streams that we are allowed to linkify using
something like "#frontend" in our markup. For now the business
rule is that you can link any stream in the realm that hasn't
been deactivated (similar to how get_active_streams works). | def get_linkable_streams(realm_id: int) -> QuerySet[Stream]:
"""
This returns the streams that we are allowed to linkify using
something like "#frontend" in our markup. For now the business
rule is that you can link any stream in the realm that hasn't
been deactivated (similar to how get_active_streams works).
"""
return Stream.objects.filter(realm_id=realm_id, deactivated=False) |
Callers that don't have a Realm object already available should use
get_realm_stream directly, to avoid unnecessarily fetching the
Realm object. | def get_stream(stream_name: str, realm: Realm) -> Stream:
"""
Callers that don't have a Realm object already available should use
get_realm_stream directly, to avoid unnecessarily fetching the
Realm object.
"""
return get_realm_stream(stream_name, realm.id) |
This function is intended to be used for
manual manage.py shell work; robust code must use get_user or
get_user_by_delivery_email instead, because Zulip supports
multiple users with a given (delivery) email address existing on a
single server (in different realms). | def get_user_profile_by_email(email: str) -> UserProfile:
"""This function is intended to be used for
manual manage.py shell work; robust code must use get_user or
get_user_by_delivery_email instead, because Zulip supports
multiple users with a given (delivery) email address existing on a
single server (in different realms).
"""
return UserProfile.objects.select_related("realm").get(delivery_email__iexact=email.strip()) |
Fetches a user given their delivery email. For use in
authentication/registration contexts. Do not use for user-facing
views (e.g. Zulip API endpoints) as doing so would violate the
EMAIL_ADDRESS_VISIBILITY_ADMINS security model. Use get_user in
those code paths. | def get_user_by_delivery_email(email: str, realm: "Realm") -> UserProfile:
"""Fetches a user given their delivery email. For use in
authentication/registration contexts. Do not use for user-facing
views (e.g. Zulip API endpoints) as doing so would violate the
EMAIL_ADDRESS_VISIBILITY_ADMINS security model. Use get_user in
those code paths.
"""
return UserProfile.objects.select_related(
"realm",
"realm__can_access_all_users_group",
"realm__can_access_all_users_group__named_user_group",
"bot_owner",
).get(delivery_email__iexact=email.strip(), realm=realm) |
This is similar to get_user_by_delivery_email, and
it has the same security caveats. It gets multiple
users and returns a QuerySet, since most callers
will only need two or three fields.
If you are using this to get large UserProfile objects, you are
probably making a mistake, but if you must,
then use `select_related`. | def get_users_by_delivery_email(emails: Set[str], realm: "Realm") -> QuerySet[UserProfile]:
"""This is similar to get_user_by_delivery_email, and
it has the same security caveats. It gets multiple
users and returns a QuerySet, since most callers
will only need two or three fields.
If you are using this to get large UserProfile objects, you are
probably making a mistake, but if you must,
then use `select_related`.
"""
"""
Django doesn't support delivery_email__iexact__in, so
we simply OR all the filters that we'd do for the
one-email case.
"""
email_filter = Q()
for email in emails:
email_filter |= Q(delivery_email__iexact=email.strip())
return UserProfile.objects.filter(realm=realm).filter(email_filter) |
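A hedged usage sketch, following the docstring's advice to fetch only the handful of fields the caller needs; the emails are invented and `realm` is assumed to be in scope:

rows = get_users_by_delivery_email({"Alice@example.com", "bob@example.com"}, realm).values_list(
    "id", "delivery_email"
)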
Fetches the user by its visible-to-other users username (in the
`email` field). For use in API contexts; do not use in
authentication/registration contexts as doing so will break
authentication in organizations using
EMAIL_ADDRESS_VISIBILITY_ADMINS. In those code paths, use
get_user_by_delivery_email. | def get_user(email: str, realm: "Realm") -> UserProfile:
"""Fetches the user by its visible-to-other users username (in the
`email` field). For use in API contexts; do not use in
authentication/registration contexts as doing so will break
authentication in organizations using
EMAIL_ADDRESS_VISIBILITY_ADMINS. In those code paths, use
get_user_by_delivery_email.
"""
return UserProfile.objects.select_related(
"realm",
"realm__can_access_all_users_group",
"realm__can_access_all_users_group__named_user_group",
"bot_owner",
).get(email__iexact=email.strip(), realm=realm) |
Variant of get_user that excludes deactivated users.
See get_user docstring for important usage notes. | def get_active_user(email: str, realm: "Realm") -> UserProfile:
"""Variant of get_user_by_email that excludes deactivated users.
See get_user docstring for important usage notes."""
user_profile = get_user(email, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist
return user_profile |
This function doesn't use the realm_id argument yet, but requires
passing it as preparation for adding system bots to each realm instead
of having them all in a separate system bot realm.
If you're calling this function, use the id of the realm in which the system
bot will be after that migration. If the bot is supposed to send a message,
the same realm as the one *to* which the message will be sent should be used - because
cross-realm messages will be eliminated as part of the migration. | def get_system_bot(email: str, realm_id: int) -> UserProfile:
"""
This function doesn't use the realm_id argument yet, but requires
passing it as preparation for adding system bots to each realm instead
of having them all in a separate system bot realm.
If you're calling this function, use the id of the realm in which the system
bot will be after that migration. If the bot is supposed to send a message,
the same realm as the one *to* which the message will be sent should be used - because
cross-realm messages will be eliminated as part of the migration.
"""
return UserProfile.objects.select_related("realm").get(email__iexact=email.strip()) |
This decorator is used to register OpenAPI param value generator functions
with endpoints. Example usage:
@openapi_param_value_generator(["/messages/render:post"])
def ... | def openapi_param_value_generator(
endpoints: List[str],
) -> Callable[[Callable[[], Dict[str, object]]], Callable[[], Dict[str, object]]]:
"""This decorator is used to register OpenAPI param value generator functions
with endpoints. Example usage:
@openapi_param_value_generator(["/messages/render:post"])
def ...
"""
def wrapper(generator_func: Callable[[], Dict[str, object]]) -> Callable[[], Dict[str, object]]:
@wraps(generator_func)
def _record_calls_wrapper() -> Dict[str, object]:
CALLED_GENERATOR_FUNCTIONS.add(generator_func.__name__)
return generator_func()
REGISTERED_GENERATOR_FUNCTIONS.add(generator_func.__name__)
for endpoint in endpoints:
GENERATOR_FUNCTIONS[endpoint] = _record_calls_wrapper
return _record_calls_wrapper
return wrapper |
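Expanding the truncated example in the docstring, a hypothetical registration could look like this; the function name and the returned parameter values are illustrative only:

@openapi_param_value_generator(["/messages/render:post"])
def render_message_example() -> Dict[str, object]:
    # Parameter values substituted when the documented example for this
    # endpoint is exercised.
    return {"content": "**Hello world**"}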
Throws an exception if any registered helpers were not called by tests | def assert_all_helper_functions_called() -> None:
"""Throws an exception if any registered helpers were not called by tests"""
if REGISTERED_GENERATOR_FUNCTIONS == CALLED_GENERATOR_FUNCTIONS:
return
uncalled_functions = str(REGISTERED_GENERATOR_FUNCTIONS - CALLED_GENERATOR_FUNCTIONS)
raise Exception(f"Registered curl API generators were not called: {uncalled_functions}") |
A simple wrapper around generate_curl_example. | def render_curl_example(
function: str,
api_url: str,
admin_config: bool = False,
) -> List[str]:
"""A simple wrapper around generate_curl_example."""
parts = function.split(":")
endpoint = parts[0]
method = parts[1]
kwargs: Dict[str, Any] = {}
if len(parts) > 2:
kwargs["auth_email"] = parts[2]
if len(parts) > 3:
kwargs["auth_api_key"] = parts[3]
kwargs["api_url"] = api_url
rendered_example = []
for element in get_curl_include_exclude(endpoint, method):
kwargs["include"] = None
kwargs["exclude"] = None
if element["type"] == "include":
kwargs["include"] = element["parameters"]["enum"]
if element["type"] == "exclude":
kwargs["exclude"] = element["parameters"]["enum"]
if "description" in element:
rendered_example.extend(element["description"].splitlines())
rendered_example = rendered_example + generate_curl_example(endpoint, method, **kwargs)
return rendered_example |
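A hedged usage sketch; the endpoint and api_url are illustrative:

curl_lines = render_curl_example("/users:post", api_url="https://chat.example.com/api")
print("\n".join(curl_lines))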
Fetch a fixture from the full spec object. | def get_openapi_fixture(endpoint: str, method: str, status_code: str = "200") -> Dict[str, Any]:
"""Fetch a fixture from the full spec object."""
return get_schema(endpoint, method, status_code)["example"] |
Fetch a fixture's description from the full spec object. | def get_openapi_fixture_description(endpoint: str, method: str, status_code: str = "200") -> str:
"""Fetch a fixture from the full spec object."""
return get_schema(endpoint, method, status_code)["description"] |
Fetch all the kinds of parameters required for curl examples. | def get_curl_include_exclude(endpoint: str, method: str) -> List[Dict[str, Any]]:
"""Fetch all the kinds of parameters required for curl examples."""
if (
"x-curl-examples-parameters"
not in openapi_spec.openapi()["paths"][endpoint][method.lower()]
):
return [{"type": "exclude", "parameters": {"enum": [""]}}]
return openapi_spec.openapi()["paths"][endpoint][method.lower()]["x-curl-examples-parameters"][
"oneOf"
] |
Fetch if the endpoint requires admin config. | def check_requires_administrator(endpoint: str, method: str) -> bool:
"""Fetch if the endpoint requires admin config."""
return openapi_spec.openapi()["paths"][endpoint][method.lower()].get(
"x-requires-administrator", False
) |
Fetch the additional imports required for an endpoint. | def check_additional_imports(endpoint: str, method: str) -> Optional[List[str]]:
"""Fetch the additional imports required for an endpoint."""
return openapi_spec.openapi()["paths"][endpoint][method.lower()].get(
"x-python-examples-extra-imports", None
) |
Fetch responses description of an endpoint. | def get_responses_description(endpoint: str, method: str) -> str:
"""Fetch responses description of an endpoint."""
return openapi_spec.openapi()["paths"][endpoint][method.lower()].get(
"x-response-description", ""
) |
Fetch parameters description of an endpoint. | def get_parameters_description(endpoint: str, method: str) -> str:
"""Fetch parameters description of an endpoint."""
return openapi_spec.openapi()["paths"][endpoint][method.lower()].get(
"x-parameter-description", ""
) |
Generate fixture to be rendered | def generate_openapi_fixture(endpoint: str, method: str) -> List[str]:
"""Generate fixture to be rendered"""
fixture = []
for status_code in sorted(
openapi_spec.openapi()["paths"][endpoint][method.lower()]["responses"]
):
if (
"oneOf"
in openapi_spec.openapi()["paths"][endpoint][method.lower()]["responses"][status_code][
"content"
]["application/json"]["schema"]
):
subschema_count = len(
openapi_spec.openapi()["paths"][endpoint][method.lower()]["responses"][status_code][
"content"
]["application/json"]["schema"]["oneOf"]
)
else:
subschema_count = 1
for subschema_index in range(subschema_count):
if subschema_count != 1:
subschema_status_code = status_code + "_" + str(subschema_index)
else:
subschema_status_code = status_code
fixture_dict = get_openapi_fixture(endpoint, method, subschema_status_code)
fixture_description = get_openapi_fixture_description(
endpoint, method, subschema_status_code
).strip()
fixture_json = json.dumps(
fixture_dict, indent=4, sort_keys=True, separators=(",", ": ")
)
fixture.extend(fixture_description.splitlines())
fixture.append("``` json")
fixture.extend(fixture_json.splitlines())
fixture.append("```")
return fixture |
Fetch a description from the full spec object. | def get_openapi_description(endpoint: str, method: str) -> str:
"""Fetch a description from the full spec object."""
endpoint_documentation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
endpoint_description = endpoint_documentation["description"]
check_deprecated_consistency(
endpoint_documentation.get("deprecated", False), endpoint_description
)
return endpoint_description |