Skip to content

Commit

Permalink
Fix typos (#44)
Browse files Browse the repository at this point in the history
Found via `codespell -H` and `typos --hidden --format brief`

Co-authored-by: Jakub Pisarek <[email protected]>
  • Loading branch information
kianmeng and sgfn authored Aug 9, 2024
1 parent c7d3e9b commit 04e3de5
Show file tree
Hide file tree
Showing 9 changed files with 16 additions and 16 deletions.
2 changes: 1 addition & 1 deletion lib/ex_ice/ice_agent.ex
Original file line number Diff line number Diff line change
Expand Up @@ -321,7 +321,7 @@ defmodule ExICE.ICEAgent do
def handle_cast({:add_remote_candidate, remote_cand}, state) do
task =
Task.async(fn ->
Logger.debug("Unmarshaling remote candidate: #{remote_cand}")
Logger.debug("Unmarshalling remote candidate: #{remote_cand}")

case ExICE.Priv.ICEAgent.unmarshal_remote_candidate(remote_cand) do
{:ok, cand} -> {:unmarshal_task, {:ok, cand, remote_cand}}
Expand Down
2 changes: 1 addition & 1 deletion lib/ex_ice/priv/candidate/relay.ex
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ defmodule ExICE.Priv.Candidate.Relay do
{:ok, cand} ->
do_send_buffered_packets(cand, packets)

{:error, _reaons, _cand} = error ->
{:error, _reasons, _cand} = error ->
error
end
end
Expand Down
2 changes: 1 addition & 1 deletion lib/ex_ice/priv/checklist.ex
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ defmodule ExICE.Priv.Checklist do
# are redundant if their local candidates have the same base
# and their remote candidates are identical.
# But, because we replace reflexive candidates with their bases,
# checking againts local_cand_id should work fine.
# checking against local_cand_id should work fine.
|> Enum.uniq_by(fn {_id, p} -> {p.local_cand_id, p.remote_cand_id} end)

Map.new(waiting ++ in_flight_or_done)
Expand Down
2 changes: 1 addition & 1 deletion lib/ex_ice/priv/conn_check_handler/controlled.ex
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ defmodule ExICE.Priv.ConnCheckHandler.Controlled do
nil ->
Logger.debug("""
Adding new candidate pair that will be nominated after \
successfull conn check: #{inspect(pair.id)}\
successful conn check: #{inspect(pair.id)}\
""")

pair = %CandidatePair{pair | nominate?: true}
Expand Down
2 changes: 1 addition & 1 deletion lib/ex_ice/priv/gatherer.ex
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ defmodule ExICE.Priv.Gatherer do
gatherer.transport_module.send(socket, {ip, port}, binding_request)
else
Logger.debug("""
Not gathering srflx candidate becasue of incompatible ip address families.
Not gathering srflx candidate because of incompatible ip address families.
Socket family: #{inspect(cand_family)}
STUN server family: #{inspect(stun_family)}
Socket: #{inspect(sock_ip)}
Expand Down
14 changes: 7 additions & 7 deletions lib/ex_ice/priv/ice_agent.ex
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ defmodule ExICE.Priv.ICEAgent do

# Pair timeout in ms.
# If we don't receive any data in this time,
# a pair is marked as faield.
# a pair is marked as failed.
@pair_timeout 8_000

# End-of-candidates timeout in ms.
Expand Down Expand Up @@ -396,7 +396,7 @@ defmodule ExICE.Priv.ICEAgent do
def end_of_candidates(%__MODULE__{role: :controlling} = ice_agent) do
Logger.debug("Setting end-of-candidates flag.")
ice_agent = %{ice_agent | eoc: true}
# check wheter it's time to nominate and if yes, try noimnate
# check whether it's time to nominate and if yes, try to nominate
maybe_nominate(ice_agent)
end

Expand Down Expand Up @@ -712,7 +712,7 @@ defmodule ExICE.Priv.ICEAgent do
ice_agent

:error ->
Logger.warning("Received keepalive request for non-existant candidate pair. Ignoring.")
Logger.warning("Received keepalive request for non-existent candidate pair. Ignoring.")
ice_agent
end
end
Expand Down Expand Up @@ -1537,11 +1537,11 @@ defmodule ExICE.Priv.ICEAgent do
# In the worst case scenario, we won't allow for the connection.
case Message.get_attribute(msg, ErrorCode) do
{:ok, %ErrorCode{code: 487}} ->
handle_role_confilct_error_response(ice_agent, conn_check_pair, msg)
handle_role_conflict_error_response(ice_agent, conn_check_pair, msg)

other ->
Logger.debug(
"Conn check failed due to error resposne from the peer, error: #{inspect(other)}"
"Conn check failed due to error response from the peer, error: #{inspect(other)}"
)

conn_check_pair = %CandidatePair{conn_check_pair | state: :failed}
Expand All @@ -1550,7 +1550,7 @@ defmodule ExICE.Priv.ICEAgent do
end
end

defp handle_role_confilct_error_response(ice_agent, conn_check_pair, msg) do
defp handle_role_conflict_error_response(ice_agent, conn_check_pair, msg) do
case authenticate_msg(msg, ice_agent.remote_pwd) do
:ok ->
new_role = if ice_agent.role == :controlling, do: :controlled, else: :controlling
Expand Down Expand Up @@ -2245,7 +2245,7 @@ defmodule ExICE.Priv.ICEAgent do
# the controlled side could move to the completed
# state as soon as it receives nomination request (or after
# successful triggered check caused by nomination request).
# However, to be compatible with the older RFC's aggresive
# However, to be compatible with the older RFC's aggressive
# nomination, we wait for the end-of-candidates indication
# and checklist to be finished.
# This also means, that if the other side never sets eoc,
Expand Down
4 changes: 2 additions & 2 deletions lib/ex_ice/priv/mdns/resolver.ex
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ defmodule ExICE.Priv.MDNS.Resolver do
# If there are multiple sockets, bound to the same port,
# and subscribed to the same group (in fact, if one socket
# subscribes to some group, all other sockets bound to
# the same port also join this gorup), all those sockets
# the same port also join this group), all those sockets
# will receive every message. In other words, `reuseport` for
# multicast works differently than for casual sockets.
reuseport: true,
Expand Down Expand Up @@ -129,7 +129,7 @@ defmodule ExICE.Priv.MDNS.Resolver do

case {uuid4, query_info} do
# Name is in the form of uuid4 and we didn't ask for it.
# This should be an annoucement - save it in the cache.
# This should be an announcement - save it in the cache.
# See: https://issues.chromium.org/issues/339829283
{true, nil} ->
state = put_in(state, [:cache, rr.name], addr)
Expand Down
2 changes: 1 addition & 1 deletion questions.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
and pruned if redundant - see section 6.1.2.4
5. Is it possible to connect ice outside docker with ice inside docker?
6. Is it possible for initial pair state to be different than :waiting when we have only one checklist?
Yes, consider scenario where remote peer sends us two the same srflx candidates that differ in ports only. Remote can obtain two srflx candidates when it has multiple local interfaces or has docker bridges on its system. RFC doesnt seem to say we should filter out "redundant" candidates.
Yes, consider scenario where remote peer sends us two the same srflx candidates that differ in ports only. Remote can obtain two srflx candidates when it has multiple local interfaces or has docker bridges on its system. RFC doesn't seem to say we should filter out "redundant" candidates.
7. Is it possible not to prune some srflx candidate - sec 6.1.2.4?
8. Is it possible to receive binding response to binding request with USE-CANDIDATE that will result in creating
a new valid pair? sec 7.2.5.3.4
Expand Down
2 changes: 1 addition & 1 deletion test/priv/ice_agent_test.exs
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ defmodule ExICE.Priv.ICEAgentTest do
ice_agent = ICEAgent.add_remote_candidate(ice_agent, remote_cand)

assert [%ExICE.Candidate{} = r_cand] = Map.values(ice_agent.remote_cands)
# override id for the purpose of comparision
# override id for the purpose of comparison
r_cand = %ExICE.Candidate{r_cand | id: remote_cand.id}
assert r_cand == remote_cand
end
Expand Down

0 comments on commit 04e3de5

Please sign in to comment.