# Source metadata (from original extraction): 1,571 bytes, commit 49b13c6.
import asyncio
import os
from vsp.app.scrapers.linkedin_downloader import LinkedinDownloader
async def main() -> None:
    """
    Fetch Linkedin data for multiple profiles concurrently and save each
    profile as a JSON file in the ``sample_profiles`` directory next to
    this script.
    """
    profiles = [
        "https://www.linkedin.com/in/nicholas-penske-846419120/",
        "https://www.linkedin.com/in/eric-armagost-a144904a/",
        "https://www.linkedin.com/in/peter-tagliaferri-ba3057113/",
        "https://www.linkedin.com/in/zachary-mohring/",
        "https://www.linkedin.com/in/jeffreybai/",
        "https://www.linkedin.com/in/chansonzhao/",
        "https://www.linkedin.com/in/jake-kugler-0371a958/",
        "https://www.linkedin.com/in/siddharth-saxena-08671857/",
        "https://www.linkedin.com/in/lauren-hipple-84277373/",
        "https://www.linkedin.com/in/hansae-catlett-436a9b21",
    ]
    linkedin_downloader = LinkedinDownloader()
    # Resolve the output directory relative to this script, not the CWD,
    # and build the path portably instead of concatenating with "/".
    directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sample_profiles")
    # Create the directory up front so the per-profile writes cannot fail
    # with FileNotFoundError on a fresh checkout.
    os.makedirs(directory, exist_ok=True)
    # Fan out all downloads concurrently; gather awaits them all.
    tasks = [fetch_profile(linkedin_downloader, directory, profile_url) for profile_url in profiles]
    await asyncio.gather(*tasks)
async def fetch_profile(linkedin_downloader: "LinkedinDownloader", directory: str, profile_url: str) -> None:
    """
    Fetch a single Linkedin profile and write it to ``directory`` as JSON.

    The file is named ``<first_name>_<last_name>.json`` (lowercased), so an
    existing file for the same person is overwritten.

    Args:
        linkedin_downloader: Downloader whose ``fetch_linkedin_data``
            coroutine returns a profile model.
        directory: Existing directory the JSON file is written into.
        profile_url: Linkedin profile URL to fetch.
    """
    profile = await linkedin_downloader.fetch_linkedin_data(profile_url)
    # Named ``payload`` rather than ``json`` to avoid shadowing the stdlib
    # ``json`` module name.
    payload = profile.model_dump_json(indent=2)
    file_name = f"{profile.first_name}_{profile.last_name}.json".lower()
    file_path = os.path.join(directory, file_name)
    # Explicit encoding so output does not depend on the platform default.
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(payload)
# Script entry point: run the async main() on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
|